Commit 465e776c authored by Weerasinghe D.N.H

BACKEND : backend is replaced

parent 7d930a87
# Default ignored files
/shelf/
/workspace.xml
# Editor-based HTTP Client requests
/httpRequests/
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$" />
    <orderEntry type="inheritedJdk" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
  <component name="TemplatesService">
    <option name="TEMPLATE_CONFIGURATION" value="Jinja2" />
  </component>
</module>
\ No newline at end of file
<component name="InspectionProjectProfileManager">
<profile version="1.0">
<option name="myName" value="Project Default" />
<inspection_tool class="DuplicatedCode" enabled="true" level="WEAK WARNING" enabled_by_default="true">
<Languages>
<language minSize="149" name="Python" />
</Languages>
</inspection_tool>
<inspection_tool class="PyPep8Inspection" enabled="true" level="WEAK WARNING" enabled_by_default="true">
<option name="ignoredErrors">
<list>
<option value="E128" />
<option value="E501" />
</list>
</option>
</inspection_tool>
</profile>
</component>
\ No newline at end of file
<component name="InspectionProjectProfileManager">
<settings>
<option name="USE_PROJECT_PROFILE" value="false" />
<version value="1.0" />
</settings>
</component>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectModuleManager">
    <modules>
      <module fileurl="file://$PROJECT_DIR$/.idea/backend.iml" filepath="$PROJECT_DIR$/.idea/backend.iml" />
    </modules>
  </component>
</project>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="VcsDirectoryMappings">
    <mapping directory="$PROJECT_DIR$/.." vcs="Git" />
  </component>
</project>
\ No newline at end of file
@@ -83,7 +83,7 @@ def class_details_detection(image_nparray, boxes, index, class_comp_id, class_ty
     methods_attributes = []
     _image, cl_ymin, cl_xmin, cl_ymax, cl_xmax = crop_image_(image_nparray, boxes, index)
-    cv2.imwrite('image_1.jpg', _image)
+    # cv2.imwrite('image_1.jpg', _image)
     mdl2_path = app.CLASS_COMP_SAVED_MODEL_PATH
     lbl2_path = app.CLASS_COMP_SAVED_LABEL_PATH
@@ -102,7 +102,7 @@ def class_details_detection(image_nparray, boxes, index, class_comp_id, class_ty
     # print(category, 'line 96 - inside attributes')
     class_attributes, y_min, x_min, y_max, x_max = crop_image_(_image, boxes_class, j)
     class_attributes = cv2.resize(class_attributes, None, fx=2, fy=2)
-    cv2.imwrite('image.jpg', class_attributes)
+    # cv2.imwrite('image.jpg', class_attributes)
     text = text_extraction(class_attributes)
     attr = save_attributes_methods(text, 'attribute')
     methods_attributes.append(attr)
@@ -270,5 +270,5 @@ def save_class_interface(class_type, comp_name, cl_ymin, cl_xmin, cl_ymax, cl_xm
     y_max=cl_ymax)
     db.session.add(comp)
     db.session.commit()
-    print(comp, 'line 261 comp')
+    # print(comp, 'line 261 comp')
     return comp
class Admin:
    def login_to_the_system(self):
        pass

    def view_and_manage_cart_items(self):
        pass

    def check_the_payments_and_sold_items(self):
        pass

    def reply_to_Customer_feedbacks(self):
        pass
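The classes_jkmhqhttfr digraph below looks like pyreverse output for a module containing the Admin class above. A minimal regeneration sketch, assuming pylint (which bundles pyreverse) is installed and that the source file is really named jkmhqhttfr.py:

import subprocess

# Hypothetical step, not part of the commit: writes classes_jkmhqhttfr.dot
# for the module above so it can be rendered with Graphviz.
subprocess.run(['pyreverse', '-o', 'dot', '-p', 'jkmhqhttfr', 'jkmhqhttfr.py'], check=True)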
digraph "classes_jkmhqhttfr" {
rankdir=BT
charset="utf-8"
"jkmhqhttfr.Admin" [color="black", fontcolor="black", label="{Admin|\l|check_the_payments_and_sold_items()\llogin_to_the_system()\lreply_to_Customer_feedbacks()\lview_and_manage_cart_items()\l}", shape="record", style="solid"];
}
digraph G {
rankdir=LR;
labelloc="b";
peripheries=0;
node [shape=plaintext]
subgraph Admin {label="Admin"; admin};
admin [image="D:\Nanduni.Bsc\research_code\2022-158\backend/stick.png";peripheries=0;];
node [shape=ellipse, style=solid];
login_to_the_system [label="Login To The System"];
check_the_home_page [label="Check The Home Page"];
play_the_site_demo_video [label="Play The Site Demo Video"];
add_items_to_the_cart [label="Add Items To The Cart"];
pay_the_items [label="Pay The Items"];
give_a_feedback_for_the_system [label="Give A Feedback For The System"];
view_the_seller_details [label="View The Seller Details"];
contact_the_seller_to_request_more_items [label="Contact The Seller To Request More Items"];
login_to_the_system [label="Login To The System"];
view_and_manage_cart_items [label="View And Manage Cart Items"];
check_the_payments_and_sold_items [label="Check The Payments And Sold Items"];
reply_to_customer_feedbacks [label="Reply To Customer Feedbacks"];
edge [arrowhead="none"];
customer->login_to_the_system;
customer->check_the_home_page;
customer->play_the_site_demo_video;
customer->add_items_to_the_cart;
customer->pay_the_items;
customer->give_a_feedback_for_the_system;
customer->view_the_seller_details;
customer->contact_the_seller_to_request_more_items;
admin->login_to_the_system;
admin->view_and_manage_cart_items;
admin->check_the_payments_and_sold_items;
admin->reply_to_customer_feedbacks;
edge [arrowtail="vee", label="<<extend>>", style=dashed];
give_a_feedback_for_the_system->rate_the_service;
edge [arrowtail="vee", label="<<include>>", style=dashed];
pay_the_items->recieve_the_confirmation_email;
}
\ No newline at end of file
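The use-case digraph above can be rendered to an image for a quick visual check. A minimal sketch, assuming the graphviz Python package and the Graphviz binaries are installed, the file is saved under the hypothetical name use_case.gv, and the stick.png figure referenced in the admin node exists at that path:

from graphviz import Source

# Hypothetical rendering step, not part of the commit: writes use_case.gv.png
# next to the .gv source file.
Source.from_file('use_case.gv').render(format='png')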
import operator
import os
import re

import cv2
import numpy as np
import pytesseract as ts
import spacy
import tensorflow as tf
from PIL import Image
from object_detection.utils import label_map_util

import app
from config.database import db
from models.attribute_model import Attribute
from models.class_component_model import Component
from models.method_model import Method

ts.pytesseract.tesseract_cmd = r'C:\Users\DELL\AppData\Local\Programs\Tesseract-OCR\tesseract.exe'

# Accumulates every Attribute/Method persisted by save_attributes_methods();
# it has to exist at module level because that function declares it global.
saved_data = []

def component_separation(filename, class_comp_id):
    mdl1_path = app.CLASS_SAVED_MODEL_PATH
    lbl1_path = app.CLASS_SAVED_LABEL_PATH
    img1_path = app.SUBMISSION_PATH_CLASS + '/' + filename
    image_nparray = np.array(Image.open(img1_path))
    # print(img1_path)
    boxes, num_entities, accurate_indexes, num_entities, category_index, class_id = class_object_detection(mdl1_path,
                                                                                                            lbl1_path,
                                                                                                            image_nparray)
    # Convert each class id into its name and process every detected UML element
    if num_entities > 0:
        for index in range(0, len(accurate_indexes)):
            if category_index[class_id[index]]['name'] == 'class':
                print(filename)
                _image = crop_image_(image_nparray, boxes, index)
                _image = cv2.resize(_image, None, fx=2, fy=2)
                class_details_detection(_image, class_comp_id)
            elif category_index[class_id[index]]['name'] == 'interface':
                _image = crop_image_(image_nparray, boxes, index)
                _image = cv2.resize(_image, None, fx=2, fy=2)
                # (the interface crop is currently not processed further)

def class_object_detection(model_path, label_path, image_nparray):
    detect_fn = tf.saved_model.load(model_path)
    category_index = label_map_util.create_category_index_from_labelmap(label_path, use_display_name=True)
    image_np = image_nparray
    input_tensor = tf.convert_to_tensor(image_np)
    input_tensor = input_tensor[tf.newaxis, ...]
    detections = detect_fn(input_tensor)
    num_detections = int(detections.pop('num_detections'))
    detections = {key: value[0, :num_detections].numpy()
                  for key, value in detections.items()}
    detections['num_detections'] = num_detections
    detections['detection_classes'] = detections['detection_classes'].astype(np.int64)
    # Keep only the detections whose confidence score is above 0.7
    accurate_indexes = [k for k, v in enumerate(detections['detection_scores']) if (v > 0.7)]
    num_entities = len(accurate_indexes)
    class_id = operator.itemgetter(*accurate_indexes)(detections['detection_classes'])
    boxes = detections['detection_boxes']
    return boxes, num_entities, accurate_indexes, num_entities, category_index, class_id

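# Added note, not in the original commit: the TensorFlow object-detection API returns
# detection_boxes as normalised [ymin, xmin, ymax, xmax] values in the 0-1 range,
# which is why crop_image_() further down multiplies them by the image height and
# width before slicing, and why the 0.7 score threshold above decides how many
# boxes (accurate_indexes) the callers iterate over.
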
def class_details_detection(_image, class_comp_id):
    attributes_methods = []
    mdl2_path = app.CLASS_COMP_SAVED_MODEL_PATH
    lbl2_path = app.CLASS_COMP_SAVED_LABEL_PATH
    boxes_class, num_entities, accurate_indexes, num_entities, category_index, class_id = class_object_detection(
        mdl2_path, lbl2_path, _image)
    # Save the class/interface itself first so its attributes and methods can reference it
    comp = class_name_detection(class_comp_id, _image, boxes_class, accurate_indexes)
    if num_entities > 1:
        for j in range(0, len(accurate_indexes)):
            if category_index[class_id[j]]['name'] == 'class_attributes':
                class_attributes = crop_image_(_image, boxes_class, j)
                text = text_extraction(class_attributes)
                attributes = save_attributes_methods(text, 'attribute')
                alter_attributes_methods(attributes, comp.id)
            elif category_index[class_id[j]]['name'] == 'class_methods':
                class_methods = crop_image_(_image, boxes_class, j)
                text = text_extraction(class_methods)
                print(text)
                methods = save_attributes_methods(text, 'method')
                alter_attributes_methods(methods, comp.id)
                print(text)

def crop_image_(image, boxes, index):
    # image = cv2.imread(path)
    height, width, c = image.shape
    # detection box format: ymin, xmin, ymax, xmax, normalised to [0, 1]
    ymin = boxes[index][0] * height
    xmin = boxes[index][1] * width
    ymax = boxes[index][2] * height
    xmax = boxes[index][3] * width
    cropped_image = image[int(ymin):int(ymax), int(xmin):int(xmax)]
    # image = cv2.cvtColor(cropped_image, cv2.COLOR_BGR2GRAY)
    # image = cv2.resize(image, (800, 500))
    return cropped_image

def text_extraction(image):
    # Tesseract: English, LSTM engine only (--oem 1), assume a single column of text (--psm 4)
    config = '-l eng --oem 1 --psm 4'
    text = ts.image_to_string(image, config=config)
    text = text.splitlines()
    text = [x.strip(' ') for x in text]
    text = list(filter(None, text))
    return text

def save_attributes_methods(text, typ):
    global saved_data
    nlp = spacy.load('en_core_web_sm')
    for element in text:
        print(element)
        # removable = str.maketrans('', '', '()')
        nlp_ner = spacy.load('ner_models/model-best')
        nlp_output = nlp_ner(element)
        attr = Attribute()
        method = Method()
        # Map the custom NER labels onto the Attribute/Method model fields
        for token in nlp_output.ents:
            if typ == 'attribute':
                if token.label_ == 'ATTRIBUTE_NAME':
                    attr.name = token.text
                elif token.label_ == 'ACCESS_SP':
                    attr.access_spec = covert_to_access_specifier(token.text)
                elif token.label_ == 'DATA_TYPE':
                    attr.data_type = token.text
            elif typ == 'method':
                if token.label_ == 'METHOD_NAME':
                    method.name = token.text
                elif token.label_ == 'ACCESS_SP':
                    method.access_spec = covert_to_access_specifier(token.text)
                elif token.label_ == 'DATA_TYPE':
                    method.return_type = token.text
        if typ == 'attribute':
            print(attr)
            db.session.add(attr)
            db.session.commit()
            saved_data.append(attr)
        else:
            print(method)
            db.session.add(method)
            db.session.commit()
            saved_data.append(method)
    return saved_data

def alter_attributes_methods(element_list, class_id):
    # Attach each saved attribute/method to its parent class component
    for element in element_list:
        print(class_id)
        print(element_list)
        element.class_id = class_id
        db.session.commit()

def covert_to_access_specifier(access):
    # Translate UML visibility symbols into access-specifier names
    if access == "-":
        return "Private"
    elif access == "#":
        return "Protected"
    elif access == "+":
        return "Public"
    elif access == "~":
        return "Package"
    else:
        return ''

def crop_and_hide(image, boxes, index):
    # White out the first len(index) detected boxes; the caller then OCRs whatever
    # text is left, which is intended to be the class name
    height, width, c = image.shape
    for i in range(0, len(index)):
        ymin = boxes[i][0] * height
        xmin = boxes[i][1] * width
        ymax = boxes[i][2] * height
        xmax = boxes[i][3] * width
        cv2.rectangle(image, (int(xmin), int(ymin)), (int(xmax), int(ymax)), (255, 255, 255), -1)
    return image

def class_name_detection(class_comp_id, image, boxes, index):
    image = crop_and_hide(image, boxes, index)
    class_name = text_extraction(image)
    if ''.join(class_name) != '':
        if "interface" in ''.join(class_name):
            name = ''.join(class_name).replace("<<interface>>", "")
            comp = Component(class_answer=class_comp_id, name=name, type="interface")
        else:
            name = ''.join(class_name)
            comp = Component(class_answer=class_comp_id, name=name, type="class")
        db.session.add(comp)
        db.session.commit()
        return comp
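
For context, a minimal driver sketch, not part of the commit: it assumes app.py exposes a Flask instance named app together with the SUBMISSION_PATH_CLASS, CLASS_SAVED_MODEL_PATH and CLASS_SAVED_LABEL_PATH constants imported above, that the database tables already exist, and that the module and image names used here are only illustrative:

import app as backend_app
# Hypothetical module name for the file defined above.
from class_component_separation import component_separation

if __name__ == '__main__':
    # The SQLAlchemy session used when saving components, attributes and methods
    # needs a Flask application context.
    with backend_app.app.app_context():
        component_separation('class_diagram_1.png', class_comp_id=1)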