Commit e9c5b55c authored by Weerasinghe D.N.H's avatar Weerasinghe D.N.H

BACKEND :modified class_object_detection(method),added...

BACKEND :modified class_object_detection(method),added component_separation(method) & crop_and_image_resolution(method) in class_model_detection_service.py
parent a73728cc
......@@ -21,10 +21,14 @@ OUTPUTS_GENERATED_CLASS_FILES_PATH = os.path.join('outputs', 'generated_class_fi
OUTPUTS_FOLDER = os.path.join(APP_ROOT, 'outputs')
UML_GENERATOR_UPLOAD_FOLDER = os.path.join(APP_ROOT, 'uploads')
SUBMISSION_PATH = os.path.join(APP_ROOT, 'submissions/use_case')
SUBMISSION_PATH_CLASS = os.path.join(APP_ROOT, 'submissions/class')
USE_CASE_SAVED_MODEL_PATH = os.path.join(APP_ROOT, 'tensorflow_models/use_case/model')
USE_CASE_SAVED_LABEL_PATH = os.path.join(APP_ROOT, 'tensorflow_models/use_case/label')
CLASS_SAVED_MODEL_PATH = os.path.join(APP_ROOT, 'tensorflow_models/class/model')
CLASS_SAVED_LABEL_PATH = os.path.join(APP_ROOT, 'tensorflow_models/class/label')
CLASS_SAVED_LABEL_PATH = os.path.join(APP_ROOT, 'tensorflow_models/class/label/label_map.pbtxt')
CLASS_COMP_SAVED_MODEL_PATH = os.path.join(APP_ROOT, 'tensorflow_models/class_component/model')
CLASS_COMP_SAVED_LABEL_PATH = os.path.join(APP_ROOT, 'tensorflow_models/class_component/label/label_map.pbtxt')
app = Flask(__name__, static_folder='outputs')
CORS(app)
......
......@@ -13,12 +13,11 @@ import tensorflow as tf
from config.database import db
def model_object_detection(filename, class_id):
detect_fn = tf.saved_model.load(app.CLASS_SAVED_MODEL_PATH)
category_index = label_map_util.create_category_index_from_labelmap(
app.CLASS_SAVED_LABEL_PATH + "/label_map.pbtxt",
use_display_name=True)
image_np = np.array(Image.open(app.SUBMISSION_PATH + '/' + filename))
def class_object_detection(filename, class_comp_id, model_path, label_path, image_path):
detect_fn = tf.saved_model.load(model_path)
category_index = label_map_util.create_category_index_from_labelmap(label_path, use_display_name=True)
image_np = np.array(Image.open(image_path))
input_tensor = tf.convert_to_tensor(image_np)
input_tensor = input_tensor[tf.newaxis, ...]
......@@ -32,8 +31,64 @@ def model_object_detection(filename, class_id):
detections['detection_classes'] = detections['detection_classes'].astype(np.int64)
accurate_indexes = [k for k, v in enumerate(detections['detection_scores']) if (v > 0.4)]
accurate_indexes = [k for k, v in enumerate(detections['detection_scores']) if (v > 0.6)]
num_entities = len(accurate_indexes)
class_id = operator.itemgetter(*accurate_indexes)(detections['detection_classes'])
boxes = detections['detection_boxes']
text_extraction(filename, class_id, boxes, accurate_indexes, category_index, class_id)
\ No newline at end of file
return boxes, num_entities, accurate_indexes, num_entities, category_index, class_id
def component_separation(filename, class_comp_id):
boxes, num_entities, accurate_indexes, num_entities, category_index, class_id = class_object_detection(filename,
class_comp_id,
app.CLASS_SAVED_MODEL_PATH,
app.CLASS_SAVED_LABEL_PATH,
app.SUBMISSION_PATH_CLASS + '/' + filename)
# Convert the class id in their name
if num_entities > 0:
for i in range(0, len(accurate_indexes)):
if category_index[class_id]['name'] == 'class':
class_image = crop_and_image_resolution(filename, boxes)
cv2.imwrite(app.SUBMISSION_PATH_CLASS, "class" + str(i))
# boxes,num_entities,accurate_indexes,num_entities,category_index,class_id = class_object_detection(class_image, class_comp_id, app.CLASS_SAVED_MODEL_PATH, app.CLASS_SAVED_LABEL_PATH,app.SUBMISSION_PATH_CLASS+'/'+"class"+str(i))
elif category_index[class_id]['name'] == 'interface':
image = crop_and_image_resolution(filename, boxes)
def crop_and_image_resolution(filename, boxes):
image = cv2.imread(app.SUBMISSION_PATH + '/' + filename)
height, width, c = image.shape
# crop box format: xmin, ymin, xmax, ymax
ymin = boxes[0] * height
xmin = boxes[1] * width
ymax = boxes[2] * height
xmax = boxes[3] * width
cropped_image = image[int(ymin):int(ymax), int(xmin):int(xmax)]
image = cv2.imwrite('image.jpg', cropped_image)
# convert values to int
black = (0, 0, 0)
white = (255, 255, 255)
threshold = (160, 160, 160)
# Open input image in grayscale mode and get its pixels.
img = Image.open("image.jpg").convert("LA")
pixels = img.getdata()
new_pixels = []
# Compare each pixel
for pixel in pixels:
if pixel < threshold:
new_pixels.append(black)
else:
new_pixels.append(white)
# Create and save new image.
new_img = Image.new("RGB", img.size)
new_img.putdata(new_pixels)
return image
item {
id: 1
name: 'class_attributes'
}
item {
id: 2
name: 'class_methods'
}
item {
id: 3
name: 'class_name'
}
\ No newline at end of file
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment