Commit 1931d23b authored by I.K Seneviratne

Committing some modifications in several files.

.
parent 91f1c100
@@ -15,9 +15,7 @@ main methods include
 from tensorflow.keras.models import load_model
-from time import sleep
 from keras.preprocessing.image import img_to_array
-from keras.preprocessing import image
 import cv2
 import os
 import numpy as np
...
@@ -15,13 +15,10 @@ main methods include
 import tensorflow as tf
 import tensorflow.keras
-from PIL import Image, ImageOps
 import numpy as np
 import cv2
 import os
-import shutil
 from .custom_sorter import *
-from ..MongoModels import *
 from ..serializers import *
 from . import id_generator as ig
 from . import utilities as ut
...
class PoseResponse:
    # directory = ''
    # image_name = ''
    # label = ''

    # plain data holder for a processed image and its label
    def __init__(self, directory, image_name, label):
        self.directory = directory
        self.image_name = image_name
        self.labels = label
from imutils import face_utils
import os
import cv2
import dlib
import numpy as np
import imutils
def get2DPoints(image):
    # locate the dlib landmark model that ships with the project
    BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers")
    detector_path = os.path.join(CLASSIFIER_DIR, "shape_predictor_68_face_landmarks.dat")

    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(detector_path)

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # detect faces in the grayscale image
    rects = detector(gray, 1)

    left_corner_arr = None
    right_corner_arr = None
    nose_tip_arr = None
    right_mouth_arr = None
    left_mouth_arr = None
    chin_arr = None
    face_center_arr = None
    face_center_top_arr = None
    face_center_bottom_arr = None
    count = 0

    print('no of faces: ', len(rects))

    # pre-allocate one (x, y) row per detected face
    if (len(rects)):
        left_corner_arr = np.zeros((len(rects), 2))
        right_corner_arr = np.zeros((len(rects), 2))
        nose_tip_arr = np.zeros((len(rects), 2))
        right_mouth_arr = np.zeros((len(rects), 2))
        left_mouth_arr = np.zeros((len(rects), 2))
        chin_arr = np.zeros((len(rects), 2))
        face_center_top_arr = np.zeros((len(rects), 2))
        face_center_bottom_arr = np.zeros((len(rects), 2))

    for (i, rect) in enumerate(rects):
        left_corner = None
        right_corner = None
        nose_tip = None
        right_mouth = None
        left_mouth = None
        chin = None

        # bounding box of the face, plus a vertical centre line for reference
        (fx, fy, fw, fh) = face_utils.rect_to_bb(rect)
        cv2.rectangle(image, (fx, fy), (fx + fw, fy + fh), (0, 255, 0), 2)

        face_center_top = [int(fx + fw / 2), int(fy)]
        face_center_bottom = [int(fx + fw / 2), int(fy + fh)]

        cv2.line(image, (int(fx + fw / 2), int(fy)), (int(fx + fw / 2), int(fy + fh)), (0, 255, 0), 2)

        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)

        # looping through each facial landmark category
        # (note: this rebinds i from the outer loop; the arrays below are indexed by count instead)
        for (name, (i, j)) in face_utils.FACIAL_LANDMARKS_IDXS.items():
            # clone the original image so we can draw on it, then
            # display the name of the face part on the image
            clone = image

            # loop over the subset of facial landmarks, drawing the
            # specific face part
            for (x, y) in shape[i:j]:
                if (name == 'left_eye'):
                    # maxArr = np.amax(shape[i:j], axis=0)
                    # minArr = np.amin(shape[i:j], axis=0)
                    left_corner = np.amax(shape[i:j], axis=0)
                elif (name == 'right_eye'):
                    # maxArr = np.amax(shape[i:j], axis=0)
                    # minArr = np.amin(shape[i:j], axis=0)
                    right_corner = np.amin(shape[i:j], axis=0)
                elif (name == 'jaw'):
                    minArr = np.array(shape[i:j][8], dtype=int)
                    chin = np.array(shape[i:j][8], dtype=int)
                    # cv2.putText(clone, "Chin", (int(minArr[0]), int(minArr[1])), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                    cv2.circle(image, (int(minArr[0]), int(minArr[1])), 3, (255, 0, 255), -1)
                    # cv2.circle(clone, (int(minArr[0]), int(minArr[1])), 3, (0, 255, 255), -1)
                elif (name == 'nose'):
                    nose_tip = np.array(shape[i:j][3], dtype=int)
                    # cv2.putText(clone, "Nose tip", (int(nose_tip[0]), int(nose_tip[1])), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                    # cv2.circle(clone, (int(nose_tip[0]), int(nose_tip[1])), 3, (255, 0, 255), -1)
                elif (name == 'inner_mouth'):
                    # maxArr = np.amax(shape[i:j], axis=0)
                    # minArr = np.amin(shape[i:j], axis=0)
                    right_mouth = np.amin(shape[i:j], axis=0)
                    left_mouth = np.amax(shape[i:j], axis=0)
                    # cv2.circle(clone, (maxArr[0], maxArr[1]), 3, (127, 0, 255), -1)
                    # cv2.circle(clone, (minArr[0], minArr[1]), 3, (127, 0, 255), -1)
                # else:
                #     cv2.circle(image, (x, y), 3, (255, 0, 255), -1)

        # store the points extracted for this face
        left_corner_arr[count] = left_corner
        right_corner_arr[count] = right_corner
        nose_tip_arr[count] = nose_tip
        right_mouth_arr[count] = right_mouth
        left_mouth_arr[count] = left_mouth
        chin_arr[count] = chin
        face_center_top_arr[count] = face_center_top
        face_center_bottom_arr[count] = face_center_bottom

        count += 1

    return left_corner_arr, right_corner_arr, nose_tip_arr, right_mouth_arr, left_mouth_arr, chin_arr, face_center_top_arr, face_center_bottom_arr, count
# extract the ROI of the face region as a separate image
# (x, y, w, h) = cv2.boundingRect(np.array([shape[i:j]]))
# roi = image[y:y + h, x:x + w]
# roi = imutils.resize(roi, width=250, inter=cv2.INTER_CUBIC)
#
# # show the particular face part
# cv2.imshow("ROI", roi)
# cv2.imshow("Image", clone)
# cv2.waitKey(0)
#
# # visualize all facial landmarks with a transparent overlay
# output = face_utils.visualize_facial_landmarks(image, shape)
# cv2.imshow("Image", output)
# cv2.waitKey(0)
\ No newline at end of file
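A minimal usage sketch (not part of the commit) showing how get2DPoints above could be exercised, assuming a hypothetical frame file name and that shape_predictor_68_face_landmarks.dat is in place; each returned array holds one (x, y) row per detected face, or None when no face is found:

# hypothetical driver for get2DPoints; the image path is an assumption
import cv2

frame = cv2.imread("sample_lecture_frame.jpg")

(left_eye, right_eye, nose, right_mouth, left_mouth,
 chin, center_top, center_bottom, face_count) = get2DPoints(frame)

if face_count:
    for idx in range(face_count):
        print("face", idx, "nose tip:", nose[idx], "chin:", chin[idx])

# the frame now carries the drawn bounding boxes and centre lines
cv2.imwrite("annotated_frame.jpg", frame)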
@@ -13,19 +13,14 @@ main methods include
 """
 from decimal import Decimal
-from . custom_sorter import *
 import cv2
 import numpy as np
-import math
 from . face_detector import get_face_detector, find_faces
 from . face_landmarks import get_landmark_model, detect_marks
 import os
-import shutil
 import math
 import pandas as pd
-from ..MongoModels import *
 from ..serializers import *
 from . import id_generator as ig
 from . import utilities as ut
...
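The face_detector and face_landmarks helpers kept by this hunk are typically wired together as sketched below. This is only an assumption about their signatures (get_face_detector() returning a detector model, find_faces(img, model) returning face boxes, get_landmark_model() returning a landmark network, detect_marks(img, model, face) returning 68 (x, y) points) and is not code from this commit; it would live in the same package as these modules.

# hypothetical sketch, assuming the helper signatures noted above
import cv2
from . face_detector import get_face_detector, find_faces
from . face_landmarks import get_landmark_model, detect_marks

face_model = get_face_detector()
landmark_model = get_landmark_model()

img = cv2.imread("sample_lecture_frame.jpg")   # hypothetical input frame
faces = find_faces(img, face_model)

for face in faces:
    marks = detect_marks(img, landmark_model, face)
    # the nose tip, chin, eye corners and mouth corners picked out of `marks`
    # are the 2D points usually passed to cv2.solvePnP to recover head rotation
    print(len(marks), "landmarks detected for this face")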
-from rest_framework import serializers
-from .models import Teachers, RegisterUser
-from .MongoModels import *
-from .logic import classes
-from . models import VideoMeta
-
-class TeachersSerializer(serializers.ModelSerializer):
-    class Meta:
-        model = Teachers
-        fields = ('firstName', 'lastName')
-        # fields = __all__
-
-class RegisterUserSerializer(serializers.ModelSerializer):
-    class Meta:
-        model = RegisterUser
-        fields = ('firstName', 'lastName', 'email', 'password')
-
-# image serializer
-class ImageSerializer(serializers.Serializer):
-    metaData = serializers.CharField()
-
-# image serializer
-class PoseSerializer(serializers.Serializer):
-    directory = serializers.CharField()
-    image_name = serializers.CharField()
-    text = serializers.CharField()
+"""
+This file is responsible for implementing the serializer classes for each model class;
+each serializer class extends one of the djangorestframework serializers classes.
+
+There should be an inner class named "Meta" implemented inside each serializer class.
+There are two fields inside the "Meta" class, as follows:
+    model: the relevant model class that needs to be serialized
+    fields: the fields that need to be displayed when serializing the model class
+            ('__all__' indicates that all the fields are required to be displayed)
+"""
+
+from rest_framework import serializers
+from .MongoModels import *
+from . models import VideoMeta
+
 # lecture serializer
...
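For reference, the Meta pattern that the new module docstring describes is the one the removed TeachersSerializer followed; a minimal sketch of that pattern, using only names that appear in the old code above:

# illustrative only: the Meta pattern described in the docstring,
# shown with the Teachers model from the removed serializer above
from rest_framework import serializers
from .models import Teachers


class TeachersSerializer(serializers.ModelSerializer):
    class Meta:
        model = Teachers                        # model class to serialize
        fields = ('firstName', 'lastName')      # fields to expose ('__all__' for every field)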