Project: 2020-101 (Sachith Fernando)

Commit 1931d23b, authored Nov 05, 2020 by I.K Seneviratne

Committing some modifications in several files.

parent 91f1c100

Showing 6 changed files with 12 additions and 183 deletions (+12, -183)
FirstApp/emotion_detector.py             +0   -2
FirstApp/logic/activity_recognition.py   +0   -3
FirstApp/logic/classes/pose.py           +0   -11
FirstApp/logic/facial_landmarks.py       +0   -140
FirstApp/logic/head_gaze_estimation.py   +0   -5
FirstApp/serializers.py                  +12  -22
FirstApp/emotion_detector.py (view file @ 1931d23b)

@@ -15,9 +15,7 @@ main methods include

from tensorflow.keras.models import load_model
from time import sleep
from keras.preprocessing.image import img_to_array
from keras.preprocessing import image
import cv2
import os
import numpy as np
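The imports shown in this hunk are the usual ingredients of a Keras-based facial-emotion classifier fed by OpenCV frames. A minimal sketch of how they are commonly combined is given below; the model path, input size, and label list are illustrative assumptions and are not taken from emotion_detector.py.

# Sketch only: model path, 48x48 input size, and label order are assumptions.
import cv2
import numpy as np
from tensorflow.keras.models import load_model
from keras.preprocessing.image import img_to_array

model = load_model('path/to/emotion_model.h5')              # hypothetical model file
labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']   # hypothetical label order

frame = cv2.imread('face.jpg')                              # hypothetical face crop
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
roi = cv2.resize(gray, (48, 48)).astype('float') / 255.0    # scale pixels to [0, 1]
roi = img_to_array(roi)                                     # (48, 48) -> (48, 48, 1)
roi = np.expand_dims(roi, axis=0)                           # add a batch dimension

preds = model.predict(roi)[0]
print(labels[int(np.argmax(preds))])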
FirstApp/logic/activity_recognition.py (view file @ 1931d23b)

@@ -15,13 +15,10 @@ main methods include

import tensorflow as tf
import tensorflow.keras
from PIL import Image, ImageOps
import numpy as np
import cv2
import os
import shutil
from .custom_sorter import *
from ..MongoModels import *
from ..serializers import *
from . import id_generator as ig
from . import utilities as ut
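The PIL Image/ImageOps imports alongside tensorflow.keras suggest the common pattern of fitting a frame to the classifier's input size with ImageOps.fit, rescaling the pixels, and predicting. A hedged sketch of that pattern follows; the model path, 224x224 input size, and class names are assumptions, not values from activity_recognition.py.

# Sketch only: model path, input size, and class names are assumptions.
import numpy as np
import tensorflow as tf
from PIL import Image, ImageOps

model = tf.keras.models.load_model('path/to/activity_model.h5')  # hypothetical model file
class_names = ['sitting', 'standing', 'walking']                 # hypothetical classes

image = Image.open('frame.jpg')                                  # hypothetical frame
image = ImageOps.fit(image, (224, 224), Image.LANCZOS)           # crop/resize to the input size
data = np.asarray(image).astype(np.float32)
data = (data / 127.5) - 1                                        # scale pixels to [-1, 1]
data = np.expand_dims(data, axis=0)                              # add a batch dimension

prediction = model.predict(data)[0]
print(class_names[int(np.argmax(prediction))])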
FirstApp/logic/classes/pose.py (deleted, 100644 → 0; view file @ 91f1c100)

class PoseResponse:
    # directory = ''
    # image_name = ''
    # label = ''

    def __init__(self, directory, image_name, label):
        self.directory = directory
        self.image_name = image_name
        self.labels = label
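The deleted class is a plain response container; note that the constructor stores the label argument under the attribute name labels. A tiny usage sketch with made-up values:

# Illustrative only; the argument values are made up.
response = PoseResponse('FirstApp/poses', 'frame_001.jpg', 'upright')
print(response.directory, response.image_name, response.labels)  # attribute is 'labels', parameter is 'label'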
FirstApp/logic/facial_landmarks.py (deleted, 100644 → 0; view file @ 91f1c100)

from imutils import face_utils
import os
import cv2
import dlib
import numpy as np
import imutils


def get2DPoints(image):
    BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers")
    detector_path = os.path.join(CLASSIFIER_DIR, "shape_predictor_68_face_landmarks.dat")

    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(detector_path)

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # detect faces in the grayscale image
    rects = detector(gray, 1)

    left_corner_arr = None
    right_corner_arr = None
    nose_tip_arr = None
    right_mouth_arr = None
    left_mouth_arr = None
    chin_arr = None
    face_center_arr = None
    face_center_top_arr = None
    face_center_bottom_arr = None

    count = 0

    print('no of faces: ', len(rects))

    if (len(rects)):
        left_corner_arr = np.zeros((len(rects), 2))
        right_corner_arr = np.zeros((len(rects), 2))
        nose_tip_arr = np.zeros((len(rects), 2))
        right_mouth_arr = np.zeros((len(rects), 2))
        left_mouth_arr = np.zeros((len(rects), 2))
        chin_arr = np.zeros((len(rects), 2))
        face_center_top_arr = np.zeros((len(rects), 2))
        face_center_bottom_arr = np.zeros((len(rects), 2))

    for (i, rect) in enumerate(rects):
        left_corner = None
        right_corner = None
        nose_tip = None
        right_mouth = None
        left_mouth = None
        chin = None

        (fx, fy, fw, fh) = face_utils.rect_to_bb(rect)
        cv2.rectangle(image, (fx, fy), (fx + fw, fy + fh), (0, 255, 0), 2)

        face_center_top = [int(fx + fw / 2), int(fy)]
        face_center_bottom = [int(fx + fw / 2), int(fy + fh)]
        cv2.line(image, (int(fx + fw / 2), int(fy)), (int(fx + fw / 2), int(fy + fh)), (0, 255, 0), 2)

        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)

        # looping through each facial landmark category
        for (name, (i, j)) in face_utils.FACIAL_LANDMARKS_IDXS.items():
            # clone the original image so we can draw on it, then
            # display the name of the face part on the image
            clone = image

            # loop over the subset of facial landmarks, drawing the
            # specific face part
            for (x, y) in shape[i:j]:
                if (name == 'left_eye'):
                    # maxArr = np.amax(shape[i:j], axis=0)
                    # minArr = np.amin(shape[i:j], axis=0)
                    left_corner = np.amax(shape[i:j], axis=0)

                elif (name == 'right_eye'):
                    # maxArr = np.amax(shape[i:j], axis=0)
                    # minArr = np.amin(shape[i:j], axis=0)
                    right_corner = np.amin(shape[i:j], axis=0)

                elif (name == 'jaw'):
                    minArr = np.array(shape[i:j][8], dtype=int)
                    chin = np.array(shape[i:j][8], dtype=int)
                    # cv2.putText(clone, "Chin", (int(minArr[0]), int(minArr[1])), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                    cv2.circle(image, (int(minArr[0]), int(minArr[1])), 3, (255, 0, 255), -1)
                    # cv2.circle(clone, (int(minArr[0]), int(minArr[1])), 3, (0, 255, 255), -1)

                elif (name == 'nose'):
                    # nose_tip = np.array(shape[i:j][3], dtype=int)
                    nose_tip = np.array(shape[i:j][3], dtype=int)
                    # cv2.putText(clone, "Nose tip", (int(nose_tip[0]), int(nose_tip[1])), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255),
                    #             2)
                    # cv2.circle(clone, (int(nose_tip[0]), int(nose_tip[1])), 3, (255, 0, 255), -1)

                elif (name == 'inner_mouth'):
                    # maxArr = np.amax(shape[i:j], axis=0)
                    # minArr = np.amin(shape[i:j], axis=0)
                    right_mouth = np.amin(shape[i:j], axis=0)
                    left_mouth = np.amax(shape[i:j], axis=0)
                    # cv2.circle(clone, (maxArr[0], maxArr[1]), 3, (127, 0, 255), -1)
                    # cv2.circle(clone, (minArr[0], minArr[1]), 3, (127, 0, 255), -1)

                # else:
                #     cv2.circle(image, (x, y), 3, (255, 0, 255), -1)

        left_corner_arr[count] = left_corner
        right_corner_arr[count] = right_corner
        nose_tip_arr[count] = nose_tip
        right_mouth_arr[count] = right_mouth
        left_mouth_arr[count] = left_mouth
        chin_arr[count] = chin
        face_center_top_arr[count] = face_center_top
        face_center_bottom_arr[count] = face_center_bottom

        count += 1

    return left_corner_arr, right_corner_arr, nose_tip_arr, right_mouth_arr, left_mouth_arr, chin_arr, face_center_top_arr, face_center_bottom_arr, count

    # extract the ROI of the face region as a separate image
    # (x, y, w, h) = cv2.boundingRect(np.array([shape[i:j]]))
    # roi = image[y:y + h, x:x + w]
    # roi = imutils.resize(roi, width=250, inter=cv2.INTER_CUBIC)
    #
    # # show the particular face part
    # cv2.imshow("ROI", roi)
    # cv2.imshow("Image", clone)
    # cv2.waitKey(0)
    #
    # # visualize all facial landmarks with a transparent overlay
    # output = face_utils.visualize_facial_landmarks(image, shape)
    # cv2.imshow("Image", output)
    # cv2.waitKey(0)
\ No newline at end of file
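For reference, the deleted get2DPoints can be driven as in the sketch below. It assumes the shape_predictor_68_face_landmarks.dat file is present under FirstApp\classifiers (the function builds that path itself) and that the input is a BGR frame read with OpenCV; the filenames are illustrative.

# Illustrative driver for the deleted function (filenames are hypothetical).
import cv2

image = cv2.imread('lecture_frame.jpg')
(left_eye, right_eye, nose, right_mouth, left_mouth,
 chin, center_top, center_bottom, face_count) = get2DPoints(image)

# mark each detected nose tip on the frame annotated in place by get2DPoints
for k in range(face_count):
    x, y = nose[k]
    cv2.circle(image, (int(x), int(y)), 3, (0, 0, 255), -1)

cv2.imwrite('annotated.jpg', image)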
FirstApp/logic/head_gaze_estimation.py (view file @ 1931d23b)

@@ -13,19 +13,14 @@ main methods include

"""

from decimal import Decimal
from .custom_sorter import *
import cv2
import numpy as np
import math
from .face_detector import get_face_detector, find_faces
from .face_landmarks import get_landmark_model, detect_marks
import os
import shutil
import math
import pandas as pd
from ..MongoModels import *
from ..serializers import *
from . import id_generator as ig
from . import utilities as ut
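The face_detector and face_landmarks helpers imported here are project-local modules. Assuming the usual shape for this pairing (get_face_detector() and get_landmark_model() return models, find_faces(img, model) returns face bounding boxes, detect_marks(img, model, face) returns 68 landmark points), the estimation loop typically looks like the sketch below. All of these signatures are assumptions about the local modules, not something shown in this diff.

# Sketch only: the helper signatures are assumed as noted above.
import cv2
from .face_detector import get_face_detector, find_faces
from .face_landmarks import get_landmark_model, detect_marks

face_model = get_face_detector()
landmark_model = get_landmark_model()

img = cv2.imread('lecture_frame.jpg')                 # hypothetical frame
faces = find_faces(img, face_model)                   # assumed: list of face bounding boxes
for face in faces:
    marks = detect_marks(img, landmark_model, face)   # assumed: 68 (x, y) landmark points
    # head pose would then be solved from a subset of these points, e.g. with cv2.solvePnP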
FirstApp/serializers.py (view file @ 1931d23b)

from rest_framework import serializers
from .models import Teachers, RegisterUser
from .MongoModels import *
from .logic import classes
from .models import VideoMeta

"""
class TeachersSerializer(serializers.ModelSerializer):
This file is responsible for implementing the serializer classes for each model classes
    class Meta:
        model = Teachers
        fields = ('firstName', 'lastName')
        # fields = __all__
each serializer class extended by the djangorestframework's serializers class
class RegisterUserSerializer(serializers.ModelSerializer):
there should be an inner class named "Meta" that needs to implemented inside each serializer class
    class Meta:
        model = RegisterUser
        fields = ('firstName', 'lastName', 'email', 'password')
there are two fields inside "Meta" class, as follows.
model: the relevant model class that needs to be serialized
fields: fields that need to be displayed when serializing the model class
('__all__' indicates that all the fields are required to be displayed)
"""

# image serializer
class ImageSerializer(serializers.Serializer):
    metaData = serializers.CharField()

# image serializer
class PoseSerializer(serializers.Serializer):
    directory = serializers.CharField()
    image_name = serializers.CharField()
    text = serializers.CharField()

from rest_framework import serializers
from .MongoModels import *
from .models import VideoMeta

# lecture serializer
...
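The module docstring added in this hunk describes the ModelSerializer pattern: each serializer declares an inner Meta class naming the model to serialize and the fields to expose. As a concrete illustration of that rule, a hypothetical serializer (not one defined in this file) for the VideoMeta model the module already imports could look like this:

# Hypothetical example of the Meta pattern described in the docstring above.
class VideoMetaSerializer(serializers.ModelSerializer):

    class Meta:
        model = VideoMeta       # the model class to serialize
        fields = '__all__'      # expose every field of the model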