Sachith Fernando / 2020-101

Commit 765ece4e, authored Oct 18, 2020 by I.K Seneviratne
Parent: 033e0193

Committing the partial implementation of the workflow for lecture student behavior.

Showing 5 changed files with 432 additions and 161 deletions.
FirstApp/api.py                          +53  -113
FirstApp/emotion_detector.py             +86  -1
FirstApp/logic/activity_recognition.py   +106 -46
FirstApp/logic/head_gaze_estimation.py   +85  -0
FirstApp/logic/video_extraction.py       +102 -1
FirstApp/api.py

This diff is collapsed and not shown here.
FirstApp/emotion_detector.py

@@ -5,11 +5,17 @@ from keras.preprocessing import image

import cv2
import os
import numpy as np
from .MongoModels import *
from . models import VideoMeta
from . logic import custom_sorter as cs
from .logic import id_generator as ig

# emotion recognition method
from .serializers import LectureEmotionSerializer


def emotion_recognition(classifier, face_classifier, image):
    label = ""
    class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']

@@ -548,3 +554,82 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):

    # return the dictionary
    return frame_group_dict, emotion_labels


# this section will handle some database operations
def save_frame_recognitions(video_name):

    # retrieve the lecture emotion id
    lec_emotion = LectureEmotionReport.objects.filter(lecture_video_id__video_name=video_name)
    lec_emotion_ser = LectureEmotionSerializer(lec_emotion, many=True)
    lec_emotion_data = lec_emotion_ser.data[0]
    lec_emotion_id = lec_emotion_data['id']

    # create a new lecture activity frame detections id
    last_lec_emotion_frame_recognitions = LectureEmotionFrameRecognitions.objects.order_by('lecture_emotion_frame_recognition_id').last()
    new_lecture_emotion_frame_recognitions_id = "LEFR00001" if (last_lec_emotion_frame_recognitions is None) else \
        ig.generate_new_id(last_lec_emotion_frame_recognitions.lecture_emotion_frame_recognition_id)

    # calculate the frame detections
    frame_detections = get_frame_emotion_recognition(video_name)

    frame_recognition_details = []

    # save the new lecture activity frame recognitions
    for detection in frame_detections:
        lec_emotion_frame_recognition_details = LectureEmotionFrameRecognitionDetails()
        lec_emotion_frame_recognition_details.frame_name = detection['frame_name']
        lec_emotion_frame_recognition_details.happy_perct = detection['happy_perct']
        lec_emotion_frame_recognition_details.sad_perct = detection['sad_perct']
        lec_emotion_frame_recognition_details.angry_perct = detection['angry_perct']
        lec_emotion_frame_recognition_details.surprise_perct = detection['surprise_perct']
        lec_emotion_frame_recognition_details.neutral_perct = detection['neutral_perct']

        frame_recognition_details.append(lec_emotion_frame_recognition_details)

    lec_emotion_frame_recognitions = LectureEmotionFrameRecognitions()
    lec_emotion_frame_recognitions.lecture_emotion_frame_recognition_id = new_lecture_emotion_frame_recognitions_id
    lec_emotion_frame_recognitions.lecture_emotion_id_id = lec_emotion_id
    lec_emotion_frame_recognitions.frame_recognition_details = frame_recognition_details

    lec_emotion_frame_recognitions.save()

    # now return the frame recognitions
    return frame_detections


# this method will save the emotion frame groupings to the database
def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):

    frame_group_percentages, emotion_labels = emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict)

    # save the frame group details into db
    last_lec_emotion_frame_grouping = LectureEmotionFrameGroupings.objects.order_by('lecture_emotion_frame_groupings_id').last()
    new_lecture_emotion_frame_grouping_id = "LEFG00001" if (last_lec_emotion_frame_grouping is None) else \
        ig.generate_new_id(last_lec_emotion_frame_grouping.lecture_emotion_frame_groupings_id)

    # retrieve the lecture emotion id
    lec_emotion = LectureEmotionReport.objects.filter(lecture_video_id__video_name=video_name)
    lec_emotion_ser = LectureEmotionSerializer(lec_emotion, many=True)
    lec_emotion_id = lec_emotion_ser.data[0]['id']

    # create the frame group details
    frame_group_details = []

    for key in frame_group_percentages.keys():
        # create an object of type 'LectureActivityFrameGroupDetails'
        lec_emotion_frame_group_details = LectureEmotionFrameGroupDetails()
        lec_emotion_frame_group_details.frame_group = key
        lec_emotion_frame_group_details.frame_group_percentages = frame_group_percentages[key]

        frame_group_details.append(lec_emotion_frame_group_details)

    new_lec_emotion_frame_groupings = LectureEmotionFrameGroupings()
    new_lec_emotion_frame_groupings.lecture_emotion_frame_groupings_id = new_lecture_emotion_frame_grouping_id
    new_lec_emotion_frame_groupings.lecture_emotion_id_id = lec_emotion_id
    new_lec_emotion_frame_groupings.frame_group_details = frame_group_details

    # save
    new_lec_emotion_frame_groupings.save()
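save_frame_recognitions assumes that get_frame_emotion_recognition returns one dictionary per frame carrying exactly the percentage keys read in the loop above. A hedged example of that shape is shown below; the keys come from the diff, the values and frame name are illustrative only.

# Hedged example of the per-frame structure save_frame_recognitions expects;
# keys are taken from the loop above, the values here are made up.
example_frame_detections = [
    {
        'frame_name': 'frame-0',
        'happy_perct': 12.5,
        'sad_perct': 6.25,
        'angry_perct': 0.0,
        'surprise_perct': 18.75,
        'neutral_perct': 62.5,
    },
    # ... one entry per extracted frame
]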
FirstApp/logic/activity_recognition.py

This diff is collapsed and not shown here.
FirstApp/logic/head_gaze_estimation.py

@@ -16,6 +16,10 @@ import os

import shutil
import math
from ..MongoModels import *
from ..serializers import *
from . import id_generator as ig


def get_2d_points(img, rotation_vector, translation_vector, camera_matrix, val):
    """Return the 3D points present as 2D for making annotation box"""

@@ -846,3 +850,84 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dict):

    # return the dictionary
    return frame_group_dict, labels


# this section will handle some database operations
def save_frame_detections(video_name):

    # retrieve the lecture emotion id
    lec_gaze = LectureGazeEstimation.objects.filter(lecture_video_id__video_name=video_name)
    lec_gaze_ser = LectureGazeEstimationSerializer(lec_gaze, many=True)
    lec_gaze_data = lec_gaze_ser.data[0]
    lec_gaze_id = lec_gaze_data['id']

    # create a new lecture activity frame detections id
    last_lec_gaze_frame_recognitions = LectureGazeFrameRecognitions.objects.order_by('lecture_gaze_frame_recognition_id').last()
    new_lecture_gaze_frame_recognitions_id = "LGFR00001" if (last_lec_gaze_frame_recognitions is None) else \
        ig.generate_new_id(last_lec_gaze_frame_recognitions.lecture_gaze_frame_recognition_id)

    # calculate the frame detections
    frame_detections, frame_rate = get_lecture_gaze_esrimation_for_frames(video_name)

    # to be added to the field 'frame_recognition_details' in the Lecture Gaze Frame Recordings
    frame_recognition_details = []

    # save the new lecture activity frame recognitions
    for detection in frame_detections:
        lec_gaze_frame_recognition_details = LectureGazeFrameRecognitionDetails()
        lec_gaze_frame_recognition_details.frame_name = detection['frame_name']
        lec_gaze_frame_recognition_details.upright_perct = detection['upright_perct']
        lec_gaze_frame_recognition_details.upleft_perct = detection['upleft_perct']
        lec_gaze_frame_recognition_details.downright_perct = detection['downright_perct']
        lec_gaze_frame_recognition_details.downleft_perct = detection['downleft_perct']
        lec_gaze_frame_recognition_details.front_perct = detection['front_perct']

        frame_recognition_details.append(lec_gaze_frame_recognition_details)

    lec_gaze_frame_recognitions = LectureGazeFrameRecognitions()
    lec_gaze_frame_recognitions.lecture_gaze_frame_recognition_id = new_lecture_gaze_frame_recognitions_id
    lec_gaze_frame_recognitions.lecture_gaze_id_id = lec_gaze_id
    lec_gaze_frame_recognitions.frame_recognition_details = frame_recognition_details

    lec_gaze_frame_recognitions.save()

    # now return the frame recognitions
    return frame_detections


# this method will save gaze frame groupings to the database
def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):

    frame_group_percentages, gaze_labels = gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dict)

    # save the frame group details into db
    last_lec_gaze_frame_grouping = LectureGazeFrameGroupings.objects.order_by('lecture_gaze_frame_groupings_id').last()
    new_lecture_gaze_frame_grouping_id = "LGFG00001" if (last_lec_gaze_frame_grouping is None) else \
        ig.generate_new_id(last_lec_gaze_frame_grouping.lecture_gaze_frame_groupings_id)

    # retrieve the lecture activity id
    lec_gaze = LectureGazeEstimation.objects.filter(lecture_video_id__video_name=video_name)
    lec_gaze_ser = LectureGazeEstimationSerializer(lec_gaze, many=True)
    lec_gaze_id = lec_gaze_ser.data[0]['id']

    # create the frame group details
    frame_group_details = []

    for key in frame_group_percentages.keys():
        # create an object of type 'LectureActivityFrameGroupDetails'
        lec_gaze_frame_group_details = LectureGazeFrameGroupDetails()
        lec_gaze_frame_group_details.frame_group = key
        lec_gaze_frame_group_details.frame_group_percentages = frame_group_percentages[key]

        frame_group_details.append(lec_gaze_frame_group_details)

    new_lec_gaze_frame_groupings = LectureGazeFrameGroupings()
    new_lec_gaze_frame_groupings.lecture_gaze_frame_groupings_id = new_lecture_gaze_frame_grouping_id
    new_lec_gaze_frame_groupings.lecture_gaze_id_id = lec_gaze_id
    new_lec_gaze_frame_groupings.frame_group_details = frame_group_details

    # save
    new_lec_gaze_frame_groupings.save()
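As in the emotion module, new record IDs start from a fixed seed ("LGFR00001", "LGFG00001") and are otherwise derived from the last saved ID via id_generator.generate_new_id. That module is not part of this diff, so the sketch below only illustrates the behaviour the call sites appear to rely on, a fixed alphabetic prefix followed by a zero-padded counter; the actual implementation may differ.

# Hedged sketch of the ID scheme implied by "LGFR00001" + generate_new_id(...).
# This is NOT the project's id_generator module, only an illustration.
import re


def generate_new_id(last_id: str) -> str:
    # split the trailing numeric counter off the alphabetic prefix
    match = re.match(r"([A-Za-z]+)(\d+)$", last_id)
    prefix, counter = match.group(1), match.group(2)

    # increment while preserving the zero padding (LGFR00001 -> LGFR00002)
    next_counter = str(int(counter) + 1).zfill(len(counter))
    return prefix + next_counter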
FirstApp/logic/video_extraction.py

@@ -3,6 +3,11 @@ import cv2

import shutil
import datetime
from FirstApp.MongoModels import *
from FirstApp.serializers import *
from . import id_generator as ig


def VideoExtractor(request):
    BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

@@ -193,3 +198,99 @@ def getFrameLandmarks(video_name, category):

            'front_count': 0, 'detection_count': 0}

    return frame_landmarks, frame_group_dict


# this section will handle some database operations
def save_time_landmarks(video_name):

    last_lec_video_time_landmarks = LectureVideoTimeLandmarks.objects.order_by('lecture_video_time_landmarks_id').last()
    new_lecture_video_time_landmarks_id = "LVTL00001" if (last_lec_video_time_landmarks is None) else \
        ig.generate_new_id(last_lec_video_time_landmarks.lecture_video_time_landmarks_id)

    # retrieve lecture video details
    lec_video = LectureVideo.objects.filter(video_name=video_name)
    lec_video_ser = LectureVideoSerializer(lec_video, many=True)
    lec_video_id = lec_video_ser.data[0]['id']

    # save the landmark details in the db
    time_landmarks = getTimeLandmarks(video_name)

    db_time_landmarks = []

    # loop through the time landmarks
    for landmark in time_landmarks:
        landmark_obj = Landmarks()
        landmark_obj.landmark = landmark

        db_time_landmarks.append(landmark_obj)

    new_lec_video_time_landmarks = LectureVideoTimeLandmarks()
    new_lec_video_time_landmarks.lecture_video_time_landmarks_id = new_lecture_video_time_landmarks_id
    new_lec_video_time_landmarks.lecture_video_id_id = lec_video_id
    new_lec_video_time_landmarks.time_landmarks = db_time_landmarks

    new_lec_video_time_landmarks.save()


# this method will save frame landmarks to the database
def save_frame_landmarks(video_name):

    # retrieve the previous lecture video frame landmarks details
    last_lec_video_frame_landmarks = LectureVideoFrameLandmarks.objects.order_by('lecture_video_frame_landmarks_id').last()
    new_lecture_video_frame_landmarks_id = "LVFL00001" if (last_lec_video_frame_landmarks is None) else \
        ig.generate_new_id(last_lec_video_frame_landmarks.lecture_video_frame_landmarks_id)

    frame_landmarks, frame_group_dict = getFrameLandmarks(video_name, "Activity")

    # retrieve lecture video details
    lec_video = LectureVideo.objects.filter(video_name=video_name)
    lec_video_ser = LectureVideoSerializer(lec_video, many=True)
    lec_video_id = lec_video_ser.data[0]['id']

    # save the frame landmarks details into db
    db_frame_landmarks = []

    for landmark in frame_landmarks:
        landmark_obj = Landmarks()
        landmark_obj.landmark = landmark

        db_frame_landmarks.append(landmark_obj)

    new_lec_video_frame_landmarks = LectureVideoFrameLandmarks()
    new_lec_video_frame_landmarks.lecture_video_frame_landmarks_id = new_lecture_video_frame_landmarks_id
    new_lec_video_frame_landmarks.lecture_video_id_id = lec_video_id
    new_lec_video_frame_landmarks.frame_landmarks = db_frame_landmarks

    new_lec_video_frame_landmarks.save()

    # now return the frame landmarks and the frame group dictionary
    return frame_landmarks, frame_group_dict


# this method will retrieve the frame landmarks from the database
def get_frame_landmarks(video_name):

    frame_landmarks = []

    # retrieve frame landmarks from db
    lec_video_frame_landmarks = LectureVideoFrameLandmarks.objects.filter(lecture_video_id__video_name=video_name)
    lec_video_frame_landmarks_ser = LectureVideoFrameLandmarksSerializer(lec_video_frame_landmarks, many=True)
    lec_video_frame_landmarks_data = lec_video_frame_landmarks_ser.data[0]

    retrieved_frame_landmarks = lec_video_frame_landmarks_data["frame_landmarks"]

    # creating a new list to display in the frontend
    for landmark in retrieved_frame_landmarks:
        frame_landmarks.append(landmark['landmark'])

    # now return the frame landmarks
    return frame_landmarks
\ No newline at end of file
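Taken together, the three files sketch the intended workflow for a lecture video: extract time and frame landmarks, then run and persist the per-frame emotion and gaze results. The ordering below is inferred from the function dependencies in this commit (the emotion and gaze helpers both consume the frame landmarks and frame group dictionary produced by video_extraction); the orchestration function itself is an assumption and does not ship with this diff.

# Hedged sketch of the overall behaviour workflow implied by this commit.
# Function and module names come from the diff; the orchestration is assumed.
from FirstApp import emotion_detector as ed
from FirstApp.logic import head_gaze_estimation as hge
from FirstApp.logic import video_extraction as ve


def run_behavior_workflow(video_name):
    # 1. derive time landmarks and frame landmarks for the video
    ve.save_time_landmarks(video_name)
    frame_landmarks, frame_group_dict = ve.save_frame_landmarks(video_name)

    # 2. persist per-frame emotion results and their frame groupings
    ed.save_frame_recognitions(video_name)
    ed.save_frame_groupings(video_name, frame_landmarks, frame_group_dict)

    # 3. persist per-frame gaze results and their frame groupings
    hge.save_frame_detections(video_name)
    hge.save_frame_groupings(video_name, frame_landmarks, frame_group_dict)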