Sachith Fernando / 2020-101 · Commits

Commit d8f6824a — authored Nov 02, 2020 by LiniEisha

    Merge branch 'QA_RELEASE' into IT17100908

Parents: 51d22c43, a2d180df

Showing 30 changed files with 3210 additions and 2330 deletions (+3210, -2330)
Changed files:

FirstApp/MongoModels.py  (+16, -1)
FirstApp/admin.py  (+3, -1)
FirstApp/api.py  (+190, -12)
FirstApp/emotion_detector.py  (+212, -83)
FirstApp/forms.py  (+48, -1)
FirstApp/logic/activity_recognition.py  (+164, -65)
FirstApp/logic/head_gaze_estimation.py  (+140, -31)
FirstApp/logic/utilities.py  (+15, -0)
FirstApp/logic/video_extraction.py  (+16, -3)
FirstApp/migrations/0015_auto_20201020_2157.py  (+34, -0)
FirstApp/templates/FirstApp/401.html  (+95, -0)
FirstApp/templates/FirstApp/Home.html  (+422, -35)
FirstApp/templates/FirstApp/activity.html  (+318, -732)
FirstApp/templates/FirstApp/admin_login.html  (+100, -0)
FirstApp/templates/FirstApp/emotion.html  (+382, -745)
FirstApp/templates/FirstApp/gaze.html  (+293, -442)
FirstApp/templates/FirstApp/template.html  (+37, -1)
FirstApp/templates/FirstApp/user_direct.html  (+101, -0)
FirstApp/urls.py  (+33, -2)
FirstApp/views.py  (+174, -80)
MonitorLecturerApp/api.py  (+41, -0)
MonitorLecturerApp/logic/classroom_activity.py  (+166, -0)
MonitorLecturerApp/migrations/0005_lectureractivityframerecognitions.py  (+25, -0)
MonitorLecturerApp/migrations/0006_lectureractivityframerecognitions_fps.py  (+18, -0)
MonitorLecturerApp/models.py  (+24, -0)
MonitorLecturerApp/serializers.py  (+34, -2)
MonitorLecturerApp/templates/MonitorLecturerApp/index.html  (+32, -42)
MonitorLecturerApp/templates/MonitorLecturerApp/lecVideo.html  (+3, -1)
MonitorLecturerApp/urls.py  (+6, -0)
MonitorLecturerApp/views.py  (+68, -51)
FirstApp/MongoModels.py
View file @ d8f6824a

@@ -40,6 +40,15 @@ class Lecturer(models.Model):
        return self.lecturer_id


# admin model
class Admin(models.Model):
    admin_id = models.CharField(max_length=10)
    name = models.CharField(max_length=20)
    email = models.EmailField()

    def __str__(self):
        return self.admin_id


# Lecturer_subject model
class LecturerSubject(models.Model):
    lec_subject_id = models.CharField(max_length=10)

@@ -56,6 +65,12 @@ class LecturerCredentials(models.Model):
    password = models.CharField(max_length=15)


# admin credential details
class AdminCredentialDetails(models.Model):
    username = models.ForeignKey(Admin, on_delete=models.CASCADE)
    password = models.CharField(max_length=15)


# timetable based on daily basis
class DailyTimeTable(models.Model):
    slot_id = models.AutoField(auto_created=True, primary_key=True)

@@ -285,7 +300,7 @@ class LectureEmotionFrameRecognitions(models.Model):
# POSE section

-# lecture pose estimation
+# lecture gaze estimation
class LectureGazeEstimation(models.Model):
    lecture_gaze_id = models.CharField(max_length=10)
    lecture_video_id = models.ForeignKey(LectureVideo, on_delete=models.CASCADE)
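For context only, a small hedged sketch (not part of the commit) of how the new Admin and AdminCredentialDetails models could be exercised from a Django shell, assuming they are importable from FirstApp.MongoModels; the IDs and values are placeholders:

    # python manage.py shell
    from FirstApp.MongoModels import Admin, AdminCredentialDetails

    admin = Admin.objects.create(admin_id='A0001', name='Jane', email='jane@example.com')
    AdminCredentialDetails.objects.create(username=admin, password='placeholder')

    # __str__ returns the admin_id, so this prints 'A0001'
    print(Admin.objects.get(email='jane@example.com'))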
FirstApp/admin.py
View file @ d8f6824a

@@ -12,4 +12,6 @@ admin.site.register(LecturerCredentials)
admin.site.register(FacultyTimetable)
admin.site.register(LectureVideo)
admin.site.register(LectureActivity)
-admin.site.register(LectureGazeEstimation)
\ No newline at end of file
+admin.site.register(LectureGazeEstimation)
+admin.site.register(Admin)
+admin.site.register(AdminCredentialDetails)
\ No newline at end of file
FirstApp/api.py
View file @ d8f6824a

from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from MonitorLecturerApp.models import LectureRecordedVideo, LecturerVideoMetaData
from MonitorLecturerApp.serializers import LectureRecordedVideoSerializer, LecturerVideoMetaDataSerializer

from .MongoModels import *
from rest_framework.views import *
from .ImageOperations import saveImage

@@ -300,7 +302,6 @@ class LectureActivityProcess(APIView):
        LectureActivity(
            lecture_activity_id=new_lecture_activity_id,
            lecture_video_id_id=lec_video_id,
            talking_perct=percentages['talking_perct'],
            phone_perct=percentages['phone_perct'],
            listening_perct=percentages['listening_perct'],
            writing_perct=percentages['writing_perct']

@@ -473,16 +474,18 @@ class LectureEmotionProcess(APIView):
        pass

    def save_emotion_report(self, lec_video_id, percentages):
-       lec_video = LectureVideo.objects.get(lecture_video_id=lec_video_id)
+       lec_video = LectureVideo.objects.filter(lecture_video_id=lec_video_id)
        lec_video_serializer = LectureVideoSerializer(lec_video, many=True)
        lec_video_data = lec_video_serializer.data[0]

        last_lec_emotion = LectureEmotionReport.objects.order_by('lecture_emotion_id').last()
        new_lecture_emotion_id = ig.generate_new_id(last_lec_emotion.lecture_emotion_id)

        lecture_video_id = lec_video_data['id']

        # creating a new lecture emotion report
        LectureEmotionReport(
            lecture_emotion_id=new_lecture_emotion_id,
-           lecture_video_id=lec_video,
+           lecture_video_id_id=lecture_video_id,
            happy_perct=percentages.happy_perct,
            sad_perct=percentages.sad_perct,
            angry_perct=percentages.angry_perct,

@@ -511,8 +514,6 @@ class GetLectureEmotionReportViewSet(APIView):
    def get(self, request):
        lecture_video_id = request.query_params.get('lecture_video_id')
        lecture_video_name = request.query_params.get('lecture_video_name')

        # retrieve the extracted frames
        extracted = ar.getExtractedFrames(lecture_video_name)

        lecture_emotions = LectureEmotionReport.objects.filter(lecture_video_id__lecture_video_id=lecture_video_id)
        serializer = LectureEmotionSerializer(lecture_emotions, many=True)

@@ -521,7 +522,6 @@ class GetLectureEmotionReportViewSet(APIView):
        return Response({
            "response": serializer.data,
            "extracted": extracted
        })

@@ -685,17 +685,23 @@ class ProcessLectureGazeEstimation(APIView):
        pass

    def estimate_gaze(self, lec_video_id, percentages):
-       lec_video = LectureVideo.objects.get(lecture_video_id=lec_video_id)
+       lec_video = LectureVideo.objects.filter(lecture_video_id=lec_video_id)
        last_lec_gaze = LectureGazeEstimation.objects.order_by('lecture_gaze_id').last()
        lec_video_serializer = LectureVideoSerializer(lec_video, many=True)
        lec_video_data = lec_video_serializer.data[0]

        new_lecture_gaze_id = "LG000001" if (last_lec_gaze is None) else ig.generate_new_id(last_lec_gaze.lecture_gaze_id)
        new_lecture_gaze_primary_id = 1 if (last_lec_gaze is None) else int(last_lec_gaze.id) + 1

        # get the video id
        lecture_video_id = lec_video_data['id']

        # creating a new lecture gaze estimation
        LectureGazeEstimation(
            id=new_lecture_gaze_primary_id,
            lecture_gaze_id=new_lecture_gaze_id,
-           lecture_video_id=lec_video,
+           lecture_video_id_id=lecture_video_id,
            looking_up_and_right_perct=percentages['head_up_right_perct'],
            looking_up_and_left_perct=percentages['head_up_left_perct'],
            looking_down_and_right_perct=percentages['head_down_right_perct'],

@@ -722,8 +728,6 @@ class GetLectureGazeEstimationViewSet(APIView):
    def get(self, request):
        lecture_video_id = request.query_params.get('lecture_video_id')
        lecture_video_name = request.query_params.get('lecture_video_name')

        # retrieve the extracted frames
        extracted = hge.getExtractedFrames(lecture_video_name)

        lecture_gaze_estimations = LectureGazeEstimation.objects.filter(lecture_video_id__lecture_video_id=lecture_video_id)

@@ -731,7 +735,6 @@ class GetLectureGazeEstimationViewSet(APIView):
        return Response({
            "response": serializer.data,
            "extracted": extracted
        })

@@ -1269,4 +1272,179 @@ class GetLectureGazeSummary(APIView):
            "frame_landmarks": frame_landmarks,
            "frame_group_percentages": frame_group_percentages,
            "gaze_labels": gaze_labels
-       })
\ No newline at end of file
+       })


# =====OTHERS=====

class GetLecturerRecordedVideo(APIView):

    def get(self, request):
        lecturer = request.query_params.get('lecturer')
        subject = request.query_params.get('subject')
        date = request.query_params.get('date')

        # retrieve data
        lec_recorded_video = LectureRecordedVideo.objects.filter(lecturer_id=lecturer, subject__subject_code=subject, lecturer_date=date)
        lec_recorded_video_ser = LectureRecordedVideoSerializer(lec_recorded_video, many=True)
        lec_recorded_video_data = lec_recorded_video_ser.data[0]

        video_name = lec_recorded_video_data['lecture_video_name']

        print('lecturer recorded video name: ', video_name)

        return Response({
            "video_name": video_name
        })


# this API will get lecture activity correlations
class GetLectureActivityCorrelations(APIView):

    def get(self, request):
        option = request.query_params.get('option')
        lecturer = request.query_params.get('lecturer')
        int_option = int(option)

        current_date = datetime.datetime.now().date()
        option_date = datetime.timedelta(days=int_option)

        previous_date = current_date - option_date

        individual_lec_activities = []
        activity_correlations = []

        # retrieving lecture activities
        lec_activity = LectureActivity.objects.filter(
            lecture_video_id__date__gte=previous_date,
            lecture_video_id__date__lte=current_date,
            lecture_video_id__lecturer=lecturer
        )

        if len(lec_activity) > 0:
            isRecordFound = True
            activity_serializer = LectureActivitySerializer(lec_activity, many=True)
            activity_data = activity_serializer.data
            _, individual_lec_activities, _ = ar.get_student_activity_summary_for_period(activity_data)

        # retrieving lecturer recorded activities
        lec_recorded_activity = LecturerVideoMetaData.objects.filter(
            lecturer_video_id__lecturer_date__gte=previous_date,
            lecturer_video_id__lecturer_date__lte=current_date,
            lecturer_video_id__lecturer=lecturer
        )

        if len(lec_recorded_activity) > 0:
            lec_recorded_activity_ser = LecturerVideoMetaDataSerializer(lec_recorded_activity, many=True)
            lec_recorded_activity_data = lec_recorded_activity_ser.data

            activity_correlations = ar.get_activity_correlations(individual_lec_activities, lec_recorded_activity_data)

        print('activity correlations: ', activity_correlations)

        return Response({
            "correlations": activity_correlations
        })


# this API will get lecture emotion correlations
class GetLectureEmotionCorrelations(APIView):

    def get(self, request):
        option = request.query_params.get('option')
        lecturer = request.query_params.get('lecturer')
        int_option = int(option)

        current_date = datetime.datetime.now().date()
        option_date = datetime.timedelta(days=int_option)

        previous_date = current_date - option_date

        individual_lec_emotions = []
        emotion_correlations = []

        # retrieving lecture activities
        lec_emotion = LectureEmotionReport.objects.filter(
            lecture_video_id__date__gte=previous_date,
            lecture_video_id__date__lte=current_date,
            lecture_video_id__lecturer=lecturer
        )

        # if there are lecture emotions
        if len(lec_emotion) > 0:
            emotion_serializer = LectureEmotionSerializer(lec_emotion, many=True)
            emotion_data = emotion_serializer.data
            _, individual_lec_emotions, _ = ed.get_student_emotion_summary_for_period(emotion_data)

        # retrieving lecturer recorded activities
        lec_recorded_activity = LecturerVideoMetaData.objects.filter(
            lecturer_video_id__lecturer_date__gte=previous_date,
            lecturer_video_id__lecturer_date__lte=current_date,
            lecturer_video_id__lecturer=lecturer
        )

        # if there are any recorded lectures
        if len(lec_recorded_activity) > 0:
            lec_recorded_activity_ser = LecturerVideoMetaDataSerializer(lec_recorded_activity, many=True)
            lec_recorded_activity_data = lec_recorded_activity_ser.data

            emotion_correlations = ed.get_emotion_correlations(individual_lec_emotions, lec_recorded_activity_data)

        return Response({
            "correlations": emotion_correlations
        })


# this API will get lecture gaze correlations
class GetLectureGazeCorrelations(APIView):

    def get(self, request):
        option = request.query_params.get('option')
        lecturer = request.query_params.get('lecturer')
        int_option = int(option)

        current_date = datetime.datetime.now().date()
        option_date = datetime.timedelta(days=int_option)

        previous_date = current_date - option_date

        individual_lec_gaze = []
        gaze_correlations = []

        # retrieving lecture activities
        lec_gaze = LectureGazeEstimation.objects.filter(
            lecture_video_id__date__gte=previous_date,
            lecture_video_id__date__lte=current_date,
            lecture_video_id__lecturer=lecturer
        )

        # if there are gaze estimations
        if len(lec_gaze) > 0:
            gaze_serializer = LectureGazeEstimationSerializer(lec_gaze, many=True)
            gaze_data = gaze_serializer.data
            _, individual_lec_gaze, _ = hge.get_student_gaze_estimation_summary_for_period(gaze_data)

        # retrieving lecturer recorded activities
        lec_recorded_activity = LecturerVideoMetaData.objects.filter(
            lecturer_video_id__lecturer_date__gte=previous_date,
            lecturer_video_id__lecturer_date__lte=current_date,
            lecturer_video_id__lecturer=lecturer
        )

        # if there are any recorded lectures
        if len(lec_recorded_activity) > 0:
            lec_recorded_activity_ser = LecturerVideoMetaDataSerializer(lec_recorded_activity, many=True)
            lec_recorded_activity_data = lec_recorded_activity_ser.data

            # find the correlations between lecture gaze estimations and recorded lecture
            gaze_correlations = hge.get_gaze_correlations(individual_lec_gaze, lec_recorded_activity_data)

        return Response({
            "correlations": gaze_correlations
        })
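Each of the three correlation endpoints above derives its reporting window from the 'option' query parameter, interpreted as a number of days. A minimal standalone sketch of that date arithmetic, with a made-up option value of 30:

    import datetime

    option = '30'  # hypothetical query-parameter value meaning "the last 30 days"
    current_date = datetime.datetime.now().date()
    previous_date = current_date - datetime.timedelta(days=int(option))

    # lectures are then filtered with date__gte=previous_date and date__lte=current_date
    print(previous_date, '->', current_date)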
FirstApp/emotion_detector.py
View file @ d8f6824a

@@ -10,12 +10,13 @@ from .MongoModels import *
from . models import VideoMeta
from . logic import custom_sorter as cs
from .logic import id_generator as ig

# emotion recognition method
from .logic import activity_recognition as ar
from .logic import utilities as ut
from .serializers import LectureEmotionSerializer

import pandas as pd


# emotion recognition method
def emotion_recognition(classifier, face_classifier, image):
    label = ""
    class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']

@@ -47,7 +48,6 @@ def detect_emotion(video):
    face_classifier = cv2.CascadeClassifier(os.path.join(BASE_DIR, 'FirstApp\classifiers\haarcascade_frontalface_default.xml'))
    classifier_path = os.path.join(BASE_DIR, 'FirstApp\classifiers\Emotion_little_vgg.h5')
    classifier = load_model(classifier_path)
    path = ''
    meta_data = VideoMeta()
    class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']

@@ -65,6 +65,9 @@ def detect_emotion(video):
    count_neutral = 0
    count_surprise = 0

    # for testing purposes
    print('starting the emotion recognition process')

    while (count_frames < frame_count):
        # Grab a single frame of video
        ret, frame = cap.read()

@@ -72,52 +75,34 @@ def detect_emotion(video):
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_classifier.detectMultiScale(gray, 1.3, 5)
        label = emotion_recognition(classifier, face_classifier, frame)

        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
            roi_gray = gray[y:y + h, x:x + w]
            roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
            # rect,face,image = face_detector(frame)

            # counting the number of frames for each label, to calculate the percentage for each emotion later on...
            if np.sum([roi_gray]) != 0:
                roi = roi_gray.astype('float') / 255.0
                roi = img_to_array(roi)
                roi = np.expand_dims(roi, axis=0)

                if (label == 'Anger'):
                    count_angry += 1
                    # path = os.path.join(BASE_DIR, 'static\\images\\Anger')
                    # cv2.imwrite(os.path.join(path, 'Anger-{0}.jpg'.format(count)), frame)
                # make a prediction on the ROI, then lookup the class
                elif (label == 'Happy'):
                    count_happy += 1
                    # path = os.path.join(BASE_DIR, 'static\\images\\Happy')
                    # cv2.imwrite(os.path.join(path, 'Happy-{0}.jpg'.format(count)), frame)
                preds = classifier.predict(roi)[0]
                label = class_labels[preds.argmax()]
                elif (label == 'Neutral'):
                    count_neutral += 1
                    # path = os.path.join(BASE_DIR, 'static\\images\\Neutral')
                    # cv2.imwrite(os.path.join(path, 'Neutral-{0}.jpg'.format(count)), frame)
                # counting the number of frames for each label, to calculate the percentage for each emotion later on...
                elif (label == 'Sad'):
                    count_sad += 1
                if (label == 'Anger'):
                    count_angry += 1
                    # path = os.path.join(BASE_DIR, 'static\\images\\Anger')
                    # cv2.imwrite(os.path.join(path, 'Anger-{0}.jpg'.format(count)), frame)
                elif (label == 'Surprise'):
                    count_surprise += 1
                elif (label == 'Happy'):
                    count_happy += 1
                    # path = os.path.join(BASE_DIR, 'static\\images\\Happy')
                    # cv2.imwrite(os.path.join(path, 'Happy-{0}.jpg'.format(count)), frame)
                elif (label == 'Neutral'):
                    count_neutral += 1
                    # path = os.path.join(BASE_DIR, 'static\\images\\Neutral')
                    # cv2.imwrite(os.path.join(path, 'Neutral-{0}.jpg'.format(count)), frame)
                elif (label == 'Sad'):
                    count_sad += 1
                elif (label == 'Surprise'):
                    count_surprise += 1

                label_position = (x, y)
                # cv2.putText(frame, label, label_position, cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 3)
                # cv2.imwrite("".format(label, count), frame)
            else:
                cv2.putText(frame, 'No Face Found', (20, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 3)

        # for testing purposes
        print('emotion frame count: ', count_frames)

        count_frames += 1

@@ -132,6 +117,9 @@ def detect_emotion(video):
    cap.release()
    cv2.destroyAllWindows()

    # for testing purposes
    print('ending the emotion recognition process')

    return meta_data

@@ -263,11 +251,24 @@ def get_individual_student_evaluation(video_name, student_name):
# this method will
def get_frame_emotion_recognition(video_name):

    BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
    face_classifier = cv2.CascadeClassifier(os.path.join(BASE_DIR, 'FirstApp\classifiers\haarcascade_frontalface_default.xml'))
    classifier_path = os.path.join(BASE_DIR, 'FirstApp\classifiers\Emotion_little_vgg.h5')
    classifier = load_model(classifier_path)
    EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))

    # files required for person detection
    config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
    model_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.caffemodel")

    # load our serialized persosn detection model from disk
    print("[INFO] loading model...")
    net = cv2.dnn.readNetFromCaffe(config_file, model_file)

    cap = cv2.VideoCapture(VIDEO_DIR)
    no_of_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    # initializing the count variables
    frame_count = 0

@@ -276,16 +277,21 @@ def get_frame_emotion_recognition(video_name):
    # frame activity recognitions
    frame_emotion_recognitions = []

    # # class labels
    class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']

-   for frame in os.listdir(EXTRACTED_DIR):
-       # derive the frame folder path
-       FRAME_FOLDER = os.path.join(EXTRACTED_DIR, frame)

    # for testing purposes
    print('starting the emotion frame recognition process')

+   while (frame_count < no_of_frames):
        ret, image = cap.read()

        frame_name = "frame-{}".format(frame_count)

        frame_details = {}
-       frame_details['frame_name'] = frame
+       frame_details['frame_name'] = frame_name

        # initialize the count variables for a frame
        happy_count = 0

@@ -294,18 +300,19 @@ def get_frame_emotion_recognition(video_name):
        neutral_count = 0
        surprise_count = 0

        # get the detections
        detections = ar.person_detection(image, net)

        # to count the extracted detections for a frame
        detection_count = 0

        for detections in os.listdir(FRAME_FOLDER):
        # if there are detections
        if (len(detections) > 0):
            # loop through the detections
            for detection in detections:
                # only take the images with the student name
                if "frame" not in detections:
                    # get the label for this image
                    IMAGE_PATH = os.path.join(FRAME_FOLDER, detections)
                    image = cv2.imread(IMAGE_PATH)

-               label = emotion_recognition(classifier, face_classifier, image)
+               label = emotion_recognition(classifier, face_classifier, detection)

                # checking for the label
                if label == class_labels[0]:

@@ -324,26 +331,40 @@ def get_frame_emotion_recognition(video_name):
            # calculating the percentages for the frame
            happy_perct = float(happy_count / detection_count) * 100 if detection_count > 0 else 0
            sad_perct = float(sad_count / detection_count) * 100 if detection_count > 0 else 0
            angry_perct = float(angry_count / detection_count) * 100 if detection_count > 0 else 0
            neutral_perct = float(neutral_count / detection_count) * 100 if detection_count > 0 else 0
            surprise_perct = float(surprise_count / detection_count) * 100 if detection_count > 0 else 0

            # this dictionary will be returned
            frame_details['happy_perct'] = happy_perct
            frame_details['sad_perct'] = sad_perct
            frame_details['angry_perct'] = angry_perct
            frame_details['neutral_perct'] = neutral_perct
            frame_details['surprise_perct'] = surprise_perct

            # push to all the frame details
            frame_emotion_recognitions.append(frame_details)

        else:
            break

        # for testing purposes
        print('emotion frame recognition count: ', frame_count)

        # increment the frame count
        frame_count += 1

    # sort the recognitions based on the frame number
    sorted_activity_frame_recognitions = cs.custom_object_sorter(frame_emotion_recognitions)

    # for testing purposes
    print('ending the emotion frame recognition process')

    # return the detected frame percentages
    return sorted_activity_frame_recognitions

@@ -409,15 +430,28 @@ def get_student_emotion_summary_for_period(emotions):
# this method will retrieve activity frame groupings for a lecture
def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):

    BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))

    # load the models
    BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))

    face_classifier = cv2.CascadeClassifier(os.path.join(BASE_DIR, 'FirstApp\classifiers\haarcascade_frontalface_default.xml'))
    classifier_path = os.path.join(BASE_DIR, 'FirstApp\classifiers\Emotion_little_vgg.h5')
    classifier = load_model(classifier_path)

    # files required for person detection
    config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
    model_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.caffemodel")

    # load our serialized persosn detection model from disk
    print("[INFO] loading model...")
    net = cv2.dnn.readNetFromCaffe(config_file, model_file)

    cap = cv2.VideoCapture(VIDEO_DIR)
    no_of_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    # initializing the count variables

@@ -441,9 +475,11 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
    # looping through the frames
    for frame in os.listdir(EXTRACTED_DIR):
        # getting the frame folder
        FRAME_FOLDER = os.path.join(EXTRACTED_DIR, frame)

    while (frame_count < no_of_frames):
        # get the current frame
        ret, image = cap.read()

        # initializing the variables
        happy_count = 0

@@ -453,17 +489,17 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
        neutral_count = 0
        detection_count = 0

        # looping through the detections in each frame
        for detections in os.listdir(FRAME_FOLDER):

        detections = ar.person_detection(image, net)

        # if there are detections
        if (len(detections) > 0):
            # looping through the detections in each frame
            for detection in detections:
                # checking whether the image contains only one person
                if "frame" not in detections:
                    # get the label for this image
                    IMAGE_PATH = os.path.join(FRAME_FOLDER, detections)
                    image = cv2.imread(IMAGE_PATH)

                # run the model and get the emotion label
-               label = emotion_recognition(classifier, face_classifier, image)
+               label = emotion_recognition(classifier, face_classifier, detection)

                # increment the count based on the label
                if label == class_labels[0]:

@@ -503,7 +539,11 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
            frame_group_dict[frame_name]['neutral_count'] += neutral_count
            frame_group_dict[frame_name]['detection_count'] += detection_count

        else:
            break

        # for testing purposes
        print('emotion frame groupings count: ', frame_count)

        # increment the frame count
        frame_count += 1

@@ -558,6 +598,10 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# this section will handle some database operations
def save_frame_recognitions(video_name):

    # for testing purposes
    print('starting the saving emotion frame recognition process')

    # retrieve the lecture emotion id
    lec_emotion = LectureEmotionReport.objects.filter(lecture_video_id__video_name=video_name)
    lec_emotion_ser = LectureEmotionSerializer(lec_emotion, many=True)

@@ -595,6 +639,9 @@ def save_frame_recognitions(video_name):
    lec_emotion_frame_recognitions.save()

    # for testing purposes
    print('ending the saving emotion frame recognition process')

    # now return the frame recognitions
    return frame_detections

@@ -602,6 +649,9 @@ def save_frame_recognitions(video_name):
# this method will save the emotion frame groupings to the database
def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):

    # for testing purposes
    print('starting the saving emotion frame grouoings process')

    frame_group_percentages, emotion_labels = emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict)

    # save the frame group details into db

@@ -631,5 +681,84 @@ def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
    new_lec_emotion_frame_groupings.lecture_emotion_id_id = lec_emotion_id
    new_lec_emotion_frame_groupings.frame_group_details = frame_group_details

    # for testing purposes
    print('ending the saving emotion frame groupings process')

    # save
    new_lec_emotion_frame_groupings.save()


# this method will get emotion correlations
def get_emotion_correlations(individual_lec_emotions, lec_recorded_activity_data):
    # this variable will be used to store the correlations
    correlations = []

    limit = 10
    data_index = ['lecture-{}'.format(i + 1) for i in range(len(individual_lec_emotions))]

    # student activity labels
    student_emotion_labels = ['Happy', 'Sad', 'Angry', 'Surprise', 'Neutral']
    lecturer_activity_labels = ['seated', 'standing', 'walking']

    # lecturer recorded data list (lecturer)
    sitting_perct_list = []
    standing_perct_list = []
    walking_perct_list = []

    # lecture activity data list (student)
    happy_perct_list = []
    sad_perct_list = []
    angry_perct_list = []
    surprise_perct_list = []
    neutral_perct_list = []

    # loop through the lecturer recorded data (lecturer)
    for data in lec_recorded_activity_data:
        sitting_perct_list.append(int(data['seated_count']))
        standing_perct_list.append(int(data['standing_count']))
        walking_perct_list.append(int(data['walking_count']))

    # loop through the lecturer recorded data (student)
    for data in individual_lec_emotions:
        happy_perct_list.append(int(data['happy_perct']))
        sad_perct_list.append(int(data['sad_perct']))
        angry_perct_list.append(int(data['angry_perct']))
        surprise_perct_list.append(int(data['surprise_perct']))
        neutral_perct_list.append(int(data['neutral_perct']))

    corr_data = {'Happy': happy_perct_list, 'Sad': sad_perct_list, 'Angry': angry_perct_list,
                 'Surprise': surprise_perct_list, 'Neutral': neutral_perct_list,
                 'seated': sitting_perct_list, 'standing': standing_perct_list, 'walking': walking_perct_list}

    # create the dataframe
    df = pd.DataFrame(corr_data, index=data_index)

    # calculate the correlation
    pd_series = ut.get_top_abs_correlations(df, limit)
    print('====correlated variables=====')
    print(pd_series)

    for i in range(limit):
        # this dictionary will get the pandas.Series object's indices and values separately
        corr_dict = {}

        index = pd_series.index[i]

        # check whether the first index is a student activity
        isStudentEmotion = index[0] in student_emotion_labels
        # check whether the second index is a lecturer activity
        isLecturerAct = index[1] in lecturer_activity_labels

        # if both are student and lecturer activities, add to the dictionary
        if isStudentEmotion & isLecturerAct:
            corr_dict['index'] = index
            corr_dict['value'] = pd_series.values[i]

            # append the dictionary to the 'correlations' list
            correlations.append(corr_dict)

    # return the list
    return correlations
FirstApp/forms.py
View file @ d8f6824a

@@ -57,4 +57,51 @@ class LecturerCredentialsForm(forms.ModelForm):
        fields = '__all__'
        widgets = {
            'password': forms.PasswordInput()
-       }
\ No newline at end of file
+       }


# admin login form
class AdminLoginForm(forms.Form):
    # username = forms.CharField(max_length=100)
    email = forms.EmailField()
    password = forms.CharField(widget=forms.PasswordInput())

    def clean(self):
        # cleaned_username = self.cleaned_data.get('username')
        cleaned_email = self.cleaned_data.get('email')
        cleaned_password = self.cleaned_data.get('password')

        admin = Admin.objects.get(email=cleaned_email)

        # if an admin is already in the system
        if (admin):
            # retrieve the User object
            user = User.objects.get(email=cleaned_email)
            is_user = user.check_password(cleaned_password)

            # if the password is correct
            if (is_user):
                # lec_credentials = LecturerCredentials.objects.filter(username_id=lecturer.id)
                admin_credentials = AdminCredentialDetails.objects.get(username_id=admin.id)
                print('credentials: ', admin_credentials)

                # if lecture credentials are already created
                if (admin_credentials):
                    admin_credentials.password = user.password
                    admin_credentials.save(force_update=True)
                else:
                    LecturerCredentials(
                        username_id=admin.id,
                        password=user.password
                    ).save()
            else:
                raise forms.ValidationError("Username or password is incorrect")
        else:
            print('the admin does not exist')
            raise forms.ValidationError("The admin does not exist")

        return super(AdminLoginForm, self).clean()
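A hedged sketch, not from this commit, of how a form like AdminLoginForm is typically driven from a Django view; the view name and redirect target are placeholders, while the template path matches the admin_login.html file added elsewhere in this merge:

    from django.shortcuts import render, redirect

    def admin_login_view(request):
        form = AdminLoginForm(request.POST or None)

        # form.is_valid() runs AdminLoginForm.clean(), which verifies the admin's
        # email and password and raises ValidationError on failure
        if request.method == 'POST' and form.is_valid():
            return redirect('/')  # placeholder success target

        return render(request, 'FirstApp/admin_login.html', {'form': form})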
FirstApp/logic/activity_recognition.py
View file @ d8f6824a

@@ -9,6 +9,9 @@ from .custom_sorter import *
from ..MongoModels import *
from ..serializers import *

from . import id_generator as ig
from . import utilities as ut
import pandas as pd


def activity_recognition(video_path):

@@ -50,38 +53,21 @@ def activity_recognition(video_path):
    frame_count = 0
    total_detections = 0
    phone_checking_count = 0
    talking_count = 0
    note_taking_count = 0
    listening_count = 0

    # video activity directory
    VIDEO_ACTIVITY_DIR = os.path.join(ACTIVITY_DIR, video_path)

    # creating the directory for the video
    # if (os.path.isdir(VIDEO_ACTIVITY_DIR)):
    #     shutil.rmtree(VIDEO_ACTIVITY_DIR)
    #
    # # create the video directory
    # os.mkdir(VIDEO_ACTIVITY_DIR)

    # for testing purposes
    print('starting the activity recognition process')

    while (frame_count < no_of_frames):
        ret, image = video.read()

        FRAME_DIR = os.path.join(VIDEO_ACTIVITY_DIR, "frame-{}".format(frame_count))
        # frame_name = "frame-{}.png".format(frame_count)
        #
        # FRAME_IMG = os.path.join(FRAME_DIR, frame_name)
        #
        # if (os.path.isdir(FRAME_DIR)):
        #     shutil.rmtree(FRAME_DIR)

        # create the new frame directory
        # os.mkdir(FRAME_DIR)

        image = cv2.resize(image, size)

        detections = person_detection(image, net)

        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # cv2.imwrite(FRAME_IMG, image)

        # this is for testing purposes
        print('frame count: ', frame_count)

        # if there are any person detections
        if (len(detections) > 0):

@@ -90,6 +76,7 @@ def activity_recognition(video_path):
            detection_count = 0

            # looping through the person detections of the frame
            for detection in detections:
                detection = cv2.resize(detection, size)

@@ -113,43 +100,33 @@ def activity_recognition(video_path):
                elif (label == class_labels[2]):
                    note_taking_count += 1

                # saving the detection for the particular frame
                # detection_name = "detection-{}.png".format(detection_count)
                # detection_image_path = os.path.join(FRAME_DIR, detection_name)
                #
                # # converting detected image into grey-scale
                # detection = cv2.cvtColor(detection, cv2.COLOR_BGR2GRAY)
                #
                # cv2.imwrite(detection_image_path, detection)

                detection_count += 1

        frame_count += 1

    # after extracting the frames, save the changes to static content
    # p = os.popen("python manage.py collectstatic", "w")
    # p.write("yes")

    # calculating the percentages for each label
    phone_perct = float(phone_checking_count / total_detections) * 100 if total_detections > 0 else 0
-   talking_perct = float(talking_count / total_detections) * 100 if total_detections > 0 else 0
+   # talking_perct = float(talking_count / total_detections) * 100 if total_detections > 0 else 0
    note_perct = float(note_taking_count / total_detections) * 100 if total_detections > 0 else 0
    listening_perct = float(listening_count / total_detections) * 100 if total_detections > 0 else 0

    # assigning the percentages to the dictionary
    percentages["phone_perct"] = phone_perct
-   percentages["talking_perct"] = talking_perct
+   # percentages["talking_perct"] = talking_perct
    percentages["writing_perct"] = note_perct
    percentages["listening_perct"] = listening_perct

    # for testing purposes
    print('activity recognition process is over')

    return percentages


def person_detection(image, net):
    BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
    model_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.caffemodel")

    threshold = 0.2
    detected_person = []

@@ -391,14 +368,27 @@ def get_student_activity_evaluation(video_name):
# recognize the activity for each frame
def get_frame_activity_recognition(video_name):
    BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
    # CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_03.h5")
    VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))

    # CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
    # CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_03.h5")
    CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_04.h5")
    ACTIVITY_DIR = os.path.join(BASE_DIR, "static\\FirstApp\\activity")

    # files required for person detection
    config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
    model_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.caffemodel")

    # load our serialized persosn detection model from disk
    print("[INFO] loading model...")
    net = cv2.dnn.readNetFromCaffe(config_file, model_file)

    np.set_printoptions(suppress=True)

    # load the model
    # class_labels = ['Phone checking', 'Talking with friends', 'note taking']
    # class labels
    class_labels = ['Phone checking', 'Listening', 'Note taking']

    model = tensorflow.keras.models.load_model(CLASSIFIER_DIR)
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),

@@ -407,45 +397,54 @@ def get_frame_activity_recognition(video_name):
    data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
    size = (224, 224)

    # class labels
    class_labels = ['Phone checking', 'Listening', 'Note taking']

    # iteration
    video = cv2.VideoCapture(VIDEO_DIR)
    no_of_frames = video.get(cv2.CAP_PROP_FRAME_COUNT)
    frame_count = 0
    # total_detections = 10

    # frame activity recognitions
    frame_activity_recognitions = []

    # for testing purposes
    print('starting the frame activity recognition process')

-   # looping through the frames
-   for frame in os.listdir(EXTRACTED_DIR):
+   while (frame_count < no_of_frames):
        # define the count variables for each frame
        phone_checking_count = 0
        listening_count = 0
        note_taking_count = 0

        ret, image = video.read()

        # derive the frame folder path
        FRAME_FOLDER = os.path.join(EXTRACTED_DIR, frame)
        # FRAME_FOLDER = os.path.join(EXTRACTED_DIR, frame)

        frame_name = "frame-{}".format(frame_count)

        frame_details = {}
-       frame_details['frame_name'] = frame
+       frame_details['frame_name'] = frame_name

        # to count the extracted detections for a frame
        detection_count = 0
        detected_percentages = []

        # loop through each detection in the frame
        for detection in os.listdir(FRAME_FOLDER):

        detections = person_detection(image, net)

            DETECTION_PATH = os.path.join(FRAME_FOLDER, detection)
            # check whether the image is not the frame itself
            if "frame" not in detection:
                image = cv2.imread(DETECTION_PATH)

        # if there are detections
        if (len(detections) > 0):

                image = cv2.resize(image, size)

            # loop through each detection in the frame
            for detection in detections:

                image_array = np.asarray(image)
                detection = cv2.resize(detection, size)
                image_array = np.asarray(detection)
                normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1

                # Load the image into the array

@@ -467,26 +466,40 @@ def get_frame_activity_recognition(video_name):
                # increment the detection count
                detection_count += 1

            # calculating the percentages for the frame
            phone_checking_perct = float(phone_checking_count / detection_count) * 100 if detection_count > 0 else 0
            listening_perct = float(listening_count / detection_count) * 100 if detection_count > 0 else 0
            note_taking_perct = float(note_taking_count / detection_count) * 100 if detection_count > 0 else 0

            # adding the percentage values to the frame details
            frame_details['phone_perct'] = phone_checking_perct
            frame_details['listening_perct'] = listening_perct
            frame_details['note_perct'] = note_taking_perct

            # push to all the frame details
            frame_activity_recognitions.append(frame_details)

        else:
            break

        print('current frame: ', frame_count)

        # increment frame count
        frame_count += 1

    # sort the recognitions based on the frame number
    sorted_activity_frame_recognitions = custom_object_sorter(frame_activity_recognitions)

    # for testing purposes
    print('ending the frame activity recognition process')

    # return the detected frame percentages
    return sorted_activity_frame_recognitions


# this method will retrieve individual student evaluation
def get_individual_student_evaluation(video_name, student_name):
    BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

@@ -753,6 +766,10 @@ def activity_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# this section will handle saving activity entities to the database
def save_frame_recognition(video_name):

    # for testing purposes
    print('starting the saving activity frame recognition process')

    # retrieve the lecture activity id
    lec_activity = LectureActivity.objects.filter(lecture_video_id__video_name=video_name)
    lec_activity_ser = LectureActivitySerializer(lec_activity, many=True)

@@ -787,6 +804,9 @@ def save_frame_recognition(video_name):
    lec_activity_frame_recognitions.save()

    # for testing purposes
    print('ending the saving activity frame recognition process')

    # now return the frame detections
    return frame_detections

@@ -794,6 +814,8 @@ def save_frame_recognition(video_name):
# this method will save the activity frame groupings to the database
def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):

    # for testing purposes
    print('starting the saving activity frame groupings process')

    frame_group_percentages, activity_labels = activity_frame_groupings(video_name, frame_landmarks, frame_group_dict)

@@ -825,5 +847,82 @@ def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
    new_lec_activity_frame_groupings.lecture_activity_id_id = lec_activity_id
    new_lec_activity_frame_groupings.frame_group_details = frame_group_details

    # for testing purposes
    print('ending the saving activity frame groupings process')

    # save
    new_lec_activity_frame_groupings.save()


# this method will get activity correlations
def get_activity_correlations(individual_lec_activities, lec_recorded_activity_data):

    # this variable will be used to store the correlations
    correlations = []

    limit = 10
    data_index = ['lecture-{}'.format(i + 1) for i in range(len(individual_lec_activities))]

    # student activity labels
    student_activity_labels = ['phone checking', 'listening', 'note taking']
    lecturer_activity_labels = ['seated', 'standing', 'walking']

    # lecturer recorded data list (lecturer)
    sitting_perct_list = []
    standing_perct_list = []
    walking_perct_list = []

    # lecture activity data list (student)
    phone_perct_list = []
    listen_perct_list = []
    note_perct_list = []

    # loop through the lecturer recorded data (lecturer)
    for data in lec_recorded_activity_data:
        sitting_perct_list.append(int(data['seated_count']))
        standing_perct_list.append(int(data['standing_count']))
        walking_perct_list.append(int(data['walking_count']))

    # loop through the lecturer recorded data (student)
    for data in individual_lec_activities:
        phone_perct_list.append(int(data['phone_perct']))
        listen_perct_list.append(int(data['listening_perct']))
        note_perct_list.append(int(data['writing_perct']))

    corr_data = {'phone checking': phone_perct_list, 'listening': listen_perct_list, 'note taking': note_perct_list,
                 'seated': sitting_perct_list, 'standing': standing_perct_list, 'walking': walking_perct_list}

    # create the dataframe
    df = pd.DataFrame(corr_data, index=data_index)

    # calculate the correlation
    pd_series = ut.get_top_abs_correlations(df, limit)
    print('====correlated variables=====')
    print(pd_series)

    for i in range(limit):
        # this dictionary will get the pandas.Series object's indices and values separately
        corr_dict = {}

        index = pd_series.index[i]

        # check whether the first index is a student activity
        isStudentAct = index[0] in student_activity_labels
        # check whether the second index is a lecturer activity
        isLecturerAct = index[1] in lecturer_activity_labels

        # if both are student and lecturer activities, add to the doctionary
        if isStudentAct & isLecturerAct:
            corr_dict['index'] = index
            corr_dict['value'] = pd_series.values[i]

            # append the dictionary to the 'correlations' list
            correlations.append(corr_dict)

    # return the list
    return correlations
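For orientation, the list returned by get_activity_correlations pairs each (student activity, lecturer activity) tuple taken from the pandas MultiIndex with its absolute correlation value. The entries below are invented purely to illustrate the shape of the result, not real output:

    example_correlations = [
        {'index': ('listening', 'seated'), 'value': 0.87},
        {'index': ('phone checking', 'walking'), 'value': 0.42},
    ]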
FirstApp/logic/head_gaze_estimation.py
View file @
d8f6824a
...
...
@@ -15,10 +15,12 @@ from . face_landmarks import get_landmark_model, detect_marks
import
os
import
shutil
import
math
import
pandas
as
pd
from
..MongoModels
import
*
from
..serializers
import
*
from
.
import
id_generator
as
ig
from
.
import
utilities
as
ut
def
get_2d_points
(
img
,
rotation_vector
,
translation_vector
,
camera_matrix
,
val
):
...
...
@@ -144,18 +146,10 @@ def process_gaze_estimation(video_path):
VIDEO_PATH
=
os
.
path
.
join
(
BASE_DIR
,
"assets
\\
FirstApp
\\
videos
\\
{}"
.
format
(
video_path
))
GAZE_DIR
=
os
.
path
.
join
(
BASE_DIR
,
"static
\\
FirstApp
\\
gaze"
)
# create a folder with the same name as the video
VIDEO_DIR
=
os
.
path
.
join
(
GAZE_DIR
,
video_path
)
# define a dictionary to return the percentage values
percentages
=
{}
# checking whether the video directory exist
if
os
.
path
.
isdir
(
VIDEO_DIR
):
shutil
.
rmtree
(
VIDEO_DIR
)
# create the new directory
os
.
mkdir
(
VIDEO_DIR
)
# load the face detection model
face_model
=
get_face_detector
()
...
...
@@ -202,6 +196,9 @@ def process_gaze_estimation(video_path):
[
0
,
0
,
1
]],
dtype
=
"double"
)
# for testing purposes
print
(
'starting the gaze estimation process'
)
# iterate the video frames
while
True
:
ret
,
img
=
cap
.
read
()
...
...
@@ -285,35 +282,39 @@ def process_gaze_estimation(video_path):
# checking for vertical and horizontal directions
if
isLookingDown
&
isLookingRight
:
cv2
.
putText
(
img
,
'looking down and right'
,
(
facebox
[
0
],
facebox
[
1
]),
font
,
2
,
(
255
,
255
,
128
),
3
)
#
cv2.putText(img, 'looking down and right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_down_right_count
+=
1
elif
isLookingDown
&
isLookingLeft
:
cv2
.
putText
(
img
,
'looking down and left'
,
(
facebox
[
0
],
facebox
[
1
]),
font
,
2
,
(
255
,
255
,
128
),
3
)
#
cv2.putText(img, 'looking down and left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_down_left_count
+=
1
elif
isLookingUp
&
isLookingRight
:
cv2
.
putText
(
img
,
'looking up and right'
,
(
facebox
[
0
],
facebox
[
1
]),
font
,
2
,
(
255
,
255
,
128
),
3
)
#
cv2.putText(img, 'looking up and right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_up_right_count
+=
1
elif
isLookingUp
&
isLookingLeft
:
cv2
.
putText
(
img
,
'looking up and left'
,
(
facebox
[
0
],
facebox
[
1
]),
font
,
2
,
(
255
,
255
,
128
),
3
)
#
cv2.putText(img, 'looking up and left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_up_left_count
+=
1
elif
isLookingFront
:
cv2
.
putText
(
img
,
'Head front'
,
(
facebox
[
0
],
facebox
[
1
]),
font
,
2
,
(
255
,
255
,
128
),
3
)
#
cv2.putText(img, 'Head front', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_front_count
+=
1
# indicate the student name
cv2
.
putText
(
img
,
student_name
,
(
facebox
[
2
],
facebox
[
3
]),
font
,
2
,
(
255
,
255
,
128
),
3
)
#
cv2.putText(img, student_name, (facebox[2], facebox[3]), font, 2, (255, 255, 128), 3)
# increment the face count
face_count
+=
1
# naming the new image
image_name
=
"frame-{}.png"
.
format
(
frame_count
)
# new image path
image_path
=
os
.
path
.
join
(
VIDEO_DIR
,
image_name
)
#
image_name = "frame-{}.png".format(frame_count)
#
#
#
new image path
#
image_path = os.path.join(VIDEO_DIR, image_name)
# save the new image
cv2
.
imwrite
(
image_path
,
img
)
# cv2.imwrite(image_path, img)
# for testing purposes
print
(
'gaze estimation count: '
,
frame_count
)
# increment the frame count
frame_count
+=
1
...
...
@@ -323,8 +324,8 @@ def process_gaze_estimation(video_path):
# after extracting the frames, save the changes to static content
p
=
os
.
popen
(
"python manage.py collectstatic"
,
"w"
)
p
.
write
(
"yes"
)
#
p = os.popen("python manage.py collectstatic", "w")
#
p.write("yes")
# calculate percentages
head_up_right_perct
=
(
Decimal
(
head_up_right_count
)
/
Decimal
(
face_count
))
*
100
...
...
@@ -346,6 +347,9 @@ def process_gaze_estimation(video_path):
cv2
.
destroyAllWindows
()
cap
.
release
()
# for testing purposes
print
(
'ending the gaze estimation process'
)
# return the dictionary
return
percentages
...
...
@@ -370,7 +374,7 @@ def getExtractedFrames(lecture_video_name):
# this method will retrieve lecture gaze estimation for each frame
def
get_lecture_gaze_es
r
imation_for_frames
(
video_name
):
def
get_lecture_gaze_es
t
imation_for_frames
(
video_name
):
# get the base directory
BASE_DIR
=
os
.
path
.
dirname
(
os
.
path
.
dirname
(
os
.
path
.
dirname
(
os
.
path
.
abspath
(
__file__
))))
...
...
@@ -422,6 +426,10 @@ def get_lecture_gaze_esrimation_for_frames(video_name):
[
0
,
0
,
1
]],
dtype
=
"double"
)
# for testing purposes
print
(
'starting the gaze estimation for frames process'
)
# iterate the video frames
while
True
:
ret
,
img
=
cap
.
read
()
...
...
@@ -551,6 +559,9 @@ def get_lecture_gaze_esrimation_for_frames(video_name):
# append the calculated percentages to the frame_detections
frame_detections
.
append
(
percentages
)
# for testing purposes
print
(
'gaze estimation frame recognition count: '
,
frame_count
)
frame_count
+=
1
else
:
...
...
@@ -558,16 +569,17 @@ def get_lecture_gaze_esrimation_for_frames(video_name):
# for testing purposes
print
(
'ending the gaze estimation for frames process'
)
# return the details
return
frame_detections
,
frame_rate
# this method will get the student gaze estimation summary for period
def
get_student_gaze_estimation_summary_for_period
(
gaze_estimation_data
):
# declare variables to add percentage values
phone_checking_perct_combined
=
0.0
listening_perct_combined
=
0.0
note_taking_perct_combined
=
0.0
# declare variables to add percentage values
looking_up_right_perct_combined
=
0.0
looking_up_left_perct_combined
=
0.0
looking_down_right_perct_combined
=
0.0
...
...
@@ -601,16 +613,16 @@ def get_student_gaze_estimation_summary_for_period(gaze_estimation_data):
# calculate the average percentages
looking_up_right_average_perct
=
round
((
looking_up_right_perct_combined
/
no_of_gaze_estimations
),
1
)
looking_up_left_perct
=
round
((
looking_up_left_perct_combined
/
no_of_gaze_estimations
),
1
)
looking_up_left_
average_
perct
=
round
((
looking_up_left_perct_combined
/
no_of_gaze_estimations
),
1
)
looking_down_right_average_perct
=
round
((
looking_down_right_perct_combined
/
no_of_gaze_estimations
),
1
)
looking_down_left_average_perct
=
round
((
looking_down_left_perct_combined
/
no_of_gaze_estimations
),
1
)
looking_front_average_perct
=
round
((
looking_front_perct_combined
/
no_of_gaze_estimations
),
1
)
percentages
=
{}
percentages
[
"looking_up_and_right_perct"
]
=
looking_up_right_average_perct
percentages
[
"looking_up_and_left_perct"
]
=
looking_up_left_
perct_combined
percentages
[
"looking_down_and_right_perct"
]
=
looking_down_right_
perct_combined
percentages
[
"looking_down_and_left_perct"
]
=
looking_down_left_
perct_combined
percentages
[
"looking_up_and_left_perct"
]
=
looking_up_left_
average_perct
percentages
[
"looking_down_and_right_perct"
]
=
looking_down_right_
average_perct
percentages
[
"looking_down_and_left_perct"
]
=
looking_down_left_
average_perct
percentages
[
"looking_front_perct"
]
=
looking_front_average_perct
return
percentages
,
individual_lec_gaze_estimations
,
gaze_estimation_labels
...
...
@@ -677,6 +689,8 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic
# assign the difference
frame_group_diff
[
key
]
=
diff
if
diff
>
0
else
1
# for testing purposes
print
(
'starting gaze frame grouping process'
)
# looping through the frames
while
True
:
...
...
@@ -802,6 +816,9 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic
frame_group_dict
[
frame_name
][
'detection_count'
]
+=
detection_count
# for testing purposes
print
(
'gaze frame groupings count: '
,
frame_count
)
# increment the frame count
frame_count
+=
1
...
...
@@ -848,12 +865,20 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic
# define the labels
labels
=
[
'upright_perct'
,
'upleft_perct'
,
'downright_perct'
,
'downleft_perct'
,
'front_perct'
]
# for testing purposes
print
(
'ending gaze frame grouping process'
)
# return the dictionary
return
frame_group_dict
,
labels
# this section will handle some database operations
def
save_frame_detections
(
video_name
):
# for testing purposes
print
(
'starting the saving gaze frame recognition process'
)
# retrieve the lecture emotion id
lec_gaze
=
LectureGazeEstimation
.
objects
.
filter
(
lecture_video_id__video_name
=
video_name
)
lec_gaze_ser
=
LectureGazeEstimationSerializer
(
lec_gaze
,
many
=
True
)
...
...
@@ -868,7 +893,7 @@ def save_frame_detections(video_name):
ig
.
generate_new_id
(
last_lec_gaze_frame_recognitions
.
lecture_gaze_frame_recognition_id
)
# calculate the frame detections
frame_detections
,
frame_rate
=
get_lecture_gaze_es
r
imation_for_frames
(
video_name
)
frame_detections
,
frame_rate
=
get_lecture_gaze_es
t
imation_for_frames
(
video_name
)
# to be added to the field 'frame_recognition_details' in the Lecture Gaze Frame Recordings
frame_recognition_details
=
[]
...
...
@@ -892,6 +917,9 @@ def save_frame_detections(video_name):
lec_gaze_frame_recognitions
.
save
()
# for testing purposes
print
(
'ending the saving gaze frame recognition process'
)
# now return the frame recognitions
return
frame_detections
...
...
@@ -899,6 +927,10 @@ def save_frame_detections(video_name):
# this method will save gaze frame groupings to the database
def
save_frame_groupings
(
video_name
,
frame_landmarks
,
frame_group_dict
):
# for testing purposes
print
(
'starting the saving gaze frame groupings process'
)
frame_group_percentages
,
gaze_labels
=
gaze_estimation_frame_groupings
(
video_name
,
frame_landmarks
,
frame_group_dict
)
...
...
@@ -928,6 +960,83 @@ def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
    new_lec_gaze_frame_groupings.lecture_gaze_id_id = lec_gaze_id
    new_lec_gaze_frame_groupings.frame_group_details = frame_group_details

    # for testing purposes
    print('ending the saving gaze frame groupings process')

    # save
    new_lec_gaze_frame_groupings.save()


# this method will get gaze estimation correlations
def get_gaze_correlations(individual_lec_gaze, lec_recorded_activity_data):
    # this variable will be used to store the correlations
    correlations = []

    limit = 10
    data_index = ['lecture-{}'.format(i + 1) for i in range(len(individual_lec_gaze))]

    # student gaze labels
    student_gaze_labels = ['Up and Right', 'Up and Left', 'Down and Right', 'Down and Left', 'Front']
    lecturer_activity_labels = ['seated', 'standing', 'walking']

    # lecturer recorded data list (lecturer)
    sitting_perct_list = []
    standing_perct_list = []
    walking_perct_list = []

    # lecture activity data list (student)
    upright_perct_list = []
    upleft_perct_list = []
    downright_perct_list = []
    downleft_perct_list = []
    front_perct_list = []

    # loop through the lecturer recorded data (lecturer)
    for data in lec_recorded_activity_data:
        sitting_perct_list.append(int(data['seated_count']))
        standing_perct_list.append(int(data['standing_count']))
        walking_perct_list.append(int(data['walking_count']))

    # loop through the lecturer recorded data (student)
    for data in individual_lec_gaze:
        upright_perct_list.append(int(data['looking_up_and_right_perct']))
        upleft_perct_list.append(int(data['looking_up_and_left_perct']))
        downright_perct_list.append(int(data['looking_down_and_right_perct']))
        downleft_perct_list.append(int(data['looking_down_and_left_perct']))
        front_perct_list.append(int(data['looking_front_perct']))

    corr_data = {'Up and Right': upright_perct_list, 'Up and Left': upleft_perct_list,
                 'Down and Right': downright_perct_list, 'Down and Left': downleft_perct_list,
                 'Front': front_perct_list,
                 'seated': sitting_perct_list, 'standing': standing_perct_list, 'walking': walking_perct_list}

    # create the dataframe
    df = pd.DataFrame(corr_data, index=data_index)

    # calculate the correlation
    pd_series = ut.get_top_abs_correlations(df, limit)

    print('====correlated variables=====')
    print(pd_series)

    for i in range(limit):
        # this dictionary will get the pandas.Series object's indices and values separately
        corr_dict = {}

        index = pd_series.index[i]

        # check whether the first index is a student activity
        isStudentGaze = index[0] in student_gaze_labels

        # check whether the second index is a lecturer activity
        isLecturerAct = index[1] in lecturer_activity_labels

        # if both are student and lecturer activities, add to the dictionary
        if isStudentGaze & isLecturerAct:
            corr_dict['index'] = index
            corr_dict['value'] = pd_series.values[i]

            # append the dictionary to the 'correlations' list
            correlations.append(corr_dict)

    # return the list
    return correlations
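A minimal sketch of how get_gaze_correlations could be exercised with hand-made inputs. The dictionary keys mirror the ones the function reads above; the numeric values below are invented, and the snippet assumes pandas (pd) and the utilities module (ut) are importable exactly as in this file.

    # hypothetical smoke test for get_gaze_correlations (all values are made up)
    individual_lec_gaze = [
        {'looking_up_and_right_perct': 10, 'looking_up_and_left_perct': 20,
         'looking_down_and_right_perct': 15, 'looking_down_and_left_perct': 25,
         'looking_front_perct': 30},
        {'looking_up_and_right_perct': 12, 'looking_up_and_left_perct': 18,
         'looking_down_and_right_perct': 20, 'looking_down_and_left_perct': 22,
         'looking_front_perct': 28},
        {'looking_up_and_right_perct': 8, 'looking_up_and_left_perct': 22,
         'looking_down_and_right_perct': 18, 'looking_down_and_left_perct': 24,
         'looking_front_perct': 28},
    ]
    lec_recorded_activity_data = [
        {'seated_count': 60, 'standing_count': 30, 'walking_count': 10},
        {'seated_count': 50, 'standing_count': 35, 'walking_count': 15},
        {'seated_count': 45, 'standing_count': 40, 'walking_count': 15},
    ]

    # each returned entry pairs a (student gaze, lecturer activity) label tuple with its score
    for corr in get_gaze_correlations(individual_lec_gaze, lec_recorded_activity_data):
        print(corr['index'], corr['value'])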
FirstApp/logic/utilities.py
0 → 100644
def get_redundant_pairs(df):
    '''Get diagonal and lower triangular pairs of correlation matrix'''
    pairs_to_drop = set()
    cols = df.columns
    for i in range(0, df.shape[1]):
        for j in range(0, i + 1):
            pairs_to_drop.add((cols[i], cols[j]))
    return pairs_to_drop


def get_top_abs_correlations(df, n):
    au_corr = df.corr().abs().unstack()
    labels_to_drop = get_redundant_pairs(df)
    au_corr = au_corr.drop(labels=labels_to_drop).sort_values(ascending=False)
    return au_corr[0:n]
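A short, self-contained example of the two helpers above; the DataFrame contents are invented purely to show the call shape and the (column_a, column_b) MultiIndex of the result.

    import pandas as pd

    df = pd.DataFrame({
        'seated': [60, 50, 40],
        'standing': [30, 35, 45],
        'Front': [55, 48, 30],
    })

    # top 2 absolute pairwise correlations, with self-pairs and mirrored pairs dropped
    print(get_top_abs_correlations(df, 2))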
FirstApp/logic/video_extraction.py
import os
import cv2
import shutil
import datetime
# import datetime
from datetime import timedelta

from FirstApp.MongoModels import *
from FirstApp.serializers import *
...
...
@@ -94,7 +95,7 @@ def getTimeLandmarks(video_name):
    THRESHOLD_GAP = 5

    # calculating the real duration
    real_duration = datetime.timedelta(seconds=(duration + THRESHOLD_GAP))
    real_duration = timedelta(seconds=(duration))

    # defines the number of seconds included for a frame group
    THRESHOLD_TIME = 10
...
...
@@ -112,7 +113,7 @@ def getTimeLandmarks(video_name):
    # loop through the threshold gap limit to define the time landmarks
    for i in range(THRESHOLD_GAP):
        initial_landmark += unit_gap
        time_landmark = str(datetime.timedelta(seconds=initial_landmark))
        time_landmark = str(timedelta(seconds=initial_landmark))
        time_landmark_value = initial_landmark
        time_landmarks.append(time_landmark)
        time_landmarks_values.append(time_landmark_value)
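The switch from datetime.timedelta to the directly imported timedelta keeps the same string rendering; a quick standalone illustration (not part of the diff):

    from datetime import timedelta

    # 125 seconds renders as 'H:MM:SS', which is the form stored as a time landmark
    print(str(timedelta(seconds=125)))  # 0:02:05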
...
...
@@ -204,6 +205,9 @@ def getFrameLandmarks(video_name, category):
# this section will handle some database operations
def save_time_landmarks(video_name):

    # for testing purposes
    print('starting the saving time landmarks process')

    last_lec_video_time_landmarks = LectureVideoTimeLandmarks.objects.order_by('lecture_video_time_landmarks_id').last()
    new_lecture_video_time_landmarks_id = "LVTL00001" if (last_lec_video_time_landmarks is None) else \
        ig.generate_new_id(last_lec_video_time_landmarks.lecture_video_time_landmarks_id)
...
...
@@ -233,12 +237,18 @@ def save_time_landmarks(video_name):
    new_lec_video_time_landmarks.lecture_video_id_id = lec_video_id
    new_lec_video_time_landmarks.time_landmarks = db_time_landmarks

    # for testing purposes
    print('ending the saving time landmarks process')

    new_lec_video_time_landmarks.save()


# this method will save frame landmarks to the database
def save_frame_landmarks(video_name):

    # for testing purposes
    print('starting the saving frame landmarks process')

    # retrieve the previous lecture video frame landmarks details
    last_lec_video_frame_landmarks = LectureVideoFrameLandmarks.objects.order_by('lecture_video_frame_landmarks_id').last()
...
...
@@ -271,6 +281,9 @@ def save_frame_landmarks(video_name):
    new_lec_video_frame_landmarks.save()

    # for testing purposes
    print('ending the saving frame landmarks process')

    # now return the frame landmarks and the frame group dictionary
    return frame_landmarks, frame_group_dict
...
...
FirstApp/migrations/0015_auto_20201020_2157.py
0 → 100644
# Generated by Django 2.2.11 on 2020-10-20 16:27

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('FirstApp', '0014_lecturegazeframerecognitions'),
    ]

    operations = [
        migrations.CreateModel(
            name='Admin',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('admin_id', models.CharField(max_length=10)),
                ('name', models.CharField(max_length=20)),
                ('email', models.EmailField(max_length=254)),
            ],
        ),
        migrations.CreateModel(
            name='AdminCredentialDetails',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=15)),
                ('username', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='FirstApp.Admin')),
            ],
        ),
        migrations.DeleteModel(
            name='LecturePoseEstimation',
        ),
    ]
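The migration above implies model definitions roughly like the following; this is a reconstruction from the migration fields for readability, not the project's actual source.

    from django.db import models

    class Admin(models.Model):
        admin_id = models.CharField(max_length=10)
        name = models.CharField(max_length=20)
        email = models.EmailField()

    class AdminCredentialDetails(models.Model):
        username = models.ForeignKey(Admin, on_delete=models.CASCADE)
        password = models.CharField(max_length=15)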
FirstApp/templates/FirstApp/401.html
0 → 100644
{% extends 'FirstApp/template.html' %}
<!DOCTYPE html>
<html lang="en">

<body id="page-top">

<!-- Page Wrapper -->
<div id="wrapper">

    <!-- Content Wrapper -->
    <div id="content-wrapper" class="d-flex flex-column">

        <!-- Main Content -->
        <div id="content">

            <!-- Begin Page Content -->
            {% block 'container-fluid' %}
            <div class="container-fluid">
                {% load static %}

                <!-- 404 Error Text -->
                <div class="text-center">
                    <div class="error mx-auto" data-text="404">401</div>
                    <p class="lead text-gray-800 mb-5">Unauthorized access</p>
                    <p class="text-gray-500 mb-0">It looks like you do not have access to this url</p>
                    <p class="text-gray-500 mb-0">Please login with the correct user type</p>
                    <a href="/logout">← Back to Login Page</a>
                </div>

            </div>
            {% endblock %}
            <!--end of container-fluid -->
        </div>
        <!-- End of Main Content -->

        <!-- Footer -->
        <footer class="sticky-footer bg-white">
            <div class="container my-auto">
                <div class="copyright text-center my-auto">
                    <span>Copyright © Your Website 2019</span>
                </div>
            </div>
        </footer>
        <!-- End of Footer -->

    </div>
    <!-- End of Content Wrapper -->

</div>
<!-- End of Page Wrapper -->

<!-- Scroll to Top Button-->
<a class="scroll-to-top rounded" href="#page-top">
    <i class="fas fa-angle-up"></i>
</a>

<!-- Logout Modal-->
<div class="modal fade" id="logoutModal" tabindex="-1" role="dialog" aria-labelledby="exampleModalLabel" aria-hidden="true">
    <div class="modal-dialog" role="document">
        <div class="modal-content">
            <div class="modal-header">
                <h5 class="modal-title" id="exampleModalLabel">Ready to Leave?</h5>
                <button class="close" type="button" data-dismiss="modal" aria-label="Close">
                    <span aria-hidden="true">×</span>
                </button>
            </div>
            <div class="modal-body">Select "Logout" below if you are ready to end your current session.</div>
            <div class="modal-footer">
                <button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
                <a class="btn btn-primary" href="login.html">Logout</a>
            </div>
        </div>
    </div>
</div>

<!-- Bootstrap core JavaScript-->
<script src="vendor/jquery/jquery.min.js"></script>
<script src="vendor/bootstrap/js/bootstrap.bundle.min.js"></script>

<!-- Core plugin JavaScript-->
<script src="vendor/jquery-easing/jquery.easing.min.js"></script>

<!-- Custom scripts for all pages-->
<script src="js/sb-admin-2.min.js"></script>

</body>
</html>
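The 401 page is presumably returned by a view that checks the logged-in user type; a minimal sketch (the view name and the guard are assumptions, only the template path is taken from the file above):

    from django.shortcuts import render

    def unauthorized(request):
        # shown when the current user type may not access the requested URL
        return render(request, 'FirstApp/401.html')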
FirstApp/templates/FirstApp/Home.html
...
...
@@ -234,9 +234,9 @@
            //fetch the video time landmark details
            fetch('http://127.0.0.1:8000/get-lecture-video-summary-time-landmarks/?video_name=' + global_video_name)
                .then((res) => res.json())
                .then((out) => assignTimeLandmarks(out.response))
                .catch((err) => alert('error: ' + err));

            //display the progress bar area
...
...
@@ -251,7 +251,6 @@
            }

            //this function will handle the activity 'summary' button
            $('#activity_summary_btn').click(function (e) {
...
...
@@ -260,9 +259,9 @@
                //fetch the activity summary details
                fetch('http://127.0.0.1:8000/get-lecture-activity-summary/?video_name=' + global_video_name)
                    .then((res) => res.json())
                    .then((out) => activityFrameGroupPercentages(out, e))
                    .catch((err) => alert('error: ' + err));
            });
...
...
@@ -275,9 +274,9 @@
                //fetch the activity summary details
                fetch('http://127.0.0.1:8000/get-lecture-emotion-summary/?video_name=' + global_video_name)
                    .then((res) => res.json())
                    .then((out) => emotionFrameGroupPercentages(out, e))
                    .catch((err) => alert('error: ' + err));
            });
...
...
@@ -288,13 +287,12 @@
                //fetch the activity summary details
                fetch('http://127.0.0.1:8000/get-lecture-gaze-summary/?video_name=' + global_video_name)
                    .then((res) => res.json())
                    .then((out) => gazeFrameGroupPercentages(out, e))
                    .catch((err) => alert('error: ' + err));
            });

            //this function will handle the retrieved activity frame group percentages
            function activityFrameGroupPercentages(response, e) {
...
...
@@ -357,7 +355,6 @@
            }

            //this function will call the activity chart function
            function renderActivityChart(activity_labels) {
...
...
@@ -486,7 +483,6 @@
                }

                var chart = new CanvasJS.Chart("EmotionChartContainer", {
                    animationEnabled: true,
                    theme: "light2",
...
...
@@ -570,7 +566,6 @@
                }

                var chart = new CanvasJS.Chart("GazeChartContainer", {
                    animationEnabled: true,
                    theme: "light2",
...
...
@@ -609,7 +604,6 @@
            }

            //this function will render the chart for Activity statistics
            function renderActivityStatistics() {
...
...
@@ -626,7 +620,6 @@
                ];

                for (let i = 0; i < label_length; i++) {
                    let label = activity_labels[i];
...
...
@@ -634,7 +627,7 @@
                    for (let j = 0; j < activity_length; j++) {
                        let activity = individual_activities[j];
                        datapoints.push({label: "lecture" + (j + 1), y: activity[label]});
                    }
...
...
@@ -644,7 +637,7 @@
                        name: label,
                        markerType: "square",
                        {# xValueFormatString: "DD MMM, YYYY", #}
                        xValueFormatString: "lec" + (i + 1),
                        color: getRandomColor(),
                        dataPoints: datapoints
                    };
...
...
@@ -714,7 +707,7 @@
                    for (let j = 0; j < emotion_length; j++) {
                        let emotion = individual_emotions[j];
                        datapoints.push({label: "lecture" + (j + 1), y: emotion[label]});
                    }

                    let obj = {
...
...
@@ -723,7 +716,7 @@
                        name: label,
                        markerType: "square",
                        {# xValueFormatString: "DD MMM, YYYY", #}
                        xValueFormatString: "Lec" + (i + 1),
                        color: colors[i - 1],
                        dataPoints: datapoints
                    };
...
...
@@ -740,7 +733,7 @@
                axisX: {
                    title: "Lecture",
                    {# valueFormatString: "DD MMM", #}
                    valueFormatString: "lec",
                    crosshair: {
                        enabled: true,
                        snapToDataPoint: true
...
...
@@ -792,7 +785,7 @@
                    for (let j = 0; j < gaze_estimation_length; j++) {
                        let gaze_estimation = individual_gaze_estimations[j];
                        datapoints.push({label: "lecture" + (j + 1), y: gaze_estimation[label]});
                    }

                    let obj = {
...
...
@@ -801,7 +794,7 @@
                        name: label,
                        markerType: "square",
                        {# xValueFormatString: "DD MMM, YYYY", #}
                        xValueFormatString: "Lec" + (i + 1),
                        color: colors[i - 1],
                        dataPoints: datapoints
                    };
...
...
@@ -818,7 +811,7 @@
                axisX: {
                    title: "Lecture",
                    {# valueFormatString: "DD MMM", #}
                    valueFormatString: "lec",
                    crosshair: {
                        enabled: true,
                        snapToDataPoint: true
...
...
@@ -862,7 +855,7 @@
                    $('#student_behavior_view_summary_modal').modal();
                });
            });

            //this function will handle the view summary option form
            $('#view_summary_option_form').submit(function (e) {
...
...
@@ -965,6 +958,235 @@
            }

            //this function will handle the advanced analysis for activity
            $('#activity_advanced_btn').click(function () {
                $('#activity_advanced_modal').modal();

                //enable the loader
                $('#activity_corr_loader').attr('hidden', false);

                let lecturer = "{{ lecturer }}";
                let option = $("input[name='option']:checked").val();

                //fetch the correlation data
                fetch('http://127.0.0.1:8000/get-activity-correlations/?lecturer=' + lecturer + '&option=' + option)
                    .then((res) => res.json())
                    .then((out) => displayActivityCorrelations(out.correlations))
                    .catch((err) => alert('error: ' + err));
            });

            //this function will handle the advanced analysis for emotion
            $('#emotion_advanced_btn').click(function () {
                $('#emotion_advanced_modal').modal();

                //enable the loader
                $('#emotion_corr_loader').attr('hidden', false);

                let lecturer = "{{ lecturer }}";
                let option = $("input[name='option']:checked").val();

                //fetch the correlation data
                fetch('http://127.0.0.1:8000/get-emotion-correlations/?lecturer=' + lecturer + "&option=" + option)
                    .then((res) => res.json())
                    .then((out) => displayEmotionCorrelations(out.correlations))
                    .catch((err) => alert('err: ' + err));
            });

            //this function will handle the advanced analysis for gaze
            $('#gaze_advanced_btn').click(function () {
                $('#gaze_advanced_modal').modal();

                //enable the loader
                $('#gaze_corr_loader').attr('hidden', false);

                let lecturer = "{{ lecturer }}";
                let option = $("input[name='option']:checked").val();

                //fetch the correlation data
                fetch('http://127.0.0.1:8000/get-gaze-correlations/?lecturer=' + lecturer + "&option=" + option)
                    .then((res) => res.json())
                    .then((out) => displayGazeCorrelations(out.correlations))
                    .catch((err) => alert('err: ' + err));
            });

            //this method will display the activity correlations in a table
            function displayActivityCorrelations(correlations) {
                let htmlString = "";

                //create the html content for the activity correlation table
                for (let i = 0; i < correlations.length; i++) {
                    let corr = correlations[i];
                    let indices = corr.index;
                    let value = corr.value;

                    value = Math.round(value * 100, 1);

                    if (value <= 100 && value > 80) {
                        htmlString += "<tr class='bg-success text-white'>";
                    } else if (value <= 80 && value > 60) {
                        htmlString += "<tr class='bg-primary text-white'>";
                    } else if (value <= 60 && value > 40) {
                        htmlString += "<tr class='bg-warning text-white'>";
                    } else if (value <= 40 && value > 20) {
                        htmlString += "<tr class='bg-danger text-white'>";
                    } else if (value <= 20 && value > 0) {
                        htmlString += "<tr class='bg-dark text-white'>";
                    }

                    //create a <tr> to be inserted
                    htmlString += "<td>";
                    htmlString += indices[0];
                    htmlString += "</td>";
                    htmlString += "<td>";
                    htmlString += indices[1];
                    htmlString += "</td>";
                    htmlString += "<td>";
                    htmlString += value;
                    htmlString += "</td>";
                    htmlString += "</tr>";
                }

                //append to the <tbody>
                $('#activity_corr_tbody').append(htmlString);

                //hide the loader
                $('#activity_corr_loader').hide();

                //show the table
                $('#activity_corr_table').attr('hidden', false);
            }

            //this method will display the emotion correlations in a table
            function displayEmotionCorrelations(correlations) {
                let htmlString = "";

                //create the html content for the activity correlation table
                for (let i = 0; i < correlations.length; i++) {
                    let corr = correlations[i];
                    let indices = corr.index;
                    let value = corr.value;

                    value = Math.round(value * 100, 1);

                    if (value <= 100 && value > 80) {
                        htmlString += "<tr class='bg-success text-white'>";
                    } else if (value <= 80 && value > 60) {
                        htmlString += "<tr class='bg-primary text-white'>";
                    } else if (value <= 60 && value > 40) {
                        htmlString += "<tr class='bg-warning text-white'>";
                    } else if (value <= 40 && value > 20) {
                        htmlString += "<tr class='bg-danger text-white'>";
                    } else if (value <= 20 && value > 0) {
                        htmlString += "<tr class='bg-dark text-white'>";
                    }

                    //create a <tr> to be inserted
                    htmlString += "<td>";
                    htmlString += indices[0];
                    htmlString += "</td>";
                    htmlString += "<td>";
                    htmlString += indices[1];
                    htmlString += "</td>";
                    htmlString += "<td>";
                    htmlString += value;
                    htmlString += "</td>";
                    htmlString += "</tr>";
                }

                //append to the <tbody>
                $('#emotion_corr_tbody').append(htmlString);

                //hide the loader
                $('#emotion_corr_loader').hide();

                //show the table
                $('#emotion_corr_table').attr('hidden', false);
            }

            //this method will display the activity correlations in a table
            function displayGazeCorrelations(correlations) {
                let htmlString = "";

                //create the html content for the activity correlation table
                for (let i = 0; i < correlations.length; i++) {
                    let corr = correlations[i];
                    let indices = corr.index;
                    let value = corr.value;

                    value = Math.round(value * 100, 1);

                    if (value <= 100 && value > 80) {
                        htmlString += "<tr class='bg-success text-white'>";
                    } else if (value <= 80 && value > 60) {
                        htmlString += "<tr class='bg-primary text-white'>";
                    } else if (value <= 60 && value > 40) {
                        htmlString += "<tr class='bg-warning text-white'>";
                    } else if (value <= 40 && value > 20) {
                        htmlString += "<tr class='bg-danger text-white'>";
                    } else if (value <= 20 && value > 0) {
                        htmlString += "<tr class='bg-dark text-white'>";
                    }

                    //create a <tr> to be inserted
                    htmlString += "<td>";
                    htmlString += indices[0];
                    htmlString += "</td>";
                    htmlString += "<td>";
                    htmlString += indices[1];
                    htmlString += "</td>";
                    htmlString += "<td>";
                    htmlString += value;
                    htmlString += "</td>";
                    htmlString += "</tr>";
                }

                //append to the <tbody>
                $('#gaze_corr_tbody').append(htmlString);

                //hide the loader
                $('#gaze_corr_loader').hide();

                //show the table
                $('#gaze_corr_table').attr('hidden', false);
            }
        });
    </script>
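The three display*Correlations helpers above expect out.correlations to be a list of objects shaped like {index: [label_a, label_b], value: score}. A server-side sketch of that JSON payload (the view name is an assumption; only the shape is taken from the JavaScript):

    from django.http import JsonResponse

    def get_gaze_correlations_api(request):
        # in the real project the list would come from get_gaze_correlations()
        correlations = [
            {'index': ('Front', 'standing'), 'value': 0.83},  # illustrative entry
        ]
        return JsonResponse({'correlations': correlations})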
...
...
@@ -1189,6 +1411,13 @@
                    </button>
                </div>
                <!-- end of stats button -->

                <!-- button to view advanced analysis -->
                <button type="button" class="btn btn-danger float-right mr-2" id="activity_advanced_btn">
                    Advanced Analysis
                </button>
                <!-- end of button to view advanced analysis -->
            </div>
        </div>
        <!-- end of Activity card -->
...
...
@@ -1264,6 +1493,14 @@
                    </button>
                </div>
                <!-- end of stats button -->

                <!-- button to view advanced analysis -->
                <button type="button" class="btn btn-danger float-right mr-2" id="emotion_advanced_btn">
                    Advanced Analysis
                </button>
                <!-- end of button to view advanced analysis -->
            </div>
        </div>
...
...
@@ -1333,6 +1570,14 @@
                    </button>
                </div>
                <!-- end of stats button -->

                <!-- button to view advanced analysis -->
                <button type="button" class="btn btn-danger float-right mr-2" id="gaze_advanced_btn">
                    Advanced Analysis
                </button>
                <!-- end of button to view advanced analysis -->
            </div>
        </div>
...
...
@@ -1400,10 +1645,13 @@
                <hr>
                <!-- button to view activity summary -->
                <button type="button" class="btn btn-primary float-right" id="activity_summary_btn">
                    Summary
                </button>
                <!-- end of button to view activity summary -->
            </li>
            <!-- end of the activity list item -->
...
...
@@ -1469,10 +1717,13 @@
                <hr>
                <!-- button to view emotion summary -->
                <button type="button" class="btn btn-primary float-right" id="emotion_summary_btn">
                    Summary
                </button>
                <!-- end of button to view emotion summary -->
            </li>
            <!-- end of the emotion list item -->
...
...
@@ -1513,7 +1764,8 @@
                <span class="float-right" id="looking_down_right_perct">50%</span>
                <div class="progress mb-4">
                    <div class="progress-bar bg-success" role="progressbar" id="looking_down_right_width"
                         style="width: 60%" aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div>
                </div>
...
...
@@ -1544,7 +1796,8 @@
                <!-- button to view gaze summary -->
                <button type="button" class="btn btn-primary float-right" id="gaze_summary_btn">
                    Summary
                </button>
                <!-- end of button to view gaze summary -->
...
...
@@ -1554,7 +1807,6 @@
            <!-- end of the gaze list item -->
        </ul>
...
...
@@ -1884,7 +2136,8 @@
                </div>
                <div class="custom-control custom-radio mt-2">
                    <input type="radio" class="custom-control-input" id="customRadio3" name="option" value="10000">
                    <label class="custom-control-label" for="customRadio3">All</label>
                </div>
...
...
@@ -1955,7 +2208,8 @@
    <!-- gaze estimation Modal-->
    <div class="modal fade" id="gaze_estimation_stats_modal" tabindex="-1" role="dialog" aria-labelledby="exampleModalLabel" aria-hidden="true">
        <div class="modal-dialog" role="document" style="max-width: 1400px">
            <div class="modal-content">
...
...
@@ -1977,6 +2231,139 @@
    <!-- end of activity statistics modal -->

    <!-- activity advanced analysis modal -->
    <div class="modal fade" id="activity_advanced_modal" tabindex="-1" role="dialog" aria-labelledby="exampleModalLabel" aria-hidden="true">
        <div class="modal-dialog" role="document" style="max-width: 700px">
            <div class="modal-content">
                <div class="modal-header">
                    <h5 class="modal-title" id="exampleModalLabel">Activity Advanced Analysis</h5>
                    <button class="close" type="button" data-dismiss="modal" aria-label="Close">
                        <span aria-hidden="true">×</span>
                    </button>
                </div>
                <div class="modal-body text-center">
                    <h3 class="font-weight-bold">Student Activity VS. Lecturer Activity</h3>

                    <!-- ajax loader -->
                    <div class="text-center" id="activity_corr_loader" hidden>
                        <img src="{% static 'FirstApp/images/ajax-loader.gif' %}" alt="Loader">
                    </div>

                    <!-- correlation table -->
                    <table class="table table-striped" id="activity_corr_table" hidden>
                        <thead>
                        <tr>
                            <th>Student Activity</th>
                            <th>Lecturer Activity</th>
                            <th>Correlation Score</th>
                        </tr>
                        </thead>
                        <tbody id="activity_corr_tbody">
                        </tbody>
                    </table>
                    <!-- end of correlation table -->
                </div>
                <div class="modal-footer">
                    <button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
                </div>
            </div>
        </div>
    </div>
    <!-- end of activity advanced analysis modal -->

    <!-- emotion advanced analysis modal -->
    <div class="modal fade" id="emotion_advanced_modal" tabindex="-1" role="dialog" aria-labelledby="exampleModalLabel" aria-hidden="true">
        <div class="modal-dialog" role="document" style="max-width: 700px">
            <div class="modal-content">
                <div class="modal-header">
                    <h5 class="modal-title" id="exampleModalLabel">Emotion Advanced Analysis</h5>
                    <button class="close" type="button" data-dismiss="modal" aria-label="Close">
                        <span aria-hidden="true">×</span>
                    </button>
                </div>
                <div class="modal-body text-center">
                    <h3 class="font-weight-bold">Student Emotions VS. Lecturer Activity</h3>

                    <!-- ajax loader -->
                    <div class="text-center" id="emotion_corr_loader" hidden>
                        <img src="{% static 'FirstApp/images/ajax-loader.gif' %}" alt="Loader">
                    </div>

                    <!-- correlation table -->
                    <table class="table table-striped" id="emotion_corr_table" hidden>
                        <thead>
                        <tr>
                            <th>Student Emotion</th>
                            <th>Lecturer Activity</th>
                            <th>Correlation Score</th>
                        </tr>
                        </thead>
                        <tbody id="emotion_corr_tbody">
                        </tbody>
                    </table>
                    <!-- end of correlation table -->
                </div>
                <div class="modal-footer">
                    <button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
                </div>
            </div>
        </div>
    </div>
    <!-- end of emotion advanced analysis modal -->

    <!-- gaze advanced analysis modal -->
    <div class="modal fade" id="gaze_advanced_modal" tabindex="-1" role="dialog" aria-labelledby="exampleModalLabel" aria-hidden="true">
        <div class="modal-dialog" role="document" style="max-width: 700px">
            <div class="modal-content">
                <div class="modal-header">
                    <h5 class="modal-title" id="exampleModalLabel">Gaze Advanced Analysis</h5>
                    <button class="close" type="button" data-dismiss="modal" aria-label="Close">
                        <span aria-hidden="true">×</span>
                    </button>
                </div>
                <div class="modal-body text-center">
                    <h3 class="font-weight-bold">Student Gaze estimation VS. Lecturer Activity</h3>

                    <!-- ajax loader -->
                    <div class="text-center" id="gaze_corr_loader" hidden>
                        <img src="{% static 'FirstApp/images/ajax-loader.gif' %}" alt="Loader">
                    </div>

                    <!-- correlation table -->
                    <table class="table table-striped" id="gaze_corr_table" hidden>
                        <thead>
                        <tr>
                            <th>Student Gaze estimation</th>
                            <th>Lecturer Activity</th>
                            <th>Correlation Score</th>
                        </tr>
                        </thead>
                        <tbody id="gaze_corr_tbody">
                        </tbody>
                    </table>
                    <!-- end of correlation table -->
                </div>
                <div class="modal-footer">
                    <button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
                </div>
            </div>
        </div>
    </div>
    <!-- end of gaze advanced analysis modal -->

{% endblock %}

<!--scripts-->
{% block 'scripts' %}
...
...
FirstApp/templates/FirstApp/activity.html
...
...
@@ -30,7 +30,11 @@
    var global_video_name = '';
    var global_lecturer_subject_index = 0;
    var global_lecture_date = '';
    var global_lecturer_video_name = '';
    var lecturer_fps = 0;
    ;

    //jquery
    $(document).ready(function () {
...
...
@@ -147,7 +151,6 @@
                    global_video_name = video.video_name;

                    if (lectureVideo.isActivityFound) {
                        e.target.parentNode.parentNode.lastChild.innerHTML = '<button type="button" class="btn btn-primary" id="result_btn">Results</button>';
                    }
                    else {
...
...
@@ -174,13 +177,14 @@
                fetch('http://127.0.0.1:8000/get-lecture-activity/?lecture_video_id=' + global_lecture_video_id + '&lecture_video_name=' + global_video_name)
                    .then((res) => res.json())
                    .then((out) => {
                        let frames = createFrames(out);
                        return frames
                        {# let frames = createFrames(out); #}
                        {# return frames #}
                        displayActivity(out);
                    })
                    .then((obj) => {
                        $('#video_frames').prepend(obj);
                        {# $('#video_frames').prepend(obj); #}
                        $('#frame_loader').attr('hidden', true);
                        $('#slidecontainer').attr('hidden', false);
                        {# $('#slidecontainer').attr('hidden', false); #}
                    })
                    .catch((error) => alert('this is the error: ' + error));
            });
...
...
@@ -234,580 +238,163 @@
}
//this section is responsible for displaying the frames as video
//creating the frame content
function
createFrames
(
res
)
{
let
main_frame_content
=
"
<div class='row' id='main_frames'>
"
;
main_frame_content
+=
"
<ul class='list-group list-group-horizontal'>
"
;
let
count
=
0
;
//loop through the frames
res
.
extracted
.
map
((
image
)
=>
{
let
img_src
=
""
;
let
len
=
image
.
detections
.
length
;
if
(
count
===
0
)
{
main_frame_content
+=
"
<li class='list-group-item text-center' id='image_0'>
"
;
img_src
=
"
<img src='{% static '' %}FirstApp/activity/
"
+
global_video_name
+
"
/
"
+
res
.
extracted
[
0
].
frame
+
"
/
"
+
res
.
extracted
[
0
].
detections
[
0
]
+
"
' width='400' height='400'>
"
;
}
else
{
main_frame_content
+=
"
<li class='list-group-item other-frames' id='image_
"
+
count
+
"
' hidden>
"
;
img_src
=
"
<img src='{% static '' %}FirstApp/activity/
"
+
global_video_name
+
"
/
"
+
image
.
frame
+
"
/
"
+
image
.
detections
[
len
-
1
]
+
"
' class='img-link' width='400' height='400'>
"
;
}
main_frame_content
+=
img_src
;
main_frame_content
+=
"
</li>
"
;
count
++
;
});
main_frame_content
+=
"
</ul>
"
;
main_frame_content
+=
"
</div>
"
;
//setting the min, max values of the slider
$
(
'
#myActivityRange
'
).
attr
({
'
min
'
:
0
,
'
max
'
:
count
});
//display the progress bars
displayActivity
(
res
);
return
main_frame_content
;
}
//declaring the variable for setInterval function
let
timeVar
=
null
;
//handling the play button
$
(
'
#play_pause_icon_activity
'
).
click
(
function
()
{
//defining the two possible classes
let
play_class
=
"
fas fa-play
"
;
let
pause_class
=
"
fas fa-pause
"
;
//retrieving the current icon class
let
current_class
=
$
(
this
).
attr
(
'
class
'
);
//assigning the correct class based on the icon clicked
let
new_class
=
(
current_class
===
play_class
)
?
pause_class
:
play_class
;
//setting the new class
$
(
this
).
attr
(
'
class
'
,
new_class
);
//handling the slider
let
slider
=
document
.
getElementById
(
"
myActivityRange
"
);
let
output
=
document
.
getElementById
(
"
demo
"
);
//when the button is playing
if
(
current_class
===
play_class
)
{
timeVar
=
setInterval
(()
=>
{
let
value
=
slider
.
value
;
let
new_slider_value
=
Number
(
value
)
+
1
;
slider
.
value
=
new_slider_value
;
output
.
innerHTML
=
new_slider_value
.
toString
();
let
selectedImage
=
'
#image_
'
+
Number
(
value
);
//displaying the relevant image
$
(
'
#image_0
'
).
html
(
$
(
selectedImage
).
html
());
},
50
);
}
//when the button is paused
else
if
(
current_class
===
pause_class
)
{
clearInterval
(
timeVar
);
}
});
//handling the slider
let
slider
=
document
.
getElementById
(
"
myActivityRange
"
);
let
output
=
document
.
getElementById
(
"
demo
"
);
output
.
innerHTML
=
slider
.
value
;
slider
.
oninput
=
function
()
{
output
.
innerHTML
=
this
.
value
;
let
selectedImage
=
'
#image_
'
+
Number
(
this
.
value
);
//hide
{
#
$
(
'
#image_0
'
).
attr
(
'
hidden
'
,
true
);
#
}
$
(
'
#image_0
'
).
html
(
$
(
selectedImage
).
html
());
//setting the selected image
{
#
$
(
selectedImage
).
attr
(
'
hidden
'
,
false
);
#
}
};
//to handle the 'integrate' modal
$
(
'
#integrate_activity
'
).
click
(
function
()
{
//define the student video src
let
video_src
=
"
{% static '' %}FirstApp/videos/
"
+
global_video_name
;
$
(
document
).
on
(
'
click
'
,
'
.img-link
'
,
function
(
e
)
{
//assign the video src
$
(
'
#student_video
'
).
attr
(
'
src
'
,
video_src
);
//removing previously displayed detections
$
(
'
.detections
'
).
remove
();
//fetch the lecture recorded video name
fetch
(
'
http://127.0.0.1:8000/get-lecture-recorded-video-name/?lecturer=
'
+
global_lecturer
+
'
&subject=
'
+
global_subject
+
'
&date=
'
+
global_lecture_date
)
.
then
((
res
)
=>
res
.
json
())
.
then
((
out
)
=>
assignLecturerRecordedVideoName
(
out
))
.
catch
((
err
)
=>
alert
(
'
error:
'
+
err
));
//removing the no-content message
$
(
'
#no_detection_message_content
'
).
hide
();
{
#
global_lecturer_video_name
=
"
Test_1.mp4
"
;
#
}
{
#
global_lecturer_video_name
=
"
Test_2.mp4
"
;
#
}
{
#
global_lecturer_video_name
=
"
Test_3.mp4
"
;
#
}
//appearing the loader
$
(
'
#detection_loader
'
).
attr
(
'
hidden
'
,
false
);
let
img_src_arr
=
e
.
target
.
src
.
split
(
'
/
'
);
let
len
=
img_src_arr
.
length
;
let
src
=
img_src_arr
[
len
-
1
];
let
frame_name_arr
=
src
.
split
(
'
.
'
);
let
frame_name
=
frame_name_arr
[
0
];
//fetch
ing the detection for the selected frame
fetch
(
'
http://127.0.0.1:8000/get-lecture-activity-f
rame-detection/?video_name=
'
+
global_video_name
+
"
&frame_name=
"
+
frame
_name
)
//fetch
data from the API
fetch
(
'
http://127.0.0.1:8000/get-lecture-activity-f
or-frame?video_name=
'
+
global_video
_name
)
.
then
((
res
)
=>
res
.
json
())
.
then
((
out
)
=>
display
Detections
(
out
.
detections
,
frame_nam
e
))
.
catch
((
err
or
)
=>
alert
(
'
this is an error
'
));
.
then
((
out
)
=>
display
ActivityRecognitionForFrame
(
out
.
respons
e
))
.
catch
((
err
)
=>
alert
(
'
error:
'
+
err
));
});
//the function to display detections
function
displayDetections
(
detections
,
frame_name
)
{
let
img_string
=
''
;
let
no_of_detections
=
detections
.
length
;
//assign the lecturer recorded video name
function
assignLecturerRecordedVideoName
(
res
)
{
//disabling the loader
$
(
'
#detection_loader
'
).
attr
(
'
hidden
'
,
true
);
global_lecturer_video_name
=
res
.
video_name
;
//appearing the no of detections number area
$
(
'
#detection_number_area
'
).
attr
(
'
hidden
'
,
false
);
$
(
'
#no_of_detections
'
).
text
(
no_of_detections
);
//define the lecturer video src
let
lecturer_video_src
=
"
{% static '' %}FirstApp/lecturer_videos/
"
+
global_lecturer_video_name
;
detections
.
map
((
detection
)
=>
{
img_string
+=
"
<img src='{% static '' %}FirstApp/activity/
"
+
global_video_name
+
"
/
"
+
frame_name
+
"
/
"
+
detection
+
"
' class='detections m-2' width='100' height='100' >
"
});
$
(
'
#detection_frames
'
).
prepend
(
img_string
);
//assign the video src
$
(
'
#lecturer_video
'
).
attr
(
'
src
'
,
lecturer_video_src
);
$
(
'
#integrate_modal
'
).
modal
();
}
//this function will load the activity recognition for frames
function
displayActivityRecognitionForFrame
(
response
)
{
//hide the loader
$
(
'
#student_video_progress_loader
'
).
attr
(
'
hidden
'
,
true
);
//show the progress bars
$
(
'
#student_video_progress
'
).
attr
(
'
hidden
'
,
false
);
//listening for click events in labels
$
(
'
.labels
'
).
click
(
function
()
{
let
label
=
Number
(
$
(
this
).
attr
(
'
data-number
'
));
let
label_name
=
$
(
this
).
attr
(
'
data-label
'
);
//removing the previous student detection lists
$
(
'
.student_detection_lists
'
).
remove
();
//appearing the loader
$
(
'
#detection_student_loader
'
).
attr
(
'
hidden
'
,
false
);
//appearing the loader
$
(
'
#activity_type
'
).
attr
(
'
hidden
'
,
false
);
$
(
'
#activity_type_text
'
).
text
(
label_name
);
//disappearing the no content message
$
(
'
#no_detection_student_content
'
).
attr
(
'
hidden
'
,
true
);
//fetching from the api
fetch
(
'
http://127.0.0.1:8000/get-lecture-activity-detection-for-label/?video_name=
'
+
global_video_name
+
'
&label=
'
+
label
)
.
then
((
res
)
=>
res
.
json
())
.
then
((
out
)
=>
createDetectedStudentFrames
(
out
))
.
catch
((
error
)
=>
alert
(
'
this is the error:
'
+
error
))
});
//creating the detected students frames
function
createDetectedStudentFrames
(
detections
)
{
//creating the html string
let
htmlString
=
""
;
//iterating through the student
detections
.
people
.
map
((
student
)
=>
{
let
title
=
student
.
split
(
'
.
'
)[
0
];
let
images
=
""
;
htmlString
+=
"
<div class='row p-3 student-detection-rows'>
"
;
let
student_count
=
0
;
//iterating through the frames
detections
.
response
.
map
((
frame
)
=>
{
let
frame_detections
=
frame
.
detections
;
if
(
frame_detections
.
includes
(
student
))
{
if
(
student_count
===
0
)
{
images
+=
"
<li class='list-group-item frame-0' id='image_0_
"
+
title
+
"
'>
"
;
}
else
{
images
+=
"
<li class='list-group-item other-student-frames' id='image_
"
+
student_count
+
"
_
"
+
title
+
"
' hidden>
"
;
}
images
+=
"
<img src='{% static '' %}FirstApp/Activity/
"
+
global_video_name
+
"
/
"
+
frame
.
frame
+
"
/
"
+
student
+
"
' width='200' height='200'>
"
;
images
+=
"
</li>
"
;
//increment the student count
student_count
++
;
}
});
htmlString
+=
"
<ul class='list-group'>
"
;
htmlString
+=
"
<li class='list-group-item'>
"
;
htmlString
+=
"
<div class='row m-3'>
"
;
htmlString
+=
"
<h4 class='font-weight-bold'>Student ID: <span>
"
+
title
+
"
</span></h4>
"
;
htmlString
+=
"
</div>
"
;
htmlString
+=
"
</li>
"
;
{
#
htmlString
+=
"
<div class='row m-3'></div>
"
;
#
}
htmlString
+=
"
<li class='list-group-item'>
"
;
htmlString
+=
"
<div class='row'>
"
;
htmlString
+=
"
<ul class='list-group list-group-horizontal student_detection_lists' style='overflow-x: scroll'>
"
;
htmlString
+=
images
;
htmlString
+=
"
</ul>
"
;
htmlString
+=
"
</div>
"
;
htmlString
+=
"
</li>
"
;
htmlString
+=
"
<li class='list-group-item'>
"
;
htmlString
+=
"
<div class='slidecontainer'>
"
;
htmlString
+=
"
<div class='row m-3'></div>
"
;
htmlString
+=
"
<div class='row'>
"
;
htmlString
+=
"
<span><i class='fas fa-play play-pause-icon-student-frames' id='icon_
"
+
title
+
"
'></i></span>
"
;
htmlString
+=
"
</div>
"
;
htmlString
+=
"
<input type='range' min='1' max='100' value='0' class='slider' id='slider_
"
+
title
+
"
'>
"
;
htmlString
+=
"
<p>No of frames: <span id='demo_
"
+
title
+
"
'></span></p>
"
;
htmlString
+=
"
</div>
"
;
//creating the html string, iteratively
response
.
map
((
frame
)
=>
{
let
frame_name
=
frame
.
frame_name
;
let
phone_perct
=
Math
.
round
(
frame
.
phone_perct
,
0
);
let
listen_perct
=
Math
.
round
(
frame
.
listen_perct
,
0
);
{
#
let
listen_perct
=
Math
.
round
(
frame
.
listening_perct
,
0
);
#
}
let
note_perct
=
Math
.
round
(
frame
.
note_perct
,
0
);
//append to the html string
//phone checking
htmlString
+=
"
<div class='progress_area' id='progress_
"
+
frame_name
+
"
' hidden>
"
;
htmlString
+=
"
<h4 class='small font-weight-bold'>Phone checking</h4>
"
;
htmlString
+=
"
<span class='float-right' id='phone_checking_instant_
"
+
frame_name
+
"
'>
"
+
phone_perct
+
"
%</span>
"
;
htmlString
+=
"
<div class='progress mb-4'>
"
;
htmlString
+=
"
<div class='progress-bar bg-warning' role='progressbar' id='phone_checking_instant_value_
"
+
frame_name
+
"
' style='width:
"
+
phone_perct
+
"
%' aria-valuenow='40' aria-valuemin='0' aria-valuemax='100'></div>
"
;
htmlString
+=
"
</div>
"
;
htmlString
+=
"
</li>
"
;
htmlString
+=
"
</ul>
"
;
});
//disappearing the loader
$
(
'
#detection_student_loader
'
).
attr
(
'
hidden
'
,
true
);
//append to the relevant html card content
$
(
'
#detection_students
'
).
append
(
htmlString
);
}
let
studentTimeVar
=
null
;
//playing the frames for each student detection
$
(
document
).
on
(
'
click
'
,
'
.play-pause-icon-student-frames
'
,
function
(
e
)
{
//defining the two possible classes
let
play_class
=
"
fas fa-play play-pause-icon-student-frames
"
;
let
pause_class
=
"
fas fa-pause play-pause-icon-student-frames
"
;
//retrieving the current icon class
let
current_class
=
$
(
this
).
attr
(
'
class
'
);
//assigning the correct class based on the icon clicked
let
new_class
=
(
current_class
===
play_class
)
?
pause_class
:
play_class
;
//setting the new class
$
(
this
).
attr
(
'
class
'
,
new_class
);
//extracting the title pf the clicked icon
let
title_part
=
$
(
this
).
attr
(
'
id
'
);
let
title
=
title_part
.
split
(
"
_
"
)[
1
];
//handling the slider
let
slider
=
document
.
getElementById
(
"
slider_
"
+
title
);
let
output
=
document
.
getElementById
(
"
demo_
"
+
title
);
//when the button is playing
if
(
current_class
===
play_class
)
{
studentTimeVar
=
setInterval
(()
=>
{
let
value
=
slider
.
value
;
let
new_slider_value
=
Number
(
value
)
+
1
;
slider
.
value
=
new_slider_value
;
output
.
innerHTML
=
new_slider_value
.
toString
();
let
selectedImage
=
'
#image_
'
+
Number
(
value
)
+
'
_
'
+
title
;
//displaying the relevant image
$
(
'
#image_0_
'
+
title
).
html
(
$
(
selectedImage
).
html
());
},
100
);
}
//when the button is paused
else
if
(
current_class
===
pause_class
)
{
clearInterval
(
studentTimeVar
);
}
});
//this is to handle the 'evaluate' button
$
(
'
#evaluate_button
'
).
click
(
function
()
{
//hide the message
$
(
'
#no_evaluated_student_content
'
).
attr
(
'
hidden
'
,
true
);
//show the loader
$
(
'
#evaluate_student_loader
'
).
attr
(
'
hidden
'
,
false
);
//using the fetch api
fetch
(
'
http://127.0.0.1:8000/get-lecture-activity-student-evaluation/?video_name=
'
+
global_video_name
)
.
then
((
res
)
=>
res
.
json
())
.
then
((
out
)
=>
evaluate_student
(
out
))
.
catch
((
error
)
=>
alert
(
'
this is the error:
'
+
error
))
});
//to create html for evaluate function
function
evaluate_student
(
response
)
{
let
htmlString
=
""
;
//iterating through the student
response
.
people
.
map
((
student
)
=>
{
let
title
=
student
.
split
(
'
.
'
)[
0
];
let
images
=
""
;
htmlString
+=
"
<div class='row p-3 student-evaluation-rows'>
"
;
let
student_count
=
0
;
//iterating through the frames
response
.
response
.
map
((
frame
)
=>
{
let
frame_detections
=
frame
.
detections
;
let
frame_detection_length
=
frame_detections
.
length
;
if
(
frame_detections
.
includes
(
student
))
{
if
(
student_count
===
0
)
{
images
+=
"
<li class='list-group-item frame-0' id='image_0_evaluation
"
+
title
+
"
'>
"
;
}
else
{
images
+=
"
<li class='list-group-item other-student-frames' id='image_evaluation
"
+
student_count
+
"
_
"
+
title
+
"
' hidden>
"
;
}
images
+=
"
<img src='{% static '' %}FirstApp/Activity/
"
+
global_video_name
+
"
/
"
+
frame
.
frame
+
"
/
"
+
student
+
"
' width='200' height='200'>
"
;
images
+=
"
</li>
"
;
if
(
student_count
===
(
frame_detection_length
))
{
images
+=
"
<li class='list-group-item'>
"
;
images
+=
"
<button type='button' class='btn btn-dark individual-evaluation' id='evaluate_student_
"
+
title
+
"
'>evaluate</button>
"
;
images
+=
"
</li>
"
;
}
//increment the student count
student_count
++
;
}
});
htmlString
+=
"
<ul class='list-group'>
"
;
htmlString
+=
"
<li class='list-group-item'>
"
;
htmlString
+=
"
<div class='row m-3'>
"
;
htmlString
+=
"
<h4 class='font-weight-bold'>Student ID: <span>
"
+
title
+
"
</span></h4>
"
;
htmlString
+=
"
</div>
"
;
htmlString
+=
"
</li>
"
;
{
#
htmlString
+=
"
<div class='row m-3'></div>
"
;
#
}
htmlString
+=
"
<li class='list-group-item'>
"
;
htmlString
+=
"
<div class='row'>
"
;
htmlString
+=
"
<ul class='list-group list-group-horizontal student_detection_lists' style='overflow-x: scroll'>
"
;
htmlString
+=
images
;
htmlString
+=
"
</ul>
"
;
//note taking
htmlString
+=
"
<h4 class='small font-weight-bold'>Writing</h4>
"
;
htmlString
+=
"
<span class='float-right' id='note_taking_instant_
"
+
frame_name
+
"
'>
"
+
note_perct
+
"
%</span>
"
;
htmlString
+=
"
<div class='progress mb-4'>
"
;
htmlString
+=
"
<div class='progress-bar' role='progressbar' id='note_taking_instant_value_
"
+
frame_name
+
"
' style='width:
"
+
note_perct
+
"
%' aria-valuenow='0' aria-valuemin='0' aria-valuemax='100'></div>
"
;
htmlString
+=
"
</div>
"
;
htmlString
+=
"
</li>
"
;
htmlString
+=
"
<li class='list-group-item'>
"
;
htmlString
+=
"
<div class='slidecontainer'>
"
;
htmlString
+=
"
<div class='row m-3'></div>
"
;
htmlString
+=
"
<div class='row'>
"
;
htmlString
+=
"
<span><i class='fas fa-play play-pause-icon-student-evaluations' id='icon_
"
+
title
+
"
'></i></span>
"
;
htmlString
+=
"
</div>
"
;
htmlString
+=
"
<input type='range' min='1' max='100' value='0' class='slider' id='slider_evaluation
"
+
title
+
"
'>
"
;
htmlString
+=
"
<p>No of frames: <span id='demo_evaluation
"
+
title
+
"
'></span></p>
"
;
htmlString
+=
"
</div>
"
;
htmlString
+=
"
</div>
"
;
htmlString
+=
"
</li>
"
;
htmlString
+=
"
</ul>
"
;
});
//disappearing the loader
$
(
'
#evaluate_student_loader
'
).
attr
(
'
hidden
'
,
true
);
//append to the relevant html card content
$
(
'
#evaluation_students
'
).
append
(
htmlString
);
}
//interval variable for individual students
let
studentEvaluationVar
=
null
;
//playing the frames for each student evaluation
$
(
document
).
on
(
'
click
'
,
'
.play-pause-icon-student-evaluations
'
,
function
(
e
)
{
//defining the two possible classes
let
play_class
=
"
fas fa-play play-pause-icon-student-evaluations
"
;
let
pause_class
=
"
fas fa-pause play-pause-icon-student-evaluations
"
;
//retrieving the current icon class
let
current_class
=
$
(
this
).
attr
(
'
class
'
);
//assigning the correct class based on the icon clicked
let
new_class
=
(
current_class
===
play_class
)
?
pause_class
:
play_class
;
//setting the new class
$
(
this
).
attr
(
'
class
'
,
new_class
);
//extracting the title pf the clicked icon
let
title_part
=
$
(
this
).
attr
(
'
id
'
);
let
title
=
title_part
.
split
(
"
_
"
)[
1
];
//handling the slider
let
slider
=
document
.
getElementById
(
"
slider_evaluation
"
+
title
);
let
output
=
document
.
getElementById
(
"
demo_evaluation
"
+
title
);
//when the button is playing
if
(
current_class
===
play_class
)
{
studentEvaluationVar
=
setInterval
(()
=>
{
let
value
=
slider
.
value
;
let
new_slider_value
=
Number
(
value
)
+
1
;
slider
.
value
=
new_slider_value
;
output
.
innerHTML
=
new_slider_value
.
toString
();
let
selectedImage
=
'
#image_evaluation
'
+
Number
(
value
)
+
'
_
'
+
title
;
//displaying the relevant image
$
(
'
#image_0_evaluation
'
+
title
).
html
(
$
(
selectedImage
).
html
());
},
100
);
}
//when the button is paused
else
if
(
current_class
===
pause_class
)
{
clearInterval
(
studentEvaluationVar
);
}
});
//end of student evaluation video frame
//listening
htmlString
+=
"
<h4 class='small font-weight-bold'>Listening</h4>
"
;
htmlString
+=
"
<span class='float-right' id='listening_instant_
"
+
frame_name
+
"
'>
"
+
listen_perct
+
"
%</span>
"
;
htmlString
+=
"
<div class='progress mb-4'>
"
;
htmlString
+=
"
<div class='progress-bar bg-info' role='progressbar' id='listening_instant_value_
"
+
frame_name
+
"
' style='width:
"
+
listen_perct
+
"
%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>
"
;
htmlString
+=
"
</div>
"
;
//to evaluate the individual student
$
(
document
).
on
(
'
click
'
,
'
.individual-evaluation
'
,
function
(
e
)
{
//ending the progress area
htmlString
+=
"
</div>
"
;
let
individual_id
=
$
(
this
).
attr
(
'
id
'
);
let
student_name
=
individual_id
.
split
(
'
_
'
)[
2
];
student_name
+=
"
.png
"
;
let
html
=
$
(
this
).
html
();
});
//a
fter clicking, change
the html
$
(
this
).
html
(
"
<span class='font-italic'>loading...</span>
"
);
//a
ppend
the html
$
(
'
#student_video_column
'
).
append
(
htmlString
);
//
fetching from the API
fetch
(
'
http://127.0.0.1:8000/
get-lecture-activity-individual-student-evaluation/?video_name=
'
+
global_video_name
+
'
&student_name=
'
+
student
_name
)
//
start retrieving lecturer activity frame recognition
fetch
(
'
http://127.0.0.1:8000/
lecturer/get-lecturer-video-frame-recognitions/?video_name=
'
+
global_lecturer_video
_name
)
.
then
((
res
)
=>
res
.
json
())
.
then
((
out
)
=>
displayIndividualStudentActivity
(
out
.
response
,
e
,
student_name
))
.
catch
((
error
)
=>
alert
(
'
something went wrong
'
));
});
//this function will display the individual student emotions
function
displayIndividualStudentActivity
(
res
,
e
,
title
)
{
let
phone_perct
=
Math
.
round
(
res
.
phone_perct
,
1
);
let
writing_perct
=
Math
.
round
(
res
.
writing_perct
,
1
);
let
listening_perct
=
Math
.
round
(
res
.
listening_perct
,
1
);
//set the percentage values
//$('#talking_individual_perct').text(res.talking_perct + '%');
$
(
'
#phone_individual_perct
'
).
text
(
phone_perct
+
'
%
'
);
$
(
'
#writing_individual_perct
'
).
text
(
writing_perct
+
'
%
'
);
$
(
'
#listening_individual_perct
'
).
text
(
listening_perct
+
'
%
'
);
//set the width
//$('#talking_individual_width').width(res.talking_perct + '%');
$
(
'
#phone_individual_width
'
).
width
(
phone_perct
+
'
%
'
);
$
(
'
#writing_individual_width
'
).
width
(
writing_perct
+
'
%
'
);
$
(
'
#listening_individual_width
'
).
width
(
listening_perct
+
'
%
'
);
//open the student individual modal
$
(
'
#student_individual_modal
'
).
modal
();
//set the button to default
e
.
target
.
innerHTML
=
"
<span>evaluate</span>
"
;
.
then
((
out
)
=>
displayLecturerActivityRecognitionForFrame
(
out
))
.
catch
((
err
)
=>
alert
(
'
error:
'
+
err
))
}
//to handle the 'integrate' modal
$
(
'
#integrate_gaze
'
).
click
(
function
()
{
//define the student video src
let
video_src
=
"
{% static '' %}FirstApp/videos/
"
+
global_video_name
;
//assign the video src
$
(
'
#student_video
'
).
attr
(
'
src
'
,
video_src
);
$
(
'
#integrate_modal
'
).
modal
();
//fetch data from the API
fetch
(
'
http://127.0.0.1:8000/get-lecture-activity-for-frame?video_name=
'
+
global_video_name
)
.
then
((
res
)
=>
res
.
json
())
.
then
((
out
)
=>
displayActivityRecognitionForFrame
(
out
.
response
))
.
catch
((
err
)
=>
alert
(
'
error:
'
+
err
));
});
//this function will load the activity recognition for frames
function
displayActivityRecognitionForFrame
(
response
)
{
function
display
Lecturer
ActivityRecognitionForFrame
(
response
)
{
//hide the loader
$
(
'
#
student
_video_progress_loader
'
).
attr
(
'
hidden
'
,
true
);
$
(
'
#
lecturer
_video_progress_loader
'
).
attr
(
'
hidden
'
,
true
);
//show the progress bars
$
(
'
#
student
_video_progress
'
).
attr
(
'
hidden
'
,
false
);
$
(
'
#
lecturer
_video_progress
'
).
attr
(
'
hidden
'
,
false
);
//creating the html string
let
htmlString
=
""
;
let
duration
=
1000
/
response
.
fps
;
lecturer_fps
=
Math
.
round
(
duration
,
0
);
console
.
log
(
'
lecturer fps:
'
,
lecturer_fps
);
//creating the html string, iteratively
response
.
map
((
frame
)
=>
{
let
frame_name
=
frame
.
frame_name
;
let
phone_perct
=
Math
.
round
(
frame
.
phone
_perct
,
0
);
let
listen_perct
=
Math
.
round
(
frame
.
listen
_perct
,
0
);
{
#
let
listen_perct
=
Math
.
round
(
frame
.
listening_perct
,
0
);
#
}
let
note_perct
=
Math
.
round
(
frame
.
note
_perct
,
0
);
//append to the html string
//phone check
ing
htmlString
+=
"
<div class='progress_area' id='progress_
"
+
frame_name
+
"
' hidden>
"
;
htmlString
+=
"
<h4 class='small font-weight-bold'>Phone check
ing</h4>
"
;
htmlString
+=
"
<span class='float-right' id='phone_checking_instant_
"
+
frame_name
+
"
'>
"
+
phone_perct
+
"
%</span>
"
;
htmlString
+=
"
<div class='progress mb-4'>
"
;
htmlString
+=
"
<div class='progress-bar bg-warning' role='progressbar' id='phone_checking_instant_value_
"
+
frame_name
+
"
' style='width:
"
+
phone_perct
+
"
%' aria-valuenow='40' aria-valuemin='0' aria-valuemax='100'></div>
"
;
htmlString
+=
"
</div>
"
;
//note tak
ing
htmlString
+=
"
<h4 class='small font-weight-bold'>Writ
ing</h4>
"
;
htmlString
+=
"
<span class='float-right' id='note_taking_instant_
"
+
frame_name
+
"
'>
"
+
note_perct
+
"
%</span>
"
;
htmlString
+=
"
<div class='progress mb-4'>
"
;
htmlString
+=
"
<div class='progress-bar' role='progressbar' id='note_taking_instant_value_
"
+
frame_name
+
"
' style='width:
"
+
note_perct
+
"
%' aria-valuenow='0' aria-valuemin='0' aria-valuemax='100'></div>
"
;
htmlString
+=
"
</div>
"
;
//listen
ing
htmlString
+=
"
<h4 class='small font-weight-bold'>Listen
ing</h4>
"
;
htmlString
+=
"
<span class='float-right' id='listening_instant_
"
+
frame_name
+
"
'>
"
+
listen_perct
+
"
%</span>
"
;
htmlString
+=
"
<div class='progress mb-4'>
"
;
htmlString
+=
"
<div class='progress-bar bg-info' role='progressbar' id='listening_instant_value_
"
+
frame_name
+
"
' style='width:
"
+
listen_perct
+
"
%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>
"
;
htmlString
+=
"
</div>
"
;
//ending the progress area
htmlString
+=
"
</div>
"
;
response
.
frame_recognitions
.
map
((
frame
)
=>
{
let
frame_name
=
frame
.
frame_name
;
let
sitting_perct
=
Math
.
round
(
frame
.
sitting
_perct
,
0
);
let
standing_perct
=
Math
.
round
(
frame
.
standing
_perct
,
0
);
{
#
let
listen_perct
=
Math
.
round
(
frame
.
listening_perct
,
0
);
#
}
let
walking_perct
=
Math
.
round
(
frame
.
walking
_perct
,
0
);
//append to the html string
//sitt
ing
htmlString
+=
"
<div class='progress_area' id='progress_lecturer_
"
+
frame_name
+
"
' hidden>
"
;
htmlString
+=
"
<h4 class='small font-weight-bold'>Sitt
ing</h4>
"
;
htmlString
+=
"
<span class='float-right' id='sitting_instant_
"
+
frame_name
+
"
'>
"
+
sitting_perct
+
"
%</span>
"
;
htmlString
+=
"
<div class='progress mb-4'>
"
;
htmlString
+=
"
<div class='progress-bar bg-warning' role='progressbar' id='sitting_instant_value_
"
+
frame_name
+
"
' style='width:
"
+
sitting_perct
+
"
%' aria-valuenow='40' aria-valuemin='0' aria-valuemax='100'></div>
"
;
htmlString
+=
"
</div>
"
;
//stand
ing
htmlString
+=
"
<h4 class='small font-weight-bold'>Stand
ing</h4>
"
;
htmlString
+=
"
<span class='float-right' id='standing_instant_
"
+
frame_name
+
"
'>
"
+
standing_perct
+
"
%</span>
"
;
htmlString
+=
"
<div class='progress mb-4'>
"
;
htmlString
+=
"
<div class='progress-bar' role='progressbar' id='standing_instant_value_
"
+
frame_name
+
"
' style='width:
"
+
standing_perct
+
"
%' aria-valuenow='0' aria-valuemin='0' aria-valuemax='100'></div>
"
;
htmlString
+=
"
</div>
"
;
//walk
ing
htmlString
+=
"
<h4 class='small font-weight-bold'>Walk
ing</h4>
"
;
htmlString
+=
"
<span class='float-right' id='walking_instant_
"
+
frame_name
+
"
'>
"
+
walking_perct
+
"
%</span>
"
;
htmlString
+=
"
<div class='progress mb-4'>
"
;
htmlString
+=
"
<div class='progress-bar bg-info' role='progressbar' id='walking_instant_value_
"
+
frame_name
+
"
' style='width:
"
+
walking_perct
+
"
%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>
"
;
htmlString
+=
"
</div>
"
;
//ending the progress area
htmlString
+=
"
</div>
"
;
});
//append the html
$
(
'
#student_video_column
'
).
append
(
htmlString
);
$
(
'
#lecturer_video_column
'
).
append
(
htmlString
);
}
...
...
@@ -815,19 +402,19 @@
//to handle the 'integrate' play button
$
(
'
#play_integrate_button
'
).
click
(
function
()
{
let
video
=
$
(
'
video
'
)[
0
];
let
video1
=
$
(
'
video
'
)[
1
];
let
test_video
=
document
.
getElementsByTagName
(
'
video
'
)[
0
];
let
play_class
=
'
btn btn-outline-danger play
'
;
let
pause_class
=
'
btn btn-outline-danger pause
'
;
let
count
=
0
;
let
count_lecturer
=
0
;
let
classes
=
$
(
this
).
attr
(
'
class
'
);
let
video_interval
=
setInterval
(()
=>
{
let
talking_number
=
Math
.
round
(
Math
.
random
()
*
100
,
0
);
let
phone_number
=
Math
.
round
(
Math
.
random
()
*
100
,
0
);
let
note_number
=
Math
.
round
(
Math
.
random
()
*
100
,
0
);
let
listening_number
=
Math
.
round
(
Math
.
random
()
*
100
,
0
);
//=====STUDENTS COLUMN=====
//get the relevant progress area
let
progress_area
=
"
progress_frame-
"
+
count
;
...
...
@@ -842,35 +429,53 @@
                //replace the current progress area with the selected one
                $('#student_video_progress').html(progress_area_html);

                //increment the count
                count++;

                //setting the values
                /*
                $('#talking_instant').text(talking_number + '%');
                $('#phone_checking_instant').text(phone_number + '%');
                $('#note_taking_instant').text(note_number + '%');
                $('#listening_instant').text(listening_number + '%');

                //setting the width
                $('#talking_instant_value').width(talking_number + '%');
                $('#phone_checking_instant_value').width(phone_number + '%');
                $('#note_taking_instant_value').width(note_number + '%');
                $('#listening_instant_value').width(listening_number + '%');
                */

                console.log('current frame (student): ', count);
            }, 33);

            let video_interval_lecturer = setInterval(() => {
                //=====LECTURER COLUMN=====
                //get the relevant progress area
                let progress_area_lecturer = "progress_lecturer_frame-" + count_lecturer;
                let progress_area_id_lecturer = "#" + progress_area_lecturer;

                //find the corresponding progress area
                let progress_area_html_lecturer = document.getElementById(progress_area_lecturer);

                //display the retrieved progress area
                $(progress_area_id_lecturer).attr('hidden', false);

                //replace the current progress area with the selected one
                $('#lecturer_video_progress').html(progress_area_html_lecturer);

                //increment the count
                count_lecturer++;

                console.log('current frame (lecturer): ', count_lecturer);
            }, lecturer_fps);

            //check for the current class
            if (classes === play_class) {
                $(this).text('Pause');
                $(this).attr('class', pause_class);
                video.play();
                video1.play();
            }
            else if (classes === pause_class) {
                $(this).text('Play');
                $(this).attr('class', play_class);
                video.pause();
                video1.pause();
            }

            //function to do when the video is paused
...
...
@@ -880,7 +485,13 @@
            video.onended = function (e) {
                //stop changing the activity values
                clearInterval(video_interval);
            };

            //function to do when the lecturer video is ended
            video1.onended = function (e) {
                //stop changing the activity values
                clearInterval(video_interval_lecturer);
            };
        });
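The student column advances its progress areas every 33 ms (roughly 30 fps), while the lecturer column uses `lecturer_fps`, which is computed elsewhere as 1000 divided by the video's frame rate. A small sketch of that derivation, assuming the backend reports the frame rate (the helper name is hypothetical):

// Hypothetical helper: convert a reported frame rate into the millisecond
// delay expected by setInterval (assumes fps > 0).
function frameIntervalMs(fps) {
    return Math.round(1000 / fps);
}

const studentDelay = frameIntervalMs(30);   // 33 ms per frame
const lecturerDelay = frameIntervalMs(25);  // 40 ms per frame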
...
...
@@ -893,22 +504,20 @@
            $('#generate_report_message').hide();

            fetch('http://127.0.0.1:8000/lecture-activity-report-generation/?lecturer=' + global_lecturer + '&subject=' + global_subject + '&date=' + global_lecture_date)
                .then((res) => res.json())
                .then((out) => {
                    //hide the loader and loading message
                    $('#generate_report_loader').attr('hidden', true);
                    $('#generate_report_loading_message').attr('hidden', true);
                    $('#generateReportModal').modal('hide');
                })
                .catch((err) => alert('error: ' + err));
        });
    });

</script>
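The report-generation call above parses the response as JSON without checking the HTTP status. A minimal alternative sketch of the same request with an explicit `response.ok` check (same endpoint and query parameters as above; the function name and error message are assumptions):

// Sketch only: same report-generation endpoint, with a status check so
// server-side failures are not silently parsed as JSON.
async function generateActivityReport(lecturer, subject, date) {
    const url = 'http://127.0.0.1:8000/lecture-activity-report-generation/'
        + '?lecturer=' + lecturer + '&subject=' + subject + '&date=' + date;
    const res = await fetch(url);
    if (!res.ok) {
        throw new Error('report generation failed: ' + res.status);
    }
    return res.json();
}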
...
...
@@ -931,11 +540,11 @@
{% load static %}

    <!-- Page Heading -->
{#    <div class="d-sm-flex align-items-center justify-content-between mb-4">#}
{#        <h1 class="h3 mb-0 text-gray-800">Student Activity Recognition</h1>#}
{#        <button type="button" data-target="#generateReportModal" data-toggle="modal" class="d-none d-sm-inline-block btn btn-sm btn-primary shadow-sm" id="generate_report_before" disabled><i#}
{#                class="fas fa-download fa-sm text-white-50"></i> Generate Report</button>#}
{#    </div>#}

    <div class="d-sm-flex align-items-center justify-content-between mb-4">
        <h1 class="h3 mb-0 text-gray-800">Student Activity Recognition</h1>
{#        <button type="button" data-target="#generateReportModal" data-toggle="modal" class="d-none d-sm-inline-block btn btn-sm btn-primary shadow-sm" id="generate_report_before" disabled><i#}
{#                class="fas fa-download fa-sm text-white-50"></i> Generate Report</button>#}
    </div>

    <!--first row -->
...
...
@@ -1084,81 +693,67 @@
                             alt="Loader">
                    </div>

                    <!--frames -->
                    <div class="text-center p-4" id="video_frames">

                        <!-- slide container -->
                        <div id="slidecontainer" hidden>
                            <div class="row m-3"></div>

                            <!-- play/pause icon -->
                            <div class="row">
                                <span><i class="fas fa-play" id="play_pause_icon_activity"></i></span>
                            </div>

                            <input type="range" min="1" max="100" value="0" class="slider" id="myActivityRange">
                            <p>No of frames: <span id="demo"></span></p>
                        </div>

                        <!--this area will display the progress bars -->
                        <div class="progress_area" hidden>
                            <!--talking with friends -->
                            <a href="#" class="btn btn-link labels" data-number="1" data-label="talking-with-friends">
                                <h4 class="small font-weight-bold">Talking with friends</h4>
                            </a>
                            <span class="float-right" id="talking_perct">40%</span>
                            <div class="progress mb-4">
                                <div class="progress-bar bg-danger" role="progressbar" id="talking_width" style="width: 20%"
                                     aria-valuenow="20" aria-valuemin="0" aria-valuemax="100"></div>
                            </div>

                            <!--phone checking -->
                            <a href="#" class="btn btn-link labels" data-number="0" data-label="phone-checking">
                                <h4 class="small font-weight-bold">Phone checking</h4>
                            </a>
                            <span class="float-right" id="phone_perct">45%</span>
                            <div class="progress mb-4">
                                <div class="progress-bar bg-warning" role="progressbar" id="phone_width" style="width: 40%"
                                     aria-valuenow="40" aria-valuemin="0" aria-valuemax="100"></div>
                            </div>

                            <!--note taking -->
                            <a href="#" class="btn btn-link labels" data-number="2" data-label="note-taking">
                                <h4 class="small font-weight-bold">Writing</h4>
                            </a>
                            <span class="float-right" id="writing_perct">50%</span>
                            <div class="progress mb-4">
                                <div class="progress-bar" role="progressbar" id="writing_width" style="width: 60%"
                                     aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div>
                            </div>

                            <!--listening -->
                            <a href="#" class="btn btn-link labels">
                                <h4 class="small font-weight-bold">Listening</h4>
                            </a>
                            <span class="float-right" id="listening_perct">60%</span>
                            <div class="progress mb-4">
                                <div class="progress-bar bg-info" role="progressbar" id="listening_width" style="width: 80%"
                                     aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div>
                            </div>

                            <!--evaluate button -->
                            <button type="button" class="btn btn-danger float-right" id="evaluate_button">Evaluate</button>
                        </div>
                    </div>
                    <!--graph tab -->
...
...
@@ -1215,66 +810,6 @@
            </div>

            <!--2nd column -->
{#            <div class="col-lg-6">#}
{#                <!--card content -->#}
{#                <div class="card shadow mb-4">#}
{#                    <!--card header -->#}
{#                    <div class="card-header py-3">#}
{#                        <h5 class="m-0 font-weight-bold text-primary">Frame Detections</h5>#}
{#                    </div>#}
{##}
{#                    <!--card body -->#}
{#                    <div class="text-center p-4" id="detection_frames">#}
{##}
{#                        <!--no content message-->#}
{#                        <div class="text-center p-2" id="no_detection_message_content">#}
{#                            <span class="font-italic">No frame is selected</span>#}
{#                        </div>#}
{##}
{#                        <div class="text-left m-3" id="detection_number_area" hidden>#}
{#                            <p>No of detections: <span id="no_of_detections"></span></p>#}
{#                        </div>#}
{#                        <!--the detection loader -->#}
{#                        <div class="text-center p-2" id="detection_loader" hidden>#}
{#                            <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"#}
{#                                 alt="Loader">#}
{#                        </div>#}
{#                    </div>#}
{#                </div>#}

                <!--detection person card -->
{#                <div class="card shadow mb-4">#}
{#                    <!--card header -->#}
{#                    <div class="card-header py-3">#}
{#                        <h5 class="m-0 font-weight-bold text-primary">Detected Students (by activity#}
{# type)</h5>#}
{#                    </div>#}
{##}
{#                    <!--card body -->#}
{#                    <div class="text-center p-4" id="detection_students">#}
{#                        <!--activity type line -->#}
{#                        <div class="text-center p-2" id="activity_type" hidden>#}
{#                            <p>Activity Type: <span class="font-weight-bold" id="activity_type_text"></span>#}
{#                            </p>#}
{#                        </div>#}
{##}
{#                        <!--no content message-->#}
{#                        <div class="text-center p-2" id="no_detection_student_content">#}
{#                            <span class="font-italic">No activity type is selected</span>#}
{#                        </div>#}
{##}
{#                        <!--the detection student loader -->#}
{#                        <div class="text-center p-2" id="detection_student_loader" hidden>#}
{#                            <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"#}
{#                                 alt="Loader">#}
{#                        </div>#}
{##}
{#                    </div>#}
{#                </div>#}
{#            </div>#}

            <!--2nd column -->
            <div class="col-lg-6">
                <!--card -->
...
...
@@ -1292,7 +827,7 @@
                    <!--button -->
                    <div class="text-right m-4">
                        <button type="button" class="btn btn-outline-success" id="integrate_activity">Process</button>
                    </div>
...
...
@@ -1310,32 +845,32 @@
                <!--1st column -->
                <div class="col-lg-6">
{#                    <!--card -->#}
{#                    <div class="card shadow mb-4">#}
{#                        <!--card header -->#}
{#                        <div class="card-header">#}
{#                            <h5 class="m-0 font-weight-bold text-primary">Evaluated Students</h5>#}
{#                        </div>#}
{##}
{#                        <!--card body -->#}
{#                        <div class="card-body" id="evaluation_students">#}
{##}
{#                            <!--no content message-->#}
{#                            <div class="text-center p-2" id="no_evaluated_student_content">#}
{#                                <span class="font-italic">Press 'Evaluate' button to evaluate students</span>#}
{#                            </div>#}
{##}
{#                            <!--the detection student loader -->#}
{#                            <div class="text-center p-2" id="evaluate_student_loader" hidden>#}
{#                                <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"#}
{#                                     alt="Loader">#}
{#                            </div>#}
{#                            <!--end of student detection loader -->#}
{##}
{##}
{#                        </div>#}
{##}
{#                    </div>#}
                </div>
...
...
@@ -1508,7 +1043,8 @@
                </div>
            </div>
            <div class="modal-footer">
                <button type="button" class="btn btn-primary" data-dismiss="modal" id="generate_report_btn">Yes</button>
                <button type="button" class="btn btn-danger" data-dismiss="modal">No</button>
            </div>
        </div>
...
...
@@ -1558,7 +1094,7 @@
                            <div class="progress mb-4">
                                <div class="progress-bar bg-danger" role="progressbar" id="talking_instant_value"
                                     {#style="width: 0%"#}
                                     aria-valuenow="20" aria-valuemin="0" aria-valuemax="100"></div>
                            </div>
...
...
@@ -1569,7 +1105,7 @@
                            <div class="progress mb-4">
                                <div class="progress-bar bg-warning" role="progressbar" id="phone_checking_instant_value"
                                     {#style="width: 0%"#}
                                     aria-valuenow="40" aria-valuemin="0" aria-valuemax="100"></div>
                            </div>
...
...
@@ -1580,7 +1116,7 @@
                            <div class="progress mb-4">
                                <div class="progress-bar" role="progressbar" id="note_taking_instant_value"
                                     {#style="width: 0%"#}
                                     aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div>
                            </div>
...
...
@@ -1591,7 +1127,7 @@
                            <div class="progress mb-4">
                                <div class="progress-bar bg-info" role="progressbar" id="listening_instant_value"
                                     {#style="width: 80%"#}
                                     aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div>
                            </div>
...
...
@@ -1601,24 +1137,74 @@
            <!--end of 1st column -->

            <!--2nd column -->
            <div class="col-md-6" id="lecturer_video_column">

                <div class="text-center">
                    <span class="h3 font-italic font-weight-bold">Lecturer Performance</span>
                </div>

{#                <!--temporary text -->#}
{#                <div class="text-center" id="temp_lecturer_text">#}
{#                    <span class="font-italic">No video was found</span>#}
{#                </div>#}

                <!--display lecturer video -->
                <div class="text-center m-3" id="lecturer_video_section">
                    <video width="500" height="300" id="lecturer_video" controls>
                        <source src="#" type="video/mp4">
                        Your browser does not support the video tag.
                    </video>
                </div>
                <!--end of lecturer video section -->

                <!-- ajax loader section -->
                <div class="text-center mt-3" id="lecturer_video_progress_loader">
                    <img src="{% static 'FirstApp/images/ajax-loader-1.gif' %}" alt="loader">
                </div>

                <!--progress bar section -->
                <div class="progress_area" id="lecturer_video_progress" hidden>

                    <!--sitting -->
                    <h4 class="small font-weight-bold">Sitting</h4>
                    <span class="float-right" id="sitting_instant">0%</span>
                    <div class="progress mb-4">
                        <div class="progress-bar bg-warning" role="progressbar" id="sitting_instant_value"
                             {#style="width: 0%"#}
                             aria-valuenow="40" aria-valuemin="0" aria-valuemax="100"></div>
                    </div>

                    <!--standing -->
                    <h4 class="small font-weight-bold">Standing</h4>
                    <span class="float-right" id="standing_instant">0%</span>
                    <div class="progress mb-4">
                        <div class="progress-bar" role="progressbar" id="standing_instant_value"
                             {#style="width: 0%"#}
                             aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div>
                    </div>

                    <!--walking-->
                    <h4 class="small font-weight-bold">Walking</h4>
                    <span class="float-right" id="walking_instant">0%</span>
                    <div class="progress mb-4">
                        <div class="progress-bar bg-info" role="progressbar" id="walking_instant_value"
                             {#style="width: 80%"#}
                             aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div>
                    </div>
                </div>
                <!--end of progress bar section -->

                <!-- video -->
{#                <video width="500" height="300" id="lecturer_video" controls>#}
{#                    <source src="#"#}
{#                            type="video/mp4">#}
{#                    Your browser does not support the video tag.#}
{#                </video>#}
            </div>
            <!--end of lecture video section -->
...
...
FirstApp/templates/FirstApp/admin_login.html
0 → 100644
View file @
d8f6824a
{% load static %}

<!DOCTYPE html>
<html lang="en">

<head>
    <meta charset="utf-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
    <meta name="description" content="">
    <meta name="author" content="">

    <title>SLPES</title>

    <!-- Custom fonts for this template-->
    <link href="{% static 'FirstApp/vendor/fontawesome-free/css/all.min.css' %}" rel="stylesheet" type="text/css">
    <link href="https://fonts.googleapis.com/css?family=Nunito:200,200i,300,300i,400,400i,600,600i,700,700i,800,800i,900,900i" rel="stylesheet">

    <!-- Custom styles for this template-->
    <link href="{% static 'FirstApp/css/sb-admin-2.min.css' %}" rel="stylesheet">
</head>

<body class="bg-gradient-primary">

<div class="container">

    <!-- Outer Row -->
    <div class="row justify-content-center">

        <div class="col-xl-10 col-lg-12 col-md-9">

            <div class="card o-hidden border-0 shadow-lg my-5">
                <div class="card-body p-0">
                    <!-- Nested Row within Card Body -->
                    <div class="row">
                        <div class="col-lg-6 d-none d-lg-block">
                            <img src="{% static 'FirstApp/images/admin.jpg' %}" width="400" height="600" alt="No image">
                        </div>
                        <div class="col-lg-6">
                            <div class="p-5">
                                <div class="text-center">
                                    <h1 class="h4 text-gray-900 mb-4">Welcome Back!</h1>
                                </div>
                                <!--form -->
                                <form action="/process-admin-login" method="POST" name="loginForm" class="user">
                                    {% csrf_token %}
                                    <div class="form-group">
                                        <input type="email" name="email" class="form-control form-control-user"
                                               id="exampleInputEmail" aria-describedby="emailHelp"
                                               placeholder="Enter Email Address...">
                                    </div>
                                    <div class="form-group">
                                        <input type="password" name="password" class="form-control form-control-user"
                                               id="exampleInputPassword" placeholder="Password">
                                        <div class="alert alert-danger m-4">{{ message }}</div>
                                    </div>
                                    <div class="form-group">
                                        <div class="custom-control custom-checkbox small">
                                            <input type="checkbox" class="custom-control-input" id="customCheck">
                                            <label class="custom-control-label" for="customCheck">Remember Me</label>
                                        </div>
                                    </div>
                                    <button type="submit" class="btn btn-primary btn-user btn-block">Login</button>
                                    <hr>
                                </form>
                                <hr>
                            </div>
                        </div>
                    </div>
                </div>
            </div>

        </div>

    </div>

</div>

<!-- Bootstrap core JavaScript-->
<script src="{% static 'FirstApp/vendor/jquery/jquery.min.js' %}"></script>
<script src="{% static 'FirstApp/vendor/bootstrap/js/bootstrap.bundle.min.js' %}"></script>

<!-- Core plugin JavaScript-->
<script src="{% static 'FirstApp/vendor/jquery-easing/jquery.easing.min.js' %}"></script>

<!-- Custom scripts for all pages-->
<script src="{% static 'FirstApp/js/sb-admin-2.min.js' %}"></script>

</body>

</html>
FirstApp/templates/FirstApp/emotion.html
View file @
d8f6824a
...
...
@@ -29,6 +29,8 @@
    var global_lecture_video_id = '';
    var global_video_name = '';
    var global_lecturer_subject_index = 0;
    var global_lecturer_video_name = '';
    var lecturer_fps = 0;

    //jquery
    $(document).ready(function () {
...
...
@@ -170,13 +172,14 @@
            fetch('http://127.0.0.1:8000/get-lecture-emotion/?lecture_video_id=' + global_lecture_video_id + '&lecture_video_name=' + global_video_name)
                .then((res) => res.json())
                .then((out) => {
                    let frames = createFrames(out);
                    return frames
                    {#let frames = createFrames(out);#}
                    {#return frames#}
                    displayActivity(out)
                })
                .then((obj) => {
                    $('#video_frames').prepend(obj);
                    {#$('#video_frames').prepend(obj);#}
                    $('#frame_loader').attr('hidden', true);
                    $('#slidecontainer').attr('hidden', false);
                    {#$('#slidecontainer').attr('hidden', false);#}
                })
                .catch((error) => alert('this is the error: ' + error));
        });
...
...
@@ -216,6 +219,7 @@
        //to handle the 'btn-success' (process) button
        $(document).on('click', '.btn-success', function (e) {

            //sending the POST request to process the lecture activities
            fetch('http://127.0.0.1:8000/process-lecture-emotion/?lecture_video_name=' + global_video_name + '&lecture_video_id=' + global_lecture_video_id)
                .then((res) => res.json())
...
@@ -232,614 +236,189 @@
        }

        //this section is responsible for displaying the frames as video
        //creating the frame content
        function createFrames(res) {
            let main_frame_content = "<div class='row' id='main_frames'>";
            main_frame_content += "<ul class='list-group list-group-horizontal'>";
            let count = 0;

            //loop through the frames
            res.extracted.map((image) => {
                let img_src = "";
                let len = image.detections.length;

                if (count === 0) {
                    main_frame_content += "<li class='list-group-item text-center' id='image_0'>";
                    img_src = "<img src='{% static '' %}FirstApp/activity/" + global_video_name + "/" + res.extracted[0].frame + "/" + res.extracted[0].detections[0] + "' width='400' height='400'>";
                }
                else {
                    main_frame_content += "<li class='list-group-item other-frames' id='image_" + count + "' hidden>";
                    img_src = "<img src='{% static '' %}FirstApp/activity/" + global_video_name + "/" + image.frame + "/" + image.detections[len - 1] + "' class='img-link' width='400' height='400'>";
                }

                main_frame_content += img_src;
                main_frame_content += "</li>";
                count++;
            });

            main_frame_content += "</ul>";
            main_frame_content += "</div>";

            //setting the min, max values of the slider
            $('#myActivityRange').attr({'min': 0, 'max': count});

            //display the progress bars
            displayActivity(res);

            return main_frame_content;
        }
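createFrames walks `res.extracted`, expecting each entry to carry a frame folder name and a list of detection image files, and the number of entries becomes the slider's max. A hypothetical example of that shape (an assumption inferred from the property accesses above):

// Hypothetical response consumed by createFrames (shape is an assumption).
const exampleResponse = {
    extracted: [
        { frame: "frame-0", detections: ["student-01.png", "student-02.png"] },
        { frame: "frame-1", detections: ["student-01.png"] }
    ]
};

// the frame count drives the slider's max value, as in the function above
console.log(exampleResponse.extracted.length);   // 2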
        //declaring the variable for setInterval function
        let timeVar = null;

        //handling the play button
        $('#play_pause_icon_activity').click(function () {
            //defining the two possible classes
            let play_class = "fas fa-play";
            let pause_class = "fas fa-pause";

            //retrieving the current icon class
            let current_class = $(this).attr('class');

            //assigning the correct class based on the icon clicked
            let new_class = (current_class === play_class) ? pause_class : play_class;

            //setting the new class
            $(this).attr('class', new_class);

            //handling the slider
            let slider = document.getElementById("myActivityRange");
            let output = document.getElementById("demo");

            //when the button is playing
            if (current_class === play_class) {
                timeVar = setInterval(() => {
                    let value = slider.value;
                    let new_slider_value = Number(value) + 1;
                    slider.value = new_slider_value;
                    output.innerHTML = new_slider_value.toString();

                    let selectedImage = '#image_' + Number(value);

                    //displaying the relevant image
                    $('#image_0').html($(selectedImage).html());
                }, 50);
            }
            //when the button is paused
            else if (current_class === pause_class) {
                clearInterval(timeVar);
            }
        });
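The interval above keeps incrementing the slider even after it reaches its maximum. A small sketch of a guard that would stop the playback at the last frame (the helper and its callback are additions, not part of the original handler):

// Hypothetical guard: stop advancing once the slider hits its max value.
function advanceSlider(slider, output, onFinished) {
    const next = Number(slider.value) + 1;
    if (next > Number(slider.max)) {
        onFinished();           // e.g. clearInterval(timeVar)
        return;
    }
    slider.value = next;
    output.innerHTML = next.toString();
}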
        //handling the slider
        let slider = document.getElementById("myActivityRange");
        let output = document.getElementById("demo");
        output.innerHTML = slider.value;

        slider.oninput = function () {
            output.innerHTML = this.value;
            let selectedImage = '#image_' + Number(this.value);

            //hide
            {#$('#image_0').attr('hidden', true);#}
            $('#image_0').html($(selectedImage).html());

            //setting the selected image
            {#$(selectedImage).attr('hidden', false);#}
        };
        $(document).on('click', '.img-link', function (e) {
            //removing previously displayed detections
            $('.detections').remove();

            //removing the no-content message
            $('#no_detection_message_content').hide();

            //appearing the loader
            $('#detection_loader').attr('hidden', false);

            let img_src_arr = e.target.src.split('/');
            let len = img_src_arr.length;
            let src = img_src_arr[len - 1];
            let frame_name_arr = src.split('.');
            let frame_name = frame_name_arr[0];

            //fetching the detection for the selected frame
            fetch('http://127.0.0.1:8000/get-lecture-activity-frame-detection/?video_name=' + global_video_name + "&frame_name=" + frame_name)
                .then((res) => res.json())
                .then((out) => displayDetections(out.detections, frame_name))
                .catch((error) => alert('this is an error'));
        });
//the function to display detections
function
displayDetections
(
detections
,
frame_name
)
{
let
img_string
=
''
;
let
no_of_detections
=
detections
.
length
;
//disabling the loader
$
(
'
#detection_loader
'
).
attr
(
'
hidden
'
,
true
);
//appearing the no of detections number area
$
(
'
#detection_number_area
'
).
attr
(
'
hidden
'
,
false
);
$
(
'
#no_of_detections
'
).
text
(
no_of_detections
);
//to handle the 'integrate' modal
$
(
'
#integrate_emotion
'
).
click
(
function
()
{
//define the student video src
let
video_src
=
"
{% static '' %}FirstApp/videos/
"
+
global_video_name
;
detections
.
map
((
detection
)
=>
{
img_string
+=
"
<img src='{% static '' %}FirstApp/activity/
"
+
global_video_name
+
"
/
"
+
frame_name
+
"
/
"
+
detection
+
"
' class='detections m-2' width='100' height='100' >
"
})
;
{
#
global_lecturer_video_name
=
"
Test_1.mp4
"
;
#
}
{
#
global_lecturer_video_name
=
"
Test_2.mp4
"
;
#
}
global_lecturer_video_name
=
"
Test_3.mp4
"
;
$
(
'
#detection_frames
'
).
prepend
(
img_string
);
//define the lecturer video src
let
lecturer_video_src
=
"
{% static '' %}FirstApp/lecturer_videos/
"
+
global_lecturer_video_name
;
}
//assign the video src
$
(
'
#student_video
'
).
attr
(
'
src
'
,
video_src
);
//listening for click events in labels
$
(
'
.labels
'
).
click
(
function
()
{
//assign the video src
$
(
'
#lecturer_video
'
).
attr
(
'
src
'
,
lecturer_video_src
);
let
label
=
Number
(
$
(
this
).
attr
(
'
data-number
'
));
let
label_name
=
$
(
this
).
attr
(
'
data-label
'
);
$
(
'
#integrate_modal
'
).
modal
();
//removing the previous student detection lists
$
(
'
.student_detection_lists
'
).
remove
();
//appearing the loader
$
(
'
#detection_student_loader
'
).
attr
(
'
hidden
'
,
false
);
//appearing the loader
$
(
'
#activity_type
'
).
attr
(
'
hidden
'
,
false
);
$
(
'
#activity_type_text
'
).
text
(
label_name
);
//disappearing the no content message
$
(
'
#no_detection_student_content
'
).
attr
(
'
hidden
'
,
true
);
//fetch data from the API
//fetching from the api
fetch
(
'
http://127.0.0.1:8000/get-lecture-activity-detection-for-label/?video_name=
'
+
global_video_name
+
'
&label=
'
+
label
)
fetch
(
'
http://127.0.0.1:8000/get-lecture-emotion-for-frame?video_name=
'
+
global_video_name
)
.
then
((
res
)
=>
res
.
json
())
.
then
((
out
)
=>
createDetectedStudentFrames
(
out
))
.
catch
((
error
)
=>
alert
(
'
this is the error:
'
+
error
))
});
//creating the detected students frames
function
createDetectedStudentFrames
(
detections
)
{
let
htmlString
=
""
;
//iterating through the student
detections
.
people
.
map
((
student
)
=>
{
let
title
=
student
.
split
(
'
.
'
)[
0
];
let
images
=
""
;
htmlString
+=
"
<div class='row p-3 student-detection-rows'>
"
;
let
student_count
=
0
;
.
then
((
out
)
=>
displayEmotionRecognitionForFrame
(
out
.
response
))
.
catch
((
err
)
=>
alert
(
'
error:
'
+
err
));
//iterating through the frames
detections
.
response
.
map
((
frame
)
=>
{
let
frame_detections
=
frame
.
detections
;
if
(
frame_detections
.
includes
(
student
))
{
if
(
student_count
===
0
)
{
images
+=
"
<li class='list-group-item frame-0' id='image_0_
"
+
title
+
"
'>
"
;
}
else
{
images
+=
"
<li class='list-group-item other-student-frames' id='image_
"
+
student_count
+
"
_
"
+
title
+
"
' hidden>
"
;
}
images
+=
"
<img src='{% static '' %}FirstApp/Activity/
"
+
global_video_name
+
"
/
"
+
frame
.
frame
+
"
/
"
+
student
+
"
' width='200' height='200'>
"
;
images
+=
"
</li>
"
;
//increment the student count
student_count
++
;
}
});
htmlString
+=
"
<h6 class='font-italic'>
"
+
title
+
"
</h6>
"
;
htmlString
+=
"
<ul class='list-group list-group-horizontal student_detection_lists' style='overflow-x: scroll'>
"
;
htmlString
+=
images
;
htmlString
+=
"
</ul>
"
;
htmlString
+=
"
<div class='slidecontainer'>
"
;
htmlString
+=
"
<div class='row m-3'></div>
"
;
htmlString
+=
"
<div class='row'>
"
;
htmlString
+=
"
<span><i class='fas fa-play play-pause-icon-student-frames' id='icon_
"
+
title
+
"
'></i></span>
"
;
htmlString
+=
"
</div>
"
;
htmlString
+=
"
<input type='range' min='1' max='100' value='0' class='slider' id='slider_
"
+
title
+
"
'>
"
;
htmlString
+=
"
<p>No of frames: <span id='demo_
"
+
title
+
"
'></span></p>
"
;
htmlString
+=
"
</div>
"
;
htmlString
+=
"
</div>
"
;
});
//disappearing the loader
$
(
'
#detection_student_loader
'
).
attr
(
'
hidden
'
,
true
);
//append to the relevant html card content
$
(
'
#detection_students
'
).
append
(
htmlString
);
}
let
studentTimeVar
=
null
;
//playing the frames for each student detection (by label)
$
(
document
).
on
(
'
click
'
,
'
.play-pause-icon-student-frames
'
,
function
(
e
)
{
//defining the two possible classes
let
play_class
=
"
fas fa-play play-pause-icon-student-frames
"
;
let
pause_class
=
"
fas fa-pause play-pause-icon-student-frames
"
;
//retrieving the current icon class
let
current_class
=
$
(
this
).
attr
(
'
class
'
);
//assigning the correct class based on the icon clicked
let
new_class
=
(
current_class
===
play_class
)
?
pause_class
:
play_class
;
//setting the new class
$
(
this
).
attr
(
'
class
'
,
new_class
);
//extracting the title pf the clicked icon
let
title_part
=
$
(
this
).
attr
(
'
id
'
);
let
title
=
title_part
.
split
(
"
_
"
)[
1
];
//handling the slider
let
slider
=
document
.
getElementById
(
"
slider_
"
+
title
);
let
output
=
document
.
getElementById
(
"
demo_
"
+
title
);
//when the button is playing
if
(
current_class
===
play_class
)
{
studentTimeVar
=
setInterval
(()
=>
{
let
value
=
slider
.
value
;
let
new_slider_value
=
Number
(
value
)
+
1
;
slider
.
value
=
new_slider_value
;
output
.
innerHTML
=
new_slider_value
.
toString
();
let
selectedImage
=
'
#image_
'
+
Number
(
value
)
+
'
_
'
+
title
;
//displaying the relevant image
$
(
'
#image_0_
'
+
title
).
html
(
$
(
selectedImage
).
html
());
},
100
);
}
//when the button is paused
else
if
(
current_class
===
pause_class
)
{
clearInterval
(
studentTimeVar
);
}
});
//this is to handle the 'evaluate' button
$
(
'
#evaluate_button
'
).
click
(
function
()
{
//hide the message
$
(
'
#no_evaluated_student_content
'
).
attr
(
'
hidden
'
,
true
);
//show the loader
$
(
'
#evaluate_student_loader
'
).
attr
(
'
hidden
'
,
false
);
//using the fetch api
fetch
(
'
http://127.0.0.1:8000/get-lecture-emotion-student-evaluation/?video_name=
'
+
global_video_name
)
.
then
((
res
)
=>
res
.
json
())
.
then
((
out
)
=>
evaluate_student
(
out
))
.
catch
((
error
)
=>
alert
(
'
this is the error:
'
+
error
))
});
//this function will display the emotion percentages for each frame
function
displayEmotionRecognitionForFrame
(
response
)
{
//to create html for evaluate function
function
evaluate_student
(
response
)
{
//hide the loader
$
(
'
#student_video_progress_loader
'
).
attr
(
'
hidden
'
,
true
);
//show the progress bars
$
(
'
#student_video_progress
'
).
attr
(
'
hidden
'
,
false
);
//creating the html string
let
htmlString
=
""
;
//iterating through the student
response
.
people
.
map
((
student
)
=>
{
let
title
=
student
.
split
(
'
.
'
)[
0
];
let
images
=
""
;
htmlString
+=
"
<div class='row p-3 student-evaluation-rows'>
"
;
let
student_count
=
0
;
//iterating through the frames
response
.
response
.
map
((
frame
)
=>
{
let
frame_detections
=
frame
.
detections
;
let
frame_detection_length
=
frame_detections
.
length
;
if
(
frame_detections
.
includes
(
student
))
{
if
(
student_count
===
0
)
{
images
+=
"
<li class='list-group-item frame-0' id='image_0_evaluation
"
+
title
+
"
'>
"
;
}
else
{
images
+=
"
<li class='list-group-item other-student-frames' id='image_evaluation
"
+
student_count
+
"
_
"
+
title
+
"
' hidden>
"
;
}
images
+=
"
<img src='{% static '' %}FirstApp/Activity/
"
+
global_video_name
+
"
/
"
+
frame
.
frame
+
"
/
"
+
student
+
"
' width='200' height='200'>
"
;
images
+=
"
</li>
"
;
if
(
student_count
===
(
frame_detection_length
))
{
images
+=
"
<li class='list-group-item'>
"
;
images
+=
"
<button type='button' class='btn btn-dark individual-evaluation' id='evaluate_student_
"
+
title
+
"
'>evaluate</button>
"
;
images
+=
"
</li>
"
;
}
//increment the student count
student_count
++
;
}
});
htmlString
+=
"
<ul class='list-group'>
"
;
htmlString
+=
"
<li class='list-group-item'>
"
;
htmlString
+=
"
<div class='row m-3'>
"
;
htmlString
+=
"
<h4 class='font-weight-bold'>Student ID: <span>
"
+
title
+
"
</span></h4>
"
;
htmlString
+=
"
</div>
"
;
htmlString
+=
"
</li>
"
;
{
#
htmlString
+=
"
<div class='row m-3'></div>
"
;
#
}
htmlString
+=
"
<li class='list-group-item'>
"
;
htmlString
+=
"
<div class='row'>
"
;
htmlString
+=
"
<ul class='list-group list-group-horizontal student_detection_lists' style='overflow-x: scroll'>
"
;
htmlString
+=
images
;
htmlString
+=
"
</ul>
"
;
htmlString
+=
"
</div>
"
;
htmlString
+=
"
</li>
"
;
htmlString
+=
"
<li class='list-group-item'>
"
;
htmlString
+=
"
<div class='slidecontainer'>
"
;
htmlString
+=
"
<div class='row m-3'></div>
"
;
htmlString
+=
"
<div class='row'>
"
;
htmlString
+=
"
<span><i class='fas fa-play play-pause-icon-student-evaluations' id='icon_
"
+
title
+
"
'></i></span>
"
;
htmlString
+=
"
</div>
"
;
htmlString
+=
"
<input type='range' min='1' max='100' value='0' class='slider' id='slider_evaluation
"
+
title
+
"
'>
"
;
htmlString
+=
"
<p>No of frames: <span id='demo_evaluation
"
+
title
+
"
'></span></p>
"
;
htmlString
+=
"
</div>
"
;
//creating the html string, iteratively
response
.
map
((
frame
)
=>
{
let
frame_name
=
frame
.
frame_name
;
let
happy_perct
=
Math
.
round
(
frame
.
happy_perct
,
0
);
let
sad_perct
=
Math
.
round
(
frame
.
sad_perct
,
0
);
let
angry_perct
=
Math
.
round
(
frame
.
angry_perct
,
0
);
let
neutral_perct
=
Math
.
round
(
frame
.
neutral_perct
,
0
);
let
surprise_perct
=
Math
.
round
(
frame
.
surprise_perct
,
0
);
//append to the html string
//Happy
htmlString
+=
"
<div class='progress_area' id='progress_
"
+
frame_name
+
"
' hidden>
"
;
htmlString
+=
"
<h4 class='small font-weight-bold'>Happy</h4>
"
;
htmlString
+=
"
<span class='float-right' id='happy_instant_
"
+
frame_name
+
"
'>
"
+
happy_perct
+
"
%</span>
"
;
htmlString
+=
"
<div class='progress mb-4'>
"
;
htmlString
+=
"
<div class='progress-bar bg-warning' role='progressbar' id='phone_checking_instant_value_
"
+
frame_name
+
"
' style='width:
"
+
happy_perct
+
"
%' aria-valuenow='40' aria-valuemin='0' aria-valuemax='100'></div>
"
;
htmlString
+=
"
</div>
"
;
htmlString
+=
"
</li>
"
;
htmlString
+=
"
</ul>
"
;
});
//disappearing the loader
$
(
'
#evaluate_student_loader
'
).
attr
(
'
hidden
'
,
true
);
//append to the relevant html card content
$
(
'
#evaluation_students
'
).
append
(
htmlString
);
}
let
studentEvaluationVar
=
null
;
//playing the frames for each student evaluation
$
(
document
).
on
(
'
click
'
,
'
.play-pause-icon-student-evaluations
'
,
function
(
e
)
{
//defining the two possible classes
let
play_class
=
"
fas fa-play play-pause-icon-student-evaluations
"
;
let
pause_class
=
"
fas fa-pause play-pause-icon-student-evaluations
"
;
//retrieving the current icon class
let
current_class
=
$
(
this
).
attr
(
'
class
'
);
//assigning the correct class based on the icon clicked
let
new_class
=
(
current_class
===
play_class
)
?
pause_class
:
play_class
;
//setting the new class
$
(
this
).
attr
(
'
class
'
,
new_class
);
//extracting the title pf the clicked icon
let
title_part
=
$
(
this
).
attr
(
'
id
'
);
let
title
=
title_part
.
split
(
"
_
"
)[
1
];
//Sad
htmlString
+=
"
<h4 class='small font-weight-bold'>Sad</h4>
"
;
htmlString
+=
"
<span class='float-right' id='note_taking_instant_
"
+
frame_name
+
"
'>
"
+
sad_perct
+
"
%</span>
"
;
htmlString
+=
"
<div class='progress mb-4'>
"
;
htmlString
+=
"
<div class='progress-bar' role='progressbar' id='note_taking_instant_value_
"
+
frame_name
+
"
' style='width:
"
+
sad_perct
+
"
%' aria-valuenow='0' aria-valuemin='0' aria-valuemax='100'></div>
"
;
htmlString
+=
"
</div>
"
;
//handling the slider
let
slider
=
document
.
getElementById
(
"
slider_evaluation
"
+
title
);
let
output
=
document
.
getElementById
(
"
demo_evaluation
"
+
title
);
//when the button is playing
if
(
current_class
===
play_class
)
{
studentEvaluationVar
=
setInterval
(()
=>
{
let
value
=
slider
.
value
;
let
new_slider_value
=
Number
(
value
)
+
1
;
slider
.
value
=
new_slider_value
;
output
.
innerHTML
=
new_slider_value
.
toString
();
let
selectedImage
=
'
#image_evaluation
'
+
Number
(
value
)
+
'
_
'
+
title
;
//displaying the relevant image
$
(
'
#image_0_evaluation
'
+
title
).
html
(
$
(
selectedImage
).
html
());
},
100
);
}
//when the button is paused
else
if
(
current_class
===
pause_class
)
{
clearInterval
(
studentEvaluationVar
);
}
});
//Angry
htmlString
+=
"
<h4 class='small font-weight-bold'>Angry</h4>
"
;
htmlString
+=
"
<span class='float-right' id='listening_instant_
"
+
frame_name
+
"
'>
"
+
angry_perct
+
"
%</span>
"
;
htmlString
+=
"
<div class='progress mb-4'>
"
;
htmlString
+=
"
<div class='progress-bar bg-info' role='progressbar' id='listening_instant_value_
"
+
frame_name
+
"
' style='width:
"
+
angry_perct
+
"
%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>
"
;
htmlString
+=
"
</div>
"
;
//Neutral
htmlString
+=
"
<h4 class='small font-weight-bold'>Neutral</h4>
"
;
htmlString
+=
"
<span class='float-right' id='note_taking_instant_
"
+
frame_name
+
"
'>
"
+
neutral_perct
+
"
%</span>
"
;
htmlString
+=
"
<div class='progress mb-4'>
"
;
htmlString
+=
"
<div class='progress-bar' role='progressbar' id='note_taking_instant_value_
"
+
frame_name
+
"
' style='width:
"
+
neutral_perct
+
"
%' aria-valuenow='0' aria-valuemin='0' aria-valuemax='100'></div>
"
;
htmlString
+=
"
</div>
"
;
//to evaluate the individual student
$
(
document
).
on
(
'
click
'
,
'
.individual-evaluation
'
,
function
(
e
)
{
//Surprise
htmlString
+=
"
<h4 class='small font-weight-bold'>Surprise</h4>
"
;
htmlString
+=
"
<span class='float-right' id='listening_instant_
"
+
frame_name
+
"
'>
"
+
surprise_perct
+
"
%</span>
"
;
htmlString
+=
"
<div class='progress mb-4'>
"
;
htmlString
+=
"
<div class='progress-bar bg-info' role='progressbar' id='listening_instant_value_
"
+
frame_name
+
"
' style='width:
"
+
surprise_perct
+
"
%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>
"
;
htmlString
+=
"
</div>
"
;
let
individual_id
=
$
(
this
).
attr
(
'
id
'
);
let
student_name
=
individual_id
.
split
(
'
_
'
)[
2
];
student_name
+=
"
.png
"
;
let
html
=
$
(
this
).
html
();
//ending the progress area
htmlString
+=
"
</div>
"
;
//after clicking, change the html
$
(
this
).
html
(
"
<span class='font-italic'>loading...</span>
"
);
});
//append the html
$
(
'
#student_video_column
'
).
append
(
htmlString
);
//
fetching from the API
fetch
(
'
http://127.0.0.1:8000/
get-lecture-emotion-individual-student-evaluation/?video_name=
'
+
global_video_name
+
'
&student_name=
'
+
student
_name
)
//
start retrieving lecturer activity frame recognition
fetch
(
'
http://127.0.0.1:8000/
lecturer/get-lecturer-video-frame-recognitions/?video_name=
'
+
global_lecturer_video
_name
)
.
then
((
res
)
=>
res
.
json
())
.
then
((
out
)
=>
displayIndividualStudentEmotion
(
out
.
response
,
e
,
student_name
))
.
catch
((
error
)
=>
alert
(
'
something went wrong
'
));
//after 5 seconds, replace with the original html
/*
setTimeout(() => {
$(this).html(html);
//open the student individual modal
$('#student_individual_modal').modal();
}, 5000);
*/
});
//this function will display the individual student emotions
function
displayIndividualStudentEmotion
(
res
,
e
,
title
)
{
.
then
((
out
)
=>
displayLecturerEmotionRecognitionForFrame
(
out
))
.
catch
((
err
)
=>
alert
(
'
error:
'
+
err
))
//set the percentage values
$
(
'
#happy_individual_perct
'
).
text
(
res
.
happy_perct
+
'
%
'
);
$
(
'
#sad_individual_perct
'
).
text
(
res
.
sad_perct
+
'
%
'
);
$
(
'
#anger_individual_perct
'
).
text
(
res
.
angry_perct
+
'
%
'
);
$
(
'
#surprise_individual_perct
'
).
text
(
res
.
surprise_perct
+
'
%
'
);
$
(
'
#neutral_individual_perct
'
).
text
(
res
.
neutral_perct
+
'
%
'
);
//set the width
$
(
'
#happy_individual_width
'
).
width
(
res
.
happy_perct
+
'
%
'
);
$
(
'
#sad_individual_width
'
).
width
(
res
.
sad_perct
+
'
%
'
);
$
(
'
#anger_individual_width
'
).
width
(
res
.
angry_perct
+
'
%
'
);
$
(
'
#surprise_individual_width
'
).
width
(
res
.
surprise_perct
+
'
%
'
);
$
(
'
#neutral_individual_width
'
).
width
(
res
.
neutral_perct
+
'
%
'
);
//open the student individual modal
$
(
'
#student_individual_modal
'
).
modal
();
//set the button to default
e
.
target
.
innerHTML
=
"
<span>evaluate</span>
"
;
}
        //to handle the 'integrate' modal
        $('#integrate_activity').click(function () {

            //define the student video src
            let video_src = "{% static '' %}FirstApp/videos/" + global_video_name;

            //assign the video src
            $('#student_video').attr('src', video_src);

            $('#integrate_modal').modal();

            //fetch data from the API
            fetch('http://127.0.0.1:8000/get-lecture-emotion-for-frame?video_name=' + global_video_name)
                .then((res) => res.json())
                .then((out) => displayEmotionRecognitionForFrame(out.response))
                .catch((err) => alert('error: ' + err));
//this function will load the activity recognition for frames
function
displayLecturerEmotionRecognitionForFrame
(
response
)
{
//hide the loader
$
(
'
#lecturer_video_progress_loader
'
).
attr
(
'
hidden
'
,
true
);
//show the progress bars
$
(
'
#lecturer_video_progress
'
).
attr
(
'
hidden
'
,
false
);
//creating the html string
let
htmlString
=
""
;
let
duration
=
1000
/
response
.
fps
;
}
);
lecturer_fps
=
Math
.
round
(
duration
,
0
);
console
.
log
(
'
lecturer fps:
'
,
lecturer_fps
);
//this function will display the emotion percentages for each frame
function
displayEmotionRecognitionForFrame
(
response
)
{
//creating the html string, iteratively
response
.
frame_recognitions
.
map
((
frame
)
=>
{
let
frame_name
=
frame
.
frame_name
;
let
sitting_perct
=
Math
.
round
(
frame
.
sitting_perct
,
0
);
let
standing_perct
=
Math
.
round
(
frame
.
standing_perct
,
0
);
{
#
let
listen_perct
=
Math
.
round
(
frame
.
listening_perct
,
0
);
#
}
let
walking_perct
=
Math
.
round
(
frame
.
walking_perct
,
0
);
//append to the html string
//sitting
htmlString
+=
"
<div class='progress_area' id='progress_lecturer_
"
+
frame_name
+
"
' hidden>
"
;
htmlString
+=
"
<h4 class='small font-weight-bold'>Sitting</h4>
"
;
htmlString
+=
"
<span class='float-right' id='sitting_instant_
"
+
frame_name
+
"
'>
"
+
sitting_perct
+
"
%</span>
"
;
htmlString
+=
"
<div class='progress mb-4'>
"
;
htmlString
+=
"
<div class='progress-bar bg-warning' role='progressbar' id='sitting_instant_value_
"
+
frame_name
+
"
' style='width:
"
+
sitting_perct
+
"
%' aria-valuenow='40' aria-valuemin='0' aria-valuemax='100'></div>
"
;
htmlString
+=
"
</div>
"
;
//hide the loader
$
(
'
#student_video_progress_loader
'
).
attr
(
'
hidden
'
,
true
);
//show the progress bars
$
(
'
#student_video_progress
'
).
attr
(
'
hidden
'
,
false
);
//standing
htmlString
+=
"
<h4 class='small font-weight-bold'>Standing</h4>
"
;
htmlString
+=
"
<span class='float-right' id='standing_instant_
"
+
frame_name
+
"
'>
"
+
standing_perct
+
"
%</span>
"
;
htmlString
+=
"
<div class='progress mb-4'>
"
;
htmlString
+=
"
<div class='progress-bar' role='progressbar' id='standing_instant_value_
"
+
frame_name
+
"
' style='width:
"
+
standing_perct
+
"
%' aria-valuenow='0' aria-valuemin='0' aria-valuemax='100'></div>
"
;
htmlString
+=
"
</div>
"
;
//creating the html string
let
htmlString
=
""
;
//walking
htmlString
+=
"
<h4 class='small font-weight-bold'>Walking</h4>
"
;
htmlString
+=
"
<span class='float-right' id='walking_instant_
"
+
frame_name
+
"
'>
"
+
walking_perct
+
"
%</span>
"
;
htmlString
+=
"
<div class='progress mb-4'>
"
;
htmlString
+=
"
<div class='progress-bar bg-info' role='progressbar' id='walking_instant_value_
"
+
frame_name
+
"
' style='width:
"
+
walking_perct
+
"
%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>
"
;
htmlString
+=
"
</div>
"
;
//creating the html string, iteratively
response
.
map
((
frame
)
=>
{
let
frame_name
=
frame
.
frame_name
;
let
happy_perct
=
Math
.
round
(
frame
.
happy_perct
,
0
);
let
sad_perct
=
Math
.
round
(
frame
.
sad_perct
,
0
);
let
angry_perct
=
Math
.
round
(
frame
.
angry_perct
,
0
);
let
neutral_perct
=
Math
.
round
(
frame
.
neutral_perct
,
0
);
let
surprise_perct
=
Math
.
round
(
frame
.
surprise_perct
,
0
);
//append to the html string
//Happy
htmlString
+=
"
<div class='progress_area' id='progress_
"
+
frame_name
+
"
' hidden>
"
;
htmlString
+=
"
<h4 class='small font-weight-bold'>Happy</h4>
"
;
htmlString
+=
"
<span class='float-right' id='happy_instant_
"
+
frame_name
+
"
'>
"
+
happy_perct
+
"
%</span>
"
;
htmlString
+=
"
<div class='progress mb-4'>
"
;
htmlString
+=
"
<div class='progress-bar bg-warning' role='progressbar' id='phone_checking_instant_value_
"
+
frame_name
+
"
' style='width:
"
+
happy_perct
+
"
%' aria-valuenow='40' aria-valuemin='0' aria-valuemax='100'></div>
"
;
htmlString
+=
"
</div>
"
;
//Sad
htmlString
+=
"
<h4 class='small font-weight-bold'>Sad</h4>
"
;
htmlString
+=
"
<span class='float-right' id='note_taking_instant_
"
+
frame_name
+
"
'>
"
+
sad_perct
+
"
%</span>
"
;
htmlString
+=
"
<div class='progress mb-4'>
"
;
htmlString
+=
"
<div class='progress-bar' role='progressbar' id='note_taking_instant_value_
"
+
frame_name
+
"
' style='width:
"
+
sad_perct
+
"
%' aria-valuenow='0' aria-valuemin='0' aria-valuemax='100'></div>
"
;
htmlString
+=
"
</div>
"
;
//Angry
htmlString
+=
"
<h4 class='small font-weight-bold'>Angry</h4>
"
;
htmlString
+=
"
<span class='float-right' id='listening_instant_
"
+
frame_name
+
"
'>
"
+
angry_perct
+
"
%</span>
"
;
htmlString
+=
"
<div class='progress mb-4'>
"
;
htmlString
+=
"
<div class='progress-bar bg-info' role='progressbar' id='listening_instant_value_
"
+
frame_name
+
"
' style='width:
"
+
angry_perct
+
"
%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>
"
;
htmlString
+=
"
</div>
"
;
//Neutral
htmlString
+=
"
<h4 class='small font-weight-bold'>Neutral</h4>
"
;
htmlString
+=
"
<span class='float-right' id='note_taking_instant_
"
+
frame_name
+
"
'>
"
+
neutral_perct
+
"
%</span>
"
;
htmlString
+=
"
<div class='progress mb-4'>
"
;
htmlString
+=
"
<div class='progress-bar' role='progressbar' id='note_taking_instant_value_
"
+
frame_name
+
"
' style='width:
"
+
neutral_perct
+
"
%' aria-valuenow='0' aria-valuemin='0' aria-valuemax='100'></div>
"
;
htmlString
+=
"
</div>
"
;
//Surprise
htmlString
+=
"
<h4 class='small font-weight-bold'>Surprise</h4>
"
;
htmlString
+=
"
<span class='float-right' id='listening_instant_
"
+
frame_name
+
"
'>
"
+
surprise_perct
+
"
%</span>
"
;
htmlString
+=
"
<div class='progress mb-4'>
"
;
htmlString
+=
"
<div class='progress-bar bg-info' role='progressbar' id='listening_instant_value_
"
+
frame_name
+
"
' style='width:
"
+
surprise_perct
+
"
%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>
"
;
htmlString
+=
"
</div>
"
;
//ending the progress area
htmlString
+=
"
</div>
"
;
//ending the progress area
htmlString
+=
"
</div>
"
;
});
//append the html
$
(
'
#student_video_column
'
).
append
(
htmlString
);
$
(
'
#lecturer_video_column
'
).
append
(
htmlString
);
}
        //to handle the 'integrate' play button
        $('#play_integrate_button').click(function () {
            let video = $('video')[0];
            let video1 = $('video')[1];
            let test_video = document.getElementsByTagName('video')[0];
            let play_class = 'btn btn-outline-danger play';
            let pause_class = 'btn btn-outline-danger pause';
            let count = 0;
            let count_lecturer = 0;
            let classes = $(this).attr('class');

            let video_interval = setInterval(() => {
                let talking_number = Math.round(Math.random() * 100, 0);
                let phone_number = Math.round(Math.random() * 100, 0);
                let note_number = Math.round(Math.random() * 100, 0);
                let listening_number = Math.round(Math.random() * 100, 0);

                //=====STUDENTS COLUMN=====
                //get the relevant progress area
                let progress_area = "progress_frame-" + count;
...
...
@@ -857,32 +436,49 @@
                //increment the count
                count++;

                //setting the values
                /*
                $('#talking_instant').text(talking_number + '%');
                $('#phone_checking_instant').text(phone_number + '%');
                $('#note_taking_instant').text(note_number + '%');
                $('#listening_instant').text(listening_number + '%');

                //setting the width
                $('#talking_instant_value').width(talking_number + '%');
                $('#phone_checking_instant_value').width(phone_number + '%');
                $('#note_taking_instant_value').width(note_number + '%');
                $('#listening_instant_value').width(listening_number + '%');
                }, 33);

                let video_interval_lecturer = setInterval(() => {
                    //=====LECTURER COLUMN=====
                    //get the relevant progress area
                    let progress_area_lecturer = "progress_lecturer_frame-" + count_lecturer;
                    let progress_area_id_lecturer = "#" + progress_area_lecturer;

                    //find the corresponding progress area
                    let progress_area_html_lecturer = document.getElementById(progress_area_lecturer);

                    //display the retrieved progress area
                    $(progress_area_id_lecturer).attr('hidden', false);

                    //replace the current progress area with the selected one
                    $('#lecturer_video_progress').html(progress_area_html_lecturer);

                    //increment the count
                    count_lecturer++;

                    console.log('current frame (lecturer): ', count_lecturer);
                }, lecturer_fps);
                */
            }, 1000);

            //check for the current class
            if (classes === play_class) {
                $(this).text('Pause');
                $(this).attr('class', pause_class);
                video.play();
                video1.play();
            }
            else if (classes === pause_class) {
                $(this).text('Play');
                $(this).attr('class', play_class);
                video.pause();
                video1.pause();
            }

            //function to do when the video is paused
...
...
@@ -892,6 +488,12 @@
            video.onended = function (e) {
                //stop changing the activity values
                clearInterval(video_interval);
            };

            //function to do when the video is ended
            video1.onended = function (e) {
                //stop changing the activity values
                clearInterval(video_interval_lecturer);
            }
        });
...
...
@@ -1063,100 +665,89 @@
                    <!--temporary text -->
                    <span class="font-italic" id="temporary_text">Frame will be displayed here</span>

                    <!--loading buffer area-->
                    <div class="text-center" id="frame_loader" hidden>
                        <img src="{% static 'FirstApp/images/ajax-loader.gif' %}" alt="Loader">
                    </div>

                    <!--frames -->
                    <div class="text-center p-4" id="video_frames">

                        <!-- slide container -->
                        <div id="slidecontainer" hidden>
                            <div class="row m-3"></div>

                            <!-- play/pause icon -->
                            <div class="row">
                                <span><i class="fas fa-play" id="play_pause_icon_activity"></i></span>
                            </div>

                            <input type="range" min="1" max="100" value="0" class="slider" id="myActivityRange">
                            <p>No of frames: <span id="demo"></span></p>
                        </div>

                        <!--this area will display the progress bars -->
                        <div class="progress_area" hidden>
                            <!--Happy -->
                            <a href="#" class="btn btn-link labels" data-number="1" data-label="Happy">
                                <h4 class="small font-weight-bold">Happy</h4>
                            </a>
                            <span class="float-right" id="happy_perct">40%</span>
                            <div class="progress mb-4">
                                <div class="progress-bar bg-danger" role="progressbar" id="happy_width" style="width: 20%"
                                     aria-valuenow="20" aria-valuemin="0" aria-valuemax="100"></div>
                            </div>

                            <!--sad -->
                            <a href="#" class="btn btn-link labels" data-number="0" data-label="sad">
                                <h4 class="small font-weight-bold">Sad</h4>
                            </a>
                            <span class="float-right" id="sad_perct">45%</span>
                            <div class="progress mb-4">
                                <div class="progress-bar bg-warning" role="progressbar" id="sad_width" style="width: 40%"
                                     aria-valuenow="40" aria-valuemin="0" aria-valuemax="100"></div>
                            </div>

                            <!--anger -->
                            <a href="#" class="btn btn-link labels" data-number="2" data-label="anger">
                                <h4 class="small font-weight-bold">Anger</h4>
                            </a>
                            <span class="float-right" id="anger_perct">50%</span>
                            <div class="progress mb-4">
                                <div class="progress-bar" role="progressbar" id="anger_width" style="width: 60%"
                                     aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div>
                            </div>

                            <!--surprise-->
                            <a href="#" class="btn btn-link labels" data-number="2" data-label="surprise">
                                <h4 class="small font-weight-bold">Surprise</h4>
                            </a>
                            <span class="float-right" id="surprise_perct">60%</span>
                            <div class="progress mb-4">
                                <div class="progress-bar bg-info" role="progressbar" id="surprise_width" style="width: 80%"
                                     aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div>
                            </div>

                            <!--neutral-->
                            <a href="#" class="btn btn-link labels" data-number="2" data-label="neutral">
                                <h4 class="small font-weight-bold">Neutral</h4>
                            </a>
                            <span class="float-right" id="neutral_perct">60%</span>
                            <div class="progress mb-4">
                                <div class="progress-bar bg-info" role="progressbar" id="neutral_width" style="width: 80%"
                                     aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div>
                            </div>

{#                            <!--evaluate button -->#}
{#                            <button type="button" class="btn btn-danger float-right"#}
{#                                    id="evaluate_button">Evaluate#}
{#                            </button>#}
                        </div>
                    </div>
                    <!--graph tab -->
...
...
@@ -1214,63 +805,63 @@
<!--2nd column -->
{#<div class="col-lg-6">#}
{#    <!--card content -->#}
{#    <div class="card shadow mb-4">#}
{#        <!--card header -->#}
{#        <div class="card-header py-3">#}
{#            <h5 class="m-0 font-weight-bold text-primary">Frame Detections</h5>#}
{#        </div>#}
{##}
{#        <!--card body -->#}
{#        <div class="text-center p-4" id="detection_frames">#}
{##}
{#            <!--no content message-->#}
{#            <div class="text-center p-2" id="no_detection_message_content">#}
{#                <span class="font-italic">No frame is selected</span>#}
{#            </div>#}
{##}
{#            <div class="text-left m-3" id="detection_number_area" hidden>#}
{#                <p>No of detections: <span id="no_of_detections"></span></p>#}
{#            </div>#}
{#            <!--the detection loader -->#}
{#            <div class="text-center p-2" id="detection_loader" hidden>#}
{#                <img src="{% static 'FirstApp/images/ajax-loader.gif' %}" alt="Loader">#}
{#            </div>#}
{#        </div>#}
{#    </div>#}
{##}
{#    <!--detection person card -->#}
{#    <div class="card shadow mb-4">#}
{#        <!--card header -->#}
{#        <div class="card-header py-3">#}
{#            <h5 class="m-0 font-weight-bold text-primary">Detected Students (by emotion type)</h5>#}
{#        </div>#}
{##}
{#        <!--card body -->#}
{#        <div class="text-center p-4" id="detection_students">#}
{#            <!--activity type line -->#}
{#            <div class="text-center p-2" id="activity_type" hidden>#}
{#                <p>Activity Type: <span class="font-weight-bold" id="activity_type_text"></span></p>#}
{#            </div>#}
{##}
{#            <!--no content message-->#}
{#            <div class="text-center p-2" id="no_detection_student_content">#}
{#                <span class="font-italic">No activity type is selected</span>#}
{#            </div>#}
{##}
{#            <!--the detection student loader -->#}
{#            <div class="text-center p-2" id="detection_student_loader" hidden>#}
{#                <img src="{% static 'FirstApp/images/ajax-loader.gif' %}" alt="Loader">#}
{#            </div>#}
{##}
{#        </div>#}
{#    </div>#}
{#</div>#}
</div>
...
...
@@ -1280,36 +871,36 @@
<div class="row p-2">
    <!--1st column -->
{#    <div class="col-lg-6">#}
{#        <!--card -->#}
{#        <div class="card shadow mb-4">#}
{#            <!--card header -->#}
{#            <div class="card-header">#}
{#                <h5 class="m-0 font-weight-bold text-primary">Evaluated Students</h5>#}
{#            </div>#}
{##}
{#            <!--card body -->#}
{#            <div class="card-body" id="evaluation_students">#}
{##}
{#                <!--no content message-->#}
{#                <div class="text-center p-2" id="no_evaluated_student_content">#}
{#                    <span class="font-italic">Press 'Evaluate' button to evaluate students</span>#}
{#                </div>#}
{##}
{#                <!--the detection student loader -->#}
{#                <div class="text-center p-2" id="evaluate_student_loader" hidden>#}
{#                    <img src="{% static 'FirstApp/images/ajax-loader.gif' %}" alt="Loader">#}
{#                </div>#}
{#                <!--end of student detection loader -->#}
{##}
{##}
{#            </div>#}
{##}
{#        </div>#}
{##}
{##}
{#    </div>#}
    <!--end of 1st column -->
...
...
@@ -1330,7 +921,7 @@
<!--button -->
<div class="text-right m-4">
    <button type="button" class="btn btn-outline-success" id="integrate_activity">
    <button type="button" class="btn btn-outline-success" id="integrate_emotion">
        Process
    </button>
</div>
...
...
@@ -1529,7 +1120,7 @@
<div class="progress mb-4">
    <div class="progress-bar bg-danger" role="progressbar" id="happy_instant_value"
         {# style="width: 0%" #}
         aria-valuenow="20" aria-valuemin="0" aria-valuemax="100"></div>
</div>
...
...
@@ -1540,7 +1131,7 @@
<div class="progress mb-4">
    <div class="progress-bar bg-warning" role="progressbar" id="sad_instant_value"
         {# style="width: 0%" #}
         aria-valuenow="40" aria-valuemin="0" aria-valuemax="100"></div>
</div>
...
...
@@ -1551,7 +1142,7 @@
<div class="progress mb-4">
    <div class="progress-bar" role="progressbar" id="angry_instant_value"
         {# style="width: 0%" #}
         aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div>
</div>
...
...
@@ -1562,19 +1153,19 @@
<div class="progress mb-4">
    <div class="progress-bar bg-info" role="progressbar" id="neutral_instant_value"
         {# style="width: 80%" #}
         aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div>
</div>

<!-- Surprise-->
<h4 class="small font-weight-bold">Surprise</h4>
<span class="float-right" id="surprise_instant">0%</span>
<div class="progress mb-4">
    <div class="progress-bar bg-info" role="progressbar" id="surprise_instant_value"
         {# style="width: 80%" #}
         aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div>
</div>
...
...
@@ -1583,25 +1174,71 @@
</div>
<!--end of 1st column -->

<!--2nd column -->
<div class="col-md-6">
<!-- 2nd column -->
<div class="col-md-6" id="lecturer_video_column">

    <div class="text-center">
        <span class="h3 font-italic font-weight-bold">Lecturer Performance</span>
    </div>

    <!--display lecture video -->
    <div class="text-center m-3" id="lecturer_video_section">
        <!--temporary text -->
        <div class="text-center" id="temp_lecturer_text">
            <span class="font-italic">No video was found</span>

        <!--display lecturer video -->
        <div class="text-center m-3" id="lecturer_video_section">
            <video width="500" height="300" id="lecturer_video" controls>
                <source src="#" type="video/mp4">
                Your browser does not support the video tag.
            </video>
        </div>
        <!--end of lecturer video section -->

        <video width="500" height="300" id="lecturer_video" controls>
            <source src="#" type="video/mp4">
            Your browser does not support the video tag.
        </video>

        <!-- ajax loader section -->
        <div class="text-center mt-3" id="lecturer_video_progress_loader">
            <img src="{% static 'FirstApp/images/ajax-loader-1.gif' %}" alt="loader">
        </div>

        <!--progress bar section -->
        <div class="progress_area" id="lecturer_video_progress" hidden>

            <!--sitting -->
            <h4 class="small font-weight-bold">Sitting</h4>
            <span class="float-right" id="sitting_instant">0%</span>
            <div class="progress mb-4">
                <div class="progress-bar bg-warning" role="progressbar" id="sitting_instant_value"
                     {# style="width: 0%" #}
                     aria-valuenow="40" aria-valuemin="0" aria-valuemax="100"></div>
            </div>

            <!--standing -->
            <h4 class="small font-weight-bold">Standing</h4>
            <span class="float-right" id="standing_instant">0%</span>
            <div class="progress mb-4">
                <div class="progress-bar" role="progressbar" id="standing_instant_value"
                     {# style="width: 0%" #}
                     aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div>
            </div>

            <!--walking-->
            <h4 class="small font-weight-bold">Walking</h4>
            <span class="float-right" id="walking_instant">0%</span>
            <div class="progress mb-4">
                <div class="progress-bar bg-info" role="progressbar" id="walking_instant_value"
                     {# style="width: 80%" #}
                     aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div>
            </div>

        </div>
        <!--end of progress bar section -->

    </div>
    <!--end of lecture video section -->
...
...
FirstApp/templates/FirstApp/gaze.html
View file @
d8f6824a
...
...
@@ -29,6 +29,8 @@
var global_lecture_video_id = '';
var global_video_name = '';
var global_lecturer_subject_index = 0;
var global_lecturer_video_name = '';
var lecturer_fps = 0;

//jquery
$(document).ready(function () {
...
...
@@ -169,13 +171,14 @@
fetch('http://127.0.0.1:8000/get-lecture-gaze-estimation/?lecture_video_id=' + global_lecture_video_id + '&lecture_video_name=' + global_video_name)
    .then((res) => res.json())
    .then((out) => {
        let frames = createFrames(out);
        return frames
        {# let frames = createFrames(out); #}
        {# return frames #}
        displayGazeEstimation(out)
    })
    .then((obj) => {
        $('#video_frames').prepend(obj);
        {# $('#video_frames').prepend(obj); #}
        $('#frame_loader').attr('hidden', true);
        $('#slidecontainer').attr('hidden', false);
        {# $('#slidecontainer').attr('hidden', false); #}
    })
    .catch((error) => alert('this is the error: ' + error));
});
...
...
@@ -231,61 +234,32 @@
}

//this section is responsible for displaying the frames as video

//creating the frame content
function createFrames(res) {
    let main_frame_content = "<div class='row' id='main_frames'>";
    main_frame_content += "<ul class='list-group list-group-horizontal'>";
    let count = 0;

    //loop through the frames
    res.extracted.map((image) => {
        let img_src = "";

        if (count === 0) {
            main_frame_content += "<li class='list-group-item text-center' id='image_0'>";
            img_src = "<img src='{% static '' %}FirstApp/gaze/" + global_video_name + "/" + res.extracted[0] + "' width='400' height='400'>";
        }
        else {
            main_frame_content += "<li class='list-group-item other-frames' id='image_" + count + "' hidden>";
            img_src = "<img src='{% static '' %}FirstApp/gaze/" + global_video_name + "/" + image + "' class='img-link' width='400' height='400'>";
        }

        main_frame_content += img_src;
        main_frame_content += "</li>";
        count++;
    });

    main_frame_content += "</ul>";
    main_frame_content += "</div>";

    //setting the min, max values of the slider
    $('#myActivityRange').attr({'min': 0, 'max': count});

    //display the progress bars
    displayGazeEstimation(res);

    return main_frame_content;
}
//to handle the 'integrate' modal
$('#integrate_activity').click(function () {
$('#integrate_gaze').click(function () {
    //define the student video src
    let video_src = "{% static '' %}FirstApp/videos/" + global_video_name;

    {# global_lecturer_video_name = "Test_1.mp4"; #}
    {# global_lecturer_video_name = "Test_2.mp4"; #}
    global_lecturer_video_name = "Test_3.mp4";

    //define the lecturer video src
    let lecturer_video_src = "{% static '' %}FirstApp/lecturer_videos/" + global_lecturer_video_name;

    //assign the video src
    $('#student_video').attr('src', video_src);

    $('#integrate_modal').modal();

    //assign the video src
    $('#lecturer_video').attr('src', lecturer_video_src);

    $('#integrate_modal').modal();

    //fetch data from the API
    fetch('http://127.0.0.1:8000/get-lecture-gaze-estimation-for-frame/?video_name=' + global_video_name)
        .then((res) => res.json())
        .then((out) => displayGazeEstimationForFrame(out.response))
        .catch((err) => alert('error: ' + err));
});
...
...
@@ -303,79 +277,142 @@
        //creating the html string, iteratively
        response.map((frame) => {
            let frame_name = frame.frame_name;
            let look_up_right = Math.round(frame.upright_perct, 0);
            let look_up_left = Math.round(frame.upleft_perct, 0);
            let look_down_right = Math.round(frame.downright_perct, 0);
            let look_down_left = Math.round(frame.downleft_perct, 0);
            let look_front = Math.round(frame.front_perct, 0);

            //append to the html string
            //looking up and right
            htmlString += "<div class='progress_area' id='progress_" + frame_name + "' hidden>";
            htmlString += "<h4 class='small font-weight-bold'>Looking up and right</h4>";
            htmlString += "<span class='float-right' id='look_up_right_instant_" + frame_name + "'>" + look_up_right + "%</span>";
            htmlString += "<div class='progress mb-4'>";
            htmlString += "<div class='progress-bar bg-warning' role='progressbar' id='look_up_right_instant_value_" + frame_name + "' style='width: " + look_up_right + "%' aria-valuenow='40' aria-valuemin='0' aria-valuemax='100'></div>";
            htmlString += "</div>";

            //looking up and left
            htmlString += "<h4 class='small font-weight-bold'>Looking up and left</h4>";
            htmlString += "<span class='float-right' id='look_up_left_instant_" + frame_name + "'>" + look_up_left + "%</span>";
            htmlString += "<div class='progress mb-4'>";
            htmlString += "<div class='progress-bar' role='progressbar' id='look_up_left_instant_value_" + frame_name + "' style='width: " + look_up_left + "%' aria-valuenow='0' aria-valuemin='0' aria-valuemax='100'></div>";
            htmlString += "</div>";

            //looking down and right
            htmlString += "<h4 class='small font-weight-bold'>Looking down and right</h4>";
            htmlString += "<span class='float-right' id='look_down_right_instant_" + frame_name + "'>" + look_down_right + "%</span>";
            htmlString += "<div class='progress mb-4'>";
            htmlString += "<div class='progress-bar bg-info' role='progressbar' id='look_down_right_instant_value_" + frame_name + "' style='width: " + look_down_right + "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>";
            htmlString += "</div>";

            //looking down and left
            htmlString += "<h4 class='small font-weight-bold'>Looking down and left</h4>";
            htmlString += "<span class='float-right' id='look_down_left_instant_" + frame_name + "'>" + look_down_left + "%</span>";
            htmlString += "<div class='progress mb-4'>";
            htmlString += "<div class='progress-bar bg-info' role='progressbar' id='look_down_left_instant_value_" + frame_name + "' style='width: " + look_down_left + "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>";
            htmlString += "</div>";

            //looking front
            htmlString += "<h4 class='small font-weight-bold'>Looking front</h4>";
            htmlString += "<span class='float-right' id='look_front_instant_" + frame_name + "'>" + look_front + "%</span>";
            htmlString += "<div class='progress mb-4'>";
            htmlString += "<div class='progress-bar bg-info' role='progressbar' id='look_front_instant_value_" + frame_name + "' style='width: " + look_front + "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>";
            htmlString += "</div>";

            //ending the progress area
            htmlString += "</div>";
        });

        //append the html
        $('#student_video_column').append(htmlString);

        //start retrieving lecturer activity frame recognition
        fetch('http://127.0.0.1:8000/lecturer/get-lecturer-video-frame-recognitions/?video_name=' + global_lecturer_video_name)
            .then((res) => res.json())
            .then((out) => displayLecturerEmotionRecognitionForFrame(out))
            .catch((err) => alert('error: ' + err))
    }

    //this function will load the activity recognition for frames
    function displayLecturerEmotionRecognitionForFrame(response) {

        //hide the loader
        $('#lecturer_video_progress_loader').attr('hidden', true);

        //show the progress bars
        $('#lecturer_video_progress').attr('hidden', false);

        //creating the html string
        let htmlString = "";
        let duration = 1000 / response.fps;
        lecturer_fps = Math.round(duration, 0);

        console.log('lecturer fps: ', lecturer_fps);

        //creating the html string, iteratively
        response.frame_recognitions.map((frame) => {

            let frame_name = frame.frame_name;
            let sitting_perct = Math.round(frame.sitting_perct, 0);
            let standing_perct = Math.round(frame.standing_perct, 0);
            {# let listen_perct = Math.round(frame.listening_perct, 0); #}
            let walking_perct = Math.round(frame.walking_perct, 0);

            //append to the html string
            //sitting
            htmlString += "<div class='progress_area' id='progress_lecturer_" + frame_name + "' hidden>";
            htmlString += "<h4 class='small font-weight-bold'>Sitting</h4>";
            htmlString += "<span class='float-right' id='sitting_instant_" + frame_name + "'>" + sitting_perct + "%</span>";
            htmlString += "<div class='progress mb-4'>";
            htmlString += "<div class='progress-bar bg-warning' role='progressbar' id='sitting_instant_value_" + frame_name + "' style='width: " + sitting_perct + "%' aria-valuenow='40' aria-valuemin='0' aria-valuemax='100'></div>";
            htmlString += "</div>";

            //standing
            htmlString += "<h4 class='small font-weight-bold'>Standing</h4>";
            htmlString += "<span class='float-right' id='standing_instant_" + frame_name + "'>" + standing_perct + "%</span>";
            htmlString += "<div class='progress mb-4'>";
            htmlString += "<div class='progress-bar' role='progressbar' id='standing_instant_value_" + frame_name + "' style='width: " + standing_perct + "%' aria-valuenow='0' aria-valuemin='0' aria-valuemax='100'></div>";
            htmlString += "</div>";

            //walking
            htmlString += "<h4 class='small font-weight-bold'>Walking</h4>";
            htmlString += "<span class='float-right' id='walking_instant_" + frame_name + "'>" + walking_perct + "%</span>";
            htmlString += "<div class='progress mb-4'>";
            htmlString += "<div class='progress-bar bg-info' role='progressbar' id='walking_instant_value_" + frame_name + "' style='width: " + walking_perct + "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>";
            htmlString += "</div>";

            //ending the progress area
            htmlString += "</div>";
        });

        //append the html
        $('#lecturer_video_column').append(htmlString);
    }
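The lecturer column's refresh interval is derived from the API's fps field as milliseconds per frame (1000 / fps, rounded). A quick sanity check of that arithmetic, written as a small Python sketch; the fps values below are illustrative and not taken from the project's data:

    # Milliseconds between frame updates, mirroring the 1000 / response.fps
    # computation used for the lecturer progress bars above.
    def frame_interval_ms(fps: float) -> int:
        return round(1000 / fps)

    # Illustrative values only: a 30 fps video gives about 33 ms per frame,
    # which matches the hard-coded 33 ms interval used for the student column.
    print(frame_interval_ms(30))  # 33
    print(frame_interval_ms(24))  # 42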
    //to handle the 'integrate' play button
    $('#play_integrate_button').click(function () {
        let video = $('video')[0];
        let video1 = $('video')[1];
        let test_video = document.getElementsByTagName('video')[0];
        let play_class = 'btn btn-outline-danger play';
        let pause_class = 'btn btn-outline-danger pause';

        let count = 0;
        let count_lecturer = 0;

        let classes = $(this).attr('class');

        let video_interval = setInterval(() => {

            {# let talking_number = Math.round(Math.random() * 100, 0); #}
            {# let phone_number = Math.round(Math.random() * 100, 0); #}
            {# let note_number = Math.round(Math.random() * 100, 0); #}
            {# let listening_number = Math.round(Math.random() * 100, 0); #}

            //=====STUDENTS COLUMN=====

            //get the relevant progress area
            let progress_area = "progress_frame-" + count;
...
...
@@ -393,33 +430,49 @@
            //increment the count
            count++;

            //setting the values
            {# $('#looking_up_right_instant_perct').text(talking_number + '%'); #}
            {# $('#looking_up_left_instant_perct').text(phone_number + '%'); #}
            {# $('#looking_down_right_instant_perct').text(note_number + '%'); #}
            {# $('#looking_down_left_instant_perct').text(listening_number + '%'); #}
            {# $('#looking_front_instant_perct').text(listening_number + '%'); #}
            {##}
            {# //setting the width#}
            {# $('#talking_instant_value').width(talking_number + '%'); #}
            {# $('#phone_checking_instant_value').width(phone_number + '%'); #}
            {# $('#note_taking_instant_value').width(note_number + '%'); #}
            {# $('#listening_instant_value').width(listening_number + '%'); #}

        }, 33);
        }, 33);
        let video_interval_lecturer = setInterval(() => {

            //=====LECTURER COLUMN=====

            //get the relevant progress area
            let progress_area_lecturer = "progress_lecturer_frame-" + count_lecturer;
            let progress_area_id_lecturer = "#" + progress_area_lecturer;

            //find the corresponding progress area
            let progress_area_html_lecturer = document.getElementById(progress_area_lecturer);

            //display the retrieved progress area
            $(progress_area_id_lecturer).attr('hidden', false);

            //replace the current progress area with the selected one
            $('#lecturer_video_progress').html(progress_area_html_lecturer);

            //increment the count
            count_lecturer++;

            console.log('current frame (lecturer): ', count_lecturer);

        }, lecturer_fps);

        //check for the current class
        if (classes === play_class) {
            $(this).text('Pause');
            $(this).attr('class', pause_class);
            video.play();
            video1.play();
        }
        else if (classes === pause_class) {
            $(this).text('Play');
            $(this).attr('class', play_class);
            video.pause();
            video1.pause();
        }

        //function to do when the video is paused
...
...
@@ -429,258 +482,16 @@
        video.onended = function (e) {
            //stop changing the activity values
            clearInterval(video_interval);
        }
    });

    //declaring the variable for setInterval function
    let timeVar = null;

    //handling the play button
    $('#play_pause_icon_activity').click(function () {
        //defining the two possible classes
        let play_class = "fas fa-play";
        let pause_class = "fas fa-pause";

        //retrieving the current icon class
        let current_class = $(this).attr('class');

        //assigning the correct class based on the icon clicked
        let new_class = (current_class === play_class) ? pause_class : play_class;

        //setting the new class
        $(this).attr('class', new_class);
    };

    //handling the slider
    let slider = document.getElementById("myActivityRange");
    let output = document.getElementById("demo");

    //when the button is playing
    if (current_class === play_class) {
        timeVar = setInterval(() => {
            let value = slider.value;
            let new_slider_value = Number(value) + 1;
            slider.value = new_slider_value;
            output.innerHTML = new_slider_value.toString();

            let selectedImage = '#image_' + Number(value);

            //displaying the relevant image
            $('#image_0').html($(selectedImage).html());
        }, 100);
    }
    //when the button is paused
    else if (current_class === pause_class) {
        clearInterval(timeVar);

        //function to do when the video is ended
        video1.onended = function (e) {
            //stop changing the activity values
            clearInterval(video_interval_lecturer);
        }
    });

    //handling the slider
    let slider = document.getElementById("myActivityRange");
    let output = document.getElementById("demo");

    output.innerHTML = slider.value;

    slider.oninput = function () {
        output.innerHTML = this.value;
        let selectedImage = '#image_' + Number(this.value);

        //hide
        {# $('#image_0').attr('hidden', true); #}
        $('#image_0').html($(selectedImage).html());

        //setting the selected image
        {# $(selectedImage).attr('hidden', false); #}
    };

    $(document).on('click', '.img-link', function (e) {
        //removing previously displayed detections
        $('.detections').remove();

        //removing the no-content message
        $('#no_detection_message_content').hide();

        //appearing the loader
        $('#detection_loader').attr('hidden', false);

        let img_src_arr = e.target.src.split('/');
        let len = img_src_arr.length;
        let src = img_src_arr[len - 1];

        let frame_name_arr = src.split('.');
        let frame_name = frame_name_arr[0];

        //fetching the detection for the selected frame
        fetch('http://127.0.0.1:8000/get-lecture-activity-frame-detection/?video_name=' + global_video_name + "&frame_name=" + frame_name)
            .then((res) => res.json())
            .then((out) => displayDetections(out.detections, frame_name))
            .catch((error) => alert('this is an error'));
    });

    //the function to display detections
    function displayDetections(detections, frame_name) {
        let img_string = '';
        let no_of_detections = detections.length;

        //disabling the loader
        $('#detection_loader').attr('hidden', true);

        //appearing the no of detections number area
        $('#detection_number_area').attr('hidden', false);
        $('#no_of_detections').text(no_of_detections);

        detections.map((detection) => {
            img_string += "<img src='{% static '' %}FirstApp/activity/" + global_video_name + "/" + frame_name + "/" + detection + "' class='detections m-2' width='100' height='100' >"
        });

        $('#detection_frames').prepend(img_string);
    }

    //listening for click events in labels
    $('.labels').click(function () {
        let label = Number($(this).attr('data-number'));

        //removing the previous student detection lists
        $('.student_detection_lists').remove();

        //appearing the loader
        $('#detection_student_loader').attr('hidden', false);

        //disappearing the no content message
        $('#no_detection_student_content').attr('hidden', true);

        //fetching from the api
        fetch('http://127.0.0.1:8000/get-lecture-activity-detection-for-label/?video_name=' + global_video_name + '&label=' + label)
            .then((res) => res.json())
            .then((out) => createDetectedStudentFrames(out))
            .catch((error) => alert('this is the error: ' + error))
    });

    //creating the detected students frames
    function createDetectedStudentFrames(detections) {
        let htmlString = "";

        //iterating through the student
        detections.people.map((student) => {
            let title = student.split('.')[0];
            let images = "";

            htmlString += "<div class='row p-3 student-detection-rows'>";

            let student_count = 0;

            //iterating through the frames
            detections.response.map((frame) => {
                let frame_detections = frame.detections;

                if (frame_detections.includes(student)) {

                    if (student_count === 0) {
                        images += "<li class='list-group-item frame-0' id='image_0_" + title + "'>";
                    }
                    else {
                        images += "<li class='list-group-item other-student-frames' id='image_" + student_count + "_" + title + "' hidden>";
                    }

                    images += "<img src='{% static '' %}FirstApp/Activity/" + global_video_name + "/" + frame.frame + "/" + student + "' width='200' height='200'>";
                    images += "</li>";

                    //increment the student count
                    student_count++;
                }
            });

            htmlString += "<h6 class='font-italic'>" + title + "</h6>";
            htmlString += "<ul class='list-group list-group-horizontal student_detection_lists' style='overflow-x: scroll'>";
            htmlString += images;
            htmlString += "</ul>";
            htmlString += "<div class='slidecontainer'>";
            htmlString += "<div class='row m-3'></div>";
            htmlString += "<div class='row'>";
            htmlString += "<span><i class='fas fa-play play-pause-icon-student-frames' id='icon_" + title + "'></i></span>";
            htmlString += "</div>";
            htmlString += "<input type='range' min='1' max='100' value='0' class='slider' id='slider_" + title + "'>";
            htmlString += "<p>No of frames: <span id='demo_" + title + "'></span></p>";
            htmlString += "</div>";
            htmlString += "</div>";
        });

        //disappearing the loader
        $('#detection_student_loader').attr('hidden', true);

        //append to the relevant html card content
        $('#detection_students').append(htmlString);
    }

    let studentTimeVar = null;

    //playing the frames for each student detection
    $(document).on('click', '.play-pause-icon-student-frames', function (e) {
        //defining the two possible classes
        let play_class = "fas fa-play play-pause-icon-student-frames";
        let pause_class = "fas fa-pause play-pause-icon-student-frames";

        //retrieving the current icon class
        let current_class = $(this).attr('class');

        //assigning the correct class based on the icon clicked
        let new_class = (current_class === play_class) ? pause_class : play_class;

        //setting the new class
        $(this).attr('class', new_class);

        //extracting the title of the clicked icon
        let title_part = $(this).attr('id');
        let title = title_part.split("_")[1];

        //handling the slider
        let slider = document.getElementById("slider_" + title);
        let output = document.getElementById("demo_" + title);

        //when the button is playing
        if (current_class === play_class) {
            studentTimeVar = setInterval(() => {
                let value = slider.value;
                let new_slider_value = Number(value) + 1;
                slider.value = new_slider_value;
                output.innerHTML = new_slider_value.toString();

                let selectedImage = '#image_' + Number(value) + '_' + title;

                //displaying the relevant image
                $('#image_0_' + title).html($(selectedImage).html());
            }, 100);
        }
        //when the button is paused
        else if (current_class === pause_class) {
            clearInterval(studentTimeVar);
        }
    })
});
...
...
@@ -854,85 +665,77 @@
     alt="Loader">
</div>

<!--frames -->
<div class="text-center p-4" id="video_frames">
    <!-- slide container -->
    <div id="slidecontainer" hidden>
        <div class="row m-3"></div>
        <!-- play/pause icon -->
        <div class="row">
            <span><i class="fas fa-play" id="play_pause_icon_activity"></i></span>
        </div>
        <input type="range" min="1" max="100" value="0" class="slider" id="myActivityRange">
        <p>No of frames: <span id="demo"></span></p>

        <!--this area will display the progress bars -->
        <div class="progress_area" hidden>
            <!--Looking up and right -->
            <a href="#" class="btn btn-link labels" data-number="1">
                <h4 class="small font-weight-bold">Looking up and right</h4>
            </a>
            <span class="float-right" id="looking_up_right_perct">40%</span>
            <div class="progress mb-4">
                <div class="progress-bar bg-danger" role="progressbar" id="looking_up_right_width" style="width: 20%" aria-valuenow="20" aria-valuemin="0" aria-valuemax="100"></div>
            </div>
        </div>
    </div>

    <!--this area will display the progress bars -->
    <div class="progress_area" hidden>
        <!--Looking up and right -->
        <a href="#" class="btn btn-link labels" data-number="1">
            <h4 class="small font-weight-bold">Looking up and right</h4>
        </a>
        <span class="float-right" id="looking_up_right_perct">40%</span>
        <div class="progress mb-4">
            <div class="progress-bar bg-danger" role="progressbar" id="looking_up_right_width" style="width: 20%" aria-valuenow="20" aria-valuemin="0" aria-valuemax="100"></div>
        </div>

        <!--looking up and left -->
        <a href="#" class="btn btn-link labels" data-number="0">
            <h4 class="small font-weight-bold">Looking up and left</h4>
        </a>
        <span class="float-right" id="looking_up_left_perct">45%</span>
        <div class="progress mb-4">
            <div class="progress-bar bg-warning" role="progressbar" id="looking_up_left_width" style="width: 40%" aria-valuenow="40" aria-valuemin="0" aria-valuemax="100"></div>
        </div>

        <!--looking down and right -->
        <a href="#" class="btn btn-link labels" data-number="2">
            <h4 class="small font-weight-bold">Looking down and right</h4>
        </a>
        <span class="float-right" id="looking_down_right_perct">50%</span>
        <div class="progress mb-4">
            <div class="progress-bar" role="progressbar" id="looking_down_right_width" style="width: 60%" aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div>
        </div>

        <!--Looking down and left-->
        <a href="#" class="btn btn-link labels">
            <h4 class="small font-weight-bold">Looking down and left</h4>
        </a>
        <span class="float-right" id="looking_down_left_perct">60%</span>
        <div class="progress mb-4">
            <div class="progress-bar bg-info" role="progressbar" id="looking_down_left_width" style="width: 80%" aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div>
        </div>

        <!--Looking front-->
        <a href="#" class="btn btn-link labels">
            <h4 class="small font-weight-bold">Looking Front</h4>
        </a>
        <span class="float-right" id="looking_front_perct">60%</span>
        <div class="progress mb-4">
            <div class="progress-bar bg-gradient-dark" role="progressbar" id="looking_front_width" style="width: 80%" aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div>
        </div>

        <!--Looking front-->
        <a href="#" class="btn btn-link labels">
            <h4 class="small font-weight-bold">Looking Front</h4>
        </a>
        <span class="float-right" id="looking_front_perct">60%</span>
        <div class="progress mb-4">
            <div class="progress-bar bg-info" role="progressbar" id="looking_front_width" style="width: 80%" aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div>
        </div>

    <!-- end of progress area -->
    </div>
    <!-- end of progress area -->
</div>
...
...
@@ -1007,10 +810,11 @@
<!--button -->
<div class="text-right m-4">
    <button type="button" class="btn btn-outline-success" id="integrate_activity">
    <button type="button" class="btn btn-outline-success" id="integrate_gaze">
        Process
    </button>
</div>
</div>
</div>
</div>
...
...
@@ -1164,7 +968,8 @@
</a>
<span class="float-right" id="looking_down_right_instant_perct">50%</span>
<div class="progress mb-4">
    <div class="progress-bar" role="progressbar" id="looking_down_right_instant_width"
         style="width: 60%" aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div>
</div>
...
...
@@ -1196,30 +1001,76 @@
</div>
<!--end of 1st column -->

<!--2nd column -->
<div class="col-md-6">
<!-- 2nd column -->
<div class="col-md-6" id="lecturer_video_column">

    <div class="text-center">
        <span class="h3 font-italic font-weight-bold">Lecturer Performance</span>
    </div>

    <!--display lecture video -->
    <div class="text-center m-3" id="lecturer_video_section">
        <!--temporary text -->
        <div class="text-center" id="temp_lecturer_text">
            <span class="font-italic">No video was found</span>

        <!--display lecturer video -->
        <div class="text-center m-3" id="lecturer_video_section">
            <video width="500" height="300" id="lecturer_video" controls>
                <source src="#" type="video/mp4">
                Your browser does not support the video tag.
            </video>
        </div>
        <!--end of lecturer video section -->

        <!-- ajax loader section -->
        <div class="text-center mt-3" id="lecturer_video_progress_loader">
            <img src="{% static 'FirstApp/images/ajax-loader-1.gif' %}" alt="loader">
        </div>

        {# <video width="500" height="300" id="lecturer_video" controls> #}
        {#     <source src="#" #}
        {#             type="video/mp4"> #}
        {# Your browser does not support the video tag.#}
        {# </video> #}

        <!--progress bar section -->
        <div class="progress_area" id="lecturer_video_progress" hidden>

            <!--sitting -->
            <h4 class="small font-weight-bold">Sitting</h4>
            <span class="float-right" id="sitting_instant">0%</span>
            <div class="progress mb-4">
                <div class="progress-bar bg-warning" role="progressbar" id="sitting_instant_value"
                     {# style="width: 0%" #}
                     aria-valuenow="40" aria-valuemin="0" aria-valuemax="100"></div>
            </div>

            <!--standing -->
            <h4 class="small font-weight-bold">Standing</h4>
            <span class="float-right" id="standing_instant">0%</span>
            <div class="progress mb-4">
                <div class="progress-bar" role="progressbar" id="standing_instant_value"
                     {# style="width: 0%" #}
                     aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div>
            </div>

            <!--walking-->
            <h4 class="small font-weight-bold">Walking</h4>
            <span class="float-right" id="walking_instant">0%</span>
            <div class="progress mb-4">
                <div class="progress-bar bg-info" role="progressbar" id="walking_instant_value"
                     {# style="width: 80%" #}
                     aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div>
            </div>

        </div>
        <!--end of progress bar section -->

    </div>
    <!--end of lecture video section -->
</div>
<!--end of 2nd column -->
</div>
<!--end of 1st row -->
...
...
FirstApp/templates/FirstApp/template.html
View file @
d8f6824a
...
...
@@ -41,11 +41,18 @@
<ul class="navbar-nav bg-gradient-primary sidebar sidebar-dark accordion" id="accordionSidebar">

    <!-- Sidebar - Brand -->
    <a class="sidebar-brand d-flex align-items-center justify-content-center" href="index.html">
    <a class="sidebar-brand d-flex align-items-center justify-content-center" href="/">
        <div class="sidebar-brand-icon rotate-n-15">
            <i class="fas fa-laugh-wink"></i>
        </div>
        {% if request.session.user_type == "Lecturer" %}
            <div class="sidebar-brand-text mx-3">SLPES Lecturer</div>
        {% endif %}
        {% if request.session.user_type == "Admin" %}
            <div class="sidebar-brand-text mx-3">SLPES Admin</div>
        {% endif %}
    </a>

    <!-- Divider -->
...
...
@@ -66,6 +73,8 @@
    Interface
</div>

{% if request.session.user_type == "Lecturer" %}

    <!-- Nav Item - Pages Collapse Menu -->
    <li class="nav-item">
        <a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseTwo"
           aria-expanded="true" aria-controls="collapseTwo">
...
...
@@ -83,6 +92,7 @@
    </div>
</li>

<!-- Nav Item - Pages Collapse Menu -->
<li class="nav-item">
    <a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseThree"
       aria-expanded="true" aria-controls="collapseThree">
...
...
@@ -97,6 +107,8 @@
    </div>
</li>

<li class="nav-item">
    <a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseFour"
       aria-expanded="true" aria-controls="collapseThree">
        <i class="fas fa-fw fa-cog"></i>
...
...
@@ -127,6 +139,28 @@
        </div>
    </li>
{% endif %}

{% if request.session.user_type == "Admin" %}

    <!-- Nav Item - Pages Collapse Menu -->
    <li class="nav-item">
        <a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapsePages"
           aria-expanded="true" aria-controls="collapsePages">
            <i class="fas fa-fw fa-folder"></i>
            <span>Pages</span>
        </a>
        <div id="collapsePages" class="collapse" aria-labelledby="headingPages" data-parent="#accordionSidebar">
            <div class="bg-white py-2 collapse-inner rounded">
                <!-- <h6 class="collapse-header">Login Screens:</h6>-->
                <a class="collapse-item" href="/lecturer">Dashboard</a>
                <a class="collapse-item" href="/lecturer/lecture-video">Video Page</a>
            </div>
        </div>
    </li>
{% endif %}

<!-- Divider -->
<hr class="sidebar-divider">
...
...
@@ -178,6 +212,8 @@
    </div>
</ul>
<!-- End of Sidebar -->

<div id="content-wrapper" class="d-flex flex-column">
...
...
FirstApp/templates/FirstApp/user_direct.html
0 → 100644
View file @
d8f6824a
{% load static %}
<!DOCTYPE html>
<html lang="en">

<head>
    <meta charset="utf-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
    <meta name="description" content="">
    <meta name="author" content="">

    <title>SLPES</title>

    <!-- Custom fonts for this template-->
    <link href="{% static 'FirstApp/vendor/fontawesome-free/css/all.min.css' %}" rel="stylesheet" type="text/css">
    <link href="https://fonts.googleapis.com/css?family=Nunito:200,200i,300,300i,400,400i,600,600i,700,700i,800,800i,900,900i" rel="stylesheet">

    <!-- Custom styles for this template-->
    <link href="{% static 'FirstApp/css/sb-admin-2.min.css' %}" rel="stylesheet">
</head>

<body class="bg-gradient-primary">

<div class="container">

    <!-- Outer Row -->
    <div class="row justify-content-center">

        <div class="col-xl-10 col-lg-12 col-md-9">

            <div class="card o-hidden border-0 shadow-lg my-5">
                <div class="card-body p-0">
                    <!-- Nested Row within Card Body -->
                    <div class="row">
                        <div class="col-lg-6 d-none d-lg-block">
                            <img src="{% static 'FirstApp/images/user_redirect.png' %}" width="400" height="500" alt="No image">
                        </div>
                        <div class="col-lg-6">
                            <div class="p-5">
                                <div class="text-center">
                                    <h1 class="h4 text-gray-900 mb-4">Select the user type</h1>
                                </div>

                                <!--form -->
                                <form action="/process-user-redirect" method="POST" name="loginForm" class="user">
                                    {% csrf_token %}
                                    <div class="form-check mx-3">
                                        <input class="form-check-input" type="radio" name="user_type" id="admin" value="admin" checked>
                                        <label class="form-check-label" for="admin">Admin</label>
                                    </div>
                                    <div style="padding-top: 20px">
                                        <div class="form-check mx-3">
                                            <input class="form-check-input" type="radio" name="user_type" id="lecturer" value="lecturer">
                                            <label class="form-check-label" for="lecturer">Lecturer</label>
                                        </div>
                                        <div style="padding-top: 20px">
                                            <button type="submit" class="btn btn-primary btn-user btn-block">Proceed</button>
                                            <hr>
                                </form>
                                <hr>
                            </div>
                        </div>
                    </div>
                </div>
            </div>
        </div>
    </div>
</div>

<!-- Bootstrap core JavaScript-->
<script src="{% static 'FirstApp/vendor/jquery/jquery.min.js' %}"></script>
<script src="{% static 'FirstApp/vendor/bootstrap/js/bootstrap.bundle.min.js' %}"></script>

<!-- Core plugin JavaScript-->
<script src="{% static 'FirstApp/vendor/jquery-easing/jquery.easing.min.js' %}"></script>

<!-- Custom scripts for all pages-->
<script src="{% static 'FirstApp/js/sb-admin-2.min.js' %}"></script>

</body>
</html>
FirstApp/urls.py
View file @
d8f6824a
...
...
@@ -14,6 +14,7 @@ urlpatterns = [
    path('logout', views.logoutView),
    path('register-user', views.register),
    path('404', views.view404),
    path('401', views.view401),
    path('500', views.view500),
    path('blank', views.blank),
    path('gaze', views.gaze),
...
...
@@ -32,10 +33,20 @@ urlpatterns = [
    # video results
    path('video_result', views.video_result),

    # this is used for login
    # this is used to process login
    path('process-login', views.loggedInView),

    # this is used for login
    # this is used to process admin login
    path('process-admin-login', views.processAdminLogin),

    # this is used for user-redirect processing
    path('process-user-redirect', views.processUserRedirect),

    # this is used for admin login page
    path('admin-login', views.adminLogin),

    # this is used for activity
    path('activity', views.activity),

    # tables view
...
...
@@ -44,6 +55,10 @@ urlpatterns = [
    # test view (delete later)
    path('test', views.test),

    # user direct view
    path('user-direct', views.userDirect),

    url(r'^register', views.RegisterViewSet),
    # re_path('video/?video_name<str:video_name>', views.video),
    url(r'^teachers/', views.teachersList.as_view()),
...
...
@@ -140,6 +155,7 @@ urlpatterns = [
    # lecture emotion detection for frames API (to retrieve detections for each frame in lecture video)
    url(r'^get-lecture-emotion-for-frame/$', api.GetLectureEmotionRecognitionsForFrames.as_view()),

    ###### POSE Section #####
    # lecture video API (for Pose estimation)
    url(r'^get-lecture-video-for-pose/$', api.GetLectureVideoForPose.as_view()),
...
...
@@ -187,6 +203,21 @@ urlpatterns = [
    # retrieves lecture gaze summary
    url(r'^get-lecture-gaze-summary/$', api.GetLectureGazeSummary.as_view()),

    # retrieves lecture activity correlations
    url(r'^get-activity-correlations/$', api.GetLectureActivityCorrelations.as_view()),

    # retrieves lecture emotion correlations
    url(r'^get-emotion-correlations/$', api.GetLectureEmotionCorrelations.as_view()),

    # retrieves lecture gaze correlations
    url(r'^get-gaze-correlations/$', api.GetLectureGazeCorrelations.as_view()),

    ##### OTHERS #####

    # retrieves lecture recorded video name
    url(r'^get-lecture-recorded-video-name/$', api.GetLecturerRecordedVideo.as_view()),

    # routers
    # path('', include(router.urls)),
...
...
FirstApp/views.py
View file @
d8f6824a
...
...
@@ -109,100 +109,117 @@ class LectureViewSet(APIView):
####### VIEWS ######

@login_required(login_url='/login')
@login_required(login_url='/user-direct')
def hello(request):

    try:
        username = request.user.username

        # retrieve the lecturer
        lecturer = request.session['lecturer']
        user_type = request.session['user_type']

        print('user_type: ', user_type)

        # retrieve the lecturer's timetable slots
        lecturer_timetable = FacultyTimetable.objects.filter()

        # serialize the timetable
        lecturer_timetable_serialized = FacultyTimetableSerializer(lecturer_timetable, many=True)

        lecturer_details = []

        # loop through the serialized timetable
        for timetable in lecturer_timetable_serialized.data:

            # retrieve daily timetable
            daily_timetable = timetable['timetable']

            # loop through the daily timetable
            for day_timetable in daily_timetable:

                date = ''
                lecture_index = 0

                # loop through each timeslots
                for slots in day_timetable:

                    if slots == "date":
                        date = day_timetable[slots]

                    elif slots == "time_slots":
                        slot = day_timetable[slots]

                        # loop through each slot
                        for lecture in slot:

                            # check whether the lecturer is the current lecturer
                            if lecturer == lecture['lecturer']['id']:
                                lecturer_lecture_details = {}
                                lecturer_lecture_details['date'] = date
                                lecturer_lecture_details['start_time'] = lecture['start_time']
                                lecturer_lecture_details['end_time'] = lecture['end_time']
                                lecturer_lecture_details['subject_name'] = lecture['subject']['name']
                                lecturer_lecture_details['index'] = lecture_index
                                lecturer_lecture_details['lecturer'] = lecture['lecturer']['id']

                                # append to the lecturer_details
                                lecturer_details.append(lecturer_lecture_details)

                            # increment the index
                            lecture_index += 1

        # sorting the dates in lecturer_details list
        # for details in lecturer_details:
        lecturer_details.sort(key=lambda date: datetime.strptime(str(date['date']), "%Y-%m-%d"), reverse=True)

        obj = {'Message': 'Student and Lecturer Performance Enhancement System', 'username': username}

        folder = os.path.join(BASE_DIR, os.path.join('static\\FirstApp\\videos'))
        videoPaths = [os.path.join(folder, file) for file in os.listdir(folder)]
        videos = []
        durations = []

        #
        # for videoPath in videoPaths:
        #     video = Video()
        #     cap = cv2.VideoCapture(videoPath)
        #     fps = cap.get(cv2.CAP_PROP_FPS)  # OpenCV2 version 2 used "CV_CAP_PROP_FPS"
        #     frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        #     duration = int(frame_count / fps)
        #     durations.append(duration)
        #     videoName = os.path.basename(videoPath)
        #     # videoName = videos.append(os.path.basename(videoPath))
        #     durationObj = datetime.timedelta(seconds=duration)
        #     video.path = videoPath
        #     video.name = videoName
        #     video.duration = str(durationObj)
        #     videos.append(video)

        context = {'object': obj, 'Videos': videos, 'durations': durations,
                   'template_name': 'FirstApp/template.html',
                   'lecturer_details': lecturer_details, "lecturer": lecturer}

        return render(request, 'FirstApp/Home.html', context)

    # in case of keyerror exception
    except KeyError as exc:
        return redirect('/401')

    except Exception as exc:
        return redirect('/500')
# this method will handle 404 error page
def view404(request):
    return render(request, 'FirstApp/404.html')


# this page will handle 401 error page
def view401(request):
    return render(request, 'FirstApp/401.html')


# querying the database
def blank(request):
    emotions = LectureEmotionReport.objects.all().order_by('lecture_id')
    return render(request, 'FirstApp/blank.html', {'details': emotions})
@login_required(login_url='/login')
@login_required(login_url='/user-direct')
def gaze(request):
    try:
...
...
@@ -221,6 +238,11 @@ def gaze(request):
            subject_list.append(subject_serialized.data)

    # handling the keyError
    except KeyError as exc:
        return redirect('/401')

    # handling the general exceptions
    except Exception as exc:
        return redirect('/500')
...
...
@@ -240,7 +262,7 @@ def processGaze(request):
# the corresponding view for pose estimation
@login_required(login_url='/login')
@login_required(login_url='/user-direct')
def pose(request):
    try:
...
...
@@ -295,7 +317,7 @@ def webcam(request):
    return redirect('/')


# to process video for emotion detection
@login_required(login_url='/login')
@login_required(login_url='/user-direct')
def video(request):
    title = 'Student and Lecturer Performance Enhancement System'
    video_name = request.GET.get('video_name')
...
...
@@ -310,7 +332,7 @@ def video(request):
# extractor view
@login_required(login_url='/login')
@login_required(login_url='/user-direct')
def extractor(request):
    folder = os.path.join(BASE_DIR, os.path.join('static\\FirstApp\\videos'))
    videoPaths = [os.path.join(folder, file) for file in os.listdir(folder)]
...
...
@@ -358,7 +380,7 @@ def child(request):
    return render(request, 'FirstApp/child.html', {'template_name': 'FirstApp/base.html'})


# displaying video results
@login_required(login_url='/login')
@login_required(login_url='/user-direct')
def video_result(request):
    try:
...
...
@@ -434,7 +456,11 @@ def video_result(request):
                # append to the list
                due_lecture_list.append(obj)

    # handling the keyError
    except KeyError as exc:
        return redirect('/401')

    # handling the general exceptions
    except Exception as exc:
        print('what is wrong?: ', exc)
        return redirect('/500')
...
...
@@ -444,7 +470,7 @@ def video_result(request):
# view for emotion page
@login_required(login_url='/login')
@login_required(login_url='/user-direct')
def emotion_view(request):
    try:
...
...
@@ -463,6 +489,11 @@ def emotion_view(request):
            subject_list.append(subject_serialized.data)

    # handling the keyError
    except KeyError as exc:
        return redirect('/401')

    # handling the general exceptions
    except Exception as exc:
        return redirect('/500')
...
...
@@ -490,6 +521,7 @@ def loggedInView(request):
            login(request, user)

            # setting up the session
            request.session['lecturer'] = lecturer.id
            request.session['user_type'] = "Lecturer"

            return redirect('/')
...
...
@@ -506,7 +538,7 @@ def logoutView(request):
    logout(request)

-    return redirect('/login')
+    return redirect('/user-direct')

# 500 error page
...
@@ -519,7 +551,7 @@ def tables(request):
    return render(request, "FirstApp/tables.html")

-@login_required(login_url='/login')
+@login_required(login_url='/user-direct')
def activity(request):
    try:
...
@@ -538,6 +570,11 @@ def activity(request):
            subject_list.append(subject_serialized.data)

    # handling the keyError
    except KeyError as exc:
        return redirect('/401')
    # handling the general exception
    except Exception as exc:
        return redirect('/500')
...
@@ -545,4 +582,61 @@ def activity(request):

def test(request):
-    return render(request, "FirstApp/pdf_template.html")
\ No newline at end of file
+    return render(request, "FirstApp/pdf_template.html")
# this method will render the user-direct page
def userDirect(request):
    return render(request, "FirstApp/user_direct.html")


# this method will handle the user redirection process
def processUserRedirect(request):
    if request.POST:
        user_type = request.POST.get('user_type')

        if user_type == 'admin':
            return redirect('/admin-login')
        elif user_type == 'lecturer':
            return redirect('/login')

    return redirect('/500')
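processUserRedirect only looks at a posted user_type field and sends the browser to the matching login page. A minimal sketch of driving it, assuming the view is routed at /process-user-redirect (the URL name is not shown in this diff):

# Illustrative only: posts the 'user_type' field that processUserRedirect reads.
from django.test import Client

def demo_user_redirect():
    client = Client()
    response = client.post('/process-user-redirect', {'user_type': 'admin'})
    # the 'admin' branch above answers with a redirect to the admin login page
    assert response.status_code == 302
    assert response['Location'] == '/admin-login'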
# admin login page
def adminLogin(request):
    return render(request, "FirstApp/admin_login.html")


# this method will process the admin login
def processAdminLogin(request):
    username = "not logged in"
    message = "Invalid Username or Password"
    adminLoginForm = AdminLoginForm(request.POST)

    print('message: ', message)

    try:
        # if the details are valid, let the user log in
        if adminLoginForm.is_valid():
            email = adminLoginForm.cleaned_data.get('email')
            user = User.objects.get(email=email)
            admin = Admin.objects.get(email=email)

            login(request, user)

            # setting up the session
            request.session['admin'] = admin.id
            request.session['user_type'] = "Admin"

            return redirect('/lecturer')
        else:
            message = "Please provide correct credentials"

    except Exception as exc:
        print('exception: ', exc)

    return render(request, 'FirstApp/admin_login.html', {'message': message})
\ No newline at end of file
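Both login flows now stamp the session with an id and a user_type ("Lecturer" in loggedInView, "Admin" here). A minimal sketch of how a later view could branch on those keys; the decorator name is illustrative and not part of this commit:

# Illustrative only: reads the session keys set by loggedInView / processAdminLogin.
from django.shortcuts import redirect

def require_admin(view_func):
    """Hypothetical decorator that lets only 'Admin' sessions through."""
    def wrapper(request, *args, **kwargs):
        if request.session.get('user_type') != "Admin":
            return redirect('/401')
        return view_func(request, *args, **kwargs)
    return wrapper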
MonitorLecturerApp/api.py
...
@@ -210,3 +210,44 @@ class LecturerAudioSummaryPeriodAPI(APIView):
        })
# this section is for student and lecturer behavior integration
class StudentLecturerIntegratedAPI(APIView):

    def get(self, request):
        video_name = request.query_params.get('video_name')

        # finding the existence of a Lecturer activity frame recognition record
        isExist = LecturerActivityFrameRecognitions.objects.filter(
            lecturer_meta_id__lecturer_video_id__lecture_video_name=video_name).exists()

        if (isExist):
            lecture_activity_frame_recognitions = LecturerActivityFrameRecognitions.objects.filter(
                lecturer_meta_id__lecturer_video_id__lecture_video_name=video_name)
            lecture_activity_frame_recognitions_ser = LecturerActivityFrameRecognitionsSerializer(
                lecture_activity_frame_recognitions, many=True)
            lecture_activity_frame_recognitions_data = lecture_activity_frame_recognitions_ser.data[0]

            frame_detections = lecture_activity_frame_recognitions_data['frame_recognition_details']
            fps = lecture_activity_frame_recognitions_data['fps']
            int_fps = int(fps)

            return Response({
                "frame_recognitions": frame_detections,
                "fps": fps
            })

        else:
            # frame_recognitions = classroom_activity.get_lecturer_activity_for_frames(video_name)
            frame_recognitions, fps = classroom_activity.save_frame_recognition(video_name)
            int_fps = int(fps)

            # print('frame recognitions: ', frame_recognitions)

            return Response({
                "frame_recognitions": frame_recognitions,
                "fps": fps
            })
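A quick sketch of how a client could call this endpoint. The path comes from the get-lecturer-video-frame-recognitions route added in MonitorLecturerApp/urls.py further down; the host, URL prefix (/lecturer/, as the sidebar links suggest) and video name are placeholders:

# Illustrative only: fetches the frame-level recognitions for one lecturer video.
import requests

def fetch_frame_recognitions(video_name="Lecture01.mp4"):
    url = "http://localhost:8000/lecturer/get-lecturer-video-frame-recognitions/"
    resp = requests.get(url, params={"video_name": video_name})
    data = resp.json()
    # the response carries the per-frame percentages and the video fps
    return data["frame_recognitions"], data["fps"]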
MonitorLecturerApp/logic/classroom_activity.py
...
@@ -5,6 +5,13 @@ import numpy as np
import cv2
import os

from FirstApp.logic.custom_sorter import custom_object_sorter
from FirstApp.logic.id_generator import generate_new_id
from MonitorLecturerApp.models import LecturerVideoMetaData, LecturerActivityFrameRecognitions, \
    LecturerActivityFrameRecognitionDetails
from MonitorLecturerApp.serializers import LecturerVideoMetaDataSerializer


def activity_recognition(video_name):
    BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    CLASSIFIER_DIR = os.path.join(BASE_DIR, "MonitorLecturerApp\\models")
...
@@ -108,3 +115,162 @@ def activity_recognition(video_name):


# this method will calculate the lecturer activity for each frame
def get_lecturer_activity_for_frames(video_name):
    BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\lecturer_videos\\{}".format(video_name))

    # CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
    # CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_03.h5")
    CLASSIFIER_DIR = os.path.join(BASE_DIR, "MonitorLecturerApp\\models")
    CLASSIFIER_PATH = os.path.join(CLASSIFIER_DIR, "keras_model_updated.h5")

    # load our serialized person detection model from disk
    print("[INFO] loading model...")

    np.set_printoptions(suppress=True)

    class_labels = ['Seated Teaching', 'Teaching by Standing', 'Teaching by Walking']

    model = tensorflow.keras.models.load_model(CLASSIFIER_PATH)
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])

    data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
    size = (224, 224)

    # iteration
    video = cv2.VideoCapture(VIDEO_DIR)
    no_of_frames = video.get(cv2.CAP_PROP_FRAME_COUNT)
    fps = video.get(cv2.CAP_PROP_FPS)

    print('fps: ', fps)

    frame_count = 0

    # frame activity recognitions
    frame_activity_recognitions = []

    # for testing purposes
    print('starting the frame activity recognition process')

    # looping through the frames
    while (frame_count < no_of_frames):

        # define the count variables for each frame
        sitting_count = 0
        standing_count = 0
        walking_count = 0

        ret, image = video.read()

        # derive the frame name
        frame_name = "frame-{}".format(frame_count)

        frame_details = {}
        frame_details['frame_name'] = frame_name

        detection = cv2.resize(image, size)

        image_array = np.asarray(detection)
        normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1

        # Load the image into the array
        data[0] = normalized_image_array

        # run the inference
        prediction = model.predict(data)

        label = class_labels[prediction.argmax()]

        # increment the relevant count, based on the label
        if (label == class_labels[0]):
            sitting_count += 1
        elif (label == class_labels[1]):
            standing_count += 1
        elif (label == class_labels[2]):
            walking_count += 1

        print('current frame: ', frame_count)

        # increment frame count
        frame_count += 1

        # calculating the percentages for the frame
        sitting_perct = float(sitting_count) * 100
        standing_perct = float(standing_count) * 100
        walking_perct = float(walking_count) * 100

        # adding the percentage values to the frame details
        frame_details['sitting_perct'] = sitting_perct
        frame_details['standing_perct'] = standing_perct
        frame_details['walking_perct'] = walking_perct

        # push to all the frame details
        frame_activity_recognitions.append(frame_details)

    # sort the recognitions based on the frame number
    sorted_activity_frame_recognitions = custom_object_sorter(frame_activity_recognitions)

    # for testing purposes
    print('ending the frame activity recognition process')

    # return the detected frame percentages
    return sorted_activity_frame_recognitions, fps
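The per-frame preprocessing above follows the usual Teachable-Machine-style recipe: resize to 224x224 and rescale pixel values into roughly [-1, 1] before predicting. A standalone sketch of that step on a single image (the file name is a placeholder):

# Illustrative only: the same normalisation applied to one frame read from disk.
import cv2
import numpy as np

def preprocess_frame(path="sample_frame.jpg"):
    image = cv2.imread(path)
    resized = cv2.resize(image, (224, 224))
    # scale 8-bit pixel values into roughly [-1, 1], matching the loop above
    normalized = (resized.astype(np.float32) / 127.0) - 1
    return normalized.reshape(1, 224, 224, 3)   # batch of one, ready for model.predict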
# this section will handle saving activity entities to the database
def save_frame_recognition(video_name):

    # for testing purposes
    print('starting the saving activity frame recognition process')

    # retrieve the lecture activity id
    lec_activity = LecturerVideoMetaData.objects.filter(lecturer_video_id__lecture_video_name=video_name)
    lec_activity_ser = LecturerVideoMetaDataSerializer(lec_activity, many=True)
    lec_activity_data = lec_activity_ser.data[0]
    lec_activity_id = lec_activity_data['id']

    # create a new lecture activity frame detections id
    last_lec_activity_frame_recognitions = LecturerActivityFrameRecognitions.objects.order_by(
        'lecturer_activity_frame_recognition_id').last()
    new_lecture_activity_frame_recognitions_id = "LLAFR00001" if (last_lec_activity_frame_recognitions is None) else \
        generate_new_id(last_lec_activity_frame_recognitions.lecturer_activity_frame_recognition_id)

    # calculate the frame detections
    frame_detections, fps = get_lecturer_activity_for_frames(video_name)

    frame_recognition_details = []

    # save the new lecture activity frame recognitions
    for detection in frame_detections:
        lec_activity_frame_recognition_details = LecturerActivityFrameRecognitionDetails()
        lec_activity_frame_recognition_details.frame_name = detection['frame_name']
        lec_activity_frame_recognition_details.sitting_perct = detection['sitting_perct']
        lec_activity_frame_recognition_details.standing_perct = detection['standing_perct']
        lec_activity_frame_recognition_details.walking_perct = detection['walking_perct']

        frame_recognition_details.append(lec_activity_frame_recognition_details)

    lec_activity_frame_recognitions = LecturerActivityFrameRecognitions()
    lec_activity_frame_recognitions.lecturer_activity_frame_recognition_id = new_lecture_activity_frame_recognitions_id
    lec_activity_frame_recognitions.lecturer_meta_id_id = lec_activity_id
    lec_activity_frame_recognitions.frame_recognition_details = frame_recognition_details
    lec_activity_frame_recognitions.fps = float(fps)
    lec_activity_frame_recognitions.save()

    # for testing purposes
    print('ending the saving activity frame recognition process')

    # now return the frame detections
    return frame_detections, fps
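New records follow the pattern "LLAFR00001", "LLAFR00002", and so on. generate_new_id from FirstApp.logic.id_generator is not shown in this diff, so the sketch below is only an assumed illustration of how such an id could be advanced:

# Illustrative only: one possible way to derive the next id in the 'LLAFR00001' series.
import re

def next_id(previous_id, width=5):
    prefix = re.match(r'\D+', previous_id).group()   # e.g. 'LLAFR'
    number = int(previous_id[len(prefix):]) + 1      # e.g. 1 -> 2
    return "{}{:0{width}d}".format(prefix, number, width=width)

# next_id("LLAFR00001") -> "LLAFR00002"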
MonitorLecturerApp/migrations/0005_lectureractivityframerecognitions.py
0 → 100644
# Generated by Django 2.2.11 on 2020-10-25 10:09

import MonitorLecturerApp.models
from django.db import migrations, models
import django.db.models.deletion
import djongo.models.fields


class Migration(migrations.Migration):

    dependencies = [
        ('MonitorLecturerApp', '0004_lecturervideometadata_lecturer_video_id'),
    ]

    operations = [
        migrations.CreateModel(
            name='LecturerActivityFrameRecognitions',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('lecturer_activity_frame_recognition_id', models.CharField(max_length=15)),
                ('frame_recognition_details', djongo.models.fields.ArrayField(
                    model_container=MonitorLecturerApp.models.LecturerActivityFrameRecognitionDetails)),
                ('lecturer_meta_id', models.ForeignKey(
                    on_delete=django.db.models.deletion.CASCADE, to='MonitorLecturerApp.LecturerVideoMetaData')),
            ],
        ),
    ]
MonitorLecturerApp/migrations/0006_lectureractivityframerecognitions_fps.py
0 → 100644
# Generated by Django 2.2.11 on 2020-10-25 10:52

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('MonitorLecturerApp', '0005_lectureractivityframerecognitions'),
    ]

    operations = [
        migrations.AddField(
            model_name='lectureractivityframerecognitions',
            name='fps',
            field=models.FloatField(default=30.0),
        ),
    ]
MonitorLecturerApp/models.py
...
@@ -87,3 +87,27 @@ class LecturerAudioText (models.Model):
    def __str__(self):
        return self.lecturer_audio_text_id


# this abstract class will contain the lecturer activity frame recognition details
class LecturerActivityFrameRecognitionDetails(models.Model):
    frame_name = models.CharField(max_length=15)
    sitting_perct = models.FloatField()
    standing_perct = models.FloatField()
    walking_perct = models.FloatField()

    class Meta:
        abstract = True


# this class will contain the lecturer activity frame recognitions
class LecturerActivityFrameRecognitions(models.Model):
    lecturer_activity_frame_recognition_id = models.CharField(max_length=15)
    lecturer_meta_id = models.ForeignKey(LecturerVideoMetaData, on_delete=models.CASCADE)
    frame_recognition_details = models.ArrayField(LecturerActivityFrameRecognitionDetails)
    fps = models.FloatField(default=30.0)

    def __str__(self):
        return self.lecturer_activity_frame_recognition_id
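Because frame_recognition_details is a djongo ArrayField, each LecturerActivityFrameRecognitions row is stored as a single MongoDB document with the per-frame records embedded in it. Roughly, and with made-up values, a saved record would look like the sketch below (the exact field names the driver writes are an assumption, not taken from this diff):

# Illustrative only: approximate shape of one stored document.
example_document = {
    "lecturer_activity_frame_recognition_id": "LLAFR00001",
    "lecturer_meta_id_id": 3,          # foreign key to LecturerVideoMetaData
    "fps": 25.0,
    "frame_recognition_details": [
        {"frame_name": "frame-0", "sitting_perct": 100.0, "standing_perct": 0.0, "walking_perct": 0.0},
        {"frame_name": "frame-1", "sitting_perct": 0.0, "standing_perct": 100.0, "walking_perct": 0.0},
    ],
}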
MonitorLecturerApp/serializers.py
...
@@ -2,7 +2,7 @@ from rest_framework import serializers
from FirstApp.serializers import LecturerSerializer, SubjectSerializer
from LectureSummarizingApp.models import LectureAudioSummary
-from .models import RegisterTeacher
+from .models import RegisterTeacher, LecturerActivityFrameRecognitions
from .models import LecturerAudioText, LecturerVideoMetaData, LecturerVideo, LectureRecordedVideo
...
@@ -43,4 +43,36 @@ class LecturerVideoMetaDataSerializer(serializers.ModelSerializer):

    class Meta:
        model = LecturerVideoMetaData
-        fields = '__all__'
\ No newline at end of file
+        fields = '__all__'


# lecturer activity frame recognition serializer
class LecturerActivityFrameRecognitionsSerializer(serializers.ModelSerializer):
    lecturer_meta_id = LecturerVideoMetaDataSerializer()
    frame_recognition_details = serializers.SerializerMethodField()

    # this method will be used to serialize the 'frame_recognition_details' field
    def get_frame_recognition_details(self, obj):
        return_data = []

        for frame_recognition in obj.frame_recognition_details:
            recognition = {}
            recognition["frame_name"] = frame_recognition.frame_name
            recognition["sitting_perct"] = frame_recognition.sitting_perct
            recognition["standing_perct"] = frame_recognition.standing_perct
            recognition["walking_perct"] = frame_recognition.walking_perct

            return_data.append(recognition)

        # return the data
        return return_data

    class Meta:
        model = LecturerActivityFrameRecognitions
        fields = '__all__'
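The SerializerMethodField flattens the embedded model instances into plain dicts, which is what StudentLecturerIntegratedAPI above reads back out. A minimal sketch of that round trip (the video name is a placeholder; imports are the model and serializer defined above):

# Illustrative only: how the API consumes this serializer's output.
records = LecturerActivityFrameRecognitions.objects.filter(
    lecturer_meta_id__lecturer_video_id__lecture_video_name="Lecture01.mp4")
serialized = LecturerActivityFrameRecognitionsSerializer(records, many=True).data[0]

frame_detections = serialized['frame_recognition_details']   # list of per-frame dicts
fps = serialized['fps']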
MonitorLecturerApp/templates/MonitorLecturerApp/index.html
{% extends 'FirstApp/template.html' %}

<!DOCTYPE html>
<html lang="en">

<head>
    <meta charset="utf-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
    <meta name="description" content="">
    <meta name="author" content="">

    <title>SLPES</title>

    {% load static %}

    <!-- Custom fonts for this template-->
    <link rel="shortcut icon" href="{% static 'FirstApp/images/favicon.ico' %}" type="image/x-icon"/>
    <link href="{% static 'FirstApp/css/all.min.css' %}" rel="stylesheet" type="text/css">
    <link href="https://fonts.googleapis.com/css?family=Nunito:200,200i,300,300i,400,400i,600,600i,700,700i,800,800i,900,900i" rel="stylesheet">

    <!-- Custom styles for this template-->
    <link href="{% static 'FirstApp/css/sb-admin-2.min.css' %}" rel="stylesheet">
</head>

<body id="page-top">

{% block javascript %}
    {% load static %}

    <!-- Bootstrap core JavaScript-->
    <script src="{% static 'FirstApp/vendor/jquery/jquery.min.js' %}"></script>
    <script src="{% static 'FirstApp/vendor/bootstrap/js/bootstrap.bundle.min.js' %}"></script>
...
@@ -321,6 +302,8 @@
        });
    </script>
{% endblock %}

<!-- Page Wrapper -->
<div id="wrapper">
...
@@ -353,23 +336,23 @@
            <div class="sidebar-heading">
            </div>

            <!-- Nav Item - Pages Collapse Menu -->
            <li class="nav-item">
                <a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapsePages"
                   aria-expanded="true" aria-controls="collapsePages">
                    <i class="fas fa-fw fa-folder"></i>
                    <span>Pages</span>
                </a>
                <div id="collapsePages" class="collapse" aria-labelledby="headingPages" data-parent="#accordionSidebar">
                    <div class="bg-white py-2 collapse-inner rounded">
                        <!-- <h6 class="collapse-header">Login Screens:</h6>-->
                        <a class="collapse-item" href="index.html">Dashboard</a>
                        <a class="collapse-item" href="/lecturer/lecture-video">Video Page</a>
                    </div>
                </div>
            </li>

{##}
{#            <!-- Nav Item - Pages Collapse Menu -->#}
{#            <li class="nav-item">#}
{#                <a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapsePages"#}
{#                   aria-expanded="true" aria-controls="collapsePages">#}
{#                    <i class="fas fa-fw fa-folder"></i>#}
{#                    <span>Pages</span>#}
{#                </a>#}
{#                <div id="collapsePages" class="collapse" aria-labelledby="headingPages" data-parent="#accordionSidebar">#}
{#                    <div class="bg-white py-2 collapse-inner rounded">#}
{#                        <!-- <h6 class="collapse-header">Login Screens:</h6>-->#}
{#                        <a class="collapse-item" href="index.html">Dashboard</a>#}
{#                        <a class="collapse-item" href="/lecturer/lecture-video">Video Page</a>#}
{##}
{#                    </div>#}
{#                </div>#}
{#            </li>#}

            <!-- Divider -->
            <hr class="sidebar-divider d-none d-md-block">
...
@@ -392,7 +375,8 @@
        <!-- End of Topbar -->

{% block 'container-fluid' %}
    {% load static %}

        <!-- Begin Page Content -->
        <div class="container-fluid">
...
@@ -647,6 +631,7 @@
        </div>
        <!-- /.container-fluid -->
{% endblock %}

    </div>
    <!-- End of Main Content -->
...
@@ -667,6 +652,8 @@
</div>
<!-- End of Page Wrapper -->

{% block 'modal' %}
    <!-- Scroll to Top Button-->
    <a class="scroll-to-top rounded" href="#page-top">
        <i class="fas fa-angle-up"></i>
...
@@ -686,7 +673,7 @@
                <div class="modal-body">Select "Logout" below if you are ready to end your current session.</div>
                <div class="modal-footer">
                    <button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
-                    <a class="btn btn-primary" href="login.html">Logout</a>
+                    <a class="btn btn-primary" href="/logout">Logout</a>
                </div>
            </div>
        </div>
...
@@ -870,6 +857,9 @@
    <script src="{% static 'FirstApp/js/demo/chart-area-demo.js' %}"></script>
    <script src="{% static 'FirstApp/js/demo/chart-pie-demo.js' %}"></script>

{% endblock %}

</body>
</html>
MonitorLecturerApp/templates/MonitorLecturerApp/lecVideo.html
-{% extends 'MonitorLecturerApp/template.html' %}
+{% extends 'FirstApp/template.html' %}

<!DOCTYPE html>
<html lang="en">
<body id="page-top">
...
@@ -154,6 +154,8 @@
                    <tbody>
                    {% for video in Videos %}
{#                    {% for video in lecturer_videos %} #}
                        <tr>
                            <td>{{video.name}}</td>
                            <td>{{video.duration}}</td>
...
MonitorLecturerApp/urls.py
...
@@ -24,6 +24,9 @@ urlpatterns = [
    path('lecture-video', views.lecVideo),
    # path('Video', views.hello)

    # delete this path later
    path('test-frame-recognitions', views.testFrameRecognitions),

    ##### LECTURER ACTIVITY SECTION #####

    # API to retrieve activity recognition
    url(r'^activities/$', api.ActivityRecognitionAPI.as_view()),
...
@@ -31,6 +34,9 @@ urlpatterns = [
    # API to retrieve lecturer video meta data results
    url(r'^get-lecturer-video-results/$', api.GetLectureVideoResultsAPI.as_view()),

    # API to retrieve lecturer video frame recognitions
    url(r'^get-lecturer-video-frame-recognitions/$', api.StudentLecturerIntegratedAPI.as_view()),

    ##### END OF LECTURER ACTIVITY SECTION #####
...
MonitorLecturerApp/views.py
-from django.shortcuts import render
+from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.conf.urls import url
from rest_framework import routers
...
@@ -43,68 +43,82 @@ def startup (request) :

def hello(request):
    # page = '<h1>THIS IS MY HOME</h1>' + '<h2> Hello Ishan</h2>' + '<button>Click Me</button>'

    try:
        lec_list = []
        admin = request.session['admin']

        obj = {'Message': 'Student and Lecturer Performance Enhancement System'}
        folder = os.path.join(BASE_DIR, os.path.join('static\\FirstApp\\lecturer_videos'))
        videoPaths = [os.path.join(folder, file) for file in os.listdir(folder)]
        videos = []
        durations = []

        # retrieve audio details from db
        lecture_audio = LectureAudio.objects.all()
        lec_audio_serializer = LectureAudioSerializer(lecture_audio, many=True)
        lec_audio_data = lec_audio_serializer.data

        for audio in lec_audio_data:
            lec_audio_object = {}
            lec_audio_object["id"] = audio["id"]
            lec_audio_object["date"] = audio["lecturer_date"]
            lec_audio_object["subject"] = audio["subject"]["name"]
            lec_audio_object["lecturer"] = audio["lecturer"]["fname"] + " " + audio["lecturer"]["lname"]
            lec_audio_object["lecturer_id"] = audio["lecturer"]["id"]

            # append to the list
            lec_list.append(lec_audio_object)

        # the list needs to be sorted by the date
        lec_list.sort(key=lambda date: dt.strptime(str(date['date']), "%Y-%m-%d"), reverse=True)

        # retrieve existing lecture recorded videos
        lec_video_meta = LecturerVideoMetaData.objects.all()
        lec_video_meta_ser = LecturerVideoMetaDataSerializer(lec_video_meta, many=True)
        lec_video_meta_data = lec_video_meta_ser.data

        for videoPath in videoPaths:
            video = LecturerVideo()
            video = {}
            cap = cv2.VideoCapture(videoPath)
            fps = cap.get(cv2.CAP_PROP_FPS)  # OpenCV2 version 2 used "CV_CAP_PROP_FPS"
            frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            duration = int(frame_count / fps)
            durations.append(duration)
            videoName = os.path.basename(videoPath)
            # videoName = videos.append(os.path.basename(videoPath))
            durationObj = datetime.timedelta(seconds=duration)
            video['path'] = videoPath
            video['name'] = videoName
            video['duration'] = str(durationObj)
            video['video_id'] = None

            # checking whether this video already exists
            for recorded_lecture in lec_video_meta_data:
                print('recorded lecture: ', recorded_lecture)

                if videoName == recorded_lecture['lecturer_video_id']['lecture_video_name']:
                    video['isAvailable'] = True
                    video['video_id'] = recorded_lecture['lecturer_video_id']['id']

            videos.append(video)
            print('Video Name: ', video['name'])

        context = {'object': obj, 'Videos': videos, 'durations': durations,
                   'template_name': 'MonitorLecturerApp/template.html', 'lec_list': lec_list}

        return render(request, 'MonitorLecturerApp/index.html', context)

    # in case the 'admin' session is not there
    except KeyError as exc:
        return redirect('/401')
    # in case of general exceptions
    except Exception as exc:
        print('exception: ', exc)
        return redirect('/500')
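The duration shown for each listed video is simply frame count divided by fps, formatted through datetime.timedelta. A small worked sketch of that computation with made-up numbers:

# Illustrative only: a 30 fps clip with 5400 frames runs for 3 minutes.
import datetime

frame_count = 5400                            # hypothetical CAP_PROP_FRAME_COUNT value
fps = 30.0                                    # hypothetical CAP_PROP_FPS value
duration = int(frame_count / fps)             # 180 seconds
print(datetime.timedelta(seconds=duration))   # 0:03:00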
def view404(request):
...
@@ -173,3 +187,6 @@ def lecVideo(request):
# for audioPath in audiopaths:
#     audio = tAudio()

def testFrameRecognitions(request):
    return render(request, "MonitorLecturerApp/test_frame_recognitions.html")
\ No newline at end of file