Sachith Fernando / 2020-101 / Commits / 9fa0f329

Commit 9fa0f329 authored Dec 19, 2020 by I.K Seneviratne
parent 505c9017

    Committing the modification in some files.

Showing 3 changed files, with 129 additions and 71 deletions (+129 / −71):
- FirstApp/emotion_detector.py (+7 / −6)
- FirstApp/logic/activity_recognition.py (+119 / −63)
- FirstApp/templates/FirstApp/video_results.html (+3 / −2)
FirstApp/emotion_detector.py

```diff
@@ -216,18 +216,19 @@ def get_frame_emotion_recognition(video_name):
         surprise_count = 0

         # get the detections
-        detections = ar.person_detection(image, net)
+        detections, persons = ar.person_detection(image, net)

         # to count the extracted detections for a frame
         detection_count = 0

         # if there are detections
         if (len(detections) > 0):

             # loop through the detections
-            for detection in detections:
+            for person in persons:

-                label = emotion_recognition(classifier, face_classifier, detection)
+                label = emotion_recognition(classifier, face_classifier, person)

                 # checking for the label
                 if label == class_labels[0]:
```
```diff
@@ -422,17 +423,17 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
         neutral_count = 0
         detection_count = 0

-        detections = ar.person_detection(image, net)
+        detections, persons = ar.person_detection(image, net)

         # if there are detections
         if (len(detections) > 0):

             # looping through the detections in each frame
-            for detection in detections:
+            for person in persons:

                 # run the model and get the emotion label
-                label = emotion_recognition(classifier, face_classifier, detection)
+                label = emotion_recognition(classifier, face_classifier, person)

                 # increment the count based on the label
                 if label == class_labels[0]:
```
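Both hunks make the same substitution: the loops now iterate the cropped person images in `persons` and pass a crop, rather than a coordinate dictionary, to `emotion_recognition`. That helper is not part of this diff; purely as a hypothetical sketch of the shape of function these calls assume (the label set, cascade usage, and 48x48 input size are assumptions, not taken from the repository):

```python
import cv2
import numpy as np

def emotion_recognition(classifier, face_classifier, person):
    """Hypothetical sketch: locate a face inside a cropped person image
    (BGR, any size) and classify its emotion with a Keras model."""
    class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']  # assumed order
    gray = cv2.cvtColor(person, cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
    for (x, y, w, h) in faces:
        roi = cv2.resize(gray[y:y + h, x:x + w], (48, 48))
        roi = roi.astype(np.float32) / 255.0              # scale pixels to [0, 1]
        roi = np.expand_dims(np.expand_dims(roi, -1), 0)  # shape (1, 48, 48, 1)
        prediction = classifier.predict(roi)
        return class_labels[prediction.argmax()]
    return ''  # no face found in this crop
```

Passing the crop instead of the coordinate dictionary matters because, after the `person_detection` change in `activity_recognition.py` below, `detections` no longer holds images at all.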
FirstApp/logic/activity_recognition.py

```diff
@@ -38,8 +38,9 @@ def activity_recognition(video_path):
     VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_path))
     # CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
     # CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_03.h5")
-    CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_04.h5")
-    ACTIVITY_DIR = os.path.join(BASE_DIR, "static\\FirstApp\\activity")
+    # CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_04.h5")
+    CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_06.h5")
+    # ACTIVITY_DIR = os.path.join(BASE_DIR, "static\\FirstApp\\activity")

     # files required for person detection
     config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
```
```diff
@@ -55,7 +56,9 @@ def activity_recognition(video_path):
     np.set_printoptions(suppress=True)

     # define the student activity labels
-    class_labels = ['Phone checking', 'Listening', 'Note taking']
+    # class_labels = ['Phone checking', 'Listening', 'Note taking']
+    class_labels = ['Phone checki...', 'Listening', 'Note taking']

     # load the model
     model = tensorflow.keras.models.load_model(CLASSIFIER_DIR)
```
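The labels are only ever consumed by position (`class_labels[prediction.argmax()]` further down in this file), so what matters is that the list order matches the model's output units; the commented-out line keeps the full label text for reference. A minimal sketch of that index-to-label mapping, with a made-up softmax output:

```python
import numpy as np

class_labels = ['Phone checking', 'Listening', 'Note taking']

# hypothetical softmax output of model.predict(data) for a single image
prediction = np.array([[0.10, 0.75, 0.15]])

label = class_labels[prediction.argmax()]
print(label)  # -> Listening
```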
```diff
@@ -81,13 +84,17 @@ def activity_recognition(video_path):
     # for testing purposes
     print('starting the activity recognition process')

+    # initiailizing the video writer
+    vid_cod = cv2.VideoWriter_fourcc(*'XVID')
+    output = cv2.VideoWriter("videos/cam_video.mp4", vid_cod, 30.0, size)
+
     # looping through the frames
     while (frame_count < no_of_frames):
         ret, image = video.read()
-        image = cv2.resize(image, size)
+        # image = cv2.resize(image, size)

         # perform person detection on the extracted image
-        detections = person_detection(image, net)
+        detections, persons = person_detection(image, net)

         # this is for testing purposes
         print('frame count: ', frame_count)
```
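The new writer records the annotated frames back out to disk. One caveat worth flagging: `'XVID'` is an AVI-era FourCC, and pairing it with a `.mp4` file name yields a broken or unplayable file on many OpenCV builds; `'mp4v'` is the FourCC usually paired with an `.mp4` container. A minimal sketch of the writer's life cycle under that assumption (the input path is hypothetical):

```python
import cv2

size = (224, 224)  # the size handed to VideoWriter must match every written frame
fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # 'mp4v' matches the .mp4 container
output = cv2.VideoWriter("videos/cam_video.mp4", fourcc, 30.0, size)

video = cv2.VideoCapture("videos/input.mp4")  # hypothetical input
while True:
    ret, frame = video.read()
    if not ret:
        break
    output.write(cv2.resize(frame, size))

video.release()
output.release()  # finalizes the file; skipping this often leaves it unplayable
```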
```diff
@@ -102,13 +109,26 @@ def activity_recognition(video_path):
         # initialize the detection count
         detection_count = 0

+        # to iterate each person
+        no_of_persons = 0
+
         # looping through the person detections of the frame
         for detection in detections:
-            detection = cv2.resize(detection, size)

-            image_array = np.asarray(detection)
-            normalized_image_array = (detection.astype(np.float32) / 127.0) - 1
+            # get the coordinates for the detection
+            startX = detection['startX']
+            startY = detection['startY']
+            endX = detection['endX']
+            endY = detection['endY']
+            # detection = cv2.resize(detection, size)
+
+            # draw the coordinates of the persons' identified
+            cv2.rectangle(image, (startX, startY), (endX, endY), (0, 255, 0), 5)
+
+            image_array = np.asarray(persons[no_of_persons])
+            image_array_resized = cv2.resize(image_array, size)
+
+            # normalized_image_array = (detection.astype(np.float32) / 127.0) - 1
+            normalized_image_array = (image_array_resized.astype(np.float32) / 127.0) - 1

             # Load the image into the array
             data[0] = normalized_image_array
```
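The rewritten loop body reads each bounding box out of its coordinate dictionary, draws it on the frame, and then normalizes the matching crop from `persons`. The `(1, 224, 224, 3)` float32 buffer combined with `(x / 127.0) - 1` scaling is the stock preprocessing of Teachable Machine-style Keras exports, which these `.h5` classifiers appear to be. One thing to watch: `no_of_persons` starts at 0 and is not incremented anywhere in the lines this hunk shows, so as committed the loop would keep reading `persons[0]`. A standalone sketch of the preprocessing itself, with a synthetic crop:

```python
import cv2
import numpy as np

size = (224, 224)
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)

# synthetic stand-in for one entry of `persons`: a BGR crop of arbitrary size
person_crop = np.random.randint(0, 256, (180, 90, 3), dtype=np.uint8)

image_array = np.asarray(person_crop)
image_array_resized = cv2.resize(image_array, size)

# map uint8 pixels into roughly [-1, 1] for the classifier
normalized_image_array = (image_array_resized.astype(np.float32) / 127.0) - 1
data[0] = normalized_image_array

print(data.shape)  # (1, 224, 224, 3), ready for model.predict(data)
```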
```diff
@@ -132,6 +152,9 @@ def activity_recognition(video_path):
         # increment the frame count
         frame_count += 1

+        # write the frame to the video writer
+        output.write(image)
+
     # calculating the percentages for each label
     phone_perct = float(phone_checking_count / total_detections) * 100 if total_detections > 0 else 0
```
```diff
@@ -163,6 +186,7 @@ def person_detection(image, net):
     # set the threshold balue
     threshold = 0.2
     detected_person = []
+    persons = []

     # initialize the list of class labels MobileNet SSD was trained to
     # detect, then generate a set of bounding box colors for each class
```
```diff
@@ -211,14 +235,22 @@ def person_detection(image, net):
             startX = 0 if startX < 0 else startX
             startY = 0 if startY < 0 else startY

-            # extract the person
+            # this dictionary will contain the bounding box coordinates
+            coordinates = {}
             person = image[startY:startY + endY, startX:startX + endX]
-            detected_person.append(person)
+
+            coordinates['startX'] = startX
+            coordinates['startY'] = startY
+            coordinates['endX'] = endX
+            coordinates['endY'] = endY
+
+            persons.append(person)
+            detected_person.append(coordinates)

             person_count += 1

     # return the detection person list
-    return detected_person
+    return detected_person, persons

 # this method will recognize the activity for each frame
```
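With this hunk the function's contract changes: `detected_person` now carries bounding-box dictionaries rather than image crops, and the crops move to the new, index-aligned `persons` list, which is why every caller in this commit switches to `detections, persons = person_detection(image, net)`. Note the crop itself is unchanged and still slices `image[startY:startY + endY, startX:startX + endX]`, treating `endX`/`endY` as offsets, whereas the drawing code added above passes `(endX, endY)` to `cv2.rectangle` as an absolute corner. A small sketch of the new return shape as a caller sees it (the detector body is stubbed; the frame is synthetic):

```python
import numpy as np

def person_detection_stub(image):
    """Stub with the same return shape as the modified person_detection:
    a list of coordinate dicts plus an index-aligned list of crops."""
    startX, startY, endX, endY = 10, 20, 60, 120  # one fake detection
    coordinates = {'startX': startX, 'startY': startY, 'endX': endX, 'endY': endY}
    person = image[startY:startY + endY, startX:startX + endX]
    return [coordinates], [person]

frame = np.zeros((240, 320, 3), dtype=np.uint8)
detections, persons = person_detection_stub(frame)
for box, crop in zip(detections, persons):
    print(box['startX'], box['endY'], crop.shape)  # 10 120 (120, 60, 3)
```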
```diff
@@ -233,7 +265,8 @@ def get_frame_activity_recognition(video_name):
     VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
     # CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
     # CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_03.h5")
-    CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_04.h5")
+    # CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_04.h5")
+    CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_06.h5")

     # files required for person detection
     config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
```
```diff
@@ -247,7 +280,9 @@ def get_frame_activity_recognition(video_name):
     np.set_printoptions(suppress=True)

     # class labels
-    class_labels = ['Phone checking', 'Listening', 'Note taking']
+    # class_labels = ['Phone checking', 'Listening', 'Note taking']
+    class_labels = ['Phone checki...', 'Listening', 'Note taking']

     # load the activity recogntion model
     model = tensorflow.keras.models.load_model(CLASSIFIER_DIR)
```
```diff
@@ -295,19 +330,27 @@ def get_frame_activity_recognition(video_name):
         detection_count = 0
         detected_percentages = []

-        detections = person_detection(image, net)
+        detections, persons = person_detection(image, net)

         # if there are detections
         if (len(detections) > 0):
+            no_of_persons = 0

             # loop through each detection in the frame
             for detection in detections:
-                detection = cv2.resize(detection, size)

-                image_array = np.asarray(detection)
-                normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
+                # get the coordinates for the detection
+                startX = detection['startX']
+                startY = detection['startY']
+                endX = detection['endX']
+                endY = detection['endY']
+
+                image_array = np.asarray(persons[no_of_persons])
+                image_array_resized = cv2.resize(image_array, size)
+
+                # normalized_image_array = (detection.astype(np.float32) / 127.0) - 1
+                normalized_image_array = (image_array_resized.astype(np.float32) / 127.0) - 1

                 # Load the image into the array
                 data[0] = normalized_image_array
```
```diff
@@ -427,10 +470,21 @@ def get_student_activity_summary_for_period(activities):
 def activity_frame_groupings(video_name, frame_landmarks, frame_group_dict):
     BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
     EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
+    VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
     # CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_03.h5")
     # CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
     # CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_04.h5")
-    CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_05.h5")
+    # CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_05.h5")
+    CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_06.h5")
+
+    # files required for person detection
+    config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
+    model_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.caffemodel")
+
+    # load our serialized person detection model from disk
+    print("[INFO] loading model...")
+    net = cv2.dnn.readNetFromCaffe(config_file, model_file)

     np.set_printoptions(suppress=True)
```
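`activity_frame_groupings` previously worked from pre-extracted frame images and had no detector of its own; it now loads the same MobileNet SSD person detector as the rest of the module. For reference, the standard forward-pass recipe for this prototxt/caffemodel pair looks like the following (the blob scale and mean are the values commonly used with MobileNet SSD; the frame is synthetic):

```python
import cv2
import numpy as np

config_file = "MobileNetSSD_deploy.prototxt.txt"  # assumed present on disk
model_file = "MobileNetSSD_deploy.caffemodel"

net = cv2.dnn.readNetFromCaffe(config_file, model_file)

image = np.zeros((480, 640, 3), dtype=np.uint8)  # synthetic frame
blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 0.007843, (300, 300), 127.5)
net.setInput(blob)
detections = net.forward()
# detections has shape (1, 1, N, 7); each row is
# [_, class_id, confidence, startX, startY, endX, endY] in normalized coordinates
```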
```diff
@@ -443,11 +497,16 @@ def activity_frame_groupings(video_name, frame_landmarks, frame_group_dict):
     data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
     size = (224, 224)

-    # initializing the count variables
+    # class labels
+    # class_labels = ['Phone checking', 'Listening', 'Note taking']
+    class_labels = ['Phone checki...', 'Listening', 'Note taking']
+
+    # iteration
+    video = cv2.VideoCapture(VIDEO_DIR)
+    no_of_frames = video.get(cv2.CAP_PROP_FRAME_COUNT)
+
     frame_count = 0

-    # class labels
-    class_labels = ['Phone checking', 'Listening', 'Note taking']
-
     # get the frame differences for each frame group
     frame_group_diff = {}
```
```diff
@@ -463,9 +522,8 @@ def activity_frame_groupings(video_name, frame_landmarks, frame_group_dict):
         frame_group_diff[key] = diff if diff > 0 else 1

     # looping through the frames
-    for frame in os.listdir(EXTRACTED_DIR):
-        # getting the frame folder
-        FRAME_FOLDER = os.path.join(EXTRACTED_DIR, frame)
+    # for frame in os.listdir(EXTRACTED_DIR):
+    while (frame_count < no_of_frames):

         # initializing the variables
         phone_count = 0
```
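Decoding frames straight from the video replaces the old walk over extracted frame folders. Two caveats with this loop shape: `CAP_PROP_FRAME_COUNT` comes back as a float (and is only an estimate for some containers), and `video.read()` can return `ret == False` before the counter is exhausted, so a defensive version checks both. A minimal sketch (the path is hypothetical):

```python
import cv2

video = cv2.VideoCapture("videos/lecture.mp4")  # hypothetical input
no_of_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))  # float -> int

frame_count = 0
while frame_count < no_of_frames:
    ret, image = video.read()
    if not ret:  # decoder ran dry earlier than the metadata promised
        break
    # ... per-frame processing ...
    frame_count += 1

video.release()
```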
```diff
@@ -473,57 +531,55 @@ def activity_frame_groupings(video_name, frame_landmarks, frame_group_dict):
         listen_count = 0
         detection_count = 0

-        # looping through the detections in each frame
-        for detections in os.listdir(FRAME_FOLDER):
-
-            # checking whether the image contains only one person
-            if "frame" not in detections:
-
-                # get the label for this image
-                IMAGE_PATH = os.path.join(FRAME_FOLDER, detections)
-                image = cv2.imread(IMAGE_PATH)
-
-                image = cv2.resize(image, size)
-
-                image_array = np.asarray(image)
-                normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
-
-                # Load the image into the array
-                data[0] = normalized_image_array
-
-                # run the inference
-                prediction = model.predict(data)
-
-                # get the predicted label
-                label = class_labels[prediction.argmax()]
-
-                # increment the count based on the label
-                if label == class_labels[0]:
-                    phone_count += 1
-                elif label == class_labels[1]:
-                    listen_count += 1
-                elif label == class_labels[2]:
-                    note_count += 1
-
-                # increment the detection count
-                detection_count += 1
+        ret, image = video.read()
+
+        detections, persons = person_detection(image, net)
+
+        # looping through the detections in each frame
+        for person in persons:
+
+            image = cv2.resize(person, size)
+
+            image_array = np.asarray(image)
+            normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
+
+            # Load the image into the array
+            data[0] = normalized_image_array
+
+            # run the inference
+            prediction = model.predict(data)
+
+            # get the predicted label
+            label = class_labels[prediction.argmax()]
+
+            # increment the count based on the label
+            if label == class_labels[0]:
+                phone_count += 1
+            elif label == class_labels[1]:
+                listen_count += 1
+            elif label == class_labels[2]:
+                note_count += 1
+
+            # increment the detection count
+            detection_count += 1

         # finding the time landmark that the current frame is in
         for i in frame_landmarks:
             index = frame_landmarks.index(i)
             j = index + 1

             # checking whether the next index is within the range
             if j < len(frame_landmarks):
                 next_value = frame_landmarks[j]

                 # checking the correct time landmark range
                 if (frame_count >= i) & (frame_count < next_value):
                     frame_name = "{}-{}".format(i, next_value)
                     frame_group_dict[frame_name]['phone_count'] += phone_count
                     frame_group_dict[frame_name]['listen_count'] += listen_count
                     frame_group_dict[frame_name]['note_count'] += note_count
                     frame_group_dict[frame_name]['detection_count'] += detection_count

         # increment the frame count
         frame_count += 1
```
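The per-frame counts are folded into time-landmark buckets keyed `"{start}-{end}"` by scanning the landmark list for the interval containing `frame_count`. (The committed comparison uses the bitwise `&`; that happens to work because both operands are booleans, though `and` is the idiomatic spelling.) The bucketing rule in isolation, with made-up landmarks:

```python
frame_landmarks = [0, 50, 100, 150]  # hypothetical landmark frame numbers
frame_group_dict = {
    "0-50": {'phone_count': 0},
    "50-100": {'phone_count': 0},
    "100-150": {'phone_count': 0},
}

frame_count = 72  # the frame being attributed
for i in frame_landmarks:
    j = frame_landmarks.index(i) + 1
    if j < len(frame_landmarks):
        next_value = frame_landmarks[j]
        if (frame_count >= i) & (frame_count < next_value):
            frame_name = "{}-{}".format(i, next_value)
            frame_group_dict[frame_name]['phone_count'] += 1

print([k for k, v in frame_group_dict.items() if v['phone_count']])  # ['50-100']
```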
FirstApp/templates/FirstApp/video_results.html

```diff
@@ -40,7 +40,7 @@
     $(document).ready(function() {
         let folder = '';

-        $('#activity_loader').attr('hidden', false);
+        {#$('#activity_loader').attr('hidden', false);#}
         {#$('#emotion_loader').attr('hidden', false);#}
         {#$('#gaze_loader').attr('hidden', false);#}
```
```diff
@@ -298,7 +298,7 @@
         }

         //this is a test function (delete later)
-        let interval = setInterval(() => {
+        /*let interval = setInterval(() => {
             {#let url = 'http://127.0.0.1:8000/get-random_number';#}
```
```diff
@@ -355,6 +355,7 @@
         }
+        */

     });
```
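Two different comment mechanisms are in play here. The loader line is disabled with `{# ... #}`, a Django template comment, so it is stripped server-side and never reaches the browser; the test interval is instead wrapped in a JavaScript `/* ... */` block, which still ships with the page and is merely ignored by the JS engine. A minimal illustration (assuming the stock Django template engine renders this file):

```
{# stripped server-side: never appears in the rendered page source #}
<script>
    /* shipped to the browser, but ignored by the JavaScript engine */
</script>
```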