Sachith Fernando / 2020-101 · Commits · 74fc015b

Commit 74fc015b, authored Jan 10, 2021 by LiniEisha

Merge branch 'QA_RELEASE' into IT17100908_Lecture_Summarizing

Parents: 21895570, f4b30584

Showing 12 changed files with 172 additions and 45 deletions (+172, -45)
FirstApp/MongoModels.py                            +4    -0
FirstApp/api.py                                    +62   -20
FirstApp/automation_process.py                     +19   -0
FirstApp/emotion_detector.py                       +5    -5
FirstApp/logic/activity_recognition.py             +3    -3
FirstApp/logic/batch_process.py                    +2    -0
FirstApp/logic/head_gaze_estimation.py             +5    -5
FirstApp/logic/scheduler_tasks.py                  +25   -0
FirstApp/templates/FirstApp/video_results.html     +16   -6
FirstApp/views.py                                  +29   -4
integrated_slpes/settings.py                       +1    -1
integrated_slpes/urls.py                           +1    -1
FirstApp/MongoModels.py (+4, -0)

@@ -79,6 +79,10 @@ class DailyTimeTable(models.Model):
     subject = models.ForeignKey(Subject, on_delete=models.CASCADE)
     lecturer = models.ForeignKey(Lecturer, on_delete=models.CASCADE)
     location = models.CharField(max_length=10)

+    # new temp fields
+    lecturer_name = models.CharField(max_length=20, default='')
+    subject_name = models.CharField(max_length=20, default='')
+    no_of_students = models.IntegerField(default=4)

     def __str__(self):
         return self.location
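The three new columns are denormalised copies of data already reachable through the subject and lecturer foreign keys, so existing rows simply pick up the declared defaults. A minimal sketch of how a timetable slot might be created with them, assuming DailyTimeTable behaves like a regular Django model with a default manager; the literal values below are illustrative, not taken from the project:

# sketch only: assumes FirstApp.MongoModels exposes these models as in the diff above
from FirstApp.MongoModels import DailyTimeTable, Subject, Lecturer

def create_slot(subject: Subject, lecturer: Lecturer) -> DailyTimeTable:
    return DailyTimeTable.objects.create(
        subject=subject,
        lecturer=lecturer,
        location="G1204",           # CharField(max_length=10)
        lecturer_name="J. Perera",  # new temp field, max_length=20
        subject_name="SE Project",  # new temp field, max_length=20
        no_of_students=4,           # matches the model default
    )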
FirstApp/api.py (+62, -20)

@@ -11,11 +11,16 @@ each method will return an HttpResponse that allows its data to be rendered into
 arbitrary media types.
 """
 import json
 from random import Random

 from apscheduler.jobstores.mongodb import MongoDBJobStore

 from MonitorLecturerApp.models import LectureRecordedVideo, LecturerVideoMetaData
 from MonitorLecturerApp.serializers import LectureRecordedVideoSerializer, LecturerVideoMetaDataSerializer

 from rest_framework.views import *
 from integrated_slpes.wsgi import application

 from .logic import activity_recognition as ar
 from . import emotion_detector as ed, automation_process as ap
 from .logic import id_generator as ig

@@ -23,10 +28,15 @@ from .logic import pdf_file_generator as pdf
 from .logic import head_gaze_estimation as hge
 from .logic import video_extraction as ve
 from .logic import student_behavior_process as sbp
 from .logic.scheduler_tasks import task_scheduler
 from .serializers import *

 from braces.views import CsrfExemptMixin
 from django.core.handlers.wsgi import WSGIRequest
 from django.http.request import HttpRequest

 import datetime
 import os


 class LectureViewSet(APIView):

@@ -1539,19 +1549,36 @@ class CheckStudentBehaviorAvailability(APIView):

     def get(self, request):
         video_name = request.query_params.get('video_name')

         #
         # isActivityExist = LectureActivityFrameGroupings.objects.filter(
         #     lecture_activity_id__lecture_video_id__video_name=video_name).exists()
         #
         # isEmotionExist = LectureEmotionFrameGroupings.objects.filter(
         #     lecture_emotion_id__lecture_video_id__video_name=video_name).exists()
         #
         # isGazeExist = LectureGazeFrameGroupings.objects.filter(
         #     lecture_gaze_id__lecture_video_id__video_name=video_name).exists()

         print('video name: ', video_name)

         # retrieve the 'MongoDbJobStore' instance
         jobs = MongoDBJobStore().get_all_jobs()
         print('jobs: ', jobs)

         # initialize the variables
         isActivityExist = False
         isEmotionExist = False
         isGazeExist = False

         # if there are scheduled jobs
         if len(jobs) > 0:
             # retrieve the activity frame groupings
             isActivityExist = LectureActivityFrameGroupings.objects.filter(
                 lecture_activity_id__lecture_video_id__video_name=video_name).exists()
             # retrieve the emotion frame groupings
             isEmotionExist = LectureEmotionFrameGroupings.objects.filter(
                 lecture_emotion_id__lecture_video_id__video_name=video_name).exists()
             # retrieve the gaze frame groupings
             isGazeExist = LectureGazeFrameGroupings.objects.filter(
                 lecture_gaze_id__lecture_video_id__video_name=video_name).exists()

             isActivityExist = bool(Random().randint(0, 2))
             isEmotionExist = bool(Random().randint(0, 2))
             isGazeExist = bool(Random().randint(0, 2))
         else:
             isActivityExist = True
             isEmotionExist = True
             isGazeExist = True

         # isActivityExist = bool(Random().randint(0, 2))
         # isEmotionExist = bool(Random().randint(0, 2))
         # isGazeExist = bool(Random().randint(0, 2))

         return Response({
             "isActivityExist": isActivityExist,

@@ -1657,13 +1684,28 @@ class AutomationProcess(APIView):
         })

     def post(self, request):
-        lecturer = request.data['lecturer']
-        subject = request.data['subject']
-        subject_code = request.data['subject_code']
-        video_length = request.data['video_length']
-
-        processed = ap.automation_process(lecturer=lecturer, subject=subject, subject_code=subject_code, video_length=video_length)
-        processed = False
-
-        return Response({
-            "is_processed": processed
-        })
\ No newline at end of file
+        try:
+            lecturer = request.data['lecturer']
+            subject = request.data['subject']
+            subject_code = request.data['subject_code']
+            video_length = request.data['video_length']
+
+            # processed = ap.automation_process(lecturer=lecturer, subject=subject, subject_code=subject_code, video_length=video_length)
+            # run the scheduler
+            scheduler = task_scheduler(lecturer=lecturer, subject=subject, subject_code=subject_code, video_length=video_length)
+
+            processed = True
+
+            return Response({
+                "is_processed": processed,
+            })
+        except Exception as exc:
+            print('Exception: ', exc)
+            return Response({
+                "is_processed": processed,
+            })
\ No newline at end of file
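The reworked CheckStudentBehaviorAvailability.get answers the template's polling question (have the activity, emotion and gaze frame groupings for this video been produced yet?), while AutomationProcess.post now hands the request data to task_scheduler inside a try/except; note that processed is only assigned inside the try block, so an early failure (for example a missing key in request.data) would make the except branch raise a NameError instead of returning is_processed as False. A rough client-side sketch of exercising these endpoints with requests, assuming the local development server used elsewhere in this diff; the video name and the automation URL path are guesses, not values confirmed by the commit:

import requests

BASE = "http://127.0.0.1:8000"

# poll the availability endpoint that video_results.html calls every few seconds
availability = requests.get(
    BASE + "/check-availability/",
    params={"video_name": "2021-01-10_IT3030_video.mp4"},  # hypothetical name
).json()
print(availability.get("isActivityExist"))

# trigger the scheduled automation run (the URL path here is assumed)
resp = requests.post(
    BASE + "/automation-process/",
    data={
        "lecturer": 1,
        "subject": 2,
        "subject_code": "IT3030",
        "video_length": "00:20:00",
    },
)
print(resp.json().get("is_processed"))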
FirstApp/automation_process.py (+19, -0)

 import requests
 import json

 from .MongoModels import LectureVideo
 from .logic import batch_process as bp
 from MonitorLecturerApp.logic import lecturer_batch_process as lbp
 import datetime

 # this method will save the lecture video
 #

@@ -43,6 +46,11 @@ import datetime
 #     return response[0]

 # this method will handle the batch processing and video/audio saving pf the system
 from .logic.batch_process import student_behavior_batch_process
 from .serializers import LectureVideoSerializer

 # @background(schedule=5)
 def automation_process(lecturer, subject, subject_code, video_length="00:20:00"):

     current_date = datetime.datetime.now().date()

@@ -55,6 +63,7 @@ def automation_process(lecturer, subject, subject_code, video_length="00:20:00")
     lecturer_video_name = str(current_date) + "_{}_lecturer_video.mp4".format(subject_code)
     lecturer_audio_name = str(current_date) + "_{}_lecturer_audio.wav".format(subject_code)

     # this variable will be passed in the individual batch process
     student_video_id = 0

@@ -80,9 +89,11 @@ def automation_process(lecturer, subject, subject_code, video_length="00:20:00")
     # create the lecturer audio
     lecturer_audio_content = {}

     # save the student video
     student_video_response = bp.save_student_lecture_video(student_video_content)
     # student_video_response = save_student_lecture_video(student_video_content)
     print('student video response: ', student_video_response)

     student_video_id = student_video_response['id']

     # save the lecturer video

@@ -93,6 +104,13 @@ def automation_process(lecturer, subject, subject_code, video_length="00:20:00")
     # save the lecturer audio

     for i in range(100):
         print('outer loop: ', i)
         for j in range(10000):
             print('inner loop: ', j)

     # start the batch processing for lecture summarization component
     # lecture_summary_batch_process = lecture_summarization_batch_process(audio_name)
     lecture_summary_batch_process = True

@@ -117,6 +135,7 @@ def automation_process(lecturer, subject, subject_code, video_length="00:20:00")
     # return the status
     return is_all_processed

 # test the above method using 'main' method
 # if __name__ == '__main__':
 #
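The nested print loops added before the summarization step amount to a CPU-bound delay of roughly a million print calls. If the intent is only to pause between steps, a sketch of the conventional alternative, with a placeholder duration that is not taken from the commit:

import time

def wait_between_steps(seconds: float = 5.0) -> None:
    # sleep instead of spinning in nested print loops; the duration is illustrative
    time.sleep(seconds)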
FirstApp/emotion_detector.py (+5, -5)

@@ -523,11 +523,11 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
         group_detection_count = frame_group_details['detection_count']

         # calculate the frame group emotion percentages
-        frame_group_happy_perct = float(frame_group_happy_count / group_detection_count) * 100
-        frame_group_sad_perct = float(frame_group_sad_count / group_detection_count) * 100
-        frame_group_angry_perct = float(frame_group_angry_count / group_detection_count) * 100
-        frame_group_surprise_perct = float(frame_group_surprise_count / group_detection_count) * 100
-        frame_group_neutral_perct = float(frame_group_neutral_count / group_detection_count) * 100
+        frame_group_happy_perct = float(frame_group_happy_count / group_detection_count) * 100 if group_detection_count > 0 else 0
+        frame_group_sad_perct = float(frame_group_sad_count / group_detection_count) * 100 if group_detection_count > 0 else 0
+        frame_group_angry_perct = float(frame_group_angry_count / group_detection_count) * 100 if group_detection_count > 0 else 0
+        frame_group_surprise_perct = float(frame_group_surprise_count / group_detection_count) * 100 if group_detection_count > 0 else 0
+        frame_group_neutral_perct = float(frame_group_neutral_count / group_detection_count) * 100 if group_detection_count > 0 else 0

         # assign the values to the same dictionary
         frame_group_dict[key]['happy_perct'] = round(frame_group_happy_perct, 1)
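The same guard, appending "if group_detection_count > 0 else 0" to each percentage expression, is applied here and in activity_recognition.py and head_gaze_estimation.py below. A small helper expressing the pattern once, as a sketch; the function name is ours, not part of the commit:

def safe_percentage(count, total):
    """Return count / total as a percentage, or 0 when total is zero."""
    return float(count / total) * 100 if total > 0 else 0

# e.g. frame_group_happy_perct = safe_percentage(frame_group_happy_count, group_detection_count)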
FirstApp/logic/activity_recognition.py (+3, -3)

@@ -615,9 +615,9 @@ def activity_frame_groupings(video_name, frame_landmarks, frame_group_dict):
         group_detection_count = frame_group_details['detection_count']

-        frame_group_phone_perct = float(frame_group_phone_count / group_detection_count) * 100
-        frame_group_listen_perct = float(frame_group_listen_count / group_detection_count) * 100
-        frame_group_note_perct = float(frame_group_note_count / group_detection_count) * 100
+        frame_group_phone_perct = float(frame_group_phone_count / group_detection_count) * 100 if group_detection_count > 0 else 0
+        frame_group_listen_perct = float(frame_group_listen_count / group_detection_count) * 100 if group_detection_count > 0 else 0
+        frame_group_note_perct = float(frame_group_note_count / group_detection_count) * 100 if group_detection_count > 0 else 0

         # assign the values to the same dictionary
         frame_group_dict[key]['phone_perct'] = round(frame_group_phone_perct, 1)
FirstApp/logic/batch_process.py (+2, -0)

 import requests
 import json

 from background_task import background


 def student_behavior_batch_process(video_id, video_name):

@@ -30,6 +31,7 @@ def student_behavior_batch_process(video_id, video_name):

 # this method will save the student lecture video
 # @background(schedule=5)
 def save_student_lecture_video(student_video):

     data_dumps = json.dumps(student_video)
FirstApp/logic/head_gaze_estimation.py (+5, -5)

@@ -854,11 +854,11 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic
-        frame_group_upright_perct = float(frame_group_upright_count / group_detection_count) * 100
-        frame_group_upleft_perct = float(frame_group_upleft_count / group_detection_count) * 100
-        frame_group_downright_perct = float(frame_group_downright_count / group_detection_count) * 100
-        frame_group_downleft_perct = float(frame_group_downleft_count / group_detection_count) * 100
-        frame_group_front_perct = float(frame_group_front_count / group_detection_count) * 100
+        frame_group_upright_perct = float(frame_group_upright_count / group_detection_count) * 100 if group_detection_count > 0 else 0
+        frame_group_upleft_perct = float(frame_group_upleft_count / group_detection_count) * 100 if group_detection_count > 0 else 0
+        frame_group_downright_perct = float(frame_group_downright_count / group_detection_count) * 100 if group_detection_count > 0 else 0
+        frame_group_downleft_perct = float(frame_group_downleft_count / group_detection_count) * 100 if group_detection_count > 0 else 0
+        frame_group_front_perct = float(frame_group_front_count / group_detection_count) * 100 if group_detection_count > 0 else 0

         # assign the values to the same dictionary
         frame_group_dict[key]['upright_perct'] = round(frame_group_upright_perct, 1)
FirstApp/logic/scheduler_tasks.py (new file, +25, -0)

+from apscheduler.schedulers.background import BackgroundScheduler
+from apscheduler.jobstores.mongodb import MongoDBJobStore
+
+import datetime as d
+from datetime import datetime
+
+# this method will schedule the automation process task
+from FirstApp.automation_process import automation_process
+
+
+def task_scheduler(lecturer, subject, subject_code, video_length):
+
+    jobstores = {
+        'mongo': MongoDBJobStore(),
+    }
+
+    sched = BackgroundScheduler(jobstores=jobstores)
+
+    after_20s = datetime.now() + d.timedelta(seconds=30)
+
+    sched.add_job(automation_process, args=[lecturer, subject, subject_code, video_length], trigger='date', run_date=after_20s, id='Automation_1')
+
+    sched.start()
+
+    job = sched.get_job(job_id='Automation_1')
+
+    MongoDBJobStore().add_job(job=job)
+
+    return sched
\ No newline at end of file
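task_scheduler registers the MongoDBJobStore under the alias 'mongo' but calls add_job without jobstore='mongo', so the job appears to land in the scheduler's default in-memory store, and the trailing MongoDBJobStore().add_job(job=job) then copies it into MongoDB by hand (the variable is also named after_20s although the delay is 30 seconds). A minimal APScheduler sketch of the run-once-after-a-delay pattern with the Mongo store as the default, assuming a local MongoDB instance and pymongo are available; the task function and job id below are placeholders:

from datetime import datetime, timedelta

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.jobstores.mongodb import MongoDBJobStore


def demo_task(name):
    # placeholder standing in for automation_process
    print('running automation for', name)


def schedule_once(delay_seconds=30):
    # make the Mongo store the *default* store so add_job persists jobs there
    scheduler = BackgroundScheduler(jobstores={'default': MongoDBJobStore()})
    scheduler.start()
    scheduler.add_job(
        demo_task,
        trigger='date',
        run_date=datetime.now() + timedelta(seconds=delay_seconds),
        args=['IT3030'],
        id='Automation_1',
        replace_existing=True,  # avoid ConflictingIdError if the id already exists
    )
    return scheduler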
FirstApp/templates/FirstApp/video_results.html (+16, -6)

@@ -224,6 +224,12 @@
             let video_id = $(this).attr("data-video-id");
             let video_name = $(this).attr("data-video-name");

+            //display the 'processing' message
+            $('#processing').attr('hidden', false);
+
+            //hide the button
+            $(this).hide();

             //display the activity loader
             $('#activity_loader').attr("hidden", false);

@@ -278,7 +284,7 @@
             //sending the get request to process the lecture gaze estimations
             fetch('http://127.0.0.1:8000/process-lecture-gaze-estimation/?lecture_video_name=' + global_video_name + '&lecture_video_id=' + global_lecture_video_id)
                 .then((res) => res.json())
-                .then((out) => handleGazeResponse(out.response, e))
+                .then((out) => handleGazeResponse(out.response))
                 .catch((error) => alert('error: ' + error));
         }
     }

@@ -297,20 +303,22 @@
         }
     }

     //this is a test function (delete later)
-    /*
-    let interval = setInterval(() => {
+
+    //get the due lecture video name
+    var due_lecture_video_name = "{{ due_lecture_video_name }}";
+
+    let interval = setInterval(() => {
         {#let url = 'http://127.0.0.1:8000/get-random_number';#}
-        let url = 'http://127.0.0.1:8000/check-availability';
+        let url = 'http://127.0.0.1:8000/check-availability/?video_name=' + due_lecture_video_name;

         fetch(url)
             .then((res) => res.json())
             .then((out) => displayProcess(out))
             .catch((err) => alert('error: ' + err))
-    }, 10000);
+    }, 5000);

     //this function will handle the displaying loaders and status in the workflow

@@ -356,6 +364,7 @@
         }
-    */

@@ -440,6 +449,7 @@
                 <td class="font-weight-bolder">{{ lecture.start_time }}</td>
                 <td class="font-weight-bolder">{{ lecture.end_time }}</td>
                 <td>
+                    <span class="font-italic text-success" id="processing" hidden>Processing</span>
                     <button type="button" class="btn btn-success batch_process" data-video-id="{{ lecture.video_id }}" data-video-name="{{ lecture.video_name }}"
FirstApp/views.py (+29, -4)

@@ -29,11 +29,18 @@ from django.contrib.auth import (
     logout,
 )
 from django.contrib.auth.decorators import login_required

 from .serializers import *
 from .forms import *
 import os
 import datetime as d
 from datetime import datetime

 from apscheduler.schedulers.background import BackgroundScheduler
 from apscheduler.triggers.date import DateTrigger
 from apscheduler.jobstores.mongodb import MongoDBJobStore

 # Create your views here.

@@ -53,6 +60,11 @@ def hello(request):
         print('user_type: ', user_type)
         print('request type: ', type(request))

         # test the scheduler
         # test_scheduler()

         # retrieve the lecturer's timetable slots
         lecturer_timetable = FacultyTimetable.objects.filter()

@@ -122,6 +134,7 @@ def hello(request):
             return redirect('/401')

     except Exception as exc:
         print('exception: ', exc)
         return redirect('/500')


 # this method will handle 404 error page

@@ -232,8 +245,9 @@ def video_result(request):
             for item in to_do_lecture_list:
                 isDate = item['date'] == str(day_timetable['date'])
-                print('item date: ', item['date'])
-                print('timetable date: ', str(day_timetable['date']))
+                # print('item date: ', item['date'])
+                # print('timetable date: ', str(day_timetable['date']))

                 # isLecturer = item['lecturer'] ==
                 # check for the particular lecture on the day
                 if isDate:

@@ -246,6 +260,12 @@ def video_result(request):
                     isLecturer = item['lecturer'] == slot['lecturer']['id']
                     isSubject = item['subject'] == slot['subject']['id']
+
+                    print('item lecturer: ', item['lecturer'])
+                    print('timetable lecturer: ', slot['lecturer']['id'])
+                    print('item subject: ', item['subject'])
+                    print('timetable subject: ', slot['subject']['id'])

                     if isLecturer & isSubject:
                         obj = {}
                         obj['date'] = item['date']

@@ -265,13 +285,17 @@ def video_result(request):
     # handling the general exceptions
     except Exception as exc:
-        print('what is wrong? : ', exc)
+        print('Exception: ', exc)
         return redirect('/500')

     print('due lectures: ', due_lecture_list)

+    due_lecture_video_name = due_lecture_list[0]['video_name'] if len(due_lecture_list) > 0 else "Test.mp4"
+    # due_lecture_video_name = "Test.mp4"
+    print('due lecture video name: ', due_lecture_video_name)

     return render(request, "FirstApp/video_results.html",
-                  {"lecturer": lecturer, "due_lectures": due_lecture_list})
+                  {"lecturer": lecturer, "due_lectures": due_lecture_list, "due_lecture_video_name": due_lecture_video_name})


 # view for emotion page

@@ -377,6 +401,7 @@ def activity(request):
     # handling the general exception
     except Exception as exc:
+        print('exception: ', exc)
         return redirect('/500')

     return render(request, "FirstApp/activity.html",
                   {"lecturer_subjects": lecturer_subjects, "subjects": subject_list, "lecturer": lecturer})
integrated_slpes/settings.py (+1, -1)

@@ -43,7 +43,7 @@ INSTALLED_APPS = [
     'django.contrib.staticfiles',
     'bootstrap4',
     'rest_framework',
-    'os'
+    'os',
 ]

 MIDDLEWARE = [
integrated_slpes/urls.py (+1, -1)

@@ -25,5 +25,5 @@ urlpatterns = [
     path('lecturer/', include('MonitorLecturerApp.urls')),
     # path('lecturer/', include('MonitorLecturerApp.urls')),
     path('summary/', include('LectureSummarizingApp.urls')),
-    path('record/', include('LectureSummarizingApp.urls'))
+    path('record/', include('LectureSummarizingApp.urls')),
 ]