Commit a577e3e7 authored by SohanDanushka

Merge branch 'QA_RELEASE' into db_and_monitoring_IT17097284

parents 71039e2f ea733587
......@@ -10,6 +10,7 @@ from rest_framework.views import APIView
from rest_framework.parsers import MultiPartParser, FormParser
from . import record
from . import test as t
from rest_framework.views import *
......@@ -171,3 +172,29 @@ class InitiateLecture(APIView):
return Response({
"response": "success"
})
class stopRecording(APIView):
def get(self, request):
t.isStop = 1
return Response({
"response": "stopped"
})
def post(self, request):
pass
# test method (delete later)
class TestAPI(APIView):
def get(self, request):
t.isStop = 0
param = request.query_params.get('param')
# t.test()
t.IPWebcamTest()
return Response({
"response": "started"
})
def post(self, request):
pass
\ No newline at end of file
......@@ -23,15 +23,12 @@ maskNet = load_model(os.path.join(settings.BASE_DIR,'face_detector/mask_detector
class IPWebCam(object):
def __init__(self):
self.url = "http://192.168.8.100:8080/shot.jpg"
self._count = 0
self.url = "http://192.168.8.103:8080/shot.jpg"
def __del__(self):
cv2.destroyAllWindows()
def get_frame(self):
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))
imgResp = urllib.request.urlopen(self.url)
imgNp = np.array(bytearray(imgResp.read()),dtype=np.uint8)
img= cv2.imdecode(imgNp,-1)
......@@ -46,9 +43,6 @@ class IPWebCam(object):
frame_flip = cv2.flip(resize,1)
ret, jpeg = cv2.imencode('.jpg', frame_flip)
# capture a frame and save it at a fixed interval so face recognition can run on it
sleep(3)
cv2.imwrite("%d.jpg" % self._count, img)
self._count += 1
return jpeg.tobytes()
......
......@@ -43,6 +43,50 @@ function toggleLectureLive() {
y.style.display = "none";
}
}
var timer = false;
//this is a test function
function testAPI() {
timer = true;
startTimer();
let param = 'sachith';
//call the API
fetch('http://127.0.0.1:8000/attendance/test-api/?param=' + param)
.then((res) => res.json())
.then((out) => {})
.catch((err) => alert('error: ' + err));
}
var time = 'time';
function stopAPI() {
let param = 'sachith';
//call the API
fetch('http://127.0.0.1:8000/attendance/stop-api/?param=' + param)
.then((res) => res.json())
.then((out) => {
timer = false;
startTimer();
})
.catch((err) => alert('error: ' + err));
}
var intervalId = null;
var sec = 0;
function startTimer() {
function pad(val) { return val > 9 ? val : "0" + val; }
if (timer) {
sec = 0;
intervalId = setInterval(function () {
// increment once per tick; the old code bumped `sec` twice per second
++sec;
document.getElementById("seconds").innerHTML = pad(sec % 60);
document.getElementById("minutes").innerHTML = pad(parseInt(sec / 60, 10));
}, 1000);
} else {
// stop the running interval and display the elapsed time at the stop moment
if (intervalId) clearInterval(intervalId);
document.getElementById("secondsStop").innerHTML = pad(sec % 60);
document.getElementById("minutesStop").innerHTML = pad(parseInt(sec / 60, 10));
}
}
</script>
{% endblock %}
......@@ -60,13 +104,21 @@ function toggleLectureLive() {
<div class="card-body">
<button type="button" class="btn btn-success" id="initiate_btn" onclick="toggleLectureLive()">Show Live Stream</button>
{# <button type="button" class="btn btn-success" id="test_btn" onclick="testAPI()">Test</button>#}
</div>
<span id="minutes"></span>:<span id="seconds"></span>
<span id="minutesStop"></span>:<span id="secondsStop"></span>
<div style="vertical-align: middle; border-style: none; background-color: #055270; height: 500px; width: 100%">
<div class="row justify-content-center">
<img id="liveStreamLecture" style="display: none; height: inherit; margin-bottom: -25px;" src="{% url 'webcam_feed' %}">
</div>
<div class="row justify-content-center">
<button style="display: none; width: 70px; height: 70px;" id="liveStreamLectureStartButton" class="btn btn-warning btn-circle"><i class="fas fa-video"></i></button>
<div class="col">
<button style="display: none; width: 70px; height: 70px;" id="liveStreamLectureStartButton" class="btn btn-warning btn-circle" onclick="testAPI()"><i class="fas fa-video"></i></button>
</div>
<div class="col">
<button style="display: block; width: 70px; height: 70px;" id="liveStreamLectureStartButton" class="btn btn-warning btn-circle" onclick="f()"><i class="fas fa-square"></i></button>
</div>
</div>
</div>
</div>
......
import urllib3
import urllib.request as req
import cv2
import numpy as np
import time
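# module-level stop flag: the stopRecording API sets this to 1 to break the
# capture loop below (a plain int works for the single-process dev server;
# a threading.Event would be the more robust choice)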
isStop = 0
def IPWebcamTest():
# Replace the URL with your own IPwebcam shot.jpg IP:port
# url = 'http://192.168.2.35:8080/shot.jpg'
url = 'http://192.168.8.103:8080/shot.jpg'
# url = 'http://192.168.1.11:8080/startvideo?force=1&tag=rec'
# url = 'http://192.168.1.11:8080/stopvideo?force=1'
size = (600, 600)
vid_cod = cv2.VideoWriter_fourcc(*'XVID')
# vid_cod = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
# output = cv2.VideoWriter("cam_video.avi", vid_cod, 20.0, (640, 480))
# output = cv2.VideoWriter("cam_video.mp4", vid_cod, 20.0, size)
output = cv2.VideoWriter("cam_video.mp4", vid_cod, 10.0, size)
no_of_frames = 0
while True:
# Use urllib to get the image from the IP camera
imgResp = req.urlopen(url)
# imgResp = urllib3.respon
# use NumPy to convert the bytes into an array
imgNp = np.array(bytearray(imgResp.read()), dtype=np.uint8)
# finally, decode the array to an OpenCV-usable format
img = cv2.imdecode(imgNp, -1)
# resize the image
img = cv2.resize(img, (600, 600))
# put the image on screen
# cv2.imshow('IPWebcam', img)
# write to the output writer
output.write(img)
# to reduce the load on the processor
# time.sleep(0.1)
# time.sleep(1)
no_of_frames += 1
if isStop == 1:
break
# release the writer so the recorded file is finalized
output.release()
cv2.destroyAllWindows()
print('no of frames: ', no_of_frames)
\ No newline at end of file
......@@ -2,7 +2,7 @@ from django.urls import path
from .api import student_list, student_detail, subject_list, subject_detail, attendance_list, StudentAPIView, \
StudentDetails
from django.conf.urls import url
from .api import FileView, InitiateLecture
from .api import *
from . import views
urlpatterns = [
......@@ -19,5 +19,10 @@ urlpatterns = [
url(r'^upload/$', FileView.as_view(), name='file-upload'),
path('webcam_feed', views.webcam_feed, name='webcam_feed'),
# this url will initiate the lecture
url(r'^process-initiate-lecture/$', InitiateLecture.as_view())
url(r'^process-initiate-lecture/$', InitiateLecture.as_view()),
# this url will be used for testing
url(r'^test-api/$', TestAPI.as_view()),
url(r'^stop-api/$', stopRecording.as_view())
]
from django.shortcuts import render
from django.http.response import StreamingHttpResponse
from AttendanceApp.camera import IPWebCam
from FirstApp.MongoModels import LectureVideo
from FirstApp.serializers import LectureVideoSerializer
def initiate_lecture(request):
lecture_video = LectureVideo.objects.all()
lecture_video_ser = LectureVideoSerializer(lecture_video, many=True)
print('lecture video data: ', lecture_video_ser.data)
return render(request, "AttendanceApp/Initiate_lecture.html")
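# generator for the MJPEG stream: each frame returned by get_frame() becomes one
# part of a multipart/x-mixed-replace response, so the browser keeps swapping in
# the newest JPEG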
def gen(camera):
while True:
frame = camera.get_frame()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
def webcam_feed(request):
return StreamingHttpResponse(gen(IPWebCam()),
content_type='multipart/x-mixed-replace; boundary=frame')
......@@ -15,3 +15,9 @@ admin.site.register(LectureActivity)
admin.site.register(LectureGazeEstimation)
admin.site.register(Admin)
admin.site.register(AdminCredentialDetails)
admin.site.register(LectureActivityFrameRecognitions)
admin.site.register(LectureActivityFrameGroupings)
admin.site.register(LectureEmotionFrameRecognitions)
admin.site.register(LectureEmotionFrameGroupings)
admin.site.register(LectureGazeFrameRecognitions)
admin.site.register(LectureGazeFrameGroupings)
\ No newline at end of file
......@@ -11,11 +11,10 @@ each method will return an HttpResponse that allows its data to be rendered into
arbitrary media types.
"""
from random import Random
from MonitorLecturerApp.models import LectureRecordedVideo, LecturerVideoMetaData
from MonitorLecturerApp.serializers import LectureRecordedVideoSerializer, LecturerVideoMetaDataSerializer
from .MongoModels import *
from rest_framework.views import *
from .logic import activity_recognition as ar
from . import emotion_detector as ed
......@@ -23,7 +22,9 @@ from .logic import id_generator as ig
from .logic import pdf_file_generator as pdf
from .logic import head_gaze_estimation as hge
from .logic import video_extraction as ve
from . logic import student_behavior_process as sbp
from .serializers import *
from braces.views import CsrfExemptMixin
import datetime
......@@ -139,13 +140,45 @@ class LectureVideoViewSet(APIView):
return Response(serializer.data)
def post(self, request):
# get the request data
# data = request.data
#
# # retrieve the last lecture video details
# last_lec_video = LectureVideo.objects.order_by('lecture_video_id').last()
# # create the next lecture video id
# new_lecture_video_id = ig.generate_new_id(last_lec_video.lecture_video_id)
#
# # create the new lecture video
# LectureVideo(
# lecture_video_id=new_lecture_video_id,
# lecturer_id=data['lecturer_id'],
# subject_id=data['subject_id'],
# video_name=data['video_name'],
# video_length=data['video_length'],
# date=data['date']
# ).save()
#
# # return the successful response
# return Response({
# "response": "Successfully created",
#
# }, status=status.HTTP_201_CREATED)
# serializer = LectureVideoSerializer(data=request.data, many=True)
serializer = LectureVideoSerializer(data=request.data)
# serializer.create(validated_data=request.data)
if serializer.is_valid(raise_exception=True):
print('valid')
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors,
status=status.HTTP_400_BAD_REQUEST)
# return Response(serializer.error_messages,
# status=status.HTTP_400_BAD_REQUEST)
# this API will retrieve a lecture video details
......@@ -374,17 +407,19 @@ class LectureEmotionProcess(APIView):
def get(self, request):
video_name = request.query_params.get('lecture_video_name')
video_id = request.query_params.get('lecture_video_id')
# video_id = request.query_params.get('lecture_video_id')
int_video_id = int(request.query_params.get('lecture_video_id'))
percentages = ed.detect_emotion(video_name)
percentages.calcPercentages()
self.save_emotion_report(video_id, percentages)
self.save_emotion_report(int_video_id, percentages)
return Response({"response": True})
def post(self, request):
pass
def save_emotion_report(self, lec_video_id, percentages):
lec_video = LectureVideo.objects.filter(lecture_video_id=lec_video_id)
# lec_video = LectureVideo.objects.filter(lecture_video_id=lec_video_id)
lec_video = LectureVideo.objects.filter(id=lec_video_id)
lec_video_serializer = LectureVideoSerializer(lec_video, many=True)
lec_video_data = lec_video_serializer.data[0]
last_lec_emotion = LectureEmotionReport.objects.order_by('lecture_emotion_id').last()
......@@ -499,7 +534,8 @@ class ProcessLectureGazeEstimation(APIView):
pass
def estimate_gaze(self, lec_video_id, percentages):
lec_video = LectureVideo.objects.filter(lecture_video_id=lec_video_id)
# lec_video = LectureVideo.objects.filter(lecture_video_id=lec_video_id)
lec_video = LectureVideo.objects.filter(id=lec_video_id)
last_lec_gaze = LectureGazeEstimation.objects.order_by('lecture_gaze_id').last()
lec_video_serializer = LectureVideoSerializer(lec_video, many=True)
lec_video_data = lec_video_serializer.data[0]
......@@ -757,6 +793,9 @@ class GetLectureActivitySummary(APIView):
def get(self, request):
video_name = request.query_params.get('video_name')
phone_perct = request.query_params.get('phone_perct')
listen_perct = request.query_params.get('listen_perct')
note_perct = request.query_params.get('note_perct')
# checking the existence of lecture activity frame grouping records in the db
isExist = LectureActivityFrameGroupings.objects.filter(lecture_activity_id__lecture_video_id__video_name=video_name).exists()
......@@ -792,10 +831,14 @@ class GetLectureActivitySummary(APIView):
class_labels = ['phone_perct', 'listen_perct', 'note_perct']
# get the comments list
comments = sbp.generate_student_behavior_comments("Activity", phone_perct=phone_perct, listen_perct=listen_perct, note_perct=note_perct)
return Response({
"frame_landmarks": frame_landmarks,
"frame_group_percentages": frame_group_percentages,
"activity_labels": class_labels
"activity_labels": class_labels,
"comments": comments
})
# else:
......@@ -920,65 +963,65 @@ class GetLectureEmotionSummary(APIView):
"emotion_labels": class_labels
})
# else:
#
# frame_landmarks = []
#
# # retrieve frame landmarks from db
# lec_video_frame_landmarks = LectureVideoFrameLandmarks.objects.filter(
# lecture_video_id__video_name=video_name)
# lec_video_frame_landmarks_ser = LectureVideoFrameLandmarksSerializer(lec_video_frame_landmarks, many=True)
# lec_video_frame_landmarks_data = lec_video_frame_landmarks_ser.data[0]
#
# retrieved_frame_landmarks = lec_video_frame_landmarks_data["frame_landmarks"]
#
# # creating a new list to display in the frontend
# for landmark in retrieved_frame_landmarks:
# frame_landmarks.append(int(landmark['landmark']))
#
#
# l, frame_group_dict = ve.getFrameLandmarks(video_name, "Emotion")
# frame_group_percentages, emotion_labels = ed.emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict)
#
#
#
# # save the frame group details into db (temp method)
#
# last_lec_emotion_frame_grouping = LectureEmotionFrameGroupings.objects.order_by('lecture_emotion_frame_groupings_id').last()
# new_lecture_emotion_frame_grouping_id = "LEFG00001" if (last_lec_emotion_frame_grouping is None) else \
# ig.generate_new_id(last_lec_emotion_frame_grouping.lecture_emotion_frame_groupings_id)
#
# # retrieve the lecture emotion id
# lec_emotion = LectureEmotionReport.objects.filter(lecture_video_id__video_name=video_name)
# lec_emotion_ser = LectureEmotionSerializer(lec_emotion, many=True)
# lec_emotion_id = lec_emotion_ser.data[0]['id']
#
# # create the frame group details
# frame_group_details = []
#
# for key in frame_group_percentages.keys():
# # create an object of type 'LectureActivityFrameGroupDetails'
# lec_emotion_frame_group_details = LectureEmotionFrameGroupDetails()
# lec_emotion_frame_group_details.frame_group = key
# lec_emotion_frame_group_details.frame_group_percentages = frame_group_percentages[key]
#
# frame_group_details.append(lec_emotion_frame_group_details)
#
#
# new_lec_emotion_frame_groupings = LectureEmotionFrameGroupings()
# new_lec_emotion_frame_groupings.lecture_emotion_frame_groupings_id = new_lecture_emotion_frame_grouping_id
# new_lec_emotion_frame_groupings.lecture_emotion_id_id = lec_emotion_id
# new_lec_emotion_frame_groupings.frame_group_details = frame_group_details
#
# # save
# new_lec_emotion_frame_groupings.save()
#
#
# return Response({
# "frame_landmarks": frame_landmarks,
# "frame_group_percentages": frame_group_percentages,
# "emotion_labels": emotion_labels
# })
else:
frame_landmarks = []
# retrieve frame landmarks from db
lec_video_frame_landmarks = LectureVideoFrameLandmarks.objects.filter(
lecture_video_id__video_name=video_name)
lec_video_frame_landmarks_ser = LectureVideoFrameLandmarksSerializer(lec_video_frame_landmarks, many=True)
lec_video_frame_landmarks_data = lec_video_frame_landmarks_ser.data[0]
retrieved_frame_landmarks = lec_video_frame_landmarks_data["frame_landmarks"]
# creating a new list to display in the frontend
for landmark in retrieved_frame_landmarks:
frame_landmarks.append(int(landmark['landmark']))
l, frame_group_dict = ve.getFrameLandmarks(video_name, "Emotion")
frame_group_percentages, emotion_labels = ed.emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict)
# save the frame group details into db (temp method)
last_lec_emotion_frame_grouping = LectureEmotionFrameGroupings.objects.order_by('lecture_emotion_frame_groupings_id').last()
new_lecture_emotion_frame_grouping_id = "LEFG00001" if (last_lec_emotion_frame_grouping is None) else \
ig.generate_new_id(last_lec_emotion_frame_grouping.lecture_emotion_frame_groupings_id)
# retrieve the lecture emotion id
lec_emotion = LectureEmotionReport.objects.filter(lecture_video_id__video_name=video_name)
lec_emotion_ser = LectureEmotionSerializer(lec_emotion, many=True)
lec_emotion_id = lec_emotion_ser.data[0]['id']
# create the frame group details
frame_group_details = []
for key in frame_group_percentages.keys():
# create an object of type 'LectureActivityFrameGroupDetails'
lec_emotion_frame_group_details = LectureEmotionFrameGroupDetails()
lec_emotion_frame_group_details.frame_group = key
lec_emotion_frame_group_details.frame_group_percentages = frame_group_percentages[key]
frame_group_details.append(lec_emotion_frame_group_details)
new_lec_emotion_frame_groupings = LectureEmotionFrameGroupings()
new_lec_emotion_frame_groupings.lecture_emotion_frame_groupings_id = new_lecture_emotion_frame_grouping_id
new_lec_emotion_frame_groupings.lecture_emotion_id_id = lec_emotion_id
new_lec_emotion_frame_groupings.frame_group_details = frame_group_details
# save
new_lec_emotion_frame_groupings.save()
return Response({
"frame_landmarks": frame_landmarks,
"frame_group_percentages": frame_group_percentages,
"emotion_labels": emotion_labels
})
# this API will retrieve lecture gaze summary
......@@ -1163,7 +1206,6 @@ class GetLectureActivityCorrelations(APIView):
activity_correlations = ar.get_activity_correlations(individual_lec_activities, lec_recorded_activity_data)
print('activity correlations: ', activity_correlations)
return Response({
"correlations": activity_correlations
......@@ -1269,3 +1311,257 @@ class GetLectureGazeCorrelations(APIView):
return Response({
"correlations": gaze_correlations
})
# this class will handle the student activity-emotion correlations
class GetStudentActivityEmotionCorrelations(APIView):
def get(self, request):
# get the day option
option = request.query_params.get('option')
# get the lecturer id
lecturer = request.query_params.get('lecturer')
int_option = int(option)
# initialize the student behavior count
student_behavior_count = 0
current_date = datetime.datetime.now().date()
option_date = datetime.timedelta(days=int_option)
# get the actual date
previous_date = current_date - option_date
# initialize the lists
individual_lec_activities = []
individual_lec_emotions = []
activity_emotion_correlations = []
# retrieving lecture activities
lec_activity = LectureActivity.objects.filter(
lecture_video_id__date__gte=previous_date,
lecture_video_id__date__lte=current_date,
lecture_video_id__lecturer=lecturer
)
# retrieving lecture emotions
lec_emotion = LectureEmotionReport.objects.filter(
lecture_video_id__date__gte=previous_date,
lecture_video_id__date__lte=current_date,
lecture_video_id__lecturer=lecturer
)
# if there are lecture activities
if len(lec_activity) > 0:
student_behavior_count += 1
activity_serializer = LectureActivitySerializer(lec_activity, many=True)
activity_data = activity_serializer.data
_, individual_lec_activities, _ = ar.get_student_activity_summary_for_period(activity_data)
# if there are lecture emotions
if len(lec_emotion) > 0:
student_behavior_count += 1
emotion_serializer = LectureEmotionSerializer(lec_emotion, many=True)
emotion_data = emotion_serializer.data
_, individual_lec_emotions, _ = ed.get_student_emotion_summary_for_period(emotion_data)
# if both student activity and emotion data are available
if student_behavior_count == 2:
# find the correlations between student activities and emotions
activity_emotion_correlations = sbp.calculate_student_activity_emotion_correlations(individual_lec_activities, individual_lec_emotions)
return Response({
"correlations": activity_emotion_correlations
})
# this class will handle the student activity-gaze correlations
class GetStudentActivityGazeCorrelations(APIView):
def get(self, request):
# get the day option
option = request.query_params.get('option')
# get the lecturer id
lecturer = request.query_params.get('lecturer')
int_option = int(option)
# initialize the student behavior count
student_behavior_count = 0
current_date = datetime.datetime.now().date()
option_date = datetime.timedelta(days=int_option)
# get the actual date
previous_date = current_date - option_date
# initialize the lists
individual_lec_activities = []
individual_lec_gaze = []
activity_gaze_correlations = []
# retrieving lecture gaze estimations
lec_gaze = LectureGazeEstimation.objects.filter(
lecture_video_id__date__gte=previous_date,
lecture_video_id__date__lte=current_date,
lecture_video_id__lecturer=lecturer
)
# retrieving lecture activities
lec_activity = LectureActivity.objects.filter(
lecture_video_id__date__gte=previous_date,
lecture_video_id__date__lte=current_date,
lecture_video_id__lecturer=lecturer
)
# if there are lecture activities
if len(lec_activity) > 0:
student_behavior_count += 1
activity_serializer = LectureActivitySerializer(lec_activity, many=True)
activity_data = activity_serializer.data
_, individual_lec_activities, _ = ar.get_student_activity_summary_for_period(activity_data)
# if there are gaze estimations
if len(lec_gaze) > 0:
student_behavior_count += 1
gaze_serializer = LectureGazeEstimationSerializer(lec_gaze, many=True)
gaze_data = gaze_serializer.data
_, individual_lec_gaze, _ = hge.get_student_gaze_estimation_summary_for_period(gaze_data)
# if both activity and gaze data are available
if student_behavior_count == 2:
# find the correlations between student activity and gaze estimations
activity_gaze_correlations = sbp.calculate_student_activity_gaze_correlations(individual_lec_activities, individual_lec_gaze)
return Response({
"correlations": activity_gaze_correlations
})
# this class will handle the student emotion-gaze correlations
class GetStudentEmotionGazeCorrelations(APIView):
def get(self, request):
# get the day option
option = request.query_params.get('option')
# get the lecturer id
lecturer = request.query_params.get('lecturer')
int_option = int(option)
# initialize the student behavior count
student_behavior_count = 0
current_date = datetime.datetime.now().date()
option_date = datetime.timedelta(days=int_option)
# get the actual date
previous_date = current_date - option_date
# initialize the lists
individual_lec_emotions = []
individual_lec_gaze = []
emotion_gaze_correlations = []
# retrieving lecture gaze estimations
lec_gaze = LectureGazeEstimation.objects.filter(
lecture_video_id__date__gte=previous_date,
lecture_video_id__date__lte=current_date,
lecture_video_id__lecturer=lecturer
)
# retrieving lecture emotions
lec_emotion = LectureEmotionReport.objects.filter(
lecture_video_id__date__gte=previous_date,
lecture_video_id__date__lte=current_date,
lecture_video_id__lecturer=lecturer
)
# if there are lecture emotions
if len(lec_emotion) > 0:
student_behavior_count += 1
emotion_serializer = LectureEmotionSerializer(lec_emotion, many=True)
emotion_data = emotion_serializer.data
_, individual_lec_emotions, _ = ed.get_student_emotion_summary_for_period(emotion_data)
# if there are gaze estimations
if len(lec_gaze) > 0:
student_behavior_count += 1
gaze_serializer = LectureGazeEstimationSerializer(lec_gaze, many=True)
gaze_data = gaze_serializer.data
_, individual_lec_gaze, _ = hge.get_student_gaze_estimation_summary_for_period(gaze_data)
# if both emotion and gaze data are available
if student_behavior_count == 2:
# find the correlations between student emotions and gaze estimations
emotion_gaze_correlations = sbp.calculate_student_emotion_gaze_correlations(individual_lec_emotions, individual_lec_gaze)
return Response({
"correlations": emotion_gaze_correlations
})
##### BATCH PROCESS SECTION #####
# perform the student behavior analysis as a batch process
class BatchProcess(APIView):
def get(self, request):
video_name = request.query_params.get('video_name')
video_id = request.query_params.get('video_id')
return Response({
"response": True
})
# this API will check whether the lecture activity, emotion and gaze frame groupings exist
class CheckStudentBehaviorAvailability(APIView):
def get(self, request):
video_name = request.query_params.get('video_name')
#
# isActivityExist = LectureActivityFrameGroupings.objects.filter(
# lecture_activity_id__lecture_video_id__video_name=video_name).exists()
#
# isEmotionExist = LectureEmotionFrameGroupings.objects.filter(
# lecture_emotion_id__lecture_video_id__video_name=video_name).exists()
#
# isGazeExist = LectureGazeFrameGroupings.objects.filter(
# lecture_gaze_id__lecture_video_id__video_name=video_name).exists()
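# temporary stand-ins for the commented-out DB existence checks above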
isActivityExist = bool(Random().randint(0, 1))
isEmotionExist = bool(Random().randint(0, 1))
isGazeExist = bool(Random().randint(0, 1))
return Response({
"isActivityExist": isActivityExist,
"isEmotionExist": isEmotionExist,
"isGazeExist": isGazeExist
})
# this API will perform some random task (delete later)
class TestRandom(APIView):
def get(self, request):
random = Random().randint(0, 100)
return Response({
"response": random
})
......@@ -52,6 +52,8 @@ def emotion_recognition(classifier, face_classifier, image):
roi_gray = gray[y:y + h, x:x + w]
roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
# rect,face,image = face_detector(frame)
# draw a rectangle
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
if np.sum([roi_gray]) != 0:
roi = roi_gray.astype('float') / 255.0
......@@ -63,6 +65,9 @@ def emotion_recognition(classifier, face_classifier, image):
preds = classifier.predict(roi)[0]
label = class_labels[preds.argmax()]
# put the emotion label
cv2.putText(image, label, (x, y), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 255, 0), 3)
return label
......@@ -79,6 +84,7 @@ def detect_emotion(video):
face_classifier = cv2.CascadeClassifier(os.path.join(BASE_DIR, 'FirstApp\classifiers\haarcascade_frontalface_default.xml'))
classifier_path = os.path.join(BASE_DIR, 'FirstApp\classifiers\Emotion_little_vgg.h5')
classifier = load_model(classifier_path)
EMOTION_DIR = os.path.join(BASE_DIR, "static\\FirstApp\\emotion")
meta_data = VideoMeta()
class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
......@@ -99,6 +105,20 @@ def detect_emotion(video):
# for testing purposes
print('starting the emotion recognition process')
# get width and height of the video frames
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
# get the video frame size
size = (frame_width, frame_height)
# this is the annotated video path
ANNOTATED_VIDEO_PATH = os.path.join(EMOTION_DIR, video)
# initializing the video writer
vid_cod = cv2.VideoWriter_fourcc(*'XVID')
output = cv2.VideoWriter(ANNOTATED_VIDEO_PATH, vid_cod, 30.0, size)
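# note: the 30.0 fps here is hard-coded; if the source video was captured at a
# different rate, the annotated copy will play faster or slower than the original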
while (count_frames < frame_count):
# Grab a single frame of video
ret, frame = cap.read()
......@@ -135,6 +155,9 @@ def detect_emotion(video):
# for testing purposes
print('emotion frame count: ', count_frames)
# write the video frame to the video writer
output.write(frame)
count_frames += 1
# setting up the counted values
......@@ -146,8 +169,13 @@ def detect_emotion(video):
meta_data.surprise_count = count_surprise
cap.release()
output.release()
cv2.destroyAllWindows()
# after saving the video, publish it to static content via collectstatic
p = os.popen("python manage.py collectstatic", "w")
p.write("yes\n")
p.close()
# for testing purposes
print('ending the emotion recognition process')
......@@ -198,6 +226,8 @@ def get_frame_emotion_recognition(video_name):
# for testing purposes
print('starting the emotion frame recognition process')
# looping through the frames
while (frame_count < no_of_frames):
......@@ -216,18 +246,19 @@ def get_frame_emotion_recognition(video_name):
surprise_count = 0
# get the detections
detections = ar.person_detection(image, net)
detections, persons = ar.person_detection(image, net)
# to count the extracted detections for a frame
detection_count = 0
# if there are detections
if (len(detections) > 0):
# loop through the detections
for detection in detections:
for person in persons:
label = emotion_recognition(classifier, face_classifier, detection)
label = emotion_recognition(classifier, face_classifier, person)
# checking for the label
if label == class_labels[0]:
......@@ -422,17 +453,17 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
neutral_count = 0
detection_count = 0
detections = ar.person_detection(image, net)
detections, persons = ar.person_detection(image, net)
# if there are detections
if (len(detections) > 0):
# looping through the detections in each frame
for detection in detections:
for person in persons:
# run the model and get the emotion label
label = emotion_recognition(classifier, face_classifier, detection)
label = emotion_recognition(classifier, face_classifier, person)
# increment the count based on the label
if label == class_labels[0]:
......@@ -639,10 +670,14 @@ def get_emotion_correlations(individual_lec_emotions, lec_recorded_activity_data
# this variable will be used to store the correlations
correlations = []
limit = 10
# limit = 10
limit = len(individual_lec_emotions)
data_index = ['lecture-{}'.format(i + 1) for i in range(len(individual_lec_emotions))]
# declare the correlation data dictionary
corr_data = {}
# student emotion labels
student_emotion_labels = ['Happy', 'Sad', 'Angry', 'Surprise', 'Neutral']
lecturer_activity_labels = ['seated', 'standing', 'walking']
......@@ -662,31 +697,72 @@ def get_emotion_correlations(individual_lec_emotions, lec_recorded_activity_data
# loop through the lecturer recorded data (lecturer)
for data in lec_recorded_activity_data:
value = int(data['seated_count'])
value1 = int(data['standing_count'])
value2 = int(data['walking_count'])
if value != 0:
sitting_perct_list.append(int(data['seated_count']))
if value1 != 0:
standing_perct_list.append(int(data['standing_count']))
if value2 != 0:
walking_perct_list.append(int(data['walking_count']))
# loop through the lecturer recorded data (student)
for data in individual_lec_emotions:
value = int(data['happy_perct'])
value1 = int(data['sad_perct'])
value2 = int(data['angry_perct'])
value3 = int(data['surprise_perct'])
value4 = int(data['neutral_perct'])
if value != 0:
happy_perct_list.append(int(data['happy_perct']))
if value1 != 0:
sad_perct_list.append(int(data['sad_perct']))
if value2 != 0:
angry_perct_list.append(int(data['angry_perct']))
if value3 != 0:
surprise_perct_list.append(int(data['surprise_perct']))
if value4 != 0:
neutral_perct_list.append(int(data['neutral_perct']))
corr_data = {'Happy': happy_perct_list, 'Sad': sad_perct_list, 'Angry': angry_perct_list, 'Surprise': surprise_perct_list, 'Neutral': neutral_perct_list,
'seated': sitting_perct_list, 'standing': standing_perct_list, 'walking': walking_perct_list}
if len(happy_perct_list) == len(individual_lec_emotions):
corr_data[student_emotion_labels[0]] = happy_perct_list
if len(sad_perct_list) == len(individual_lec_emotions):
corr_data[student_emotion_labels[1]] = sad_perct_list
if len(angry_perct_list) == len(individual_lec_emotions):
corr_data[student_emotion_labels[2]] = angry_perct_list
if len(surprise_perct_list) == len(individual_lec_emotions):
corr_data[student_emotion_labels[3]] = surprise_perct_list
if len(neutral_perct_list) == len(individual_lec_emotions):
corr_data[student_emotion_labels[4]] = neutral_perct_list
if (len(sitting_perct_list)) == len(individual_lec_emotions):
corr_data[lecturer_activity_labels[0]] = sitting_perct_list
if (len(standing_perct_list)) == len(individual_lec_emotions):
corr_data[lecturer_activity_labels[1]] = standing_perct_list
if (len(walking_perct_list)) == len(individual_lec_emotions):
corr_data[lecturer_activity_labels[2]] = walking_perct_list
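# only full-length lists are added: pandas needs every DataFrame column to match
# the index length, so a metric with missing (zero) entries is left out entirely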
# corr_data = {'Happy': happy_perct_list, 'Sad': sad_perct_list, 'Angry': angry_perct_list, 'Surprise': surprise_perct_list, 'Neutral': neutral_perct_list,
# 'seated': sitting_perct_list, 'standing': standing_perct_list, 'walking': walking_perct_list}
# create the dataframe
df = pd.DataFrame(corr_data, index=data_index)
print(df)
# calculate the correlation
pd_series = ut.get_top_abs_correlations(df, limit)
print('====correlated variables=====')
print(pd_series)
# assign a new value to the 'limit' variable
limit = len(pd_series) if len(pd_series) < limit else limit
for i in range(limit):
# this dictionary will get the pandas.Series object's indices and values separately
corr_dict = {}
......
......@@ -38,7 +38,8 @@ def activity_recognition(video_path):
VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_path))
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_03.h5")
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_04.h5")
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_04.h5")
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_06.h5")
ACTIVITY_DIR = os.path.join(BASE_DIR, "static\\FirstApp\\activity")
# files required for person detection
......@@ -55,7 +56,9 @@ def activity_recognition(video_path):
np.set_printoptions(suppress=True)
# define the student activity labels
class_labels = ['Phone checking', 'Listening', 'Note taking']
# class_labels = ['Phone checking', 'Listening', 'Note taking']
class_labels = ['Phone checki...', 'Listening', 'Note taking']
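# note: the truncated 'Phone checki...' label is kept as-is; it presumably matches
# the label string baked into the version 06 classifier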
# load the model
model = tensorflow.keras.models.load_model(CLASSIFIER_DIR)
......@@ -81,13 +84,20 @@ def activity_recognition(video_path):
# for testing purposes
print('starting the activity recognition process')
# this is the annotated video path
ANNOTATED_VIDEO_PATH = os.path.join(ACTIVITY_DIR, video_path)
# initializing the video writer
vid_cod = cv2.VideoWriter_fourcc(*'XVID')
output = cv2.VideoWriter(ANNOTATED_VIDEO_PATH, vid_cod, 30.0, size)
# looping through the frames
while (frame_count < no_of_frames):
ret, image = video.read()
image = cv2.resize(image, size)
# image = cv2.resize(image, size)
# perform person detection on the extracted image
detections = person_detection(image, net)
detections, persons = person_detection(image, net)
# this is for testing purposes
print('frame count: ', frame_count)
......@@ -102,13 +112,26 @@ def activity_recognition(video_path):
# initialize the detection count
detection_count = 0
# to iterate each person
no_of_persons = 0
# looping through the person detections of the frame
for detection in detections:
detection = cv2.resize(detection, size)
# get the coordinates for the detection
startX = detection['startX']
startY = detection['startY']
endX = detection['endX']
endY = detection['endY']
# detection = cv2.resize(detection, size)
# draw the bounding boxes of the identified persons
cv2.rectangle(image, (startX, startY), (endX, endY), (0, 255, 0), 5)
image_array = np.asarray(detection)
normalized_image_array = (detection.astype(np.float32) / 127.0) - 1
image_array = np.asarray(persons[no_of_persons])
image_array_resized = cv2.resize(image_array, size)
# normalized_image_array = (detection.astype(np.float32) / 127.0) - 1
normalized_image_array = (image_array_resized.astype(np.float32) / 127.0) - 1
# Load the image into the array
data[0] = normalized_image_array
......@@ -120,18 +143,36 @@ def activity_recognition(video_path):
# counting the detections under each label
if (label == class_labels[0]):
label = "Phone checking"
phone_checking_count += 1
elif (label == class_labels[1]):
listening_count += 1
elif (label == class_labels[2]):
label = "Writing"
note_taking_count += 1
# vertical_pos = startY + int(endY / 2)
vertical_pos = int(endY / 2)
# put the identified label above the detected person
# cv2.putText(image, label, (startX, vertical_pos), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 0), 2)
cv2.putText(image, label, (startX, vertical_pos), cv2.FONT_HERSHEY_COMPLEX, 4, (0, 255, 0), 10)
# increment the no. of persons
no_of_persons += 1
# increment the detection count
detection_count += 1
# increment the frame count
frame_count += 1
# resize the image
image = cv2.resize(image, (224, 224))
# write the frame to the video writer
output.write(image)
# calculating the percentages for each label
phone_perct = float(phone_checking_count / total_detections) * 100 if total_detections > 0 else 0
......@@ -143,6 +184,10 @@ def activity_recognition(video_path):
percentages["writing_perct"] = note_perct
percentages["listening_perct"] = listening_perct
# after saving the video, publish it to static content via collectstatic
p = os.popen("python manage.py collectstatic", "w")
p.write("yes\n")
p.close()
# for testing purposes
print('activity recognition process is over')
......@@ -163,6 +208,7 @@ def person_detection(image, net):
# set the threshold value
threshold = 0.2
detected_person = []
persons = []
# initialize the list of class labels MobileNet SSD was trained to
# detect, then generate a set of bounding box colors for each class
......@@ -211,14 +257,22 @@ def person_detection(image, net):
startX = 0 if startX < 0 else startX
startY = 0 if startY < 0 else startY
# extract the person
# this dictionary will contain the bounding box coordinates
coordinates = {}
person = image[startY:startY + endY, startX:startX + endX]
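# note: this slice treats endX/endY as offsets from the start corner; if they are
# absolute box corners (as the cv2.rectangle calls elsewhere suggest),
# image[startY:endY, startX:endX] would be the intended crop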
detected_person.append(person)
coordinates['startX'] = startX
coordinates['startY'] = startY
coordinates['endX'] = endX
coordinates['endY'] = endY
persons.append(person)
detected_person.append(coordinates)
person_count += 1
# return the detected bounding boxes and the cropped persons
return detected_person
return detected_person, persons
# this method will recognize the activity for each frame
......@@ -233,7 +287,8 @@ def get_frame_activity_recognition(video_name):
VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_03.h5")
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_04.h5")
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_04.h5")
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_06.h5")
# files required for person detection
config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
......@@ -247,7 +302,9 @@ def get_frame_activity_recognition(video_name):
np.set_printoptions(suppress=True)
# class labels
class_labels = ['Phone checking', 'Listening', 'Note taking']
# class_labels = ['Phone checking', 'Listening', 'Note taking']
class_labels = ['Phone checki...', 'Listening', 'Note taking']
# load the activity recognition model
model = tensorflow.keras.models.load_model(CLASSIFIER_DIR)
......@@ -295,19 +352,27 @@ def get_frame_activity_recognition(video_name):
detection_count = 0
detected_percentages = []
detections = person_detection(image, net)
detections, persons = person_detection(image, net)
# if there are detections
if (len(detections) > 0):
no_of_persons = 0
# loop through each detection in the frame
for detection in detections:
detection = cv2.resize(detection, size)
# get the coordinates for the detection
startX = detection['startX']
startY = detection['startY']
endX = detection['endX']
endY = detection['endY']
image_array = np.asarray(detection)
normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
image_array = np.asarray(persons[no_of_persons])
image_array_resized = cv2.resize(image_array, size)
# normalized_image_array = (detection.astype(np.float32) / 127.0) - 1
normalized_image_array = (image_array_resized.astype(np.float32) / 127.0) - 1
# Load the image into the array
data[0] = normalized_image_array
......@@ -427,10 +492,21 @@ def get_student_activity_summary_for_period(activities):
def activity_frame_groupings(video_name, frame_landmarks, frame_group_dict):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_03.h5")
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_04.h5")
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_05.h5")
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_05.h5")
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_06.h5")
# files required for person detection
config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
model_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.caffemodel")
# load our serialized person detection model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(config_file, model_file)
np.set_printoptions(suppress=True)
......@@ -443,11 +519,16 @@ def activity_frame_groupings(video_name, frame_landmarks, frame_group_dict):
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
size = (224, 224)
# initializing the count variables
# class labels
# class_labels = ['Phone checking', 'Listening', 'Note taking']
class_labels = ['Phone checki...', 'Listening', 'Note taking']
# iteration
video = cv2.VideoCapture(VIDEO_DIR)
no_of_frames = video.get(cv2.CAP_PROP_FRAME_COUNT)
frame_count = 0
# class labels
class_labels = ['Phone checking', 'Listening', 'Note taking']
# get the frame differences for each frame group
frame_group_diff = {}
......@@ -463,9 +544,8 @@ def activity_frame_groupings(video_name, frame_landmarks, frame_group_dict):
frame_group_diff[key] = diff if diff > 0 else 1
# looping through the frames
for frame in os.listdir(EXTRACTED_DIR):
# getting the frame folder
FRAME_FOLDER = os.path.join(EXTRACTED_DIR, frame)
# for frame in os.listdir(EXTRACTED_DIR):
while (frame_count < no_of_frames):
# initializing the variables
phone_count = 0
......@@ -473,16 +553,14 @@ def activity_frame_groupings(video_name, frame_landmarks, frame_group_dict):
listen_count = 0
detection_count = 0
# looping through the detections in each frame
for detections in os.listdir(FRAME_FOLDER):
ret, image = video.read()
detections, persons = person_detection(image, net)
# checking whether the image contains only one person
if "frame" not in detections:
# get the label for this image
IMAGE_PATH = os.path.join(FRAME_FOLDER, detections)
image = cv2.imread(IMAGE_PATH)
# looping through the detections in each frame
for person in persons:
image = cv2.resize(image, size)
image = cv2.resize(person, size)
image_array = np.asarray(image)
normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
......@@ -674,10 +752,14 @@ def get_activity_correlations(individual_lec_activities, lec_recorded_activity_d
# this variable will be used to store the correlations
correlations = []
limit = 10
# limit = 10
limit = len(individual_lec_activities)
data_index = ['lecture-{}'.format(i+1) for i in range(len(individual_lec_activities))]
# declare the correlation data dictionary
corr_data = {}
# student activity labels
student_activity_labels = ['phone checking', 'listening', 'note taking']
lecturer_activity_labels = ['seated', 'standing', 'walking']
......@@ -694,29 +776,63 @@ def get_activity_correlations(individual_lec_activities, lec_recorded_activity_d
# loop through the lecturer recorded data (lecturer)
for data in lec_recorded_activity_data:
value = int(data['seated_count'])
value1 = int(data['standing_count'])
value2 = int(data['walking_count'])
if value != 0:
sitting_perct_list.append(int(data['seated_count']))
if value1 != 0:
standing_perct_list.append(int(data['standing_count']))
if value2 != 0:
walking_perct_list.append(int(data['walking_count']))
# loop through the lecturer recorded data (student)
for data in individual_lec_activities:
value = int(data['phone_perct'])
value1 = int(data['listening_perct'])
value2 = int(data['writing_perct'])
if value != 0:
phone_perct_list.append(int(data['phone_perct']))
if value1 != 0:
listen_perct_list.append(int(data['listening_perct']))
if value2 != 0:
note_perct_list.append(int(data['writing_perct']))
corr_data = {'phone checking': phone_perct_list, 'listening': listen_perct_list, 'note taking': note_perct_list,
'seated': sitting_perct_list, 'standing': standing_perct_list, 'walking': walking_perct_list}
if (len(phone_perct_list)) == len(individual_lec_activities):
corr_data[student_activity_labels[0]] = phone_perct_list
if (len(listen_perct_list)) == len(individual_lec_activities):
corr_data[student_activity_labels[1]] = listen_perct_list
if (len(note_perct_list)) == len(individual_lec_activities):
corr_data[student_activity_labels[2]] = note_perct_list
if (len(sitting_perct_list)) == len(individual_lec_activities):
corr_data[lecturer_activity_labels[0]] = sitting_perct_list
if (len(standing_perct_list)) == len(individual_lec_activities):
corr_data[lecturer_activity_labels[1]] = standing_perct_list
if (len(walking_perct_list)) == len(individual_lec_activities):
corr_data[lecturer_activity_labels[2]] = walking_perct_list
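# as above, a metric is included only when its list covers every lecture, which
# keeps all DataFrame columns the same length as the lecture index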
# corr_data = {'phone checking': phone_perct_list, 'listening': listen_perct_list, 'note taking': note_perct_list,
# 'seated': sitting_perct_list, 'standing': standing_perct_list, 'walking': walking_perct_list}
# create the dataframe
df = pd.DataFrame(corr_data, index=data_index)
print(df)
# calculate the correlation
pd_series = ut.get_top_abs_correlations(df, limit)
print('====correlated variables=====')
print(pd_series)
# assign a new value to the 'limit' variable
limit = len(pd_series) if len(pd_series) < limit else limit
for i in range(limit):
# this dictionary will get the pandas.Series object's indices and values separately
corr_dict = {}
......
import requests
def batch_process(video_id, video_name):
# call the activity process
activity_resp = requests.get('http://127.0.0.1:8000/process-lecture-activity/?lecture_video_name=' + video_name + '&lecture_video_id=' + video_id)
# call the emotion process
emotion_resp = requests.get('http://127.0.0.1:8000/process-lecture-emotion/?lecture_video_name=' + video_name + '&lecture_video_id=' + video_id)
# call the gaze process
gaze_resp = requests.get('http://127.0.0.1:8000/process-lecture-gaze-estimation/?lecture_video_name=' + video_name + '&lecture_video_id=' + video_id)
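# the responses are not inspected; each GET simply triggers the corresponding
# server-side process and blocks until that process finishes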
pass
# this method will save the lecture video
def save_student_lecture_video(student_video):
# call the API
student_video_save_resp = requests.post('http://127.0.0.1:8000/lecture-video', student_video)
\ No newline at end of file
......@@ -171,6 +171,7 @@ def process_gaze_estimation(video_path):
ret, img = cap.read()
size = img.shape
font = cv2.FONT_HERSHEY_SIMPLEX
# 3D model points.
model_points = np.array([
(0.0, 0.0, 0.0), # Nose tip
......@@ -211,6 +212,18 @@ def process_gaze_estimation(video_path):
# for testing purposes
print('starting the gaze estimation process')
# get the frame sizes
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
frame_size = (frame_width, frame_height)
# this is the annotated video path
ANNOTATED_VIDEO_PATH = os.path.join(GAZE_DIR, video_path)
# initializing the video writer
vid_cod = cv2.VideoWriter_fourcc(*'XVID')
output = cv2.VideoWriter(ANNOTATED_VIDEO_PATH, vid_cod, 30.0, frame_size)
# iterate the video frames
while True:
ret, img = cap.read()
......@@ -285,14 +298,19 @@ def process_gaze_estimation(video_path):
# checking for vertical and horizontal directions
if isLookingDown & isLookingRight:
cv2.putText(img, 'looking down and right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_down_right_count += 1
elif isLookingDown & isLookingLeft:
cv2.putText(img, 'looking down and left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_down_left_count += 1
elif isLookingUp & isLookingRight:
cv2.putText(img, 'looking up and right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_up_right_count += 1
elif isLookingUp & isLookingLeft:
cv2.putText(img, 'looking up and left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_up_left_count += 1
elif isLookingFront:
cv2.putText(img, 'looking front', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_front_count += 1
......@@ -304,6 +322,9 @@ def process_gaze_estimation(video_path):
# for testing purposes
print('gaze estimation count: ', frame_count)
# write to the video writer
output.write(img)
# increment the frame count
frame_count += 1
......@@ -330,6 +351,12 @@ def process_gaze_estimation(video_path):
cv2.destroyAllWindows()
cap.release()
output.release()
# after saving the video, publish it to static content via collectstatic
p = os.popen("python manage.py collectstatic", "w")
p.write("yes\n")
p.close()
# for testing purposes
print('ending the gaze estimation process')
......@@ -538,6 +565,7 @@ def get_lecture_gaze_estimation_for_frames(video_name):
# for testing purposes
print('ending the gaze estimation for frames process')
......@@ -979,10 +1007,15 @@ def get_gaze_correlations(individual_lec_gaze, lec_recorded_activity_data):
# this variable will be used to store the correlations
correlations = []
limit = 10
# limit = 10
limit = len(individual_lec_gaze)
data_index = ['lecture-{}'.format(i + 1) for i in range(len(individual_lec_gaze))]
# declare the correlation data dictionary
corr_data = {}
# student gaze labels
student_gaze_labels = ['Up and Right', 'Up and Left', 'Down and Right', 'Down and Left', 'Front']
lecturer_activity_labels = ['seated', 'standing', 'walking']
......@@ -1001,28 +1034,72 @@ def get_gaze_correlations(individual_lec_gaze, lec_recorded_activity_data):
# loop through the lecturer recorded data (lecturer)
for data in lec_recorded_activity_data:
value = int(data['seated_count'])
value1 = int(data['standing_count'])
value2 = int(data['walking_count'])
if value != 0:
sitting_perct_list.append(int(data['seated_count']))
if value1 != 0:
standing_perct_list.append(int(data['standing_count']))
if value2 != 0:
walking_perct_list.append(int(data['walking_count']))
# loop through the lecturer recorded data (student)
for data in individual_lec_gaze:
value = int(data['looking_up_and_right_perct'])
value1 = int(data['looking_up_and_left_perct'])
value2 = int(data['looking_down_and_right_perct'])
value3 = int(data['looking_down_and_left_perct'])
value4 = int(data['looking_front_perct'])
if value != 0:
upright_perct_list.append(int(data['looking_up_and_right_perct']))
if value1 != 0:
upleft_perct_list.append(int(data['looking_up_and_left_perct']))
if value2 != 0:
downright_perct_list.append(int(data['looking_down_and_right_perct']))
if value3 != 0:
downleft_perct_list.append(int(data['looking_down_and_left_perct']))
if value4 != 0:
front_perct_list.append(int(data['looking_front_perct']))
corr_data = {'Up and Right': upright_perct_list, 'Up and Left': upleft_perct_list, 'Down and Right': downright_perct_list,
'Down and Left': downleft_perct_list, 'Front': front_perct_list,
'seated': sitting_perct_list, 'standing': standing_perct_list, 'walking': walking_perct_list}
if (len(upright_perct_list)) == len(individual_lec_gaze):
corr_data[student_gaze_labels[0]] = upright_perct_list
if (len(upleft_perct_list)) == len(individual_lec_gaze):
corr_data[student_gaze_labels[1]] = upleft_perct_list
if (len(downright_perct_list)) == len(individual_lec_gaze):
corr_data[student_gaze_labels[2]] = downright_perct_list
if (len(downleft_perct_list)) == len(individual_lec_gaze):
corr_data[student_gaze_labels[3]] = downleft_perct_list
if (len(front_perct_list)) == len(individual_lec_gaze):
corr_data[student_gaze_labels[4]] = front_perct_list
if (len(sitting_perct_list)) == len(individual_lec_gaze):
corr_data[lecturer_activity_labels[0]] = sitting_perct_list
if (len(standing_perct_list)) == len(individual_lec_gaze):
corr_data[lecturer_activity_labels[1]] = standing_perct_list
if (len(walking_perct_list)) == len(individual_lec_gaze):
corr_data[lecturer_activity_labels[2]] = walking_perct_list
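# again, only metrics observed in every lecture are kept so the DataFrame
# columns line up with the lecture index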
# corr_data = {'Up and Right': upright_perct_list, 'Up and Left': upleft_perct_list, 'Down and Right': downright_perct_list,
# 'Down and Left': downleft_perct_list, 'Front': front_perct_list,
# 'seated': sitting_perct_list, 'standing': standing_perct_list, 'walking': walking_perct_list}
# create the dataframe
df = pd.DataFrame(corr_data, index=data_index)
print(df)
# calculate the correlation
pd_series = ut.get_top_abs_correlations(df, limit)
print('====correlated variables=====')
print(pd_series)
# assign a new value to the 'limit' variable
limit = len(pd_series) if len(pd_series) < limit else limit
for i in range(limit):
# this dictionary will get the pandas.Series object's indices and values separately
......
import pandas as pd
from . import utilities as ut
def calculate_student_activity_emotion_correlations(lec_activities, lec_emotions):
# this variable will be used to store the correlations
correlations = []
# limit = 10
limit = len(lec_activities)
data_index = ['lecture-{}'.format(i + 1) for i in range(len(lec_activities))]
# define the correlation data dictionary
corr_data = {}
# student activity and emotion labels
student_activity_labels = ['phone checking', 'listening', 'note taking']
student_emotion_labels = ['Happy', 'Sad', 'Angry', 'Surprise', 'Neutral']
# lecture activity data list (student)
phone_perct_list = []
note_perct_list = []
listen_perct_list = []
# lecture emotion data list (student)
happy_perct_list = []
sad_perct_list = []
angry_perct_list = []
surprise_perct_list = []
neutral_perct_list = []
# loop through the lecture activity data
for data in lec_activities:
value = int(data['phone_perct'])
value1 = int(data['listening_perct'])
value2 = int(data['writing_perct'])
if value != 0:
phone_perct_list.append(int(data['phone_perct']))
if value1 != 0:
listen_perct_list.append(int(data['listening_perct']))
if value2 != 0:
note_perct_list.append(int(data['writing_perct']))
# loop through the lecture emotion data
for data in lec_emotions:
value = int(data['happy_perct'])
value1 = int(data['sad_perct'])
value2 = int(data['angry_perct'])
value3 = int(data['surprise_perct'])
value4 = int(data['neutral_perct'])
if value != 0:
happy_perct_list.append(int(data['happy_perct']))
if value1 != 0:
sad_perct_list.append(int(data['sad_perct']))
if value2 != 0:
angry_perct_list.append(int(data['angry_perct']))
if value3 != 0:
surprise_perct_list.append(int(data['surprise_perct']))
if value4 != 0:
neutral_perct_list.append(int(data['neutral_perct']))
if len(phone_perct_list) == len(lec_activities):
corr_data[student_activity_labels[0]] = phone_perct_list
if len(listen_perct_list) == len(lec_activities):
corr_data[student_activity_labels[1]] = listen_perct_list
if len(note_perct_list) == len(lec_activities):
corr_data[student_activity_labels[2]] = note_perct_list
if len(happy_perct_list) == len(lec_activities):
corr_data[student_emotion_labels[0]] = happy_perct_list
if len(sad_perct_list) == len(lec_activities):
corr_data[student_emotion_labels[1]] = sad_perct_list
if len(angry_perct_list) == len(lec_activities):
corr_data[student_emotion_labels[2]] = angry_perct_list
if len(surprise_perct_list) == len(lec_activities):
corr_data[student_emotion_labels[3]] = surprise_perct_list
if len(neutral_perct_list) == len(lec_activities):
corr_data[student_emotion_labels[4]] = neutral_perct_list
# corr_data = {'phone checking': phone_perct_list, 'listening': listen_perct_list, 'note taking': note_perct_list,
# 'Happy': happy_perct_list, 'Sad': sad_perct_list, 'Angry': angry_perct_list, 'Surprise': surprise_perct_list, 'Neutral': neutral_perct_list,
# }
print('data: ', corr_data)
# create the dataframe
df = pd.DataFrame(corr_data, index=data_index)
df = df[(df.T != 0).any()]
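# drop rows that are entirely zero (lectures with no usable data) before correlating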
print(df)
# calculate the correlation
pd_series = ut.get_top_abs_correlations(df, limit)
print(pd_series)
# assign a new value to the 'limit' variable
limit = len(pd_series) if len(pd_series) < limit else limit
for i in range(limit):
# this dictionary will get the pandas.Series object's indices and values separately
corr_dict = {}
index = pd_series.index[i]
# check whether the first index is a student activity
isStudentActivity = index[0] in student_activity_labels
# check whether the second index is a lecturer activity
isStudentEmotion = index[1] in student_emotion_labels
# if both are student and lecturer activities, add to the dictionary
if isStudentActivity & isStudentEmotion:
corr_dict['index'] = index
corr_dict['value'] = pd_series.values[i]
# append the dictionary to the 'correlations' list
correlations.append(corr_dict)
# return the list
return correlations
# this method will calculate the student activity-gaze correlations
def calculate_student_activity_gaze_correlations(lec_activities, lec_gaze):
# this variable will be used to store the correlations
correlations = []
limit = len(lec_activities)
data_index = ['lecture-{}'.format(i + 1) for i in range(len(lec_activities))]
# this dictionary contains the correlation data
corr_data = {}
# student gaze labels
student_activity_labels = ['phone checking', 'listening', 'note taking']
# student_emotion_labels = ['Happy', 'Sad', 'Angry', 'Surprise', 'Neutral']
student_gaze_labels = ['Up and Right', 'Up and Left', 'Down and Right', 'Down and Left', 'Front']
# lecture activity data list (student)
phone_perct_list = []
note_perct_list = []
listen_perct_list = []
# lecture gaze estimation data list (student)
upright_perct_list = []
upleft_perct_list = []
downright_perct_list = []
downleft_perct_list = []
front_perct_list = []
# loop through the lecture activity data
for data in lec_activities:
value = int(data['phone_perct'])
value1 = int(data['listening_perct'])
value2 = int(data['writing_perct'])
if value != 0:
phone_perct_list.append(int(data['phone_perct']))
if value1 != 0:
listen_perct_list.append(int(data['listening_perct']))
if value2 != 0:
note_perct_list.append(int(data['writing_perct']))
# loop through the lecture gaze data
for data in lec_gaze:
value = int(data['looking_up_and_right_perct'])
value1 = int(data['looking_up_and_left_perct'])
value2 = int(data['looking_down_and_right_perct'])
value3 = int(data['looking_down_and_left_perct'])
value4 = int(data['looking_front_perct'])
if value != 0:
upright_perct_list.append(int(data['looking_up_and_right_perct']))
if value1 != 0:
upleft_perct_list.append(int(data['looking_up_and_left_perct']))
if value2 != 0:
downright_perct_list.append(int(data['looking_down_and_right_perct']))
if value3 != 0:
downleft_perct_list.append(int(data['looking_down_and_left_perct']))
if value4 != 0:
front_perct_list.append(int(data['looking_front_perct']))
if len(phone_perct_list) == len(lec_activities):
corr_data[student_activity_labels[0]] = phone_perct_list
if len(listen_perct_list) == len(lec_activities):
corr_data[student_activity_labels[1]] = listen_perct_list
if len(note_perct_list) == len(lec_activities):
corr_data[student_activity_labels[2]] = note_perct_list
if len(upright_perct_list) == len(lec_activities):
corr_data[student_gaze_labels[0]] = upright_perct_list
if len(upleft_perct_list) == len(lec_activities):
corr_data[student_gaze_labels[1]] = upleft_perct_list
if len(downright_perct_list) == len(lec_activities):
corr_data[student_gaze_labels[2]] = downright_perct_list
if len(downleft_perct_list) == len(lec_activities):
corr_data[student_gaze_labels[3]] = downleft_perct_list
if len(front_perct_list) == len(lec_activities):
corr_data[student_gaze_labels[4]] = front_perct_list
# corr_data = {'phone checking': phone_perct_list, 'listening': listen_perct_list, 'note taking': note_perct_list,
# 'Up and Right': upright_perct_list, 'Up and Left': upleft_perct_list, 'Down and Right': downright_perct_list,
# 'Down and Left': downleft_perct_list, 'Front': front_perct_list
# }
# create the dataframe
df = pd.DataFrame(corr_data, index=data_index)
print(df)
# calculate the correlation
pd_series = ut.get_top_abs_correlations(df, limit)
print(pd_series)
print('length of pd_series: ', len(pd_series))
# assign a new value to the 'limit' variable
limit = len(pd_series) if len(pd_series) < limit else limit
for i in range(limit):
# this dictionary will get the pandas.Series object's indices and values separately
corr_dict = {}
index = pd_series.index[i]
# check whether the first index is a student activity
isStudentActivity = index[0] in student_activity_labels
# check whether the second index is a student gaze estimation
isStudentGaze = index[1] in student_gaze_labels
# if the pair is a student activity and a student gaze direction, add it to the dictionary
if isStudentActivity and isStudentGaze:
corr_dict['index'] = index
corr_dict['value'] = pd_series.values[i]
# append the dictionary to the 'correlations' list
correlations.append(corr_dict)
print('correlations: ', correlations)
# return the list
return correlations
# this method will calculate the student emotion-gaze correlations
def calculate_student_emotion_gaze_correlations(lec_emotions, lec_gaze):
# this variable will be used to store the correlations
correlations = []
limit = len(lec_emotions)
data_index = ['lecture-{}'.format(i + 1) for i in range(len(lec_emotions))]
# this dictionary will contain the correlation data
corr_data = {}
student_emotion_labels = ['Happy', 'Sad', 'Angry', 'Surprise', 'Neutral']
student_gaze_labels = ['Up and Right', 'Up and Left', 'Down and Right', 'Down and Left', 'Front']
# lecture emotion data list (student)
happy_perct_list = []
sad_perct_list = []
angry_perct_list = []
surprise_perct_list = []
neutral_perct_list = []
# lecture gaze estimation data list (student)
upright_perct_list = []
upleft_perct_list = []
downright_perct_list = []
downleft_perct_list = []
front_perct_list = []
# loop through the lecture emotion data
for data in lec_emotions:
value = int(data['happy_perct'])
value1 = int(data['sad_perct'])
value2 = int(data['angry_perct'])
value3 = int(data['surprise_perct'])
value4 = int(data['neutral_perct'])
if value != 0:
happy_perct_list.append(int(data['happy_perct']))
if value1 != 0:
sad_perct_list.append(int(data['sad_perct']))
if value2 != 0:
angry_perct_list.append(int(data['angry_perct']))
if value3 != 0:
surprise_perct_list.append(int(data['surprise_perct']))
if value4 != 0:
neutral_perct_list.append(int(data['neutral_perct']))
# loop through the lecture gaze data
for data in lec_gaze:
value = int(data['looking_up_and_right_perct'])
value1 = int(data['looking_up_and_left_perct'])
value2 = int(data['looking_down_and_right_perct'])
value3 = int(data['looking_down_and_left_perct'])
value4 = int(data['looking_front_perct'])
if value != 0:
upright_perct_list.append(int(data['looking_up_and_right_perct']))
if value1 != 0:
upleft_perct_list.append(int(data['looking_up_and_left_perct']))
if value2 != 0:
downright_perct_list.append(int(data['looking_down_and_right_perct']))
if value3 != 0:
downleft_perct_list.append(int(data['looking_down_and_left_perct']))
if value4 != 0:
front_perct_list.append(int(data['looking_front_perct']))
if len(happy_perct_list) == len(lec_emotions):
corr_data[student_emotion_labels[0]] = happy_perct_list
if len(sad_perct_list) == len(lec_emotions):
corr_data[student_emotion_labels[1]] = sad_perct_list
if len(angry_perct_list) == len(lec_emotions):
corr_data[student_emotion_labels[2]] = angry_perct_list
if len(surprise_perct_list) == len(lec_emotions):
corr_data[student_emotion_labels[3]] = surprise_perct_list
if len(neutral_perct_list) == len(lec_emotions):
corr_data[student_emotion_labels[4]] = neutral_perct_list
if len(upright_perct_list) == len(lec_emotions):
corr_data[student_gaze_labels[0]] = upright_perct_list
if len(upleft_perct_list) == len(lec_emotions):
corr_data[student_gaze_labels[1]] = upleft_perct_list
if len(downright_perct_list) == len(lec_emotions):
corr_data[student_gaze_labels[2]] = downright_perct_list
if len(downleft_perct_list) == len(lec_emotions):
corr_data[student_gaze_labels[3]] = downleft_perct_list
if len(front_perct_list) == len(lec_emotions):
corr_data[student_gaze_labels[4]] = front_perct_list
# corr_data = {'Happy': happy_perct_list, 'Sad': sad_perct_list, 'Angry': angry_perct_list, 'Surprise': surprise_perct_list, 'Neutral': neutral_perct_list,
# 'Up and Right': upright_perct_list, 'Up and Left': upleft_perct_list, 'Down and Right': downright_perct_list,
# 'Down and Left': downleft_perct_list, 'Front': front_perct_list
# }
# create the dataframe
df = pd.DataFrame(corr_data, index=data_index)
print(df)
# calculate the correlation
pd_series = ut.get_top_abs_correlations(df, limit)
print(pd_series)
# assign a new value to the 'limit' variable
limit = len(pd_series) if len(pd_series) < limit else limit
for i in range(limit):
# this dictionary will get the pandas.Series object's indices and values separately
corr_dict = {}
index = pd_series.index[i]
# check whether the first index is a student emotion
isStudentEmotion = index[0] in student_emotion_labels
# check whether the second index is a student gaze estimation
isStudentGaze = index[1] in student_gaze_labels
# if the pair is a student emotion and a student gaze direction, add it to the dictionary
if isStudentEmotion and isStudentGaze:
corr_dict['index'] = index
corr_dict['value'] = pd_series.values[i]
# append the dictionary to the 'correlations' list
correlations.append(corr_dict)
# return the list
return correlations
# this method will provide comments on the student behavior
def generate_student_behavior_comments(category, **kwargs):
# declare the comments list
comments = []
if category == "Activity":
float_phone_perct = float(kwargs.get('phone_perct'))
float_listen_perct = float(kwargs.get('listen_perct'))
float_note_perct = float(kwargs.get('note_perct'))
# set the threshold value list
THRESHOLDS = [40, 20, 30]
if int(float_phone_perct) >= THRESHOLDS[0]:
comments.append("Special Attention needs to be given to reduce student phone checking")
if int(float_listen_perct) < THRESHOLDS[1]:
comments.append("Consider taking steps to increase student attention")
if int(float_note_perct) < THRESHOLDS[2]:
comments.append("Try to pursue students to take important notes during the lecture")
elif category == "Emotion":
print('Emotion')
elif category == "Gaze":
print('Gaze')
# return the comment list
return comments
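# --- usage sketch (illustrative values, not from the source) ---
# with the thresholds above, phone checking at 45% (>= 40) and note taking at 10% (< 30)
# each trigger a comment, while listening at 55% (>= 20) does not:
#
# generate_student_behavior_comments("Activity", phone_perct='45.0', listen_perct='55.0', note_perct='10.0')
# -> ["Special Attention needs to be given to reduce student phone checking",
#     "Try to persuade students to take important notes during the lecture"]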
# this method will remove the redundant pairs in pandas dataframe
def get_redundant_pairs(df):
'''Get diagonal and lower triangular pairs of correlation matrix'''
pairs_to_drop = set()
......@@ -8,6 +9,7 @@ def get_redundant_pairs(df):
pairs_to_drop.add((cols[i], cols[j]))
return pairs_to_drop
# this method will return the top specified correlations
def get_top_abs_correlations(df, n):
au_corr = df.corr().abs().unstack()
labels_to_drop = get_redundant_pairs(df)
......
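# the hunks above elide parts of these two helpers; a plausible full version, following
# the standard pandas top-correlations recipe (an assumption, not taken verbatim from this commit):
#
# def get_redundant_pairs(df):
#     '''Get diagonal and lower triangular pairs of correlation matrix'''
#     pairs_to_drop = set()
#     cols = df.columns
#     for i in range(0, df.shape[1]):
#         for j in range(0, i + 1):
#             pairs_to_drop.add((cols[i], cols[j]))
#     return pairs_to_drop
#
# def get_top_abs_correlations(df, n):
#     au_corr = df.corr().abs().unstack()
#     labels_to_drop = get_redundant_pairs(df)
#     au_corr = au_corr.drop(labels=labels_to_drop).sort_values(ascending=False)
#     return au_corr[0:n]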
......@@ -307,3 +307,8 @@ def get_frame_landmarks(video_name):
# now return the frame landmarks
return frame_landmarks
# this method will save lecture video (student)
def save_lecture_student_video():
pass
\ No newline at end of file
......@@ -18,8 +18,12 @@ there are two fields inside "Meta" class, as follows.
from rest_framework import serializers
from djongo import models
from .MongoModels import *
from . models import VideoMeta
from .logic import id_generator as ig
# from datetime import datetime as dt
import datetime
# lecture serializer
......@@ -190,6 +194,110 @@ class LectureVideoSerializer(serializers.ModelSerializer):
model = LectureVideo
fields = '__all__'
# this method will validate the input data
def to_internal_value(self, data):
lecturer = None
subject = None
lecturer_data = data.get('lecturer')
subject_data = data.get('subject')
# serialize the lecturer data
lecturer = Lecturer.objects.filter(id=lecturer_data)
subject = Subject.objects.filter(id=subject_data)
lecturer_ser_data = LecturerSerializer(lecturer, many=True).data[0]
subject_ser_data = SubjectSerializer(subject, many=True).data[0]
# retrieve the last lecture video details
last_lec_video = LectureVideo.objects.order_by('lecture_video_id').last()
# create the next lecture video id
new_lecture_video_id = ig.generate_new_id(last_lec_video.lecture_video_id)
# if both subject and lecturer details are available
if len(lecturer) == 1 & len(subject) == 1:
str_video_length = data.get('video_length')
video_length_parts = str_video_length.split(':')
video_length = datetime.timedelta(minutes=int(video_length_parts[0]), seconds=int(video_length_parts[1]),
milliseconds=int(video_length_parts[2]))
# this data will be passed as validated data
validated_data = {
'lecture_video_id': new_lecture_video_id,
'lecturer': lecturer_ser_data,
'subject': subject_ser_data,
'date': data.get('date'),
'video_name': data.get('video_name'),
'video_length': video_length
}
return super(LectureVideoSerializer, self).to_internal_value(validated_data)
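# worked example of the 'video_length' parsing above (values illustrative): the string
# '05:30:250' splits into ['05', '30', '250'] and becomes
# datetime.timedelta(minutes=5, seconds=30, milliseconds=250), i.e. 0:05:30.250000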
# this method will override the 'create' method
def create(self, validated_data):
lecturer = None
subject = None
lecturer_data = validated_data.pop('lecturer')
subject_data = validated_data.pop('subject')
# serialize the lecturer data
lecturer = Lecturer.objects.filter(id=lecturer_data)
subject = Subject.objects.filter(id=subject_data)
# retrieve the last lecture video details
last_lec_video = LectureVideo.objects.order_by('lecture_video_id').last()
# create the next lecture video id
new_lecture_video_id = ig.generate_new_id(last_lec_video.lecture_video_id)
# if both subject and lecturer details are available
if len(lecturer) == 1 & len(subject) == 1:
str_video_length = validated_data.pop('video_length')
video_length_parts = str_video_length.split(':')
video_length = datetime.timedelta(minutes=int(video_length_parts[0]), seconds=int(video_length_parts[1]), milliseconds=int(video_length_parts[2]))
lecture_video, created = LectureVideo.objects.update_or_create(
lecture_video_id=new_lecture_video_id,
lecturer=lecturer[0],
subject=subject[0],
date=validated_data.pop('date'),
video_name=validated_data.pop('video_name'),
video_length=video_length
)
# faculty_data = validated_data.pop('faculty')
# serialized_faculty = FacultySerializer(data=faculty_data)
#
# if (serialized_faculty.is_valid()):
# # faculty, faculty_created = Faculty.objects.get_or_create(defaults={}, faculty_id=serialized_faculty.data['faculty_id'])
# faculty = Faculty.objects.filter(faculty_id=serialized_faculty.data['faculty_id'])
#
# if (len(faculty) == 1):
# lecturer, created = Lecturer.objects.update_or_create(
# faculty=faculty[0],
# lecturer_id=validated_data.pop('lecturer_id'),
# fname=validated_data.pop('fname'),
# lname=validated_data.pop('lname'),
# email=validated_data.pop('email'),
# telephone=validated_data('telephone')
# )
#
# return lecturer
#
return lecture_video
return None
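# the id_generator module (ig) is not shown in this diff; a purely hypothetical sketch of
# generate_new_id, assuming ids carry an incrementable numeric suffix such as 'lec_video_12'
# (the real id format is not visible here):
#
# def generate_new_id(last_id):
#     prefix, _, num = str(last_id).rpartition('_')
#     return '{}_{}'.format(prefix, int(num) + 1)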
# lecture video time landmarks serializer
class LectureVideoTimeLandmarksSerializer(serializers.ModelSerializer):
......
......@@ -257,8 +257,13 @@
//change the innerHTML of the clicked button
e.target.innerHTML = "<span class='font-italic'>Processing</span>";
let phone_perct = $('#phone_perct').text().split("%")[0];
let listen_perct = $('#listening_perct').text().split("%")[0];
let note_perct = $('#writing_perct').text().split("%")[0];
//fetch the activity summary details
fetch('http://127.0.0.1:8000/get-lecture-activity-summary/?video_name=' + global_video_name + '&phone_perct=' + phone_perct + '&note_perct=' + note_perct + '&listen_perct=' + listen_perct)
.then((res) => res.json())
.then((out) => activityFrameGroupPercentages(out, e))
.catch((err) => alert('error: ' + err));
......@@ -295,16 +300,30 @@
//this function will handle the retrieved activity frame group percentages
function activityFrameGroupPercentages(response, e) {
//remove the previous comments
$('#student_activity_comments').empty();
lecture_activity_frame_group_percentages = response.frame_group_percentages;
let frame_landmarks = response.frame_landmarks;
{#let frame_group_dict = response.frame_group_dict;#}
let activity_labels = response.activity_labels;
let comment_list = response.comments;
//define a html string
let htmlString = "";
for (let i = 0; i < comment_list.length; i++) {
htmlString += "<p class='font-italic font-weight-bold'>";
htmlString += comment_list[i];
htmlString += "</p>";
}
//change the button back to original
e.target.innerHTML = "Summary";
//append the html string to the comments list
$('#student_activity_comments').append(htmlString);
//open the modal
$('#ActivitySummaryModal').modal();
......@@ -1017,6 +1036,67 @@
});
//this method will handle the student activity-emotion correlations advanced analysis
$('#student_activity_emotion_corr').click(function () {
//open the modal
$('#student_activity_emotion_advanced_modal').modal();
//show the loader
$('#student_activity_emotion_corr_loader').attr('hidden', false);
let lecturer = "{{ lecturer }}";
let option = $("input[name='option']:checked").val();
//fetch the correlation data
fetch('http://127.0.0.1:8000/get-student-activity-emotion-correlations/?lecturer=' + lecturer + "&option=" + option)
.then((res) => res.json())
.then((out) => displayActivityEmotionCorrelations(out.correlations))
.catch((err) => alert('err: ' + err));
});
//this method will handle the student activity-gaze correlations advanced analysis
$('#student_activity_gaze_corr').click(function () {
//open the modal
$('#student_activity_gaze_advanced_modal').modal();
//show the loader
$('#student_activity_gaze_corr_loader').attr('hidden', false);
let lecturer = "{{ lecturer }}";
let option = $("input[name='option']:checked").val();
//fetch the correlation data
fetch('http://127.0.0.1:8000/get-student-activity-gaze-correlations/?lecturer=' + lecturer + "&option=" + option)
.then((res) => res.json())
.then((out) => displayActivityGazeCorrelations(out.correlations))
.catch((err) => alert('err: ' + err));
});
//this method will handle the student emotion-gaze correlations advanced analysis
$('#student_emotion_gaze_corr').click(function () {
//open the modal
$('#student_emotion_gaze_advanced_modal').modal();
//show the loader
$('#student_emotion_gaze_corr_loader').attr('hidden', false);
let lecturer = "{{ lecturer }}";
let option = $("input[name='option']:checked").val();
//fetch the correlation data
fetch('http://127.0.0.1:8000/get-student-emotion-gaze-correlations/?lecturer=' + lecturer + "&option=" + option)
.then((res) => res.json())
.then((out) => displayEmotionGazeCorrelations(out.correlations))
.catch((err) => alert('err: ' + err));
});
//this method will display the activity correlations in a table
function displayActivityCorrelations(correlations) {
......@@ -1032,17 +1112,13 @@
if (value <= 100 && value > 80) {
htmlString += "<tr class='bg-success text-white'>";
} else if (value <= 80 && value > 60) {
htmlString += "<tr class='bg-primary text-white'>";
} else if (value <= 60 && value > 40) {
htmlString += "<tr class='bg-warning text-white'>";
} else if (value <= 40 && value > 20) {
htmlString += "<tr class='bg-danger text-white'>";
} else if (value <= 20 && value > 0) {
htmlString += "<tr class='bg-dark text-white'>";
}
......@@ -1088,17 +1164,13 @@
if (value <= 100 && value > 80) {
htmlString += "<tr class='bg-success text-white'>";
} else if (value <= 80 && value > 60) {
htmlString += "<tr class='bg-primary text-white'>";
} else if (value <= 60 && value > 40) {
htmlString += "<tr class='bg-warning text-white'>";
} else if (value <= 40 && value > 20) {
htmlString += "<tr class='bg-danger text-white'>";
} else if (value <= 20 && value > 0) {
htmlString += "<tr class='bg-dark text-white'>";
}
......@@ -1145,17 +1217,13 @@
if (value <= 100 && value > 80) {
htmlString += "<tr class='bg-success text-white'>";
} else if (value <= 80 && value > 60) {
htmlString += "<tr class='bg-primary text-white'>";
} else if (value <= 60 && value > 40) {
htmlString += "<tr class='bg-warning text-white'>";
} else if (value <= 40 && value > 20) {
htmlString += "<tr class='bg-danger text-white'>";
} else if (value <= 20 && value > 0) {
htmlString += "<tr class='bg-dark text-white'>";
}
......@@ -1187,6 +1255,189 @@
}
//this method will display the student activity-emotion correlations in a table
function displayActivityEmotionCorrelations(correlations) {
let htmlString = "";
if (correlations.length !== 0) {
//create the html content for the activity correlation table
for (let i = 0; i < correlations.length; i++) {
let corr = correlations[i];
let indices = corr.index;
let value = corr.value;
value = Math.round(value * 100);
if (value <= 100 && value > 80) {
htmlString += "<tr class='bg-success text-white'>";
} else if (value <= 80 && value > 60) {
htmlString += "<tr class='bg-primary text-white'>";
} else if (value <= 60 && value > 40) {
htmlString += "<tr class='bg-warning text-white'>";
} else if (value <= 40 && value > 20) {
htmlString += "<tr class='bg-danger text-white'>";
} else if (value <= 20 && value > 0) {
htmlString += "<tr class='bg-dark text-white'>";
}
//create a <tr> to be inserted
htmlString += "<td>";
htmlString += indices[0];
htmlString += "</td>";
htmlString += "<td>";
htmlString += indices[1];
htmlString += "</td>";
htmlString += "<td>";
htmlString += value;
htmlString += "</td>";
htmlString += "</tr>";
}
} else {
htmlString += "<tr>";
htmlString += "<td colspan='3'>";
htmlString += "<span class='font-italic'>No correlations were found</span>";
htmlString += "</td>";
htmlString += "</tr>";
}
//append to the <tbody>
$('#student_activity_emotion_corr_tbody').append(htmlString);
//hide the loader
$('#student_activity_emotion_corr_loader').hide();
//show the table
$('#student_activity_emotion_corr_table').attr('hidden', false);
}
//this method will display the student activity-gaze correlations in a table
function displayActivityGazeCorrelations(correlations) {
let htmlString = "";
if (correlations.length !== 0) {
//create the html content for the activity correlation table
for (let i = 0; i < correlations.length; i++) {
let corr = correlations[i];
let indices = corr.index;
let value = corr.value;
value = Math.round(value * 100);
if (value <= 100 && value > 80) {
htmlString += "<tr class='bg-success text-white'>";
} else if (value <= 80 && value > 60) {
htmlString += "<tr class='bg-primary text-white'>";
} else if (value <= 60 && value > 40) {
htmlString += "<tr class='bg-warning text-white'>";
} else if (value <= 40 && value > 20) {
htmlString += "<tr class='bg-danger text-white'>";
} else if (value <= 20 && value > 0) {
htmlString += "<tr class='bg-dark text-white'>";
}
//create a <tr> to be inserted
htmlString += "<td>";
htmlString += indices[0];
htmlString += "</td>";
htmlString += "<td>";
htmlString += indices[1];
htmlString += "</td>";
htmlString += "<td>";
htmlString += value;
htmlString += "</td>";
htmlString += "</tr>";
}
} else {
htmlString += "<tr>";
htmlString += "<td colspan='3'>";
htmlString += "<span class='font-italic'>No correlations were found</span>";
htmlString += "</td>";
htmlString += "</tr>";
}
//append to the <tbody>
$('#student_activity_gaze_corr_tbody').append(htmlString);
//hide the loader
$('#student_activity_gaze_corr_loader').hide();
//show the table
$('#student_activity_gaze_corr_table').attr('hidden', false);
}
//this method will display the student emotion-gaze correlations in a table
function displayEmotionGazeCorrelations(correlations) {
let htmlString = "";
if (correlations.length !== 0) {
//create the html content for the activity correlation table
for (let i = 0; i < correlations.length; i++) {
let corr = correlations[i];
let indices = corr.index;
let value = corr.value;
value = Math.round(value * 100);
if (value <= 100 && value > 80) {
htmlString += "<tr class='bg-success text-white'>";
} else if (value <= 80 && value > 60) {
htmlString += "<tr class='bg-primary text-white'>";
} else if (value <= 60 && value > 40) {
htmlString += "<tr class='bg-warning text-white'>";
} else if (value <= 40 && value > 20) {
htmlString += "<tr class='bg-danger text-white'>";
} else if (value <= 20 && value > 0) {
htmlString += "<tr class='bg-dark text-white'>";
}
//create a <tr> to be inserted
htmlString += "<td>";
htmlString += indices[0];
htmlString += "</td>";
htmlString += "<td>";
htmlString += indices[1];
htmlString += "</td>";
htmlString += "<td>";
htmlString += value;
htmlString += "</td>";
htmlString += "</tr>";
}
} else {
htmlString += "<tr>";
htmlString += "<td colspan='3'>";
htmlString += "<span class='font-italic'>No correlations were found</span>";
htmlString += "</td>";
htmlString += "</tr>";
}
//append to the <tbody>
$('#student_emotion_gaze_corr_tbody').append(htmlString);
//hide the loader
$('#student_emotion_gaze_corr_loader').hide();
//show the table
$('#student_emotion_gaze_corr_table').attr('hidden', false);
}
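//the three display functions above differ only in the element ids they touch;
//a possible shared helper (a sketch, not part of this commit), using the same
//colour banding and the ids already defined in this template:
function displayCorrelationTable(correlations, tbodyId, loaderId, tableId) {
    let htmlString = "";
    if (correlations.length !== 0) {
        for (let i = 0; i < correlations.length; i++) {
            let indices = correlations[i].index;
            let value = Math.round(correlations[i].value * 100);
            //pick the row colour band for the score
            if (value > 80) { htmlString += "<tr class='bg-success text-white'>"; }
            else if (value > 60) { htmlString += "<tr class='bg-primary text-white'>"; }
            else if (value > 40) { htmlString += "<tr class='bg-warning text-white'>"; }
            else if (value > 20) { htmlString += "<tr class='bg-danger text-white'>"; }
            else { htmlString += "<tr class='bg-dark text-white'>"; }
            htmlString += "<td>" + indices[0] + "</td>";
            htmlString += "<td>" + indices[1] + "</td>";
            htmlString += "<td>" + value + "</td>";
            htmlString += "</tr>";
        }
    } else {
        htmlString += "<tr><td colspan='3'><span class='font-italic'>No correlations were found</span></td></tr>";
    }
    $(tbodyId).append(htmlString);
    $(loaderId).hide();
    $(tableId).attr('hidden', false);
}
//e.g. displayCorrelationTable(out.correlations, '#student_activity_emotion_corr_tbody',
//    '#student_activity_emotion_corr_loader', '#student_activity_emotion_corr_table');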
});
</script>
......@@ -1214,86 +1465,7 @@
</div>
<!-- Content Row -->
{# <div class="row">#}
{##}
{# <!-- Earnings (Monthly) Card Example -->#}
{# <div class="col-xl-3 col-md-6 mb-4">#}
{# <div class="card border-left-primary shadow h-100 py-2">#}
{# <div class="card-body">#}
{# <div class="row no-gutters align-items-center">#}
{# <div class="col mr-2">#}
{# <div class="text-xs font-weight-bold text-primary text-uppercase mb-1">Earnings (Monthly)</div>#}
{# <div class="h5 mb-0 font-weight-bold text-gray-800">$40,000</div>#}
{# </div>#}
{# <div class="col-auto">#}
{# <i class="fas fa-calendar fa-2x text-gray-300"></i>#}
{# </div>#}
{# </div>#}
{# </div>#}
{# </div>#}
{# </div>#}
{##}
{# <!-- Earnings (Monthly) Card Example -->#}
{# <div class="col-xl-3 col-md-6 mb-4">#}
{# <div class="card border-left-success shadow h-100 py-2">#}
{# <div class="card-body">#}
{# <div class="row no-gutters align-items-center">#}
{# <div class="col mr-2">#}
{# <div class="text-xs font-weight-bold text-success text-uppercase mb-1">Earnings (Annual)</div>#}
{# <div class="h5 mb-0 font-weight-bold text-gray-800">$215,000</div>#}
{# </div>#}
{# <div class="col-auto">#}
{# <i class="fas fa-dollar-sign fa-2x text-gray-300"></i>#}
{# </div>#}
{# </div>#}
{# </div>#}
{# </div>#}
{# </div>#}
{##}
{# <!-- Earnings (Monthly) Card Example -->#}
{# <div class="col-xl-3 col-md-6 mb-4">#}
{# <div class="card border-left-info shadow h-100 py-2">#}
{# <div class="card-body">#}
{# <div class="row no-gutters align-items-center">#}
{# <div class="col mr-2">#}
{# <div class="text-xs font-weight-bold text-info text-uppercase mb-1">Tasks</div>#}
{# <div class="row no-gutters align-items-center">#}
{# <div class="col-auto">#}
{# <div class="h5 mb-0 mr-3 font-weight-bold text-gray-800">50%</div>#}
{# </div>#}
{# <div class="col">#}
{# <div class="progress progress-sm mr-2">#}
{# <div class="progress-bar bg-info" role="progressbar" style="width: 50%" aria-valuenow="50" aria-valuemin="0" aria-valuemax="100"></div>#}
{# </div>#}
{# </div>#}
{# </div>#}
{# </div>#}
{# <div class="col-auto">#}
{# <i class="fas fa-clipboard-list fa-2x text-gray-300"></i>#}
{# </div>#}
{# </div>#}
{# </div>#}
{# </div>#}
{# </div>#}
{##}
{# <!-- Pending Requests Card Example -->#}
{# <div class="col-xl-3 col-md-6 mb-4">#}
{# <div class="card border-left-warning shadow h-100 py-2">#}
{# <div class="card-body">#}
{# <div class="row no-gutters align-items-center">#}
{# <div class="col mr-2">#}
{# <div class="text-xs font-weight-bold text-warning text-uppercase mb-1">Pending Requests</div>#}
{# <div class="h5 mb-0 font-weight-bold text-gray-800">18</div>#}
{# </div>#}
{# <div class="col-auto">#}
{# <i class="fas fa-comments fa-2x text-gray-300"></i>#}
{# </div>#}
{# </div>#}
{# </div>#}
{# </div>#}
{# </div>#}
{##}
{# </div>#}
<!-- Content Row -->
......@@ -1418,6 +1590,23 @@
Advanced Analysis
</button>
<!-- end of button to view advanced analysis -->
<!-- button to view advanced analysis dropdown -->
{# <div class="dropdown">#}
{# <button class="btn btn-secondary dropdown-toggle float-right mr-2" type="button"#}
{# id="activity_advanced_dropdown" data-toggle="dropdown"#}
{# aria-haspopup="true" aria-expanded="false">#}
{# Advanced Analysis#}
{# </button>#}
{# <div class="dropdown-menu"#}
{# aria-labelledby="activity_advanced_dropdown">#}
{# <button class="dropdown-item" id="activity_advanced_btn">Student vs. Lecturer</button>#}
{# <button class="dropdown-item">Student vs. Student</button>#}
{# </div>#}
{# </div>#}
<!-- end of button to view advanced analysis dropdown -->
</div>
</div>
<!-- end of Activity card -->
......@@ -1501,6 +1690,22 @@
Advanced Analysis
</button>
<!-- end of button to view advanced analysis -->
<!-- button to view advanced analysis dropdown -->
{# <div class="dropdown">#}
{# <button class="btn btn-secondary dropdown-toggle float-right mr-2" type="button"#}
{# id="emotion_advanced_dropdown" data-toggle="dropdown"#}
{# aria-haspopup="true" aria-expanded="false">#}
{# Advanced Analysis#}
{# </button>#}
{# <div class="dropdown-menu"#}
{# aria-labelledby="emotion_advanced_dropdown">#}
{# <button class="dropdown-item" id="emotion_advanced_btn">Student vs. Lecturer</button>#}
{# <button class="dropdown-item">Student vs. Student</button>#}
{# </div>#}
{# </div>#}
<!-- end of button to view advanced analysis dropdown -->
</div>
</div>
......@@ -1578,11 +1783,56 @@
</button>
<!-- end of button to view advanced analysis -->
<!-- button to view advanced analysis dropdown -->
{# <div class="dropdown">#}
{# <button class="btn btn-secondary dropdown-toggle float-right mr-2" type="button"#}
{# id="gaze_advanced_dropdown" data-toggle="dropdown"#}
{# aria-haspopup="true" aria-expanded="false">#}
{# Advanced Analysis#}
{# </button>#}
{# <div class="dropdown-menu"#}
{# aria-labelledby="gaze_advanced_dropdown">#}
{# <button class="dropdown-item" id="gaze_advanced_btn">Student vs. Lecturer</button>#}
{# <button class="dropdown-item">Student vs. Student</button>#}
{# </div>#}
{# </div>#}
<!-- end of button to view advanced analysis dropdown -->
</div>
</div>
<!-- end of Gaze estimation card -->
<!-- advanced analysis for student-student correlations -->
{# <div class="float-right m-2">#}
{# <button type="button" class="btn btn-success" id="student_student_corr">#}
{# Advanced Analysis#}
{# </button>#}
{# </div>#}
<!-- end of advanced analysis for student-student correlations -->
<!-- button to view advanced analysis dropdown -->
<div class="dropdown">
<button class="btn btn-secondary dropdown-toggle float-right mr-2"
type="button"
id="student_student_advanced_dropdown" data-toggle="dropdown"
aria-haspopup="true" aria-expanded="false">
Advanced Analysis
</button>
<div class="dropdown-menu"
aria-labelledby="student_student_advanced_dropdown">
<button class="dropdown-item" id="student_activity_emotion_corr">
Activity vs. Emotion
</button>
<button class="dropdown-item" id="student_activity_gaze_corr">Activity
vs. Gaze
</button>
<button class="dropdown-item" id="student_emotion_gaze_corr">Emotion vs.
Gaze
</button>
</div>
</div>
<!-- end of button to view advanced analysis dropdown -->
</div>
<!-- end of student behavior summary -->
......@@ -2047,6 +2297,21 @@
</div>
<div class="modal-body">
<div id="ActivityChartContainer" style="height: 370px; max-width: 920px; margin: 0px auto;"></div>
<!-- Notes header -->
<div class="modal-header mt-4">
<h3>Notes</h3>
</div>
<!-- End of Notes header -->
<!-- Comments row -->
<div class="row mt-3">
<div class="col-lg-6" id="student_activity_comments">
</div>
</div>
<!-- End of Comments row -->
</div>
<div class="modal-footer">
<button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
......@@ -2364,6 +2629,143 @@
<!-- end of gaze advanced analysis modal -->
<!-- student activity-emotion advanced analysis modal -->
<div class="modal fade" id="student_activity_emotion_advanced_modal" tabindex="-1" role="dialog"
aria-labelledby="exampleModalLabel"
aria-hidden="true">
<div class="modal-dialog" role="document" style="max-width: 700px">
<div class="modal-content">
<div class="modal-header">
<h5>Student Behavior Correlations</h5>
<button class="close" type="button" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body text-center">
<h3 class="font-weight-bold">Student Activity vs. Student Emotion</h3>
<!-- ajax loader -->
<div class="text-center" id="student_activity_emotion_corr_loader" hidden>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}" alt="Loader">
</div>
<!-- correlation table -->
<table class="table table-striped" id="student_activity_emotion_corr_table" hidden>
<thead>
<tr>
<th>Student Activity</th>
<th>Student Emotion</th>
<th>Correlation Score</th>
</tr>
</thead>
<tbody id="student_activity_emotion_corr_tbody">
</tbody>
</table>
<!-- end of correlation table -->
</div>
<div class="modal-footer">
<button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
</div>
</div>
</div>
</div>
<!-- end of student activity-emotion advanced analysis modal -->
<!-- student activity-gaze advanced analysis modal -->
<div class="modal fade" id="student_activity_gaze_advanced_modal" tabindex="-1" role="dialog"
aria-labelledby="exampleModalLabel"
aria-hidden="true">
<div class="modal-dialog" role="document" style="max-width: 700px">
<div class="modal-content">
<div class="modal-header">
<h5>Student Behavior Correlations</h5>
<button class="close" type="button" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body text-center">
<h3 class="font-weight-bold">Student Activity vs. Student Emotions</h3>
<!-- ajax loader -->
<div class="text-center" id="student_activity_gaze_corr_loader" hidden>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}" alt="Loader">
</div>
<!-- correlation table -->
<table class="table table-striped" id="student_activity_gaze_corr_table" hidden>
<thead>
<tr>
<th>Student Activity</th>
<th>Student Gaze</th>
<th>Correlation Score</th>
</tr>
</thead>
<tbody id="student_activity_gaze_corr_tbody">
</tbody>
</table>
<!-- end of correlation table -->
</div>
<div class="modal-footer">
<button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
</div>
</div>
</div>
</div>
<!-- end of student activity-gaze advanced analysis modal -->
<!-- student emotion-gaze advanced analysis modal -->
<div class="modal fade" id="student_emotion_gaze_advanced_modal" tabindex="-1" role="dialog"
aria-labelledby="exampleModalLabel"
aria-hidden="true">
<div class="modal-dialog" role="document" style="max-width: 700px">
<div class="modal-content">
<div class="modal-header">
<h5>Student Behavior Correlations</h5>
<button class="close" type="button" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body text-center">
<h3 class="font-weight-bold">Student Emotion vs, Student Gaze</h3>
<!-- ajax loader -->
<div class="text-center" id="student_emotion_gaze_corr_loader" hidden>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}" alt="Loader">
</div>
<!-- correlation table -->
<table class="table table-striped" id="student_emotion_gaze_corr_table" hidden>
<thead>
<tr>
<th>Student Emotion</th>
<th>Student Gaze</th>
<th>Correlation Score</th>
</tr>
</thead>
<tbody id="student_emotion_gaze_corr_tbody">
</tbody>
</table>
<!-- end of correlation table -->
</div>
<div class="modal-footer">
<button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
</div>
</div>
</div>
</div>
<!-- end of student emotion-gaze advanced analysis modal -->
{% endblock %}
<!--scripts-->
{% block 'scripts' %}
......
......@@ -241,7 +241,13 @@
//to handle the 'integrate' modal
$('#integrate_activity').click(function () {
//define the student video src
{#global_video_name = "Video_test_9.mp4";#}
{#global_video_name = "Video_test_9_annotated.mp4";#}
let video_src = "{% static '' %}FirstApp/videos/" + global_video_name;
{#let video_src = "{% static '' %}FirstApp/video/" + global_video_name;#}
{#let video_src = "{% static '' %}/FirstApp/activity/" + global_video_name;#}
{#let video_src = "{% static '' %}FirstApp/emotion/" + global_video_name;#}
//assign the video src
$('#student_video').attr('src', video_src);
......@@ -1078,6 +1084,11 @@
type="video/mp4">
Your browser does not support the video tag.
</video>
{# <video width="500" height="300" id="student_video" controls>#}
{# <source src="{% static 'FirstApp/videos/Video_test_2.mp4' %}"#}
{# type="video/mp4">#}
{# Your browser does not support the video tag.#}
{# </video>#}
</div>
<!--end of student video section -->
......
......@@ -74,6 +74,8 @@
real_class = '.' + real_class;
let date = e.target.parentNode.parentNode.firstChild.innerHTML;
//assign the date
global_lecture_date = date;
fetch('http://127.0.0.1:8000/get-lecture-emotion-availability/?lecturer=' + global_lecturer + '&date=' + date + '&index=' + global_lecturer_subject_index)
.then((res) => res.json())
......@@ -143,7 +145,8 @@
$('#video_name').text(video.video_name);
$('#video_duration').text(video.video_length);
$('#video_date').text(video.date);
{#global_lecture_video_id = video.lecture_video_id;#}
global_lecture_video_id = video.id;
global_video_name = video.video_name;
......@@ -241,32 +244,67 @@
//define the student video src
let video_src = "{% static '' %}FirstApp/videos/" + global_video_name;
//assign the video src
$('#student_video').attr('src', video_src);
{#fetch('http://127.0.0.1:8000/get-random-number')#}
{#.then((res) => res.json())#}
{#.then((out) => alert(out.response))#}
{#.catch((err) => alert('err: ' + err));#}
//fetch the lecture recorded video name
fetch('http://127.0.0.1:8000/get-lecture-recorded-video-name/?lecturer=' + global_lecturer + '&subject=' + global_subject + '&date=' + global_lecture_date)
.then((res) => res.json())
.then((out) => assignLecturerRecordedVideoName(out))
.catch((err) => alert('error: ' + err));
{#global_lecturer_video_name = "Test_1.mp4";#}
{#global_lecturer_video_name = "Test_2.mp4";#}
global_lecturer_video_name = "Test_3.mp4";
{#global_lecturer_video_name = "Test_3.mp4";#}
{#global_lecturer_video_name = "Lecturer_Video_4.mp4";#}
{##}
{#//define the lecturer video src#}
{#let lecturer_video_src = "{% static '' %}FirstApp/lecturer_videos/" + global_lecturer_video_name;#}
{##}
{##}
{##}
{#//assign the video src#}
{#$('#lecturer_video').attr('src', lecturer_video_src);#}
{##}
{#$('#integrate_modal').modal();#}
{#//fetch data from the API#}
{#fetch('http://127.0.0.1:8000/get-lecture-emotion-for-frame?video_name=' + global_video_name)#}
{# .then((res) => res.json())#}
{# .then((out) => displayEmotionRecognitionForFrame(out.response))#}
{# .catch((err) => alert('error: ' + err));#}
});
//assign the lecturer recorded video name
function assignLecturerRecordedVideoName(res) {
global_lecturer_video_name = res.video_name;
//define the lecturer video src
let lecturer_video_src = "{% static '' %}FirstApp/lecturer_videos/" + global_lecturer_video_name;
//assign the video src
$('#lecturer_video').attr('src', lecturer_video_src);
$('#integrate_modal').modal();
//fetch data from the API
fetch('http://127.0.0.1:8000/get-lecture-emotion-for-frame?video_name=' + global_video_name)
.then((res) => res.json())
.then((out) => displayEmotionRecognitionForFrame(out.response))
.catch((err) => alert('error: ' + err));
}
//this function will display the emotion percentages for each frame
......@@ -338,7 +376,7 @@
fetch('http://127.0.0.1:8000/lecturer/get-lecturer-video-frame-recognitions/?video_name=' + global_lecturer_video_name)
.then((res) => res.json())
.then((out) => displayLecturerEmotionRecognitionForFrame(out))
.catch((err) => alert('error: ' + err));
}
......
......@@ -74,6 +74,8 @@
real_class = '.' + real_class;
let date = e.target.parentNode.parentNode.firstChild.innerHTML;
//assign the date
global_lecture_date = date;
fetch('http://127.0.0.1:8000/get-lecture-video-gaze-estimation-availability/?lecturer=' + global_lecturer + '&date=' + date + '&index=' + global_lecturer_subject_index)
.then((res) => res.json())
......@@ -142,7 +144,8 @@
$('#video_name').text(video.video_name);
$('#video_duration').text(video.video_length);
$('#video_date').text(video.date);
{#global_lecture_video_id = video.lecture_video_id;#}
global_lecture_video_id = video.id;
global_video_name = video.video_name;
......@@ -239,21 +242,28 @@
//define the student video src
let video_src = "{% static '' %}FirstApp/videos/" + global_video_name;
{#global_lecturer_video_name = "Test_1.mp4";#}
{#global_lecturer_video_name = "Test_2.mp4";#}
global_lecturer_video_name = "Test_3.mp4";
//define the lecturer video src
let lecturer_video_src = "{% static '' %}FirstApp/lecturer_videos/" + global_lecturer_video_name;
//assign the video src
$('#student_video').attr('src', video_src);
//assign the video src
$('#lecturer_video').attr('src', lecturer_video_src);
//fetch the lecture recorded video name
fetch('http://127.0.0.1:8000/get-lecture-recorded-video-name/?lecturer=' + global_lecturer + '&subject=' + global_subject + '&date=' + global_lecture_date)
.then((res) => res.json())
.then((out) => assignLecturerRecordedVideoName(out))
.catch((err) => alert('error: ' + err));
{#global_lecturer_video_name = "Test_1.mp4";#}
{#global_lecturer_video_name = "Test_2.mp4";#}
{#global_lecturer_video_name = "Test_3.mp4";#}
{#global_lecturer_video_name = "Lecturer_Video_4.mp4";#}
{##}
{#//define the lecturer video src#}
{#let lecturer_video_src = "{% static '' %}FirstApp/lecturer_videos/" + global_lecturer_video_name;#}
{##}
{#//assign the video src#}
{#$('#lecturer_video').attr('src', lecturer_video_src);#}
{##}
{#$('#integrate_modal').modal();#}
$('#integrate_modal').modal();
//fetch data from the API
fetch('http://127.0.0.1:8000/get-lecture-gaze-estimation-for-frame/?video_name=' + global_video_name)
......@@ -264,6 +274,23 @@
});
//assign the lecturer recorded video name
function assignLecturerRecordedVideoName(res) {
global_lecturer_video_name = res.video_name;
//define the lecturer video src
let lecturer_video_src = "{% static '' %}FirstApp/lecturer_videos/" + global_lecturer_video_name;
//assign the video src
$('#lecturer_video').attr('src', lecturer_video_src);
$('#integrate_modal').modal();
}
//this function will load the activity recognition for frames
function displayGazeEstimationForFrame(response) {
......
<!DOCTYPE html>
<html lang="en">
{% block head %}
<head>
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
......@@ -10,47 +10,60 @@
<title>SLPES</title>
{% load static %}
<link rel="shortcut icon" href="{% static 'FirstApp/images/favicon.ico' %}" type="image/x-icon" />
<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css" integrity="sha384-Vkoo8x4CGsO3+Hhxv8T/Q5PaXtkKtu6ug5TOeNV6gBiFeWPGFN9MuhOf23Q9Ifjh" crossorigin="anonymous">
<link rel="shortcut icon" href="{% static 'FirstApp/images/favicon.ico' %}" type="image/x-icon"/>
<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css"
integrity="sha384-Vkoo8x4CGsO3+Hhxv8T/Q5PaXtkKtu6ug5TOeNV6gBiFeWPGFN9MuhOf23Q9Ifjh"
crossorigin="anonymous">
<link rel="stylesheet" href="{% static 'FirstApp/css/sb-admin-2.min.css' %}">
<link rel="stylesheet" href="{% static 'FirstApp/css/slider.css' %}">
<link href="{% static 'FirstApp/vendor/fontawesome-free/css/all.min.css' %}" rel="stylesheet" type="text/css">
<link href="{% static 'FirstApp/css/snackbar.css' %}" rel="stylesheet" type="text/css">
<link href="https://fonts.googleapis.com/css?family=Nunito:200,200i,300,300i,400,400i,600,600i,700,700i,800,800i,900,900i" rel="stylesheet">
<link href="https://fonts.googleapis.com/css?family=Nunito:200,200i,300,300i,400,400i,600,600i,700,700i,800,800i,900,900i"
rel="stylesheet">
<link rel="stylesheet" href="{% static 'FirstApp/css/all.min.css' %}">
<link href="{% static 'FirstApp/vendor/datatables/dataTables.bootstrap4.min.css' %}" rel="stylesheet">
<!-- this link will import process workflow CSS -->
<link href="{% static 'FirstApp/css/process-worflow.css' %}" rel="stylesheet" type="text/css">
</head>
</head>
{% endblock %}
<body id="page-top">
<!-- Page Wrapper -->
<!-- Page Wrapper -->
{% block javascript %}
{% block javascript %}
{% load static %}
<script type="text/javascript" src="{% static 'FirstApp/vendor/jquery/jquery.js' %}"></script>
<script src="{% static 'FirstApp/vendor/jquery/jquery.js' %}"></script>
<script src="{% static 'FirstApp/vendor/bootstrap/js/bootstrap.bundle.min.js' %}"></script>
{% endblock %}
<div id="wrapper">
<!-- Sidebar -->
<ul class="navbar-nav bg-gradient-primary sidebar sidebar-dark accordion" id="accordionSidebar">
<!-- Sidebar - Brand -->
<a class="sidebar-brand d-flex align-items-center justify-content-center" href="/">
<div class="sidebar-brand-icon rotate-n-15">
<i class="fas fa-laugh-wink"></i>
</div>
{# <div class="sidebar-brand-icon rotate-n-15">#}
{# <i class="fas fa-laugh-wink"></i>#}
{# </div>#}
{# <div class="sidebar-brand-icon">#}
{# <i class="fas fa-chalkboard-teacher"></i>#}
{# </div>#}
{% if request.session.user_type == "Lecturer" %}
<div class="sidebar-brand-icon">
<i class="fas fa-chalkboard-teacher"></i>
</div>
<div class="sidebar-brand-text mx-3">SLPES Lecturer</div>
{% endif %}
{% if request.session.user_type == "Admin" %}
<div class="sidebar-brand-icon">
<i class="fa fa-user" aria-hidden="true"></i>
</div>
<div class="sidebar-brand-text mx-3">SLPES Admin</div>
{% endif %}
</a>
......@@ -77,26 +90,29 @@
<!-- Nav Item - Pages Collapse Menu -->
<li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseTwo" aria-expanded="true" aria-controls="collapseTwo">
<i class="fas fa-fw fa-cog"></i>
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseTwo"
aria-expanded="true" aria-controls="collapseTwo">
<i class="fa fa-calculator" aria-hidden="true"></i>
<span>Estimations</span>
</a>
<div id="collapseTwo" class="collapse" aria-labelledby="headingTwo" data-parent="#accordionSidebar">
<div class="bg-white py-2 collapse-inner rounded">
<h6 class="collapse-header">Components:</h6>
{# <a class="collapse-item" href="/pose">Pose</a>#}
{# <a class="collapse-item" href="/pose">Pose</a>#}
<a class="collapse-item" href="/gaze">Gaze</a>
<a class="collapse-item" href="/emotion">Emotion</a>
<a class="collapse-item" href="/activity">Activity</a>
</div>ac
</div>
ac
</div>
</li>
<!-- Nav Item - Pages Collapse Menu -->
<li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseThree" aria-expanded="true" aria-controls="collapseThree">
<i class="fas fa-fw fa-cog"></i>
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseThree"
aria-expanded="true" aria-controls="collapseThree">
<i class="fa fa-graduation-cap" aria-hidden="true"></i>
<span>Lecture</span>
</a>
<div id="collapseThree" class="collapse" aria-labelledby="headingThree" data-parent="#accordionSidebar">
......@@ -110,8 +126,9 @@
<li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseFour" aria-expanded="true" aria-controls="collapseThree">
<i class="fas fa-fw fa-cog"></i>
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseFour"
aria-expanded="true" aria-controls="collapseThree">
<i class="fa fa-eye" aria-hidden="true"></i>
<span>Attendance</span>
</a>
<div id="collapseFour" class="collapse" aria-labelledby="headingThree" data-parent="#accordionSidebar">
......@@ -124,17 +141,19 @@
<!-- Nav Item - Utilities Collapse Menu -->
<li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseUtilities" aria-expanded="true" aria-controls="collapseUtilities">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseUtilities"
aria-expanded="true" aria-controls="collapseUtilities">
<i class="fas fa-fw fa-wrench"></i>
<span>Utilities</span>
</a>
<div id="collapseUtilities" class="collapse" aria-labelledby="headingUtilities" data-parent="#accordionSidebar">
<div id="collapseUtilities" class="collapse" aria-labelledby="headingUtilities"
data-parent="#accordionSidebar">
<div class="bg-white py-2 collapse-inner rounded">
<h6 class="collapse-header">Custom Utilities:</h6>
{# <a class="collapse-item" href="/extract">Video Extractor</a>#}
{# <a class="collapse-item" href="/extract">Video Extractor</a>#}
<a class="collapse-item" href="/video_result">Video Results</a>
{# <a class="collapse-item" href="utilities-animation.html">Animations</a>#}
{# <a class="collapse-item" href="utilities-other.html">Other</a>#}
{# <a class="collapse-item" href="utilities-animation.html">Animations</a>#}
{# <a class="collapse-item" href="utilities-other.html">Other</a>#}
</div>
</div>
</li>
......@@ -164,44 +183,44 @@
<!-- Divider -->
<hr class="sidebar-divider">
{# <!-- Heading -->#}
{# <div class="sidebar-heading">#}
{# Addons#}
{# </div>#}
{# <!-- Heading -->#}
{# <div class="sidebar-heading">#}
{# Addons#}
{# </div>#}
<!-- Nav Item - Pages Collapse Menu -->
{# <li class="nav-item">#}
{# <a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapsePages" aria-expanded="true" aria-controls="collapsePages">#}
{# <i class="fas fa-fw fa-folder"></i>#}
{# <span>Pages</span>#}
{# </a>#}
{# <div id="collapsePages" class="collapse" aria-labelledby="headingPages" data-parent="#accordionSidebar">#}
{# <div class="bg-white py-2 collapse-inner rounded">#}
{# <h6 class="collapse-header">Login Screens:</h6>#}
{# <a class="collapse-item" href="/login">Login</a>#}
{# <a class="collapse-item" href="/register">Register</a>#}
{# <a class="collapse-item" href="/forgot-password">Forgot Password</a>#}
{# <div class="collapse-divider"></div>#}
{# <h6 class="collapse-header">Other Pages:</h6>#}
{# <a class="collapse-item" href="/404">404 Page</a>#}
{# <a class="collapse-item" href="/blank">Blank Page</a>#}
{# </div>#}
{# </div>#}
{# </li>#}
{# <!-- Nav Item - Charts -->#}
{# <li class="nav-item">#}
{# <a class="nav-link" href="charts.html">#}
{# <i class="fas fa-fw fa-chart-area"></i>#}
{# <span>Charts</span></a>#}
{# </li>#}
{##}
{# <!-- Nav Item - Tables -->#}
{# <li class="nav-item">#}
{# <a class="nav-link" href="/tables">#}
{# <i class="fas fa-fw fa-table"></i>#}
{# <span>Tables</span></a>#}
{# </li>#}
{# <li class="nav-item">#}
{# <a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapsePages" aria-expanded="true" aria-controls="collapsePages">#}
{# <i class="fas fa-fw fa-folder"></i>#}
{# <span>Pages</span>#}
{# </a>#}
{# <div id="collapsePages" class="collapse" aria-labelledby="headingPages" data-parent="#accordionSidebar">#}
{# <div class="bg-white py-2 collapse-inner rounded">#}
{# <h6 class="collapse-header">Login Screens:</h6>#}
{# <a class="collapse-item" href="/login">Login</a>#}
{# <a class="collapse-item" href="/register">Register</a>#}
{# <a class="collapse-item" href="/forgot-password">Forgot Password</a>#}
{# <div class="collapse-divider"></div>#}
{# <h6 class="collapse-header">Other Pages:</h6>#}
{# <a class="collapse-item" href="/404">404 Page</a>#}
{# <a class="collapse-item" href="/blank">Blank Page</a>#}
{# </div>#}
{# </div>#}
{# </li>#}
{# <!-- Nav Item - Charts -->#}
{# <li class="nav-item">#}
{# <a class="nav-link" href="charts.html">#}
{# <i class="fas fa-fw fa-chart-area"></i>#}
{# <span>Charts</span></a>#}
{# </li>#}
{##}
{# <!-- Nav Item - Tables -->#}
{# <li class="nav-item">#}
{# <a class="nav-link" href="/tables">#}
{# <i class="fas fa-fw fa-table"></i>#}
{# <span>Tables</span></a>#}
{# </li>#}
<!-- Divider -->
<hr class="sidebar-divider d-none d-md-block">
......@@ -230,169 +249,172 @@
</button>
<!-- Topbar Search -->
{# <form class="d-none d-sm-inline-block form-inline mr-auto ml-md-3 my-2 my-md-0 mw-100 navbar-search">#}
{# <div class="input-group">#}
{# <input type="text" class="form-control bg-light border-0 small" placeholder="Search for..." aria-label="Search" aria-describedby="basic-addon2">#}
{# <div class="input-group-append">#}
{# <button class="btn btn-primary" type="button">#}
{# <i class="fas fa-search fa-sm"></i>#}
{# </button>#}
{# </div>#}
{# </div>#}
{# </form>#}
{# <form class="d-none d-sm-inline-block form-inline mr-auto ml-md-3 my-2 my-md-0 mw-100 navbar-search">#}
{# <div class="input-group">#}
{# <input type="text" class="form-control bg-light border-0 small" placeholder="Search for..." aria-label="Search" aria-describedby="basic-addon2">#}
{# <div class="input-group-append">#}
{# <button class="btn btn-primary" type="button">#}
{# <i class="fas fa-search fa-sm"></i>#}
{# </button>#}
{# </div>#}
{# </div>#}
{# </form>#}
<!-- Topbar Navbar -->
<ul class="navbar-nav ml-auto">
<!-- Nav Item - Search Dropdown (Visible Only XS) -->
{# <li class="nav-item dropdown no-arrow d-sm-none">#}
{# <a class="nav-link dropdown-toggle" href="#" id="searchDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">#}
{# <i class="fas fa-search fa-fw"></i>#}
{# </a>#}
{# <!-- Dropdown - Messages -->#}
{# <div class="dropdown-menu dropdown-menu-right p-3 shadow animated--grow-in" aria-labelledby="searchDropdown">#}
{# <form class="form-inline mr-auto w-100 navbar-search">#}
{# <div class="input-group">#}
{# <input type="text" class="form-control bg-light border-0 small" placeholder="Search for..." aria-label="Search" aria-describedby="basic-addon2">#}
{# <div class="input-group-append">#}
{# <button class="btn btn-primary" type="button">#}
{# <i class="fas fa-search fa-sm"></i>#}
{# </button>#}
{# </div>#}
{# </div>#}
{# </form>#}
{# </div>#}
{# </li>#}
{##}
{# <!-- Nav Item - Alerts -->#}
{# <li class="nav-item dropdown no-arrow mx-1">#}
{# <a class="nav-link dropdown-toggle" href="#" id="alertsDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">#}
{# <i class="fas fa-bell fa-fw"></i>#}
{# <!-- Counter - Alerts -->#}
{# <span class="badge badge-danger badge-counter">3+</span>#}
{# </a>#}
{# <!-- Dropdown - Alerts -->#}
{# <div class="dropdown-list dropdown-menu dropdown-menu-right shadow animated--grow-in" aria-labelledby="alertsDropdown">#}
{# <h6 class="dropdown-header">#}
{# Alerts Center#}
{# </h6>#}
{# <a class="dropdown-item d-flex align-items-center" href="#">#}
{# <div class="mr-3">#}
{# <div class="icon-circle bg-primary">#}
{# <i class="fas fa-file-alt text-white"></i>#}
{# </div>#}
{# </div>#}
{# <div>#}
{# <div class="small text-gray-500">December 12, 2019</div>#}
{# <span class="font-weight-bold">A new monthly report is ready to download!</span>#}
{# </div>#}
{# </a>#}
{# <a class="dropdown-item d-flex align-items-center" href="#">#}
{# <div class="mr-3">#}
{# <div class="icon-circle bg-success">#}
{# <i class="fas fa-donate text-white"></i>#}
{# </div>#}
{# </div>#}
{# <div>#}
{# <div class="small text-gray-500">December 7, 2019</div>#}
{# $290.29 has been deposited into your account!#}
{# </div>#}
{# </a>#}
{# <a class="dropdown-item d-flex align-items-center" href="#">#}
{# <div class="mr-3">#}
{# <div class="icon-circle bg-warning">#}
{# <i class="fas fa-exclamation-triangle text-white"></i>#}
{# </div>#}
{# </div>#}
{# <div>#}
{# <div class="small text-gray-500">December 2, 2019</div>#}
{# Spending Alert: We've noticed unusually high spending for your account.#}
{# </div>#}
{# </a>#}
{# <a class="dropdown-item text-center small text-gray-500" href="#">Show All Alerts</a>#}
{# </div>#}
{# </li>#}
{# <li class="nav-item dropdown no-arrow d-sm-none">#}
{# <a class="nav-link dropdown-toggle" href="#" id="searchDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">#}
{# <i class="fas fa-search fa-fw"></i>#}
{# </a>#}
{# <!-- Dropdown - Messages -->#}
{# <div class="dropdown-menu dropdown-menu-right p-3 shadow animated--grow-in" aria-labelledby="searchDropdown">#}
{# <form class="form-inline mr-auto w-100 navbar-search">#}
{# <div class="input-group">#}
{# <input type="text" class="form-control bg-light border-0 small" placeholder="Search for..." aria-label="Search" aria-describedby="basic-addon2">#}
{# <div class="input-group-append">#}
{# <button class="btn btn-primary" type="button">#}
{# <i class="fas fa-search fa-sm"></i>#}
{# </button>#}
{# </div>#}
{# </div>#}
{# </form>#}
{# </div>#}
{# </li>#}
{##}
<!-- Nav Item - Messages -->
{# <li class="nav-item dropdown no-arrow mx-1">#}
{# <a class="nav-link dropdown-toggle" href="#" id="messagesDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">#}
{# <i class="fas fa-envelope fa-fw"></i>#}
{# <!-- Counter - Messages -->#}
{# <span class="badge badge-danger badge-counter">7</span>#}
{# </a>#}
{# <!-- Dropdown - Messages -->#}
{# <div class="dropdown-list dropdown-menu dropdown-menu-right shadow animated--grow-in" aria-labelledby="messagesDropdown">#}
{# <h6 class="dropdown-header">#}
{# Message Center#}
{# </h6>#}
{# <a class="dropdown-item d-flex align-items-center" href="#">#}
{# <div class="dropdown-list-image mr-3">#}
{# <img class="rounded-circle" src="https://source.unsplash.com/fn_BT9fwg_E/60x60" alt="">#}
{# <div class="status-indicator bg-success"></div>#}
{# </div>#}
{# <div class="font-weight-bold">#}
{# <div class="text-truncate">Hi there! I am wondering if you can help me with a problem I've been having.</div>#}
{# <div class="small text-gray-500">Emily Fowler · 58m</div>#}
{# </div>#}
{# </a>#}
{# <a class="dropdown-item d-flex align-items-center" href="#">#}
{# <div class="dropdown-list-image mr-3">#}
{# <img class="rounded-circle" src="https://source.unsplash.com/AU4VPcFN4LE/60x60" alt="">#}
{# <div class="status-indicator"></div>#}
{# </div>#}
{# <div>#}
{# <div class="text-truncate">I have the photos that you ordered last month, how would you like them sent to you?</div>#}
{# <div class="small text-gray-500">Jae Chun · 1d</div>#}
{# </div>#}
{# </a>#}
{# <a class="dropdown-item d-flex align-items-center" href="#">#}
{# <div class="dropdown-list-image mr-3">#}
{# <img class="rounded-circle" src="https://source.unsplash.com/CS2uCrpNzJY/60x60" alt="">#}
{# <div class="status-indicator bg-warning"></div>#}
{# </div>#}
{# <div>#}
{# <div class="text-truncate">Last month's report looks great, I am very happy with the progress so far, keep up the good work!</div>#}
{# <div class="small text-gray-500">Morgan Alvarez · 2d</div>#}
{# </div>#}
{# </a>#}
{# <a class="dropdown-item d-flex align-items-center" href="#">#}
{# <div class="dropdown-list-image mr-3">#}
{# <img class="rounded-circle" src="https://source.unsplash.com/Mv9hjnEUHR4/60x60" alt="">#}
{# <div class="status-indicator bg-success"></div>#}
{# </div>#}
{# <div>#}
{# <div class="text-truncate">Am I a good boy? The reason I ask is because someone told me that people say this to all dogs, even if they aren't good...</div>#}
{# <div class="small text-gray-500">Chicken the Dog · 2w</div>#}
{# </div>#}
{# </a>#}
{# <a class="dropdown-item text-center small text-gray-500" href="#">Read More Messages</a>#}
{# </div>#}
{# </li>#}
<div class="topbar-divider d-none d-sm-block"></div>
<!-- Nav Item - User Information -->
<li class="nav-item dropdown no-arrow">
<a class="nav-link dropdown-toggle" href="#" id="userDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
<a class="nav-link dropdown-toggle" href="#" id="userDropdown" role="button"
data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
<span class="mr-2 d-none d-lg-inline text-gray-600 small">{{ request.user.username }}</span>
{% load static %}
<img class="img-profile rounded-circle" src="{% static 'FirstApp/images/user_profile.png' %}">
<img class="img-profile rounded-circle"
src="{% static 'FirstApp/images/user_profile.png' %}">
</a>
<!-- Dropdown - User Information -->
<div class="dropdown-menu dropdown-menu-right shadow animated--grow-in" aria-labelledby="userDropdown">
<a class="dropdown-item" href="#">
<i class="fas fa-user fa-sm fa-fw mr-2 text-gray-400"></i>
Profile
</a>
<a class="dropdown-item" href="#">
<i class="fas fa-cogs fa-sm fa-fw mr-2 text-gray-400"></i>
Settings
</a>
<a class="dropdown-item" href="#">
<i class="fas fa-list fa-sm fa-fw mr-2 text-gray-400"></i>
Activity Log
</a>
<div class="dropdown-divider"></div>
<div class="dropdown-menu dropdown-menu-right shadow animated--grow-in"
aria-labelledby="userDropdown">
{# <a class="dropdown-item" href="#">#}
{# <i class="fas fa-user fa-sm fa-fw mr-2 text-gray-400"></i>#}
{# Profile#}
{# </a>#}
{# <a class="dropdown-item" href="#">#}
{# <i class="fas fa-cogs fa-sm fa-fw mr-2 text-gray-400"></i>#}
{# Settings#}
{# </a>#}
{# <a class="dropdown-item" href="#">#}
{# <i class="fas fa-list fa-sm fa-fw mr-2 text-gray-400"></i>#}
{# Activity Log#}
{# </a>#}
{# <div class="dropdown-divider"></div>#}
<a class="dropdown-item" href="#" data-toggle="modal" data-target="#logoutModal">
<i class="fas fa-sign-out-alt fa-sm fa-fw mr-2 text-gray-400"></i>
Logout
......@@ -420,9 +442,9 @@
</footer>
{% endblock %}
</div>
</div>
</div>
{% block 'modal' %}
{% load static %}
<script type="text/javascript" src="{% static 'FirstApp/vendor/jquery/jquery.js' %}"></script>
<script src="{% static 'FirstApp/vendor/jquery/jquery.js' %}"></script>
......@@ -453,13 +475,19 @@
</div>
</div>
{% endblock %}
{% block 'scripts' %}
{% load static %}
<script src="https://code.jquery.com/jquery-3.4.1.slim.min.js" integrity="sha384-J6qa4849blE2+poT4WnyKhv5vZF5SrPo0iEjwBvKU7imGFAV0wwj1yYfoRSJoZ+n" crossorigin="anonymous"></script>
<script src="https://cdn.jsdelivr.net/npm/popper.js@1.16.0/dist/umd/popper.min.js" integrity="sha384-Q6E9RHvbIyZFJoft+2mJbHaEWldlvI9IOYy5n3zV9zzTtmI3UksdQRVvoxMfooAo" crossorigin="anonymous"></script>
<script src="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/js/bootstrap.min.js" integrity="sha384-wfSDF2E50Y2D1uUdj0O3uMBJnjuUD4Ih7YwaYd1iqfktj0Uod8GCExl3Og8ifwB6" crossorigin="anonymous"></script>
<script src="https://code.jquery.com/jquery-3.4.1.slim.min.js"
integrity="sha384-J6qa4849blE2+poT4WnyKhv5vZF5SrPo0iEjwBvKU7imGFAV0wwj1yYfoRSJoZ+n"
crossorigin="anonymous"></script>
<script src="https://cdn.jsdelivr.net/npm/popper.js@1.16.0/dist/umd/popper.min.js"
integrity="sha384-Q6E9RHvbIyZFJoft+2mJbHaEWldlvI9IOYy5n3zV9zzTtmI3UksdQRVvoxMfooAo"
crossorigin="anonymous"></script>
<script src="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/js/bootstrap.min.js"
integrity="sha384-wfSDF2E50Y2D1uUdj0O3uMBJnjuUD4Ih7YwaYd1iqfktj0Uod8GCExl3Og8ifwB6"
crossorigin="anonymous"></script>
<script src="{% static 'FirstApp/vendor/jquery/jquery.js' %}"></script>
<script src="{% static 'FirstApp/vendor/bootstrap/js/bootstrap.bundle.min.js' %}"></script>
......@@ -476,7 +504,7 @@
<!-- Page level custom scripts -->
<script src="{% static 'FirstApp/js/demo/chart-area-demo.js' %}"></script>
<script src="{% static 'FirstApp/js/demo/chart-pie-demo.js' %}"></script>
{% endblock %}
</body>
</html>
\ No newline at end of file
......@@ -40,6 +40,10 @@
$(document).ready(function () {
let folder = '';
{#$('#activity_loader').attr('hidden', false);#}
{#$('#emotion_loader').attr('hidden', false);#}
{#$('#gaze_loader').attr('hidden', false);#}
//select a particular subject
$('input[type=radio]').click(function () {
......@@ -134,7 +138,6 @@
.catch((error) => alert('an error occurred: ' + error));
}
});
});
......@@ -228,7 +231,6 @@
global_video_name = video_name;
//perform activity recognition
fetch('http://127.0.0.1:8000/process-lecture-activity/?lecture_video_name=' + global_video_name + '&lecture_video_id=' + global_lecture_video_id)
.then((res) => res.json())
......@@ -292,14 +294,69 @@
//hide the activity loader
$('#gaze_loader').hide();
alert('good');
}
}
//this is a test function (delete later)
/*
let interval = setInterval(() => {
{#let url = 'http://127.0.0.1:8000/get-random-number';#}
let url = 'http://127.0.0.1:8000/check-availability';
fetch(url)
.then((res) => res.json())
.then((out) => displayProcess(out))
.catch((err) => alert('error: ' + err))
}, 10000);
//this function will handle the displaying loaders and status in the workflow
function displayProcess(response) {
//if the lecture activity has completed processing
if (response.isActivityExist) {
$('#step_1').attr("class", class1);
$('#activity_loader').hide();
$('#emotion_loader').attr('hidden', false);
}
//if the lecture emotion has completed processing
if (response.isEmotionExist) {
$('#step_2').attr("class", class1);
$('#emotion_loader').hide();
$('#gaze_loader').attr('hidden', false);
}
//if the lecture gaze has completed processing
if (response.isGazeExist) {
$('#step_3').attr("class", class1);
$('#gaze_loader').hide();
}
//if all the processes are completed
if (response.isActivityExist && response.isEmotionExist && response.isGazeExist) {
var x = document.getElementById("snackbar");
x.className = "show";
setTimeout(function () {
x.className = x.className.replace("show", "");
}, 3000);
//clear the setInterval function
clearInterval(interval);
}
}
*/
});
......@@ -326,6 +383,7 @@
<!-- Page Heading -->
<div class="d-sm-flex align-items-center justify-content-between mb-4">
<h1 class="h3 mb-0 text-gray-800">Lecture Video Results</h1>
<h2><span id="time_display"></span></h2>
</div>
<!--first row -->
......@@ -344,7 +402,7 @@
<div class="card-body">
<!--loading gif -->
{% if due_lectures|length == 0 %}
<div class="text-center" id="no_subject_selected">
<span class="font-italic">No lecture is to be processed</span>
</div>
......@@ -357,6 +415,10 @@
<div class="text-center" id="no_timetable_content" hidden>
<span class="font-italic">Not included in the timetable</span>
</div>
<!-- if there are due lectures, display the table -->
{% if due_lectures %}
<!--displaying the timetable -->
<table class="table table-striped" id="timetable">
{# <caption id="timetable_caption"></caption>#}
......@@ -373,18 +435,23 @@
{% for lecture in due_lectures %}
<tr>
<td class="font-weight-bolder">{{ lecture.date }}</td>
{# <td>{{ lecture.subject }}</td>#}
<td class="font-weight-bolder">{{ lecture.subject_name }}</td>
<td class="font-weight-bolder">{{ lecture.start_time }}</td>
<td class="font-weight-bolder">{{ lecture.end_time }}</td>
<td>
<button type="button" class="btn btn-success batch_process" data-video-id="{{ lecture.video_id }}" data-video-name="{{ lecture.video_name }}" id="{{ lecture.subject }}">Process</button>
{# <span class="font-italic font-weight-bolder text-success">Processing</span>#}
<button type="button" class="btn btn-success batch_process"
data-video-id="{{ lecture.video_id }}"
data-video-name="{{ lecture.video_name }}"
id="{{ lecture.subject }}">Process
</button>
{# <span class="font-italic font-weight-bolder text-success">Processing</span>#}
</td>
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
</div>
......@@ -420,15 +487,16 @@
style="font-size: 40px; padding-top: 10px; color: white"></i></a>
<div class="smpl-step-info text-center">
<span class="font-italic font-weight-bolder">Perform Activity Recognition</span>
<br/>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}"
alt="Loader" class="mt-2" id="activity_loader" hidden>
</div>
</div>
<!-- end of step 1 -->
<!-- step 2 -->
<div class="col-4 smpl-step-step disabled" id="step_2">
<div class="text-center smpl-step-num font-weight-bolder">Step 3</div>
<div class="text-center smpl-step-num font-weight-bolder">Step 2</div>
<div class="progress">
<div class="progress-bar"></div>
</div>
......@@ -436,8 +504,9 @@
style="font-size: 50px; padding-top: 10px; color: white"></i></a>
<div class="smpl-step-info text-center">
<span class="font-italic font-weight-bolder">Study Student Emotions</span>
<br/>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}"
alt="Loader" class="mt-2" id="emotion_loader" hidden>
</div>
</div>
<!-- end of step 2 -->
......@@ -454,18 +523,17 @@
</a>
<div class="smpl-step-info text-center">
<span class="font-italic font-weight-bolder">See students' Gazes</span>
<br/>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}"
alt="Loader" class="mt-2" id="gaze_loader" hidden>
</div>
</div>
<!-- end of step 3 -->
</div>
<!-- end of progress row -->
{# <!-- simulation button row -->#}
{# <div class="row">#}
{# <button type="button" class="btn btn-outline-danger" id="simulate_process">Simulate</button>#}
{# </div>#}
</div>
<!-- end of container -->
......@@ -478,6 +546,10 @@
</div>
<!-- end of progress row -->
<!-- snackbar -->
<div id="snackbar">The lecture is completely processed..</div>
<!-- end of snackbar -->
</div>
{% endblock %}
<!-- End of container-fluid -->
......
......@@ -151,21 +151,30 @@ urlpatterns = [
# retrieves lecture activity summary
url(r'^get-lecture-activity-summary/$', api.GetLectureActivitySummary.as_view()),
# retrieves lecture emotion summary
url(r'^get-lecture-emotion-summary/$', api.GetLectureEmotionSummary.as_view()),
# retrieves lecture gaze estimation summary
url(r'^get-lecture-gaze-summary/$', api.GetLectureGazeSummary.as_view()),
# retrieves student activity correlations with lecturer activity
url(r'^get-activity-correlations/$', api.GetLectureActivityCorrelations.as_view()),
# retrieves student emotion correlations with lecturer activity
url(r'^get-emotion-correlations/$', api.GetLectureEmotionCorrelations.as_view()),
# retrieves student gaze estimation correlations with lecturer activity
url(r'^get-gaze-correlations/$', api.GetLectureGazeCorrelations.as_view()),
# retrieves student activity-emotion correlations
url(r'^get-student-activity-emotion-correlations/$', api.GetStudentActivityEmotionCorrelations.as_view()),
# retrieves student activity-gaze correlations
url(r'^get-student-activity-gaze-correlations/$', api.GetStudentActivityGazeCorrelations.as_view()),
# retrieves student emotion-gaze correlations
url(r'^get-student-emotion-gaze-correlations/$', api.GetStudentEmotionGazeCorrelations.as_view()),
##### OTHERS #####
......@@ -173,6 +182,19 @@ urlpatterns = [
url(r'^get-lecture-recorded-video-name/$', api.GetLecturerRecordedVideo.as_view()),
##### BATCH PROCESS #####
# perform batch process for student behavior
url(r'^student-behavior-batch-process/$', api.BatchProcess.as_view()),
# check availability for student behavior components
url(r'^check-availability/$', api.CheckStudentBehaviorAvailability.as_view()),
# perform random task (delete later)
url(r'^get-random-number/$', api.TestRandom.as_view()),
# routers
# path('', include(router.urls)),
path('api-auth/', include('rest_framework.urls', namespace='rest_framework'))
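The new `check-availability` route is what the commented-out `displayProcess` poller in the results template reads from every 10 seconds. A minimal sketch of a view that could back it follows — only `LectureActivity` and its `lecture_video_id_id` filter appear elsewhere in this commit, so the emotion/gaze model names, the import path, and the `lecture_video_id` query parameter are assumptions:

from rest_framework.views import APIView
from rest_framework.response import Response

# model names and import path are assumptions; only LectureActivity is
# referenced elsewhere in this commit
from FirstApp.models import (LectureActivity, LectureEmotionReport,
                             LectureGazeEstimation)


class CheckStudentBehaviorAvailability(APIView):

    def get(self, request):
        video_id = request.query_params.get('lecture_video_id')

        # the front-end poller expects exactly these three boolean keys
        return Response({
            "isActivityExist": LectureActivity.objects.filter(
                lecture_video_id_id=video_id).exists(),
            "isEmotionExist": LectureEmotionReport.objects.filter(
                lecture_video_id_id=video_id).exists(),
            "isGazeExist": LectureGazeEstimation.objects.filter(
                lecture_video_id_id=video_id).exists(),
        })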
......
......@@ -189,6 +189,8 @@ def video_result(request):
data = serializer.data
print('data length: ', len(data))
# iterate through the existing lecture videos for the lecturer
for video in data:
video_id = video['id']
......@@ -197,6 +199,8 @@ def video_result(request):
# check whether the video id exist in the Activity Recognition table
lec_activity = LectureActivity.objects.filter(lecture_video_id_id=video_id).exists()
print('lecture activity existence: ', lec_activity)
if lec_activity == False:
to_do_lecture_list.append({
"lecturer": lecturer,
......@@ -227,11 +231,15 @@ def video_result(request):
# loop through the to-do lecture list
for item in to_do_lecture_list:
isDate = item['date'] == str(day_timetable['date'])
print('item date: ', item['date'])
print('timetable date: ', str(day_timetable['date']))
# isLecturer = item['lecturer'] ==
# check for the particular lecture on the day
if isDate:
slots = day_timetable['time_slots']
# loop through the slots
for slot in slots:
# check for the lecturer and subject
......@@ -260,6 +268,8 @@ def video_result(request):
print('what is wrong?: ', exc)
return redirect('/500')
print('due lectures: ', due_lecture_list)
return render(request, "FirstApp/video_results.html",
{"lecturer": lecturer, "due_lectures": due_lecture_list})
......
......@@ -15,7 +15,6 @@ import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
......@@ -28,7 +27,6 @@ DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
......@@ -36,6 +34,7 @@ INSTALLED_APPS = [
'AttendanceApp.apps.AttendanceappConfig',
'MonitorLecturerApp.apps.MonitorlecturerappConfig',
'LectureSummarizingApp.apps.LectureSummarizingAppConfig',
'corsheaders',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
......@@ -48,6 +47,7 @@ INSTALLED_APPS = [
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
......@@ -59,6 +59,9 @@ MIDDLEWARE = [
ROOT_URLCONF = 'integrated_slpes.urls'
# adding the CORS attributes
CORS_ALLOW_ALL_ORIGINS = True
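`CORS_ALLOW_ALL_ORIGINS = True` accepts cross-origin requests from any site, which is convenient while the front end is served from a different port during development but too broad for deployment. django-cors-headers also takes an explicit allow-list; a sketch with illustrative origins:

# deployment-friendly alternative: allow only the origins that actually
# call the API (the origins listed here are illustrative)
CORS_ALLOW_ALL_ORIGINS = False
CORS_ALLOWED_ORIGINS = [
    "http://127.0.0.1:8000",
    "http://localhost:8000",
]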
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
......@@ -78,7 +81,6 @@ TEMPLATES = [
WSGI_APPLICATION = 'integrated_slpes.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
......@@ -93,7 +95,6 @@ DATABASES = {
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
......@@ -112,7 +113,6 @@ AUTH_PASSWORD_VALIDATORS = [
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
......@@ -126,7 +126,6 @@ USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
......@@ -137,7 +136,6 @@ STATICFILES_DIRS = [
]
STATIC_ROOT = os.path.join(BASE_DIR, 'assets')
# media files
MEDIA_URL = '/media/'
......@@ -145,7 +143,9 @@ MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# REST FRAMEWORK
REST_FRAMEWORK = {
# 'DEFAULT_PERMISSION_CLASSES': [
# 'rest_framework.permissions.IsAuthenticated',
# ]
'DEFAULT_AUTHENTICATION_CLASSES': [],
'DEFAULT_PERMISSION_CLASSES': []
}
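Emptying `DEFAULT_AUTHENTICATION_CLASSES` and `DEFAULT_PERMISSION_CLASSES` project-wide leaves every endpoint open. Views that still need protection can opt back in with the standard per-view DRF attributes — a minimal sketch on a made-up view, not one from this project:

from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView


class ProtectedExample(APIView):
    # per-view settings override the (now empty) project-wide defaults
    authentication_classes = [SessionAuthentication]
    permission_classes = [IsAuthenticated]

    def get(self, request):
        return Response({"user": request.user.username})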