Commit a577e3e7 authored by SohanDanushka's avatar SohanDanushka

Merge branch 'QA_RELEASE' into db_and_monitoring_IT17097284

parents 71039e2f ea733587
@@ -10,6 +10,7 @@ from rest_framework.views import APIView
from rest_framework.parsers import MultiPartParser, FormParser
from . import record
from . import test as t
from rest_framework.views import *
@@ -171,3 +172,29 @@ class InitiateLecture(APIView):
return Response({
"response": "success"
})
class stopRecording(APIView):
def get(self, request):
t.isStop = 1
return Response({
"response": "stopped"
})
def post(self, request):
pass
# test method (delete later)
class TestAPI(APIView):
def get(self, request):
t.isStop = 0
param = request.query_params.get('param')
# t.test()
t.IPWebcamTest()
return Response({
"response": "started"
})
def post(self, request):
pass
\ No newline at end of file
@@ -23,15 +23,12 @@ maskNet = load_model(os.path.join(settings.BASE_DIR,'face_detector/mask_detector
class IPWebCam(object):
def __init__(self):
self.url = "http://192.168.8.100:8080/shot.jpg"
self.url = "http://192.168.8.103:8080/shot.jpg"
self._count = 0
def __del__(self):
cv2.destroyAllWindows()
def get_frame(self):
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))
imgResp = urllib.request.urlopen(self.url)
imgNp = np.array(bytearray(imgResp.read()),dtype=np.uint8)
img= cv2.imdecode(imgNp,-1)
@@ -46,9 +43,6 @@ class IPWebCam(object):
frame_flip = cv2.flip(resize,1)
ret, jpeg = cv2.imencode('.jpg', frame_flip)
# capture frame and save on a given time in order to run the face recognition
sleep(3); cv2.imwrite("%d.jpg" % self._count, img)
self._count =+1
return jpeg.tobytes()
...
@@ -43,6 +43,50 @@ function toggleLectureLive() {
y.style.display = "none";
}
}
var timer = false;
//this is a test function
function testAPI() {
timer = true
startTimer()
let param = 'sachith';
//call the API
fetch('http://127.0.0.1:8000/attendance/test-api/?param=' + param)
.then((res) => res.json())
.then((out) => {})
.catch((err) => alert('error: ' + err));
}
var time = 'time';
function f() {
let param = 'sachith';
//call the API
fetch('http://127.0.0.1:8000/attendance/stop-api/?param=' + param)
.then((res) => res.json())
.then((out) => {
timer = false
startTimer();
})
.catch((err) => alert('error: ' + err));
}
function startTimer() {
var min = 0;
var seconds = 0;
if (timer) {
var sec = 0;
function pad ( val ) { return val > 9 ? val : "0" + val; }
setInterval( function(){
sec++;
min = pad(parseInt(sec/60,10));
seconds = pad(sec%60);
document.getElementById("seconds").innerHTML=seconds;
document.getElementById("minutes").innerHTML=min;
}, 1000);
} else {
document.getElementById("secondsStop").innerHTML=seconds;
document.getElementById("minutesStop").innerHTML=min;
}
}
</script>
{% endblock %}
@@ -60,13 +104,21 @@ function toggleLectureLive() {
<div class="card-body">
<button type="button" class="btn btn-success" id="initiate_btn" onclick="toggleLectureLive()">Show Live Stream</button>
{# <button type="button" class="btn btn-success" id="test_btn" onclick="testAPI()">Test</button>#}
</div>
<span id="minutes"></span>:<span id="seconds"></span>
<span id="minutesStop"></span>:<span id="secondsStop"></span>
<div style="vertical-align: middle; border-style: none; background-color: #055270; height: 500px; width: 100%">
<div class="row justify-content-center">
<img id="liveStreamLecture" style="display: none; height: inherit; margin-bottom: -25px;" src="{% url 'webcam_feed' %}">
</div>
<div class="row justify-content-center">
<button style="display: none; width: 70px; height: 70px;" id="liveStreamLectureStartButton" class="btn btn-warning btn-circle"><i class="fas fa-video"></i></button>
<div class="col">
<button style="display: none; width: 70px; height: 70px;" id="liveStreamLectureStartButton" class="btn btn-warning btn-circle" onclick="testAPI()"><i class="fas fa-video"></i></button>
</div>
<div class="col">
<button style="display: block; width: 70px; height: 70px;" id="liveStreamLectureStartButton" class="btn btn-warning btn-circle" onclick="f()"><i class="fas fa-square"></i></button>
</div>
</div>
</div>
</div>
...
import urllib3
import urllib.request as req
import cv2
import numpy as np
import time
isStop = 0
def IPWebcamTest():
# Replace the URL with your own IPwebcam shot.jpg IP:port
# url = 'http://192.168.2.35:8080/shot.jpg'
url = 'http://192.168.8.103:8080/shot.jpg'
# url = 'http://192.168.1.11:8080/startvideo?force=1&tag=rec'
# url = 'http://192.168.1.11:8080/stopvideo?force=1'
size = (600, 600)
vid_cod = cv2.VideoWriter_fourcc(*'XVID')
# vid_cod = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
# output = cv2.VideoWriter("cam_video.avi", vid_cod, 20.0, (640, 480))
# output = cv2.VideoWriter("cam_video.mp4", vid_cod, 20.0, size)
output = cv2.VideoWriter("cam_video.mp4", vid_cod, 10.0, size)
no_of_frames = 0
while True:
# Use urllib to get the image from the IP camera
imgResp = req.urlopen(url)
# imgResp = urllib3.respon
# NumPy to convert into an array
imgNp = np.array(bytearray(imgResp.read()), dtype=np.uint8)
# Finally decode the array to OpenCV usable format ;)
img = cv2.imdecode(imgNp, -1)
# resize the image
img = cv2.resize(img, (600, 600))
# put the image on screen
# cv2.imshow('IPWebcam', img)
# write to the output writer
output.write(img)
# To give the processor some less stress
# time.sleep(0.1)
# time.sleep(1)
no_of_frames += 1
if isStop == 1:
break
# imgResp.release()
# cv2.destroyAllWindows()
print('no of frames: ', no_of_frames)
\ No newline at end of file
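# Illustrative sketch (not part of this commit): because isStop is a module-level flag,
# the blocking capture loop above can be pushed onto a background thread and stopped by
# flipping the flag from another request handler. The two helpers below are assumptions
# added for illustration only.
import threading

def start_recording_in_background():
    # run IPWebcamTest() without blocking the caller
    recorder = threading.Thread(target=IPWebcamTest, daemon=True)
    recorder.start()
    return recorder

def stop_background_recording(recorder, timeout=5):
    # signal the loop to exit, then wait for the video writer to finish
    global isStop
    isStop = 1
    recorder.join(timeout=timeout)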
@@ -2,7 +2,7 @@ from django.urls import path
from .api import student_list, student_detail, subject_list, subject_detail, attendance_list, StudentAPIView, \
StudentDetails
from django.conf.urls import url
from .api import FileView, InitiateLecture
from .api import *
from . import views
urlpatterns = [
@@ -19,5 +19,10 @@ urlpatterns = [
url(r'^upload/$', FileView.as_view(), name='file-upload'),
path('webcam_feed', views.webcam_feed, name='webcam_feed'),
# this url will initiate the lecture
url(r'^process-initiate-lecture/$', InitiateLecture.as_view())
url(r'^process-initiate-lecture/$', InitiateLecture.as_view()),
# this url will be used for testing
url(r'^test-api/$', TestAPI.as_view()),
url(r'^stop-api/$', stopRecording.as_view())
]
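# Illustrative usage sketch (not part of this commit): with the two test routes above,
# the recording flow can be driven over HTTP. The host/port mirror the fetch() calls in
# Initiate_lecture.html; the `requests` dependency and the helper below are assumptions.
import threading
import time

import requests

BASE = 'http://127.0.0.1:8000/attendance'

def start_recording():
    # TestAPI sets isStop = 0 and then runs IPWebcamTest(), so this request only
    # returns once the stop endpoint has been hit from somewhere else
    requests.get(BASE + '/test-api/', params={'param': 'sachith'})

threading.Thread(target=start_recording, daemon=True).start()
time.sleep(10)  # let the recorder capture some frames
requests.get(BASE + '/stop-api/', params={'param': 'sachith'}, timeout=5)  # stopRecording sets isStop = 1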
from django.shortcuts import render
from django.http.response import StreamingHttpResponse
from AttendanceApp.camera import IPWebCam
from FirstApp.MongoModels import LectureVideo
from FirstApp.serializers import LectureVideoSerializer
def initiate_lecture(request):
lecture_video = LectureVideo.objects.all()
lecture_video_ser = LectureVideoSerializer(lecture_video, many=True)
print('lecture video data: ', lecture_video_ser.data)
return render(request, "AttendanceApp/Initiate_lecture.html")
def gen(camera):
while True:
frame = camera.get_frame()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
def webcam_feed(request):
return StreamingHttpResponse(gen(IPWebCam()),
content_type='multipart/x-mixed-replace; boundary=frame')
\ No newline at end of file
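# Illustrative sketch (not part of this diff): OpenCV's VideoCapture can read the
# multipart/x-mixed-replace stream produced by webcam_feed() when it is built with
# FFmpeg support, which gives a quick way to eyeball the feed locally. The URL below
# is an assumption based on the route registered in AttendanceApp/urls.py.
import cv2

def preview_stream(url='http://127.0.0.1:8000/attendance/webcam_feed', max_frames=200):
    cap = cv2.VideoCapture(url)
    shown = 0
    while shown < max_frames:
        ret, frame = cap.read()
        if not ret:
            break
        cv2.imshow('webcam_feed preview', frame)
        shown += 1
        # stop early when 'q' is pressed
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()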
@@ -14,4 +14,10 @@ admin.site.register(LectureVideo)
admin.site.register(LectureActivity)
admin.site.register(LectureGazeEstimation)
admin.site.register(Admin)
admin.site.register(AdminCredentialDetails)
admin.site.register(LectureActivityFrameRecognitions)
admin.site.register(LectureActivityFrameGroupings)
admin.site.register(LectureEmotionFrameRecognitions)
admin.site.register(LectureEmotionFrameGroupings)
admin.site.register(LectureGazeFrameRecognitions)
admin.site.register(LectureGazeFrameGroupings)
\ No newline at end of file
@@ -11,11 +11,10 @@ each method will return an HttpResponse that allows its data to be rendered into
arbitrary media types.
"""
from random import Random
from MonitorLecturerApp.models import LectureRecordedVideo, LecturerVideoMetaData
from MonitorLecturerApp.serializers import LectureRecordedVideoSerializer, LecturerVideoMetaDataSerializer
from .MongoModels import *
from rest_framework.views import *
from .logic import activity_recognition as ar
from . import emotion_detector as ed
@@ -23,7 +22,9 @@ from .logic import id_generator as ig
from .logic import pdf_file_generator as pdf
from .logic import head_gaze_estimation as hge
from .logic import video_extraction as ve
from .logic import student_behavior_process as sbp
from .serializers import *
from braces.views import CsrfExemptMixin
import datetime
@@ -139,13 +140,45 @@ class LectureVideoViewSet(APIView):
return Response(serializer.data)
def post(self, request):
# get the request data
# data = request.data
#
# # retrieve the last lecture video details
# last_lec_video = LectureVideo.objects.order_by('lecture_video_id').last()
# # create the next lecture video id
# new_lecture_video_id = ig.generate_new_id(last_lec_video.lecture_video_id)
#
# # create the new lecture video
# LectureVideo(
# lecture_video_id=new_lecture_video_id,
# lecturer_id=data['lecturer_id'],
# subject_id=data['subject_id'],
# video_name=data['video_name'],
# video_length=data['video_length'],
# date=data['date']
# ).save()
#
# # return the successful response
# return Response({
# "response": "Successfully created",
#
# }, status=status.HTTP_201_CREATED)
# serializer = LectureVideoSerializer(data=request.data, many=True)
serializer = LectureVideoSerializer(data=request.data)
# serializer.create(validated_data=request.data)
if serializer.is_valid(raise_exception=ValueError):
print('valid')
serializer.create(validated_data=request.data)
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.error_messages,
status=status.HTTP_400_BAD_REQUEST)
return Response(serializer.data, status=status.HTTP_201_CREATED)
# return Response(serializer.error_messages,
# status=status.HTTP_400_BAD_REQUEST)
# this API will retrieve a lecture video details
@@ -374,17 +407,19 @@ class LectureEmotionProcess(APIView):
def get(self, request):
video_name = request.query_params.get('lecture_video_name')
video_id = request.query_params.get('lecture_video_id')
# video_id = request.query_params.get('lecture_video_id')
int_video_id = int(request.query_params.get('lecture_video_id'))
percentages = ed.detect_emotion(video_name)
percentages.calcPercentages()
self.save_emotion_report(video_id, percentages)
self.save_emotion_report(int_video_id, percentages)
return Response({"response": True})
def post(self, request):
pass
def save_emotion_report(self, lec_video_id, percentages):
lec_video = LectureVideo.objects.filter(lecture_video_id=lec_video_id)
# lec_video = LectureVideo.objects.filter(lecture_video_id=lec_video_id)
lec_video = LectureVideo.objects.filter(id=lec_video_id)
lec_video_serializer = LectureVideoSerializer(lec_video, many=True)
lec_video_data = lec_video_serializer.data[0]
last_lec_emotion = LectureEmotionReport.objects.order_by('lecture_emotion_id').last()
@@ -499,7 +534,8 @@ class ProcessLectureGazeEstimation(APIView):
pass
def estimate_gaze(self, lec_video_id, percentages):
lec_video = LectureVideo.objects.filter(lecture_video_id=lec_video_id)
# lec_video = LectureVideo.objects.filter(lecture_video_id=lec_video_id)
lec_video = LectureVideo.objects.filter(id=lec_video_id)
last_lec_gaze = LectureGazeEstimation.objects.order_by('lecture_gaze_id').last()
lec_video_serializer = LectureVideoSerializer(lec_video, many=True)
lec_video_data = lec_video_serializer.data[0]
@@ -757,6 +793,9 @@ class GetLectureActivitySummary(APIView):
def get(self, request):
video_name = request.query_params.get('video_name')
phone_perct = request.query_params.get('phone_perct')
listen_perct = request.query_params.get('listen_perct')
note_perct = request.query_params.get('note_perct')
# checking the existence of lecture activity frame grouping records in the db
isExist = LectureActivityFrameGroupings.objects.filter(lecture_activity_id__lecture_video_id__video_name=video_name).exists()
@@ -792,10 +831,14 @@ class GetLectureActivitySummary(APIView):
class_labels = ['phone_perct', 'listen_perct', 'note_perct']
# get the comments list
comments = sbp.generate_student_behavior_comments("Activity", phone_perct=phone_perct, listen_perct=listen_perct, note_perct=note_perct)
return Response({
"frame_landmarks": frame_landmarks,
"frame_group_percentages": frame_group_percentages,
"activity_labels": class_labels
"activity_labels": class_labels,
"comments": comments
})
# else:
@@ -920,65 +963,65 @@ class GetLectureEmotionSummary(APIView):
"emotion_labels": class_labels
})
else:

frame_landmarks = []

# retrieve frame landmarks from db
lec_video_frame_landmarks = LectureVideoFrameLandmarks.objects.filter(
lecture_video_id__video_name=video_name)
lec_video_frame_landmarks_ser = LectureVideoFrameLandmarksSerializer(lec_video_frame_landmarks, many=True)
lec_video_frame_landmarks_data = lec_video_frame_landmarks_ser.data[0]

retrieved_frame_landmarks = lec_video_frame_landmarks_data["frame_landmarks"]

# creating a new list to display in the frontend
for landmark in retrieved_frame_landmarks:
frame_landmarks.append(int(landmark['landmark']))

l, frame_group_dict = ve.getFrameLandmarks(video_name, "Emotion")
frame_group_percentages, emotion_labels = ed.emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict)

# save the frame group details into db (temp method)

last_lec_emotion_frame_grouping = LectureEmotionFrameGroupings.objects.order_by('lecture_emotion_frame_groupings_id').last()
new_lecture_emotion_frame_grouping_id = "LEFG00001" if (last_lec_emotion_frame_grouping is None) else \
ig.generate_new_id(last_lec_emotion_frame_grouping.lecture_emotion_frame_groupings_id)

# retrieve the lecture emotion id
lec_emotion = LectureEmotionReport.objects.filter(lecture_video_id__video_name=video_name)
lec_emotion_ser = LectureEmotionSerializer(lec_emotion, many=True)
lec_emotion_id = lec_emotion_ser.data[0]['id']

# create the frame group details
frame_group_details = []

for key in frame_group_percentages.keys():
# create an object of type 'LectureActivityFrameGroupDetails'
lec_emotion_frame_group_details = LectureEmotionFrameGroupDetails()
lec_emotion_frame_group_details.frame_group = key
lec_emotion_frame_group_details.frame_group_percentages = frame_group_percentages[key]

frame_group_details.append(lec_emotion_frame_group_details)

new_lec_emotion_frame_groupings = LectureEmotionFrameGroupings()
new_lec_emotion_frame_groupings.lecture_emotion_frame_groupings_id = new_lecture_emotion_frame_grouping_id
new_lec_emotion_frame_groupings.lecture_emotion_id_id = lec_emotion_id
new_lec_emotion_frame_groupings.frame_group_details = frame_group_details

# save
new_lec_emotion_frame_groupings.save()

return Response({
"frame_landmarks": frame_landmarks,
"frame_group_percentages": frame_group_percentages,
"emotion_labels": emotion_labels
})
# this API will retrieve lecture gaze summary
@@ -1163,7 +1206,6 @@ class GetLectureActivityCorrelations(APIView):
activity_correlations = ar.get_activity_correlations(individual_lec_activities, lec_recorded_activity_data)
print('activity correlations: ', activity_correlations)
return Response({
"correlations": activity_correlations
@@ -1269,3 +1311,257 @@ class GetLectureGazeCorrelations(APIView):
return Response({
"correlations": gaze_correlations
})
# this class will handle the student activity-emotion correlations
class GetStudentActivityEmotionCorrelations(APIView):
def get(self, request):
# get the day option
option = request.query_params.get('option')
# get the lecturer id
lecturer = request.query_params.get('lecturer')
int_option = int(option)
# initialize the student behavior count
student_behavior_count = 0
current_date = datetime.datetime.now().date()
option_date = datetime.timedelta(days=int_option)
# get the actual date
previous_date = current_date - option_date
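# e.g. option = 7 gives a seven-day timedelta, so previous_date is one week before today
# and the filters below keep only lectures recorded within that window (example dates assumed)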
# initialize the lists
individual_lec_activities = []
individual_lec_emotions = []
activity_emotion_correlations = []
# retrieving lecture activities
lec_activity = LectureActivity.objects.filter(
lecture_video_id__date__gte=previous_date,
lecture_video_id__date__lte=current_date,
lecture_video_id__lecturer=lecturer
)
# retrieving lecture emotions
lec_emotion = LectureEmotionReport.objects.filter(
lecture_video_id__date__gte=previous_date,
lecture_video_id__date__lte=current_date,
lecture_video_id__lecturer=lecturer
)
# if there are lecture activities
if len(lec_activity) > 0:
student_behavior_count += 1
activity_serializer = LectureActivitySerializer(lec_activity, many=True)
activity_data = activity_serializer.data
_, individual_lec_activities, _ = ar.get_student_activity_summary_for_period(activity_data)
# if there are lecture emotions
if len(lec_emotion) > 0:
student_behavior_count += 1
emotion_serializer = LectureEmotionSerializer(lec_emotion, many=True)
emotion_data = emotion_serializer.data
_, individual_lec_emotions, _ = ed.get_student_emotion_summary_for_period(emotion_data)
# if both student activity and emotion data are available
if student_behavior_count == 2:
# find the correlations between student activity and emotion
activity_emotion_correlations = sbp.calculate_student_activity_emotion_correlations(individual_lec_activities, individual_lec_emotions)
return Response({
"correlations": activity_emotion_correlations
})
# this class will handle the student activity-gaze correlations
class GetStudentActivityGazeCorrelations(APIView):
def get(self, request):
# get the day option
option = request.query_params.get('option')
# get the lecturer id
lecturer = request.query_params.get('lecturer')
int_option = int(option)
# initialize the student behavior count
student_behavior_count = 0
current_date = datetime.datetime.now().date()
option_date = datetime.timedelta(days=int_option)
# get the actual date
previous_date = current_date - option_date
# initialize the lists
individual_lec_activities = []
individual_lec_gaze = []
activity_gaze_correlations = []
# retrieving lecture gaze estimations
lec_gaze = LectureGazeEstimation.objects.filter(
lecture_video_id__date__gte=previous_date,
lecture_video_id__date__lte=current_date,
lecture_video_id__lecturer=lecturer
)
# retrieving lecture activities
lec_activity = LectureActivity.objects.filter(
lecture_video_id__date__gte=previous_date,
lecture_video_id__date__lte=current_date,
lecture_video_id__lecturer=lecturer
)
# if there are lecture activities
if len(lec_activity) > 0:
student_behavior_count += 1
activity_serializer = LectureActivitySerializer(lec_activity, many=True)
activity_data = activity_serializer.data
_, individual_lec_activities, _ = ar.get_student_activity_summary_for_period(activity_data)
# if there are gaze estimations
if len(lec_gaze) > 0:
student_behavior_count += 1
gaze_serializer = LectureGazeEstimationSerializer(lec_gaze, many=True)
gaze_data = gaze_serializer.data
_, individual_lec_gaze, _ = hge.get_student_gaze_estimation_summary_for_period(gaze_data)
# if both student activity and gaze data are available
if student_behavior_count == 2:
# find the correlations between student activity and gaze estimations
activity_gaze_correlations = sbp.calculate_student_activity_gaze_correlations(individual_lec_activities, individual_lec_gaze)
return Response({
"correlations": activity_gaze_correlations
})
# this class will handle the student emotion-gaze correlations
class GetStudentEmotionGazeCorrelations(APIView):
def get(self, request):
# get the day option
option = request.query_params.get('option')
# get the lecturer id
lecturer = request.query_params.get('lecturer')
int_option = int(option)
# initialize the student behavior count
student_behavior_count = 0
current_date = datetime.datetime.now().date()
option_date = datetime.timedelta(days=int_option)
# get the actual date
previous_date = current_date - option_date
# initialize the lists
individual_lec_emotions = []
individual_lec_gaze = []
emotion_gaze_correlations = []
# retrieving lecture gaze estimations
lec_gaze = LectureGazeEstimation.objects.filter(
lecture_video_id__date__gte=previous_date,
lecture_video_id__date__lte=current_date,
lecture_video_id__lecturer=lecturer
)
# retrieving lecture emotions
lec_emotion = LectureEmotionReport.objects.filter(
lecture_video_id__date__gte=previous_date,
lecture_video_id__date__lte=current_date,
lecture_video_id__lecturer=lecturer
)
# if there are lecture emotions
if len(lec_emotion) > 0:
student_behavior_count += 1
emotion_serializer = LectureEmotionSerializer(lec_emotion, many=True)
emotion_data = emotion_serializer.data
_, individual_lec_emotions, _ = ed.get_student_emotion_summary_for_period(emotion_data)
# if there are gaze estimations
if len(lec_gaze) > 0:
student_behavior_count += 1
gaze_serializer = LectureGazeEstimationSerializer(lec_gaze, many=True)
gaze_data = gaze_serializer.data
_, individual_lec_gaze, _ = hge.get_student_gaze_estimation_summary_for_period(gaze_data)
# if both student emotion and gaze data are available
if student_behavior_count == 2:
# find the correlations between student emotion and gaze estimations
emotion_gaze_correlations = sbp.calculate_student_emotion_gaze_correlations(individual_lec_emotions, individual_lec_gaze)
return Response({
"correlations": emotion_gaze_correlations
})
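# Illustrative sketch (not part of this commit): the sbp.calculate_* helpers used above are
# not included in this diff. One plausible shape, in line with how get_emotion_correlations
# builds a pandas DataFrame of per-lecture percentages, is a plain cross-correlation of the
# two behaviour summaries. The field names and the helper name below are assumptions.
import pandas as pd

def correlate_behaviour_summaries(activity_summaries, emotion_summaries):
    # keep only lectures present in both summaries so every column has the same length
    n = min(len(activity_summaries), len(emotion_summaries))
    df = pd.DataFrame({
        'phone_perct': [int(a['phone_perct']) for a in activity_summaries[:n]],
        'listening_perct': [int(a['listening_perct']) for a in activity_summaries[:n]],
        'happy_perct': [int(e['happy_perct']) for e in emotion_summaries[:n]],
        'neutral_perct': [int(e['neutral_perct']) for e in emotion_summaries[:n]],
    })
    # pairwise Pearson correlations between the activity and emotion columns
    return df.corr()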
##### BATCH PROCESS SECTION #####
# perform the student behavior analysis as a batch process
class BatchProcess(APIView):
def get(self, request):
video_name = request.query_params.get('video_name')
video_id = request.query_params.get('video_id')
return Response({
"response": True
})
# this API will check whether the lecture activity, emotion and gaze frame groupings exist
class CheckStudentBehaviorAvailability(APIView):
def get(self, request):
video_name = request.query_params.get('video_name')
#
# isActivityExist = LectureActivityFrameGroupings.objects.filter(
# lecture_activity_id__lecture_video_id__video_name=video_name).exists()
#
# isEmotionExist = LectureEmotionFrameGroupings.objects.filter(
# lecture_emotion_id__lecture_video_id__video_name=video_name).exists()
#
# isGazeExist = LectureGazeFrameGroupings.objects.filter(
# lecture_gaze_id__lecture_video_id__video_name=video_name).exists()
isActivityExist = bool(Random().randint(0,2))
isEmotionExist = bool(Random().randint(0,2))
isGazeExist = bool(Random().randint(0,2))
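# note: Random().randint(0, 2) is inclusive of 2, so each placeholder flag above is True
# roughly two times out of three; randint(0, 1) would give an even coin flip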
return Response({
"isActivityExist": isActivityExist,
"isEmotionExist": isEmotionExist,
"isGazeExist": isGazeExist
})
# this API will perform some random task (delete later)
class TestRandom(APIView):
def get(self, request):
random = Random().randint(0, 100)
return Response({
"response": random
})
@@ -52,6 +52,8 @@ def emotion_recognition(classifier, face_classifier, image):
roi_gray = gray[y:y + h, x:x + w]
roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
# rect,face,image = face_detector(frame)
# draw a rectangle
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
if np.sum([roi_gray]) != 0:
roi = roi_gray.astype('float') / 255.0
@@ -63,6 +65,9 @@ def emotion_recognition(classifier, face_classifier, image):
preds = classifier.predict(roi)[0]
label = class_labels[preds.argmax()]
# put the emotion label
cv2.putText(image, label, (x, y), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 255, 0), 3)
return label
@@ -79,6 +84,7 @@ def detect_emotion(video):
face_classifier = cv2.CascadeClassifier(os.path.join(BASE_DIR, 'FirstApp\classifiers\haarcascade_frontalface_default.xml'))
classifier_path = os.path.join(BASE_DIR, 'FirstApp\classifiers\Emotion_little_vgg.h5')
classifier = load_model(classifier_path)
EMOTION_DIR = os.path.join(BASE_DIR, "static\\FirstApp\\emotion")
meta_data = VideoMeta()
class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
@@ -99,6 +105,20 @@ def detect_emotion(video):
# for testing purposes
print('starting the emotion recognition process')
# get width and height of the video frames
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
# get the video frame size
size = (frame_width, frame_height)
# this is the annotated video path
ANNOTATED_VIDEO_PATH = os.path.join(EMOTION_DIR, video)
# initializing the video writer
vid_cod = cv2.VideoWriter_fourcc(*'XVID')
output = cv2.VideoWriter(ANNOTATED_VIDEO_PATH, vid_cod, 30.0, size)
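# note: cv2.VideoWriter expects every frame passed to write() to match `size`; mismatched
# frames are typically dropped without an error, which is why the frames below are written
# at their native (frame_width, frame_height) resolution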
while (count_frames < frame_count):
# Grab a single frame of video
ret, frame = cap.read()
@@ -135,6 +155,9 @@ def detect_emotion(video):
# for testing purposes
print('emotion frame count: ', count_frames)
# write the video frame to the video writer
output.write(frame)
count_frames += 1
# setting up the counted values
@@ -146,8 +169,13 @@ def detect_emotion(video):
meta_data.surprise_count = count_surprise
cap.release()
output.release()
cv2.destroyAllWindows()
# after saving the video, save the changes to static content
p = os.popen("python manage.py collectstatic", "w")
p.write("yes")
# for testing purposes
print('ending the emotion recognition process')
@@ -198,6 +226,8 @@ def get_frame_emotion_recognition(video_name):
# for testing purposes
print('starting the emotion frame recognition process')
# looping through the frames
while (frame_count < no_of_frames):
@@ -216,18 +246,19 @@ def get_frame_emotion_recognition(video_name):
surprise_count = 0
# get the detections
detections = ar.person_detection(image, net)
detections, persons = ar.person_detection(image, net)
# to count the extracted detections for a frame
detection_count = 0
# if there are detections
if (len(detections) > 0):
# loop through the detections
for detection in detections:
label = emotion_recognition(classifier, face_classifier, detection)
for person in persons:
label = emotion_recognition(classifier, face_classifier, person)
# checking for the label
if label == class_labels[0]:
@@ -422,17 +453,17 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
neutral_count = 0
detection_count = 0
detections = ar.person_detection(image, net)
detections, persons = ar.person_detection(image, net)
# if there are detections
if (len(detections) > 0):
# looping through the detections in each frame
for detection in detections:
for person in persons:
# run the model and get the emotion label
label = emotion_recognition(classifier, face_classifier, detection)
label = emotion_recognition(classifier, face_classifier, person)
# increment the count based on the label
if label == class_labels[0]:
@@ -639,10 +670,14 @@ def get_emotion_correlations(individual_lec_emotions, lec_recorded_activity_data
# this variable will be used to store the correlations
correlations = []
limit = 10
# limit = 10
limit = len(individual_lec_emotions)
data_index = ['lecture-{}'.format(i + 1) for i in range(len(individual_lec_emotions))]
# declare the correlation data dictionary
corr_data = {}
# student activity labels
student_emotion_labels = ['Happy', 'Sad', 'Angry', 'Surprise', 'Neutral']
lecturer_activity_labels = ['seated', 'standing', 'walking']
@@ -662,31 +697,72 @@ def get_emotion_correlations(individual_lec_emotions, lec_recorded_activity_data
# loop through the lecturer recorded data (lecturer)
for data in lec_recorded_activity_data:
sitting_perct_list.append(int(data['seated_count']))
standing_perct_list.append(int(data['standing_count']))
walking_perct_list.append(int(data['walking_count']))

value = int(data['seated_count'])
value1 = int(data['standing_count'])
value2 = int(data['walking_count'])
if value != 0:
sitting_perct_list.append(int(data['seated_count']))
if value1 != 0:
standing_perct_list.append(int(data['standing_count']))
if value2 != 0:
walking_perct_list.append(int(data['walking_count']))

# loop through the lecturer recorded data (student)
for data in individual_lec_emotions:
happy_perct_list.append(int(data['happy_perct']))
sad_perct_list.append(int(data['sad_perct']))
angry_perct_list.append(int(data['angry_perct']))
surprise_perct_list.append(int(data['surprise_perct']))
neutral_perct_list.append(int(data['neutral_perct']))
corr_data = {'Happy': happy_perct_list, 'Sad': sad_perct_list, 'Angry': angry_perct_list, 'Surprise': surprise_perct_list, 'Neutral': neutral_perct_list,
'seated': sitting_perct_list, 'standing': standing_perct_list, 'walking': walking_perct_list}

value = int(data['happy_perct'])
value1 = int(data['sad_perct'])
value2 = int(data['angry_perct'])
value3 = int(data['surprise_perct'])
value4 = int(data['neutral_perct'])
if value != 0:
happy_perct_list.append(int(data['happy_perct']))
if value1 != 0:
sad_perct_list.append(int(data['sad_perct']))
if value2 != 0:
angry_perct_list.append(int(data['angry_perct']))
if value3 != 0:
surprise_perct_list.append(int(data['surprise_perct']))
if value4 != 0:
neutral_perct_list.append(int(data['neutral_perct']))
if len(happy_perct_list) == len(individual_lec_emotions):
corr_data[student_emotion_labels[0]] = happy_perct_list
if len(sad_perct_list) == len(individual_lec_emotions):
corr_data[student_emotion_labels[1]] = sad_perct_list
if len(angry_perct_list) == len(individual_lec_emotions):
corr_data[student_emotion_labels[2]] = angry_perct_list
if len(surprise_perct_list) == len(individual_lec_emotions):
corr_data[student_emotion_labels[3]] = surprise_perct_list
if len(neutral_perct_list) == len(individual_lec_emotions):
corr_data[student_emotion_labels[4]] = neutral_perct_list
if (len(sitting_perct_list)) == len(individual_lec_emotions):
corr_data[lecturer_activity_labels[0]] = sitting_perct_list
if (len(standing_perct_list)) == len(individual_lec_emotions):
corr_data[lecturer_activity_labels[1]] = standing_perct_list
if (len(walking_perct_list)) == len(individual_lec_emotions):
corr_data[lecturer_activity_labels[2]] = walking_perct_list
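# note: only lists that received a value for every lecture are added to corr_data above,
# because pandas.DataFrame requires each column to match the length of data_index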
# corr_data = {'Happy': happy_perct_list, 'Sad': sad_perct_list, 'Angry': angry_perct_list, 'Surprise': surprise_perct_list, 'Neutral': neutral_perct_list,
# 'seated': sitting_perct_list, 'standing': standing_perct_list, 'walking': walking_perct_list}
# create the dataframe
df = pd.DataFrame(corr_data, index=data_index)
print(df)
# calculate the correlation
pd_series = ut.get_top_abs_correlations(df, limit)
print('====correlated variables=====')
print(pd_series)
# assign a new value to the 'limit' variable
limit = len(pd_series) if len(pd_series) < limit else limit
for i in range(limit):
# this dictionary will get the pandas.Series object's indices and values separately
corr_dict = {}
...
@@ -38,7 +38,8 @@ def activity_recognition(video_path):
VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_path))
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_03.h5")
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_04.h5")
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_04.h5")
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_06.h5")
ACTIVITY_DIR = os.path.join(BASE_DIR, "static\\FirstApp\\activity")
# files required for person detection
@@ -55,7 +56,9 @@ def activity_recognition(video_path):
np.set_printoptions(suppress=True)
# define the student activity labels
class_labels = ['Phone checking', 'Listening', 'Note taking']
# class_labels = ['Phone checking', 'Listening', 'Note taking']
class_labels = ['Phone checki...', 'Listening', 'Note taking']
# load the model
model = tensorflow.keras.models.load_model(CLASSIFIER_DIR)
@@ -80,14 +83,21 @@ def activity_recognition(video_path):
# for testing purposes
print('starting the activity recognition process')
# this is the annotated video path
ANNOTATED_VIDEO_PATH = os.path.join(ACTIVITY_DIR, video_path)
# initializing the video writer
vid_cod = cv2.VideoWriter_fourcc(*'XVID')
output = cv2.VideoWriter(ANNOTATED_VIDEO_PATH, vid_cod, 30.0, size)
# looping through the frames
while (frame_count < no_of_frames):
ret, image = video.read()
image = cv2.resize(image, size)
# image = cv2.resize(image, size)
# perform person detection on the extracted image
detections = person_detection(image, net)
detections, persons = person_detection(image, net)
# this is for testing purposes
print('frame count: ', frame_count)
@@ -102,13 +112,26 @@ def activity_recognition(video_path):
# initialize the detection count
detection_count = 0
# to iterate each person
no_of_persons = 0
# looping through the person detections of the frame
for detection in detections:
detection = cv2.resize(detection, size)
image_array = np.asarray(detection)
normalized_image_array = (detection.astype(np.float32) / 127.0) - 1

# get the coordinates for the detection
startX = detection['startX']
startY = detection['startY']
endX = detection['endX']
endY = detection['endY']
# detection = cv2.resize(detection, size)
# draw the coordinates of the persons' identified
cv2.rectangle(image, (startX, startY), (endX, endY), (0, 255, 0), 5)
image_array = np.asarray(persons[no_of_persons])
image_array_resized = cv2.resize(image_array, size)
# normalized_image_array = (detection.astype(np.float32) / 127.0) - 1
normalized_image_array = (image_array_resized.astype(np.float32) / 127.0) - 1
# Load the image into the array
data[0] = normalized_image_array
@@ -120,18 +143,36 @@ def activity_recognition(video_path):
# counting the detections under each label
if (label == class_labels[0]):
label = "Phone checking"
phone_checking_count += 1
elif (label == class_labels[1]):
listening_count += 1
elif (label == class_labels[2]):
label = "Writing"
note_taking_count += 1
# vertical_pos = startY + int(endY / 2)
vertical_pos = int(endY / 2)
# put the identified label above the detected person
# cv2.putText(image, label, (startX, vertical_pos), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 0), 2)
cv2.putText(image, label, (startX, vertical_pos), cv2.FONT_HERSHEY_COMPLEX, 4, (0, 255, 0), 10)
# increment the no.of persons
no_of_persons += 1
# increment the detection count
detection_count += 1
# increment the frame count
frame_count += 1
# resize the image
image = cv2.resize(image, (224, 224))
# write the frame to the video writer
output.write(image)
# calculating the percentages for each label
phone_perct = float(phone_checking_count / total_detections) * 100 if total_detections > 0 else 0
@@ -143,6 +184,10 @@ def activity_recognition(video_path):
percentages["writing_perct"] = note_perct
percentages["listening_perct"] = listening_perct
# after saving the video, save the changes to static content
p = os.popen("python manage.py collectstatic", "w")
p.write("yes")
# for testing purposes
print('activity recognition process is over')
@@ -163,6 +208,7 @@ def person_detection(image, net):
# set the threshold value
threshold = 0.2
detected_person = []
persons = []
# initialize the list of class labels MobileNet SSD was trained to
# detect, then generate a set of bounding box colors for each class
@@ -211,14 +257,22 @@ def person_detection(image, net):
startX = 0 if startX < 0 else startX
startY = 0 if startY < 0 else startY
# extract the person
# this dictionary will contain the bounding box coordinates
coordinates = {}
person = image[startY:startY + endY, startX:startX + endX]
detected_person.append(person)
coordinates['startX'] = startX
coordinates['startY'] = startY
coordinates['endX'] = endX
coordinates['endY'] = endY
persons.append(person)
detected_person.append(coordinates)
person_count += 1
# return the detection person list
return detected_person
return detected_person, persons
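# detected_person now holds one bounding-box dict (startX/startY/endX/endY) per person and
# persons holds the matching cropped images, which is why callers unpack `detections, persons`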
# this method will recognize the activity for each frame
@@ -233,7 +287,8 @@ def get_frame_activity_recognition(video_name):
VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_03.h5")
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_04.h5")
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_04.h5")
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_06.h5")
# files required for person detection
config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
@@ -247,7 +302,9 @@ def get_frame_activity_recognition(video_name):
np.set_printoptions(suppress=True)
# class labels
class_labels = ['Phone checking', 'Listening', 'Note taking']
# class_labels = ['Phone checking', 'Listening', 'Note taking']
class_labels = ['Phone checki...', 'Listening', 'Note taking']
# load the activity recognition model
model = tensorflow.keras.models.load_model(CLASSIFIER_DIR)
@@ -295,19 +352,27 @@ def get_frame_activity_recognition(video_name):
detection_count = 0
detected_percentages = []
detections = person_detection(image, net)
detections, persons = person_detection(image, net)
# if there are detections
if (len(detections) > 0):
no_of_persons = 0
# loop through each detection in the frame
for detection in detections:
detection = cv2.resize(detection, size)
image_array = np.asarray(detection)
normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1

# get the coordinates for the detection
startX = detection['startX']
startY = detection['startY']
endX = detection['endX']
endY = detection['endY']
image_array = np.asarray(persons[no_of_persons])
image_array_resized = cv2.resize(image_array, size)
# normalized_image_array = (detection.astype(np.float32) / 127.0) - 1
normalized_image_array = (image_array_resized.astype(np.float32) / 127.0) - 1
# Load the image into the array
data[0] = normalized_image_array
@@ -427,10 +492,21 @@ def get_student_activity_summary_for_period(activities):
def activity_frame_groupings(video_name, frame_landmarks, frame_group_dict):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_03.h5")
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_04.h5")
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_05.h5")
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_05.h5")
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_06.h5")
# files required for person detection
config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
model_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.caffemodel")
# load our serialized person detection model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(config_file, model_file)
np.set_printoptions(suppress=True)
@@ -443,11 +519,16 @@ def activity_frame_groupings(video_name, frame_landmarks, frame_group_dict):
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
size = (224, 224)
# initializing the count variables
# class labels
# class_labels = ['Phone checking', 'Listening', 'Note taking']
class_labels = ['Phone checki...', 'Listening', 'Note taking']
# iteration
video = cv2.VideoCapture(VIDEO_DIR)
no_of_frames = video.get(cv2.CAP_PROP_FRAME_COUNT)
frame_count = 0
# class labels
class_labels = ['Phone checking', 'Listening', 'Note taking']
# get the frame differences for each frame group
frame_group_diff = {}
@@ -463,9 +544,8 @@ def activity_frame_groupings(video_name, frame_landmarks, frame_group_dict):
frame_group_diff[key] = diff if diff > 0 else 1
# looping through the frames
for frame in os.listdir(EXTRACTED_DIR):
# getting the frame folder
FRAME_FOLDER = os.path.join(EXTRACTED_DIR, frame)

# for frame in os.listdir(EXTRACTED_DIR):
while (frame_count < no_of_frames):
# initializing the variables
phone_count = 0
@@ -473,57 +553,55 @@ def activity_frame_groupings(video_name, frame_landmarks, frame_group_dict):
listen_count = 0 listen_count = 0
detection_count = 0 detection_count = 0
# looping through the detections in each frame ret, image = video.read()
for detections in os.listdir(FRAME_FOLDER):
# checking whether the image contains only one person detections, persons = person_detection(image, net)
if "frame" not in detections:
# get the label for this image
IMAGE_PATH = os.path.join(FRAME_FOLDER, detections)
image = cv2.imread(IMAGE_PATH)
image = cv2.resize(image, size) # looping through the detections in each frame
for person in persons:
image_array = np.asarray(image) image = cv2.resize(person, size)
normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
# Load the image into the array image_array = np.asarray(image)
data[0] = normalized_image_array normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
# run the inference # Load the image into the array
prediction = model.predict(data) data[0] = normalized_image_array
# get the predicted label # run the inference
label = class_labels[prediction.argmax()] prediction = model.predict(data)
# increment the count based on the label # get the predicted label
if label == class_labels[0]: label = class_labels[prediction.argmax()]
phone_count += 1
elif label == class_labels[1]:
listen_count += 1
elif label == class_labels[2]:
note_count += 1
# increment the detection count # increment the count based on the label
detection_count += 1 if label == class_labels[0]:
phone_count += 1
elif label == class_labels[1]:
listen_count += 1
elif label == class_labels[2]:
note_count += 1
# finding the time landmark that the current frame is in # increment the detection count
for i in frame_landmarks: detection_count += 1
index = frame_landmarks.index(i)
j = index + 1
# checking whether the next index is within the range # finding the time landmark that the current frame is in
if j < len(frame_landmarks): for i in frame_landmarks:
next_value = frame_landmarks[j] index = frame_landmarks.index(i)
j = index + 1
# checking the correct time landmark range # checking whether the next index is within the range
if (frame_count >= i) & (frame_count < next_value): if j < len(frame_landmarks):
frame_name = "{}-{}".format(i, next_value) next_value = frame_landmarks[j]
frame_group_dict[frame_name]['phone_count'] += phone_count # checking the correct time landmark range
frame_group_dict[frame_name]['listen_count'] += listen_count if (frame_count >= i) & (frame_count < next_value):
frame_group_dict[frame_name]['note_count'] += note_count frame_name = "{}-{}".format(i, next_value)
frame_group_dict[frame_name]['detection_count'] += detection_count
frame_group_dict[frame_name]['phone_count'] += phone_count
frame_group_dict[frame_name]['listen_count'] += listen_count
frame_group_dict[frame_name]['note_count'] += note_count
frame_group_dict[frame_name]['detection_count'] += detection_count
# increment the frame count # increment the frame count
frame_count += 1 frame_count += 1
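The landmark lookup above rescans frame_landmarks with list.index on every frame. As a possible simplification (a sketch only, not code from this commit), the containing interval could be located with bisect, assuming frame_landmarks is sorted ascending:

import bisect

def find_frame_group(frame_count, frame_landmarks):
    # returns the "start-end" key of the landmark interval containing frame_count,
    # or None when the frame falls outside every interval
    j = bisect.bisect_right(frame_landmarks, frame_count)
    if 0 < j < len(frame_landmarks):
        return "{}-{}".format(frame_landmarks[j - 1], frame_landmarks[j])
    return None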
...@@ -674,10 +752,14 @@ def get_activity_correlations(individual_lec_activities, lec_recorded_activity_d
# this variable will be used to store the correlations
correlations = []
# limit = 10
limit = len(individual_lec_activities)
data_index = ['lecture-{}'.format(i+1) for i in range(len(individual_lec_activities))]
# declare the correlation data dictionary
corr_data = {}
# student activity labels
student_activity_labels = ['phone checking', 'listening', 'note taking']
lecturer_activity_labels = ['seated', 'standing', 'walking']
...@@ -694,29 +776,63 @@ def get_activity_correlations(individual_lec_activities, lec_recorded_activity_d
# loop through the lecturer recorded data (lecturer)
for data in lec_recorded_activity_data:
value = int(data['seated_count'])
value1 = int(data['standing_count'])
value2 = int(data['walking_count'])
if value != 0:
sitting_perct_list.append(int(data['seated_count']))
if value1 != 0:
standing_perct_list.append(int(data['standing_count']))
if value2 != 0:
walking_perct_list.append(int(data['walking_count']))
# loop through the lecture activity data (student)
for data in individual_lec_activities:
value = int(data['phone_perct'])
value1 = int(data['listening_perct'])
value2 = int(data['writing_perct'])
if value != 0:
phone_perct_list.append(int(data['phone_perct']))
if value1 != 0:
listen_perct_list.append(int(data['listening_perct']))
if value2 != 0:
note_perct_list.append(int(data['writing_perct']))
# only include a label when every lecture produced a non-zero value for it
if (len(phone_perct_list)) == len(individual_lec_activities):
corr_data[student_activity_labels[0]] = phone_perct_list
if (len(listen_perct_list)) == len(individual_lec_activities):
corr_data[student_activity_labels[1]] = listen_perct_list
if (len(note_perct_list)) == len(individual_lec_activities):
corr_data[student_activity_labels[2]] = note_perct_list
if (len(sitting_perct_list)) == len(individual_lec_activities):
corr_data[lecturer_activity_labels[0]] = sitting_perct_list
if (len(standing_perct_list)) == len(individual_lec_activities):
corr_data[lecturer_activity_labels[1]] = standing_perct_list
if (len(walking_perct_list)) == len(individual_lec_activities):
corr_data[lecturer_activity_labels[2]] = walking_perct_list
# corr_data = {'phone checking': phone_perct_list, 'listening': listen_perct_list, 'note taking': note_perct_list,
#              'seated': sitting_perct_list, 'standing': standing_perct_list, 'walking': walking_perct_list}
# create the dataframe
df = pd.DataFrame(corr_data, index=data_index)
print(df)
# calculate the correlation
pd_series = ut.get_top_abs_correlations(df, limit)
print('====correlated variables=====')
print(pd_series)
# assign a new value to the 'limit' variable
limit = len(pd_series) if len(pd_series) < limit else limit
for i in range(limit):
# this dictionary will get the pandas.Series object's indices and values separately
corr_dict = {}
...
import requests
def batch_process(video_id, video_name):
# call the activity process
activity_resp = requests.get('http://127.0.0.1:8000/process-lecture-activity/?lecture_video_name=' + video_name + '&lecture_video_id=' + video_id)
# call the emotion process
emotion_resp = requests.get('http://127.0.0.1:8000/process-lecture-emotion/?lecture_video_name=' + video_name + '&lecture_video_id=' + video_id)
# call the gaze process
gaze_resp = requests.get('http://127.0.0.1:8000/process-lecture-gaze-estimation/?lecture_video_name=' + video_name + '&lecture_video_id=' + video_id)
pass
# this method will save the lecture video
def save_student_lecture_video(student_video):
# call the API
student_video_save_resp = requests.post('http://127.0.0.1:8000/lecture-video', student_video)
\ No newline at end of file
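As a usage sketch only (the view class, endpoint name and import path below are assumptions, not part of this commit), batch_process could be triggered once a recorded lecture video has been saved:

# hypothetical wiring - module path and endpoint names are assumptions
from rest_framework.views import APIView
from rest_framework.response import Response

from .batch_process import batch_process


class TriggerBatchProcess(APIView):

    def get(self, request):
        video_id = request.query_params.get('lecture_video_id')
        video_name = request.query_params.get('lecture_video_name')

        # run the activity, emotion and gaze pipelines for this video
        batch_process(video_id, video_name)

        return Response({"response": "batch process started"})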
...@@ -171,6 +171,7 @@ def process_gaze_estimation(video_path):
ret, img = cap.read()
size = img.shape
font = cv2.FONT_HERSHEY_SIMPLEX
# 3D model points.
model_points = np.array([
(0.0, 0.0, 0.0),             # Nose tip
...@@ -211,6 +212,18 @@ def process_gaze_estimation(video_path):
# for testing purposes
print('starting the gaze estimation process')
# get the frame sizes
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
frame_size = (frame_width, frame_height)
# this is the annotated video path
ANNOTATED_VIDEO_PATH = os.path.join(GAZE_DIR, video_path)
# initializing the video writer
vid_cod = cv2.VideoWriter_fourcc(*'XVID')
output = cv2.VideoWriter(ANNOTATED_VIDEO_PATH, vid_cod, 30.0, frame_size)
# iterate the video frames
while True:
ret, img = cap.read()
...@@ -285,14 +298,19 @@ def process_gaze_estimation(video_path):
# checking for vertical and horizontal directions
if isLookingDown & isLookingRight:
cv2.putText(img, 'looking down and right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_down_right_count += 1
elif isLookingDown & isLookingLeft:
cv2.putText(img, 'looking down and left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_down_left_count += 1
elif isLookingUp & isLookingRight:
cv2.putText(img, 'looking up and right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_up_right_count += 1
elif isLookingUp & isLookingLeft:
cv2.putText(img, 'looking up and left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_up_left_count += 1
elif isLookingFront:
cv2.putText(img, 'looking front', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_front_count += 1
...@@ -304,6 +322,9 @@ def process_gaze_estimation(video_path):
# for testing purposes
print('gaze estimation count: ', frame_count)
# write to the video writer
output.write(img)
# increment the frame count
frame_count += 1
...@@ -330,6 +351,12 @@ def process_gaze_estimation(video_path):
cv2.destroyAllWindows()
cap.release()
output.release()
# after saving the video, save the changes to static content
p = os.popen("python manage.py collectstatic", "w")
p.write("yes")
# for testing purposes
print('ending the gaze estimation process')
...@@ -538,6 +565,7 @@ def get_lecture_gaze_estimation_for_frames(video_name):
# for testing purposes
print('ending the gaze estimation for frames process')
...@@ -979,10 +1007,15 @@ def get_gaze_correlations(individual_lec_gaze, lec_recorded_activity_data):
# this variable will be used to store the correlations
correlations = []
# limit = 10
limit = len(individual_lec_gaze)
data_index = ['lecture-{}'.format(i + 1) for i in range(len(individual_lec_gaze))]
# declare the correlation data dictionary
corr_data = {}
# student gaze labels
student_gaze_labels = ['Up and Right', 'Up and Left', 'Down and Right', 'Down and Left', 'Front']
lecturer_activity_labels = ['seated', 'standing', 'walking']
...@@ -1001,28 +1034,72 @@ def get_gaze_correlations(individual_lec_gaze, lec_recorded_activity_data):
# loop through the lecturer recorded data (lecturer)
for data in lec_recorded_activity_data:
value = int(data['seated_count'])
value1 = int(data['standing_count'])
value2 = int(data['walking_count'])
if value != 0:
sitting_perct_list.append(int(data['seated_count']))
if value1 != 0:
standing_perct_list.append(int(data['standing_count']))
if value2 != 0:
walking_perct_list.append(int(data['walking_count']))
# loop through the lecture gaze estimation data (student)
for data in individual_lec_gaze:
value = int(data['looking_up_and_right_perct'])
value1 = int(data['looking_up_and_left_perct'])
value2 = int(data['looking_down_and_right_perct'])
value3 = int(data['looking_down_and_left_perct'])
value4 = int(data['looking_front_perct'])
if value != 0:
upright_perct_list.append(int(data['looking_up_and_right_perct']))
if value1 != 0:
upleft_perct_list.append(int(data['looking_up_and_left_perct']))
if value2 != 0:
downright_perct_list.append(int(data['looking_down_and_right_perct']))
if value3 != 0:
downleft_perct_list.append(int(data['looking_down_and_left_perct']))
if value4 != 0:
front_perct_list.append(int(data['looking_front_perct']))
# only include a label when every lecture produced a non-zero value for it
if (len(upright_perct_list)) == len(individual_lec_gaze):
corr_data[student_gaze_labels[0]] = upright_perct_list
if (len(upleft_perct_list)) == len(individual_lec_gaze):
corr_data[student_gaze_labels[1]] = upleft_perct_list
if (len(downright_perct_list)) == len(individual_lec_gaze):
corr_data[student_gaze_labels[2]] = downright_perct_list
if (len(downleft_perct_list)) == len(individual_lec_gaze):
corr_data[student_gaze_labels[3]] = downleft_perct_list
if (len(front_perct_list)) == len(individual_lec_gaze):
corr_data[student_gaze_labels[4]] = front_perct_list
if (len(sitting_perct_list)) == len(individual_lec_gaze):
corr_data[lecturer_activity_labels[0]] = sitting_perct_list
if (len(standing_perct_list)) == len(individual_lec_gaze):
corr_data[lecturer_activity_labels[1]] = standing_perct_list
if (len(walking_perct_list)) == len(individual_lec_gaze):
corr_data[lecturer_activity_labels[2]] = walking_perct_list
# corr_data = {'Up and Right': upright_perct_list, 'Up and Left': upleft_perct_list, 'Down and Right': downright_perct_list,
#              'Down and Left': downleft_perct_list, 'Front': front_perct_list,
#              'seated': sitting_perct_list, 'standing': standing_perct_list, 'walking': walking_perct_list}
# create the dataframe
df = pd.DataFrame(corr_data, index=data_index)
print(df)
# calculate the correlation
pd_series = ut.get_top_abs_correlations(df, limit)
print('====correlated variables=====')
print(pd_series)
# assign a new value to the 'limit' variable
limit = len(pd_series) if len(pd_series) < limit else limit
for i in range(limit):
# this dictionary will get the pandas.Series object's indices and values separately
...
import pandas as pd
from . import utilities as ut
def calculate_student_activity_emotion_correlations(lec_activities, lec_emotions):
# this variable will be used to store the correlations
correlations = []
# limit = 10
limit = len(lec_activities)
data_index = ['lecture-{}'.format(i + 1) for i in range(len(lec_activities))]
# define the correlation data dictionary
corr_data = {}
# student activity and emotion labels
student_activity_labels = ['phone checking', 'listening', 'note taking']
student_emotion_labels = ['Happy', 'Sad', 'Angry', 'Surprise', 'Neutral']
# lecture activity data list (student)
phone_perct_list = []
note_perct_list = []
listen_perct_list = []
# lecture emotion data list (student)
happy_perct_list = []
sad_perct_list = []
angry_perct_list = []
surprise_perct_list = []
neutral_perct_list = []
# loop through the lecture activity data
for data in lec_activities:
value = int(data['phone_perct'])
value1 = int(data['listening_perct'])
value2 = int(data['writing_perct'])
if value != 0:
phone_perct_list.append(int(data['phone_perct']))
if value1 != 0:
listen_perct_list.append(int(data['listening_perct']))
if value2 != 0:
note_perct_list.append(int(data['writing_perct']))
# loop through the lecture emotion data
for data in lec_emotions:
value = int(data['happy_perct'])
value1 = int(data['sad_perct'])
value2 = int(data['angry_perct'])
value3 = int(data['surprise_perct'])
value4 = int(data['neutral_perct'])
if value != 0:
happy_perct_list.append(int(data['happy_perct']))
if value1 != 0:
sad_perct_list.append(int(data['sad_perct']))
if value2 != 0:
angry_perct_list.append(int(data['angry_perct']))
if value3 != 0:
surprise_perct_list.append(int(data['surprise_perct']))
if value4 != 0:
neutral_perct_list.append(int(data['neutral_perct']))
if len(phone_perct_list) == len(lec_activities):
corr_data[student_activity_labels[0]] = phone_perct_list
if len(listen_perct_list) == len(lec_activities):
corr_data[student_activity_labels[1]] = listen_perct_list
if len(note_perct_list) == len(lec_activities):
corr_data[student_activity_labels[2]] = note_perct_list
if len(happy_perct_list) == len(lec_activities):
corr_data[student_emotion_labels[0]] = happy_perct_list
if len(sad_perct_list) == len(lec_activities):
corr_data[student_emotion_labels[1]] = sad_perct_list
if len(angry_perct_list) == len(lec_activities):
corr_data[student_emotion_labels[2]] = angry_perct_list
if len(surprise_perct_list) == len(lec_activities):
corr_data[student_emotion_labels[3]] = surprise_perct_list
if len(neutral_perct_list) == len(lec_activities):
corr_data[student_emotion_labels[4]] = neutral_perct_list
# corr_data = {'phone checking': phone_perct_list, 'listening': listen_perct_list, 'note taking': note_perct_list,
# 'Happy': happy_perct_list, 'Sad': sad_perct_list, 'Angry': angry_perct_list, 'Surprise': surprise_perct_list, 'Neutral': neutral_perct_list,
# }
print('data: ', corr_data)
# create the dataframe
df = pd.DataFrame(corr_data, index=data_index)
df = df[(df.T != 0).any()]
print(df)
# calculate the correlation
pd_series = ut.get_top_abs_correlations(df, limit)
print(pd_series)
# assign a new value to the 'limit' variable
limit = len(pd_series) if len(pd_series) < limit else limit
for i in range(limit):
# this dictionary will get the pandas.Series object's indices and values separately
corr_dict = {}
index = pd_series.index[i]
# check whether the first index is a student activity
isStudentActivity = index[0] in student_activity_labels
# check whether the second index is a student emotion
isStudentEmotion = index[1] in student_emotion_labels
# if the pair is a student activity and a student emotion, add it to the dictionary
if isStudentActivity & isStudentEmotion:
corr_dict['index'] = index
corr_dict['value'] = pd_series.values[i]
# append the dictionary to the 'correlations' list
correlations.append(corr_dict)
# return the list
return correlations
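The length checks above exist because pandas requires every column passed to the DataFrame constructor to match the index length; a column that lost entries to the zero-value filter would otherwise raise an error. A small illustration (the values are made up):

import pandas as pd

data_index = ['lecture-1', 'lecture-2', 'lecture-3']

# 'listening' lost a zero entry, so it only has two values for three lectures
ragged = {'phone checking': [10, 25, 5], 'listening': [80, 60]}

try:
    pd.DataFrame(ragged, index=data_index)
except ValueError as err:
    # pandas rejects the ragged input with a ValueError
    print('skipped column would have caused:', err)

# keeping only the complete columns works
df = pd.DataFrame({'phone checking': [10, 25, 5]}, index=data_index)
print(df)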
# this method will calculate the student activity-gaze correlations
def calculate_student_activity_gaze_correlations(lec_activities, lec_gaze):
# this variable will be used to store the correlations
correlations = []
limit = len(lec_activities)
data_index = ['lecture-{}'.format(i + 1) for i in range(len(lec_activities))]
# this dictionary contains the correlation data
corr_data = {}
# student activity and gaze labels
student_activity_labels = ['phone checking', 'listening', 'note taking']
# student_emotion_labels = ['Happy', 'Sad', 'Angry', 'Surprise', 'Neutral']
student_gaze_labels = ['Up and Right', 'Up and Left', 'Down and Right', 'Down and Left', 'Front']
# lecture activity data list (student)
phone_perct_list = []
note_perct_list = []
listen_perct_list = []
# lecture gaze estimation data list (student)
upright_perct_list = []
upleft_perct_list = []
downright_perct_list = []
downleft_perct_list = []
front_perct_list = []
# loop through the lecture activity data
for data in lec_activities:
value = int(data['phone_perct'])
value1 = int(data['listening_perct'])
value2 = int(data['writing_perct'])
if value != 0:
phone_perct_list.append(int(data['phone_perct']))
if value1 != 0:
listen_perct_list.append(int(data['listening_perct']))
if value2 != 0:
note_perct_list.append(int(data['writing_perct']))
# loop through the lecture activity data
for data in lec_gaze:
value = int(data['looking_up_and_right_perct'])
value1 = int(data['looking_up_and_left_perct'])
value2 = int(data['looking_down_and_right_perct'])
value3 = int(data['looking_down_and_left_perct'])
value4 = int(data['looking_front_perct'])
if value != 0:
upright_perct_list.append(int(data['looking_up_and_right_perct']))
if value1 != 0:
upleft_perct_list.append(int(data['looking_up_and_left_perct']))
if value2 != 0:
downright_perct_list.append(int(data['looking_down_and_right_perct']))
if value3 != 0:
downleft_perct_list.append(int(data['looking_down_and_left_perct']))
if value4 != 0:
front_perct_list.append(int(data['looking_front_perct']))
if (len(phone_perct_list)) == len(lec_activities):
corr_data[student_activity_labels[0]] = phone_perct_list
if (len(listen_perct_list)) == len(lec_activities):
corr_data[student_activity_labels[1]] = listen_perct_list
if (len(note_perct_list)) == len(lec_activities):
corr_data[student_activity_labels[2]] = note_perct_list
if (len(upright_perct_list)) == len(lec_activities):
corr_data[student_gaze_labels[0]] = upright_perct_list
if (len(upleft_perct_list)) == len(lec_activities):
corr_data[student_gaze_labels[1]] = upleft_perct_list
if (len(downright_perct_list)) == len(lec_activities):
corr_data[student_gaze_labels[2]] = downright_perct_list
if (len(downleft_perct_list)) == len(lec_activities):
corr_data[student_gaze_labels[3]] = downleft_perct_list
if (len(front_perct_list)) == len(lec_activities):
corr_data[student_gaze_labels[4]] = front_perct_list
# corr_data = {'phone checking': phone_perct_list, 'listening': listen_perct_list, 'note taking': note_perct_list,
# 'Up and Right': upright_perct_list, 'Up and Left': upleft_perct_list, 'Down and Right': downright_perct_list,
# 'Down and Left': downleft_perct_list, 'Front': front_perct_list
# }
# create the dataframe
df = pd.DataFrame(corr_data, index=data_index)
print(df)
# calculate the correlation
pd_series = ut.get_top_abs_correlations(df, limit)
print(pd_series)
print('length of pd_series: ', len(pd_series))
# assign a new value to the 'limit' variable
limit = len(pd_series) if len(pd_series) < limit else limit
for i in range(limit):
# this dictionary will get the pandas.Series object's indices and values separately
corr_dict = {}
index = pd_series.index[i]
# check whether the first index is a student activity
isStudentActivity = index[0] in student_activity_labels
# check whether the second index is a student gaze estimation
isStudentGaze = index[1] in student_gaze_labels
# if the pair is a student activity and a student gaze direction, add it to the dictionary
if isStudentActivity & isStudentGaze:
corr_dict['index'] = index
corr_dict['value'] = pd_series.values[i]
# append the dictionary to the 'correlations' list
correlations.append(corr_dict)
print('correlations: ', correlations)
# return the list
return correlations
# this method will calculate the student emotion-gaze correlations
def calculate_student_emotion_gaze_correlations(lec_emotions, lec_gaze):
# this variable will be used to store the correlations
correlations = []
limit = len(lec_emotions)
data_index = ['lecture-{}'.format(i + 1) for i in range(len(lec_emotions))]
# this dictionary will contain the correlation data
corr_data = {}
student_emotion_labels = ['Happy', 'Sad', 'Angry', 'Surprise', 'Neutral']
student_gaze_labels = ['Up and Right', 'Up and Left', 'Down and Right', 'Down and Left', 'Front']
# lecture emotion data list (student)
happy_perct_list = []
sad_perct_list = []
angry_perct_list = []
surprise_perct_list = []
neutral_perct_list = []
# lecture gaze estimation data list (student)
upright_perct_list = []
upleft_perct_list = []
downright_perct_list = []
downleft_perct_list = []
front_perct_list = []
# loop through the lecture emotion data
for data in lec_emotions:
value = int(data['happy_perct'])
value1 = int(data['sad_perct'])
value2 = int(data['angry_perct'])
value3 = int(data['surprise_perct'])
value4 = int(data['neutral_perct'])
if value != 0:
happy_perct_list.append(int(data['happy_perct']))
if value1 != 0:
sad_perct_list.append(int(data['sad_perct']))
if value2 != 0:
angry_perct_list.append(int(data['angry_perct']))
if value3 != 0:
surprise_perct_list.append(int(data['surprise_perct']))
if value4 != 0:
neutral_perct_list.append(int(data['neutral_perct']))
# loop through the lecture gaze data
for data in lec_gaze:
value = int(data['looking_up_and_right_perct'])
value1 = int(data['looking_up_and_left_perct'])
value2 = int(data['looking_down_and_right_perct'])
value3 = int(data['looking_down_and_left_perct'])
value4 = int(data['looking_front_perct'])
if value != 0:
upright_perct_list.append(int(data['looking_up_and_right_perct']))
if value1 != 0:
upleft_perct_list.append(int(data['looking_up_and_left_perct']))
if value2 != 0:
downright_perct_list.append(int(data['looking_down_and_right_perct']))
if value3 != 0:
downleft_perct_list.append(int(data['looking_down_and_left_perct']))
if value4 != 0:
front_perct_list.append(int(data['looking_front_perct']))
if len(happy_perct_list) == len(lec_emotions):
corr_data[student_emotion_labels[0]] = happy_perct_list
if len(sad_perct_list) == len(lec_emotions):
corr_data[student_emotion_labels[1]] = sad_perct_list
if len(angry_perct_list) == len(lec_emotions):
corr_data[student_emotion_labels[2]] = angry_perct_list
if len(surprise_perct_list) == len(lec_emotions):
corr_data[student_emotion_labels[3]] = surprise_perct_list
if len(neutral_perct_list) == len(lec_emotions):
corr_data[student_emotion_labels[4]] = neutral_perct_list
if (len(upright_perct_list)) == len(lec_emotions):
corr_data[student_gaze_labels[0]] = upright_perct_list
if (len(upleft_perct_list)) == len(lec_emotions):
corr_data[student_gaze_labels[1]] = upleft_perct_list
if (len(downright_perct_list)) == len(lec_emotions):
corr_data[student_gaze_labels[2]] = downright_perct_list
if (len(downleft_perct_list)) == len(lec_emotions):
corr_data[student_gaze_labels[3]] = downleft_perct_list
if (len(front_perct_list)) == len(lec_emotions):
corr_data[student_gaze_labels[4]] = front_perct_list
# corr_data = {'Happy': happy_perct_list, 'Sad': sad_perct_list, 'Angry': angry_perct_list, 'Surprise': surprise_perct_list, 'Neutral': neutral_perct_list,
# 'Up and Right': upright_perct_list, 'Up and Left': upleft_perct_list, 'Down and Right': downright_perct_list,
# 'Down and Left': downleft_perct_list, 'Front': front_perct_list
# }
# create the dataframe
df = pd.DataFrame(corr_data, index=data_index)
print(df)
# calculate the correlation
pd_series = ut.get_top_abs_correlations(df, limit)
print(pd_series)
# assign a new value to the 'limit' variable
limit = len(pd_series) if len(pd_series) < limit else limit
for i in range(limit):
# this dictionary will get the pandas.Series object's indices and values separately
corr_dict = {}
index = pd_series.index[i]
# check whether the first index is a student emotion
isStudentEmotion = index[0] in student_emotion_labels
# check whether the second index is a student gaze estimation
isStudentGaze = index[1] in student_gaze_labels
# if the pair is a student emotion and a student gaze direction, add it to the dictionary
if isStudentEmotion & isStudentGaze:
corr_dict['index'] = index
corr_dict['value'] = pd_series.values[i]
# append the dictionary to the 'correlations' list
correlations.append(corr_dict)
# return the list
return correlations
# this method will provide comments on the student behavior
def generate_student_behavior_comments(category, **kwargs):
# declare the comments list
comments = []
if category == "Activity":
float_phone_perct = float(kwargs.get('phone_perct'))
float_listen_perct = float(kwargs.get('listen_perct'))
float_note_perct = float(kwargs.get('note_perct'))
# set the threshold value list
THRESHOLDS = [40, 20, 30]
if int(float_phone_perct) >= THRESHOLDS[0]:
comments.append("Special Attention needs to be given to reduce student phone checking")
if int(float_listen_perct) < THRESHOLDS[1]:
comments.append("Consider taking steps to increase student attention")
if int(float_note_perct) < THRESHOLDS[2]:
comments.append("Try to pursue students to take important notes during the lecture")
elif category == "Emotion":
print('Emotion')
elif category == "Gaze":
print('Gaze')
# return the comment list
return comments
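A short worked example of the thresholds above (the percentage values are sample inputs, not project data): with 45% phone checking, 15% listening and 35% note taking, the first two thresholds are violated and two comments come back.

# illustrative call - the percentages are made-up sample values
comments = generate_student_behavior_comments(
    "Activity",
    phone_perct="45.0",
    listen_perct="15.0",
    note_perct="35.0",
)

# phone checking >= 40 and listening < 20 trigger comments; note taking >= 30 passes
print(comments)
# -> two comments: one about phone checking, one about student attention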
# this method will remove the redundant pairs in pandas dataframe
def get_redundant_pairs(df):
'''Get diagonal and lower triangular pairs of correlation matrix'''
pairs_to_drop = set()
...@@ -8,6 +9,7 @@ def get_redundant_pairs(df):
pairs_to_drop.add((cols[i], cols[j]))
return pairs_to_drop
# this method will return the top specified correlations
def get_top_abs_correlations(df, n):
au_corr = df.corr().abs().unstack()
labels_to_drop = get_redundant_pairs(df)
...
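The rest of this helper is collapsed by the diff viewer. A common way to finish it (a sketch under that assumption, not necessarily the project's exact lines) drops the redundant pairs and keeps the n strongest absolute correlations:

def get_top_abs_correlations(df, n):
    # sketch of the full helper, assuming the collapsed lines follow the usual pattern
    au_corr = df.corr().abs().unstack()
    labels_to_drop = get_redundant_pairs(df)
    # remove diagonal/lower-triangular duplicates and sort by absolute correlation
    au_corr = au_corr.drop(labels=labels_to_drop).sort_values(ascending=False)
    return au_corr[0:n]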
...@@ -306,4 +306,9 @@ def get_frame_landmarks(video_name):
# now return the frame landmarks
return frame_landmarks
\ No newline at end of file
# this method will save the lecture video (student)
def save_lecture_student_video():
pass
\ No newline at end of file
...@@ -18,8 +18,12 @@ there are two fields inside "Meta" class, as follows.
from rest_framework import serializers
from djongo import models
from .MongoModels import *
from . models import VideoMeta
from .logic import id_generator as ig
# from datetime import datetime as dt
import datetime
# lecture serializer
...@@ -190,6 +194,110 @@ class LectureVideoSerializer(serializers.ModelSerializer):
model = LectureVideo
fields = '__all__'
# this method will validate the input data
def to_internal_value(self, data):
lecturer = None
subject = None
lecturer_data = data.get('lecturer')
subject_data = data.get('subject')
# serialize the lecturer data
lecturer = Lecturer.objects.filter(id=lecturer_data)
subject = Subject.objects.filter(id=subject_data)
lecturer_ser_data = LecturerSerializer(lecturer, many=True).data[0]
subject_ser_data = SubjectSerializer(subject, many=True).data[0]
# retrieve the last lecture video details
last_lec_video = LectureVideo.objects.order_by('lecture_video_id').last()
# create the next lecture video id
new_lecture_video_id = ig.generate_new_id(last_lec_video.lecture_video_id)
# if both subject and lecturer details are available
if len(lecturer) == 1 & len(subject) == 1:
str_video_length = data.get('video_length')
video_length_parts = str_video_length.split(':')
video_length = datetime.timedelta(minutes=int(video_length_parts[0]), seconds=int(video_length_parts[1]),
milliseconds=int(video_length_parts[2]))
# this data will be passed as validated data
validated_data = {
'lecture_video_id': new_lecture_video_id,
'lecturer': lecturer_ser_data,
'subject': subject_ser_data,
'date': data.get('date'),
'video_name': data.get('video_name'),
'video_length': video_length
}
return super(LectureVideoSerializer, self).to_internal_value(validated_data)
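For clarity, the incoming video_length string is split into minutes, seconds and milliseconds before being converted to a timedelta. A small worked example of that conversion (the sample value is illustrative only):

import datetime

# e.g. a video reported as "05:30:250" (made-up value for illustration)
str_video_length = "05:30:250"
video_length_parts = str_video_length.split(':')

video_length = datetime.timedelta(minutes=int(video_length_parts[0]),
                                  seconds=int(video_length_parts[1]),
                                  milliseconds=int(video_length_parts[2]))

print(video_length)  # 0:05:30.250000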
# this method will override the 'create' method
def create(self, validated_data):
lecturer = None
subject = None
lecturer_data = validated_data.pop('lecturer')
subject_data = validated_data.pop('subject')
# serialize the lecturer data
lecturer = Lecturer.objects.filter(id=lecturer_data)
subject = Subject.objects.filter(id=subject_data)
# retrieve the last lecture video details
last_lec_video = LectureVideo.objects.order_by('lecture_video_id').last()
# create the next lecture video id
new_lecture_video_id = ig.generate_new_id(last_lec_video.lecture_video_id)
# if both subject and lecturer details are available
if len(lecturer) == 1 & len(subject) == 1:
str_video_length = validated_data.pop('video_length')
video_length_parts = str_video_length.split(':')
video_length = datetime.timedelta(minutes=int(video_length_parts[0]), seconds=int(video_length_parts[1]), milliseconds=int(video_length_parts[2]))
lecture_video, created = LectureVideo.objects.update_or_create(
lecture_video_id=new_lecture_video_id,
lecturer=lecturer[0],
subject=subject[0],
date=validated_data.pop('date'),
video_name=validated_data.pop('video_name'),
video_length=video_length
)
# faculty_data = validated_data.pop('faculty')
# serialized_faculty = FacultySerializer(data=faculty_data)
#
# if (serialized_faculty.is_valid()):
# # faculty, faculty_created = Faculty.objects.get_or_create(defaults={}, faculty_id=serialized_faculty.data['faculty_id'])
# faculty = Faculty.objects.filter(faculty_id=serialized_faculty.data['faculty_id'])
#
# if (len(faculty) == 1):
# lecturer, created = Lecturer.objects.update_or_create(
# faculty=faculty[0],
# lecturer_id=validated_data.pop('lecturer_id'),
# fname=validated_data.pop('fname'),
# lname=validated_data.pop('lname'),
# email=validated_data.pop('email'),
# telephone=validated_data('telephone')
# )
#
# return lecturer
#
return lecture_video
return None
# lecture video time landmarks serializer
class LectureVideoTimeLandmarksSerializer(serializers.ModelSerializer):
...
...@@ -257,8 +257,13 @@
//change the innerHTML of the clicked button
e.target.innerHTML = "<span class='font-italic'>Processing</span>";
let phone_perct = $('#phone_perct').text().split("%")[0];
let listen_perct = $('#listening_perct').text().split("%")[0];
let note_perct = $('#writing_perct').text().split("%")[0];
//fetch the activity summary details
fetch('http://127.0.0.1:8000/get-lecture-activity-summary/?video_name=' + global_video_name + '&phone_perct=' + phone_perct + '&note_perct=' + note_perct + '&listen_perct=' + listen_perct)
.then((res) => res.json())
.then((out) => activityFrameGroupPercentages(out, e))
.catch((err) => alert('error: ' + err));
...@@ -295,16 +300,30 @@
//this function will handle the retrieved activity frame group percentages
function activityFrameGroupPercentages(response, e) {
//remove the previous comments
$('#student_activity_comments').empty();
lecture_activity_frame_group_percentages = response.frame_group_percentages;
let frame_landmarks = response.frame_landmarks;
{#let frame_group_dict = response.frame_group_dict;#}
let activity_labels = response.activity_labels;
let comment_list = response.comments;
//define a html string
let htmlString = "";
for (let i = 0; i < comment_list.length; i++) {
htmlString += "<p class='font-italic font-weight-bold'>";
htmlString += comment_list[i];
htmlString += "</p>";
}
//change the button back to original
e.target.innerHTML = "Summary";
//append the html string to the comments list
$('#student_activity_comments').append(htmlString);
//open the modal
$('#ActivitySummaryModal').modal();
...@@ -1017,6 +1036,67 @@
});
//this method will handle the student activity-emotion correlations advanced analysis
$('#student_activity_emotion_corr').click(function () {
//open the modal
$('#student_activity_emotion_advanced_modal').modal();
//show the loader
$('#student_activity_emotion_corr_loader').attr('hidden', false);
let lecturer = "{{ lecturer }}";
let option = $("input[name='option']:checked").val();
//fetch the correlation data
fetch('http://127.0.0.1:8000/get-student-activity-emotion-correlations/?lecturer=' + lecturer + "&option=" + option)
.then((res) => res.json())
.then((out) => displayActivityEmotionCorrelations(out.correlations))
.catch((err) => alert('err: ' + err));
});
//this method will handle the student activity-gaze correlations advanced analysis
$('#student_activity_gaze_corr').click(function () {
//open the modal
$('#student_activity_gaze_advanced_modal').modal();
//show the loader
$('#student_activity_gaze_corr_loader').attr('hidden', false);
let lecturer = "{{ lecturer }}";
let option = $("input[name='option']:checked").val();
//fetch the correlation data
fetch('http://127.0.0.1:8000/get-student-activity-gaze-correlations/?lecturer=' + lecturer + "&option=" + option)
.then((res) => res.json())
.then((out) => displayActivityGazeCorrelations(out.correlations))
.catch((err) => alert('err: ' + err));
});
//this method will handle the student emotion-gaze correlations advanced analysis
$('#student_emotion_gaze_corr').click(function () {
//open the modal
$('#student_emotion_gaze_advanced_modal').modal();
//show the loader
$('#student_emotion_gaze_corr_loader').attr('hidden', false);
let lecturer = "{{ lecturer }}";
let option = $("input[name='option']:checked").val();
//fetch the correlation data
fetch('http://127.0.0.1:8000/get-student-emotion-gaze-correlations/?lecturer=' + lecturer + "&option=" + option)
.then((res) => res.json())
.then((out) => displayEmotionGazeCorrelations(out.correlations))
.catch((err) => alert('err: ' + err));
});
//this method will display the activity correlations in a table
function displayActivityCorrelations(correlations) {
...@@ -1032,17 +1112,13 @@
if (value <= 100 && value > 80) {
htmlString += "<tr class='bg-success text-white'>";
} else if (value <= 80 && value > 60) {
htmlString += "<tr class='bg-primary text-white'>";
} else if (value <= 60 && value > 40) {
htmlString += "<tr class='bg-warning text-white'>";
} else if (value <= 40 && value > 20) {
htmlString += "<tr class='bg-danger text-white'>";
} else if (value <= 20 && value > 0) {
htmlString += "<tr class='bg-dark text-white'>";
}
...@@ -1088,17 +1164,13 @@
if (value <= 100 && value > 80) {
htmlString += "<tr class='bg-success text-white'>";
} else if (value <= 80 && value > 60) {
htmlString += "<tr class='bg-primary text-white'>";
} else if (value <= 60 && value > 40) {
htmlString += "<tr class='bg-warning text-white'>";
} else if (value <= 40 && value > 20) {
htmlString += "<tr class='bg-danger text-white'>";
} else if (value <= 20 && value > 0) {
htmlString += "<tr class='bg-dark text-white'>";
}
...@@ -1145,17 +1217,13 @@
if (value <= 100 && value > 80) {
htmlString += "<tr class='bg-success text-white'>";
} else if (value <= 80 && value > 60) {
htmlString += "<tr class='bg-primary text-white'>";
} else if (value <= 60 && value > 40) {
htmlString += "<tr class='bg-warning text-white'>";
} else if (value <= 40 && value > 20) {
htmlString += "<tr class='bg-danger text-white'>";
} else if (value <= 20 && value > 0) {
htmlString += "<tr class='bg-dark text-white'>";
}
...@@ -1187,6 +1255,189 @@
}
//this method will display the student activity-emotion correlations in a table
function displayActivityEmotionCorrelations(correlations) {
let htmlString = "";
if (correlations.length !== 0) {
//create the html content for the activity correlation table
for (let i = 0; i < correlations.length; i++) {
let corr = correlations[i];
let indices = corr.index;
let value = corr.value;
value = Math.round(value * 100, 1);
if (value <= 100 && value > 80) {
htmlString += "<tr class='bg-success text-white'>";
} else if (value <= 80 && value > 60) {
htmlString += "<tr class='bg-primary text-white'>";
} else if (value <= 60 && value > 40) {
htmlString += "<tr class='bg-warning text-white'>";
} else if (value <= 40 && value > 20) {
htmlString += "<tr class='bg-danger text-white'>";
} else if (value <= 20 && value > 0) {
htmlString += "<tr class='bg-dark text-white'>";
}
//create a <tr> to be inserted
htmlString += "<td>";
htmlString += indices[0];
htmlString += "</td>";
htmlString += "<td>";
htmlString += indices[1];
htmlString += "</td>";
htmlString += "<td>";
htmlString += value;
htmlString += "</td>";
htmlString += "</tr>";
}
} else {
htmlString += "<tr>";
htmlString += "<td colspan='3'>";
htmlString += "<span class='font-italic'>No correlations were found</span>";
htmlString += "</td>";
htmlString += "</tr>";
}
//append to the <tbody>
$('#student_activity_emotion_corr_tbody').append(htmlString);
//hide the loader
$('#student_activity_emotion_corr_loader').hide();
//show the table
$('#student_activity_emotion_corr_table').attr('hidden', false);
}
//this method will display the student activity-gaze correlations in a table
function displayActivityGazeCorrelations(correlations) {
let htmlString = "";
if (correlations.length !== 0) {
//create the html content for the activity correlation table
for (let i = 0; i < correlations.length; i++) {
let corr = correlations[i];
let indices = corr.index;
let value = corr.value;
value = Math.round(value * 100, 1);
if (value <= 100 && value > 80) {
htmlString += "<tr class='bg-success text-white'>";
} else if (value <= 80 && value > 60) {
htmlString += "<tr class='bg-primary text-white'>";
} else if (value <= 60 && value > 40) {
htmlString += "<tr class='bg-warning text-white'>";
} else if (value <= 40 && value > 20) {
htmlString += "<tr class='bg-danger text-white'>";
} else if (value <= 20 && value > 0) {
htmlString += "<tr class='bg-dark text-white'>";
}
//create a <tr> to be inserted
htmlString += "<td>";
htmlString += indices[0];
htmlString += "</td>";
htmlString += "<td>";
htmlString += indices[1];
htmlString += "</td>";
htmlString += "<td>";
htmlString += value;
htmlString += "</td>";
htmlString += "</tr>";
}
} else {
htmlString += "<tr>";
htmlString += "<td colspan='3'>";
htmlString += "<span class='font-italic'>No correlations were found</span>";
htmlString += "</td>";
htmlString += "</tr>";
}
//append to the <tbody>
$('#student_activity_gaze_corr_tbody').append(htmlString);
//hide the loader
$('#student_activity_gaze_corr_loader').hide();
//show the table
$('#student_activity_gaze_corr_table').attr('hidden', false);
}
//this method will display the student emotion-gaze correlations in a table
function displayEmotionGazeCorrelations(correlations) {
let htmlString = "";
if (correlations.length !== 0) {
//create the html content for the activity correlation table
for (let i = 0; i < correlations.length; i++) {
let corr = correlations[i];
let indices = corr.index;
let value = corr.value;
value = Math.round(value * 100, 1);
if (value <= 100 && value > 80) {
htmlString += "<tr class='bg-success text-white'>";
} else if (value <= 80 && value > 60) {
htmlString += "<tr class='bg-primary text-white'>";
} else if (value <= 60 && value > 40) {
htmlString += "<tr class='bg-warning text-white'>";
} else if (value <= 40 && value > 20) {
htmlString += "<tr class='bg-danger text-white'>";
} else if (value <= 20 && value > 0) {
htmlString += "<tr class='bg-dark text-white'>";
}
//create a <tr> to be inserted
htmlString += "<td>";
htmlString += indices[0];
htmlString += "</td>";
htmlString += "<td>";
htmlString += indices[1];
htmlString += "</td>";
htmlString += "<td>";
htmlString += value;
htmlString += "</td>";
htmlString += "</tr>";
}
} else {
htmlString += "<tr>";
htmlString += "<td colspan='3'>";
htmlString += "<span class='font-italic'>No correlations were found</span>";
htmlString += "</td>";
htmlString += "</tr>";
}
//append to the <tbody>
$('#student_emotion_gaze_corr_tbody').append(htmlString);
//hide the loader
$('#student_emotion_gaze_corr_loader').hide();
//show the table
$('#student_emotion_gaze_corr_table').attr('hidden', false);
}
});
</script>
...@@ -1214,86 +1465,7 @@
</div>
<!-- Content Row -->
{# <div class="row">#}
{##}
{# <!-- Earnings (Monthly) Card Example -->#}
{# <div class="col-xl-3 col-md-6 mb-4">#}
{# <div class="card border-left-primary shadow h-100 py-2">#}
{# <div class="card-body">#}
{# <div class="row no-gutters align-items-center">#}
{# <div class="col mr-2">#}
{# <div class="text-xs font-weight-bold text-primary text-uppercase mb-1">Earnings (Monthly)</div>#}
{# <div class="h5 mb-0 font-weight-bold text-gray-800">$40,000</div>#}
{# </div>#}
{# <div class="col-auto">#}
{# <i class="fas fa-calendar fa-2x text-gray-300"></i>#}
{# </div>#}
{# </div>#}
{# </div>#}
{# </div>#}
{# </div>#}
{##}
{# <!-- Earnings (Monthly) Card Example -->#}
{# <div class="col-xl-3 col-md-6 mb-4">#}
{# <div class="card border-left-success shadow h-100 py-2">#}
{# <div class="card-body">#}
{# <div class="row no-gutters align-items-center">#}
{# <div class="col mr-2">#}
{# <div class="text-xs font-weight-bold text-success text-uppercase mb-1">Earnings (Annual)</div>#}
{# <div class="h5 mb-0 font-weight-bold text-gray-800">$215,000</div>#}
{# </div>#}
{# <div class="col-auto">#}
{# <i class="fas fa-dollar-sign fa-2x text-gray-300"></i>#}
{# </div>#}
{# </div>#}
{# </div>#}
{# </div>#}
{# </div>#}
{##}
{# <!-- Earnings (Monthly) Card Example -->#}
{# <div class="col-xl-3 col-md-6 mb-4">#}
{# <div class="card border-left-info shadow h-100 py-2">#}
{# <div class="card-body">#}
{# <div class="row no-gutters align-items-center">#}
{# <div class="col mr-2">#}
{# <div class="text-xs font-weight-bold text-info text-uppercase mb-1">Tasks</div>#}
{# <div class="row no-gutters align-items-center">#}
{# <div class="col-auto">#}
{# <div class="h5 mb-0 mr-3 font-weight-bold text-gray-800">50%</div>#}
{# </div>#}
{# <div class="col">#}
{# <div class="progress progress-sm mr-2">#}
{# <div class="progress-bar bg-info" role="progressbar" style="width: 50%" aria-valuenow="50" aria-valuemin="0" aria-valuemax="100"></div>#}
{# </div>#}
{# </div>#}
{# </div>#}
{# </div>#}
{# <div class="col-auto">#}
{# <i class="fas fa-clipboard-list fa-2x text-gray-300"></i>#}
{# </div>#}
{# </div>#}
{# </div>#}
{# </div>#}
{# </div>#}
{##}
{# <!-- Pending Requests Card Example -->#}
{# <div class="col-xl-3 col-md-6 mb-4">#}
{# <div class="card border-left-warning shadow h-100 py-2">#}
{# <div class="card-body">#}
{# <div class="row no-gutters align-items-center">#}
{# <div class="col mr-2">#}
{# <div class="text-xs font-weight-bold text-warning text-uppercase mb-1">Pending Requests</div>#}
{# <div class="h5 mb-0 font-weight-bold text-gray-800">18</div>#}
{# </div>#}
{# <div class="col-auto">#}
{# <i class="fas fa-comments fa-2x text-gray-300"></i>#}
{# </div>#}
{# </div>#}
{# </div>#}
{# </div>#}
{# </div>#}
{##}
{# </div>#}
<!-- Content Row -->
...@@ -1418,6 +1590,23 @@
Advanced Analysis
</button>
<!-- end of button to view advanced analysis -->
<!-- button to view advanced analysis dropdown -->
{# <div class="dropdown">#}
{# <button class="btn btn-secondary dropdown-toggle float-right mr-2" type="button"#}
{# id="activity_advanced_dropdown" data-toggle="dropdown"#}
{# aria-haspopup="true" aria-expanded="false">#}
{# Advanced Analysis#}
{# </button>#}
{# <div class="dropdown-menu"#}
{# aria-labelledby="activity_advanced_dropdown">#}
{# <button class="dropdown-item" id="activity_advanced_btn">Student vs. Lecturer</button>#}
{# <button class="dropdown-item">Student vs. Student</button>#}
{# </div>#}
{# </div>#}
<!-- end of button to view advanced analysis dropdown -->
</div>
</div>
<!-- end of Activity card -->
...@@ -1501,6 +1690,22 @@
Advanced Analysis
</button>
<!-- end of button to view advanced analysis -->
<!-- button to view advanced analysis dropdown -->
{# <div class="dropdown">#}
{# <button class="btn btn-secondary dropdown-toggle float-right mr-2" type="button"#}
{# id="emotion_advanced_dropdown" data-toggle="dropdown"#}
{# aria-haspopup="true" aria-expanded="false">#}
{# Advanced Analysis#}
{# </button>#}
{# <div class="dropdown-menu"#}
{# aria-labelledby="emotion_advanced_dropdown">#}
{# <button class="dropdown-item" id="emotion_advanced_btn">Student vs. Lecturer</button>#}
{# <button class="dropdown-item">Student vs. Student</button>#}
{# </div>#}
{# </div>#}
<!-- end of button to view advanced analysis dropdown -->
</div>
</div>
...@@ -1578,11 +1783,56 @@
</button>
<!-- end of button to view advanced analysis -->
<!-- button to view advanced analysis dropdown -->
{# <div class="dropdown">#}
{# <button class="btn btn-secondary dropdown-toggle float-right mr-2" type="button"#}
{# id="gaze_advanced_dropdown" data-toggle="dropdown"#}
{# aria-haspopup="true" aria-expanded="false">#}
{# Advanced Analysis#}
{# </button>#}
{# <div class="dropdown-menu"#}
{# aria-labelledby="gaze_advanced_dropdown">#}
{# <button class="dropdown-item" id="gaze_advanced_btn">Student vs. Lecturer</button>#}
{# <button class="dropdown-item">Student vs. Student</button>#}
{# </div>#}
{# </div>#}
<!-- end of button to view advanced analysis dropdown -->
</div>
</div>
<!-- end of Gaze estimation card -->
<!-- advanced analysis for student-student correlations -->
{# <div class="float-right m-2">#}
{# <button type="button" class="btn btn-success" id="student_student_corr">#}
{# Advanced Analysis#}
{# </button>#}
{# </div>#}
<!-- end of advanced analysis for student-student correlations -->
<!-- button to view advanced analysis dropdown -->
<div class="dropdown">
<button class="btn btn-secondary dropdown-toggle float-right mr-2"
type="button"
id="student_student_advanced_dropdown" data-toggle="dropdown"
aria-haspopup="true" aria-expanded="false">
Advanced Analysis
</button>
<div class="dropdown-menu"
aria-labelledby="student_student_advanced_dropdown">
<button class="dropdown-item" id="student_activity_emotion_corr">
Activity vs. Emotion
</button>
<button class="dropdown-item" id="student_activity_gaze_corr">Activity
vs. Gaze
</button>
<button class="dropdown-item" id="student_emotion_gaze_corr">Emotion vs.
Gaze
</button>
</div>
</div>
<!-- end of button to view advanced analysis dropdown -->
</div>
<!-- end of student behavior summary -->
...@@ -2047,6 +2297,21 @@
</div>
<div class="modal-body">
<div id="ActivityChartContainer" style="height: 370px; max-width: 920px; margin: 0px auto;"></div>
<!-- Notes header -->
<div class="modal-header mt-4">
<h3>Notes</h3>
</div>
<!-- End of Notes header -->
<!-- Comments row -->
<div class="row mt-3">
<div class="col-lg-6" id="student_activity_comments">
</div>
</div>
<!-- End of Comments row -->
</div>
<div class="modal-footer">
<button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
...@@ -2364,6 +2629,143 @@
<!-- end of gaze advanced analysis modal -->
<!-- student activity-emotion advanced analysis modal -->
<div class="modal fade" id="student_activity_emotion_advanced_modal" tabindex="-1" role="dialog"
aria-labelledby="exampleModalLabel"
aria-hidden="true">
<div class="modal-dialog" role="document" style="max-width: 700px">
<div class="modal-content">
<div class="modal-header">
<h5>Student Behavior Correlations</h5>
<button class="close" type="button" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body text-center">
<h3 class="font-weight-bold">Student Activity vs. Student Emotion</h3>
<!-- ajax loader -->
<div class="text-center" id="student_activity_emotion_corr_loader" hidden>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}" alt="Loader">
</div>
<!-- correlation table -->
<table class="table table-striped" id="student_activity_emotion_corr_table" hidden>
<thead>
<tr>
<th>Student Activity</th>
<th>Student Emotion</th>
<th>Correlation Score</th>
</tr>
</thead>
<tbody id="student_activity_emotion_corr_tbody">
</tbody>
</table>
<!-- end of correlation table -->
</div>
<div class="modal-footer">
<button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
</div>
</div>
</div>
</div>
<!-- end of student activity-emotion advanced analysis modal -->
<!-- student activity-gaze advanced analysis modal -->
<div class="modal fade" id="student_activity_gaze_advanced_modal" tabindex="-1" role="dialog"
aria-labelledby="exampleModalLabel"
aria-hidden="true">
<div class="modal-dialog" role="document" style="max-width: 700px">
<div class="modal-content">
<div class="modal-header">
<h5>Student Behavior Correlations</h5>
<button class="close" type="button" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body text-center">
<h3 class="font-weight-bold">Student Activity vs. Student Emotions</h3>
<!-- ajax loader -->
<div class="text-center" id="student_activity_gaze_corr_loader" hidden>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}" alt="Loader">
</div>
<!-- correlation table -->
<table class="table table-striped" id="student_activity_gaze_corr_table" hidden>
<thead>
<tr>
<th>Student Activity</th>
<th>Student Gaze</th>
<th>Correlation Score</th>
</tr>
</thead>
<tbody id="student_activity_gaze_corr_tbody">
</tbody>
</table>
<!-- end of correlation table -->
</div>
<div class="modal-footer">
<button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
</div>
</div>
</div>
</div>
<!-- end of student activity-gaze advanced analysis modal -->
<!-- student emotion-gaze advanced analysis modal -->
<div class="modal fade" id="student_emotion_gaze_advanced_modal" tabindex="-1" role="dialog"
aria-labelledby="exampleModalLabel"
aria-hidden="true">
<div class="modal-dialog" role="document" style="max-width: 700px">
<div class="modal-content">
<div class="modal-header">
<h5>Student Behavior Correlations</h5>
<button class="close" type="button" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body text-center">
<h3 class="font-weight-bold">Student Emotion vs, Student Gaze</h3>
<!-- ajax loader -->
<div class="text-center" id="student_emotion_gaze_corr_loader" hidden>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}" alt="Loader">
</div>
<!-- correlation table -->
<table class="table table-striped" id="student_emotion_gaze_corr_table" hidden>
<thead>
<tr>
<th>Student Emotion</th>
<th>Student Gaze</th>
<th>Correlation Score</th>
</tr>
</thead>
<tbody id="student_emotion_gaze_corr_tbody">
</tbody>
</table>
<!-- end of correlation table -->
</div>
<div class="modal-footer">
<button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
</div>
</div>
</div>
</div>
<!-- end of student emotion-gaze advanced analysis modal -->
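<!-- A minimal, hedged sketch of how the three advanced-analysis dropdown buttons above could be wired to
     the new correlation endpoints registered in urls.py (get-student-activity-emotion-correlations,
     get-student-activity-gaze-correlations, get-student-emotion-gaze-correlations). The helper name
     loadStudentCorrelations, the "video_name" query parameter and the response fields
     ("correlations", "label_1", "label_2", "score") are assumptions, not confirmed by this diff;
     in the project this script would normally sit in the page's scripts block. -->
<script>
    //open the matching modal, show its loader, fetch the correlations and fill the table body
    function loadStudentCorrelations(endpoint, modalId, loaderId, tableId, tbodyId) {
        $(modalId).modal();
        $(loaderId).attr('hidden', false);
        $(tableId).attr('hidden', true);

        fetch('http://127.0.0.1:8000/' + endpoint + '/?video_name=' + global_video_name)
            .then((res) => res.json())
            .then((out) => {
                let rows = '';
                //"correlations", "label_1", "label_2" and "score" are assumed field names
                out.correlations.forEach((corr) => {
                    rows += '<tr><td>' + corr.label_1 + '</td><td>' + corr.label_2 + '</td><td>' + corr.score + '</td></tr>';
                });
                $(tbodyId).html(rows);
                $(loaderId).attr('hidden', true);
                $(tableId).attr('hidden', false);
            })
            .catch((err) => alert('error: ' + err));
    }

    $('#student_activity_emotion_corr').click(() => loadStudentCorrelations(
        'get-student-activity-emotion-correlations', '#student_activity_emotion_advanced_modal',
        '#student_activity_emotion_corr_loader', '#student_activity_emotion_corr_table',
        '#student_activity_emotion_corr_tbody'));

    $('#student_activity_gaze_corr').click(() => loadStudentCorrelations(
        'get-student-activity-gaze-correlations', '#student_activity_gaze_advanced_modal',
        '#student_activity_gaze_corr_loader', '#student_activity_gaze_corr_table',
        '#student_activity_gaze_corr_tbody'));

    $('#student_emotion_gaze_corr').click(() => loadStudentCorrelations(
        'get-student-emotion-gaze-correlations', '#student_emotion_gaze_advanced_modal',
        '#student_emotion_gaze_corr_loader', '#student_emotion_gaze_corr_table',
        '#student_emotion_gaze_corr_tbody'));
</script>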
{% endblock %} {% endblock %}
<!--scripts--> <!--scripts-->
{% block 'scripts' %} {% block 'scripts' %}
......
...@@ -241,7 +241,13 @@ ...@@ -241,7 +241,13 @@
//to handle the 'integrate' modal //to handle the 'integrate' modal
$('#integrate_activity').click(function () { $('#integrate_activity').click(function () {
//define the student video src //define the student video src
{#global_video_name = "Video_test_9.mp4";#}
{#global_video_name = "Video_test_9_annotated.mp4";#}
let video_src = "{% static '' %}FirstApp/videos/" + global_video_name; let video_src = "{% static '' %}FirstApp/videos/" + global_video_name;
{#let video_src = "{% static '' %}FirstApp/video/" + global_video_name;#}
{#let video_src = "{% static '' %}/FirstApp/activity/" + global_video_name;#}
{#let video_src = "{% static '' %}FirstApp/emotion/" + global_video_name;#}
//assign the video src //assign the video src
$('#student_video').attr('src', video_src); $('#student_video').attr('src', video_src);
...@@ -1078,6 +1084,11 @@ ...@@ -1078,6 +1084,11 @@
type="video/mp4"> type="video/mp4">
Your browser does not support the video tag. Your browser does not support the video tag.
</video> </video>
{# <video width="500" height="300" id="student_video" controls>#}
{# <source src="{% static 'FirstApp/videos/Video_test_2.mp4' %}"#}
{# type="video/mp4">#}
{# Your browser does not support the video tag.#}
{# </video>#}
</div> </div>
<!--end of student video section --> <!--end of student video section -->
......
...@@ -74,6 +74,8 @@ ...@@ -74,6 +74,8 @@
real_class = '.' + real_class; real_class = '.' + real_class;
let date = e.target.parentNode.parentNode.firstChild.innerHTML; let date = e.target.parentNode.parentNode.firstChild.innerHTML;
//assign the date
global_lecture_date = date;
fetch('http://127.0.0.1:8000/get-lecture-emotion-availability/?lecturer=' + global_lecturer + '&date=' + date + '&index=' + global_lecturer_subject_index) fetch('http://127.0.0.1:8000/get-lecture-emotion-availability/?lecturer=' + global_lecturer + '&date=' + date + '&index=' + global_lecturer_subject_index)
.then((res) => res.json()) .then((res) => res.json())
...@@ -143,7 +145,8 @@ ...@@ -143,7 +145,8 @@
$('#video_name').text(video.video_name); $('#video_name').text(video.video_name);
$('#video_duration').text(video.video_length); $('#video_duration').text(video.video_length);
$('#video_date').text(video.date); $('#video_date').text(video.date);
global_lecture_video_id = video.lecture_video_id; {#global_lecture_video_id = video.lecture_video_id;#}
global_lecture_video_id = video.id;
global_video_name = video.video_name; global_video_name = video.video_name;
...@@ -241,32 +244,67 @@ ...@@ -241,32 +244,67 @@
//define the student video src //define the student video src
let video_src = "{% static '' %}FirstApp/videos/" + global_video_name; let video_src = "{% static '' %}FirstApp/videos/" + global_video_name;
//assign the video src
$('#student_video').attr('src', video_src);
{#fetch('http://127.0.0.1:8000/get-random-number')#}
{#.then((res) => res.json())#}
{#.then((out) => alert(out.response))#}
{#.catch((err) => alert('err: ' + err));#}
//fetch the lecture recorded video name
fetch('http://127.0.0.1:8000/get-lecture-recorded-video-name/?lecturer=' + global_lecturer + '&subject=' + global_subject + '&date=' + global_lecture_date)
.then((res) => res.json())
.then((out) => assignLecturerRecordedVideoName(out))
.catch((err) => alert('error: ' + err));
{#global_lecturer_video_name = "Test_1.mp4";#} {#global_lecturer_video_name = "Test_1.mp4";#}
{#global_lecturer_video_name = "Test_2.mp4";#} {#global_lecturer_video_name = "Test_2.mp4";#}
global_lecturer_video_name = "Test_3.mp4"; {#global_lecturer_video_name = "Test_3.mp4";#}
{#global_lecturer_video_name = "Lecturer_Video_4.mp4";#}
{##}
{#//define the lecturer video src#}
{#let lecturer_video_src = "{% static '' %}FirstApp/lecturer_videos/" + global_lecturer_video_name;#}
{##}
{##}
{##}
{#//assign the video src#}
{#$('#lecturer_video').attr('src', lecturer_video_src);#}
{##}
{#$('#integrate_modal').modal();#}
{#//fetch data from the API#}
{#fetch('http://127.0.0.1:8000/get-lecture-emotion-for-frame?video_name=' + global_video_name)#}
{# .then((res) => res.json())#}
{# .then((out) => displayEmotionRecognitionForFrame(out.response))#}
{# .catch((err) => alert('error: ' + err));#}
});
//assign the lecturer recorded video name
function assignLecturerRecordedVideoName(res) {
global_lecturer_video_name = res.video_name;
//define the lecturer video src //define the lecturer video src
let lecturer_video_src = "{% static '' %}FirstApp/lecturer_videos/" + global_lecturer_video_name; let lecturer_video_src = "{% static '' %}FirstApp/lecturer_videos/" + global_lecturer_video_name;
//assign the video src
$('#student_video').attr('src', video_src);
//assign the video src //assign the video src
$('#lecturer_video').attr('src', lecturer_video_src); $('#lecturer_video').attr('src', lecturer_video_src);
$('#integrate_modal').modal(); $('#integrate_modal').modal();
//fetch data from the API //fetch data from the API
fetch('http://127.0.0.1:8000/get-lecture-emotion-for-frame?video_name=' + global_video_name) fetch('http://127.0.0.1:8000/get-lecture-emotion-for-frame?video_name=' + global_video_name)
.then((res) => res.json()) .then((res) => res.json())
.then((out) => displayEmotionRecognitionForFrame(out.response)) .then((out) => displayEmotionRecognitionForFrame(out.response))
.catch((err) => alert('error: ' + err)); .catch((err) => alert('error: ' + err));
}
});
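//a small note on the contract assumed above: assignLecturerRecordedVideoName() only reads the
//"video_name" field of the /get-lecture-recorded-video-name/ response, e.g. (file name hypothetical):
//  { "video_name": "Lecturer_Video_1.mp4" }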
//this function will display the emotion percentages for each frame //this function will display the emotion percentages for each frame
...@@ -338,7 +376,7 @@ ...@@ -338,7 +376,7 @@
fetch('http://127.0.0.1:8000/lecturer/get-lecturer-video-frame-recognitions/?video_name=' + global_lecturer_video_name) fetch('http://127.0.0.1:8000/lecturer/get-lecturer-video-frame-recognitions/?video_name=' + global_lecturer_video_name)
.then((res) => res.json()) .then((res) => res.json())
.then((out) => displayLecturerEmotionRecognitionForFrame(out)) .then((out) => displayLecturerEmotionRecognitionForFrame(out))
.catch((err) => alert('error: ' + err)) .catch((err) => alert('error: ' + err));
} }
......
...@@ -74,6 +74,8 @@ ...@@ -74,6 +74,8 @@
real_class = '.' + real_class; real_class = '.' + real_class;
let date = e.target.parentNode.parentNode.firstChild.innerHTML; let date = e.target.parentNode.parentNode.firstChild.innerHTML;
//assign the date
global_lecture_date = date;
fetch('http://127.0.0.1:8000/get-lecture-video-gaze-estimation-availability/?lecturer=' + global_lecturer + '&date=' + date + '&index=' + global_lecturer_subject_index) fetch('http://127.0.0.1:8000/get-lecture-video-gaze-estimation-availability/?lecturer=' + global_lecturer + '&date=' + date + '&index=' + global_lecturer_subject_index)
.then((res) => res.json()) .then((res) => res.json())
...@@ -142,7 +144,8 @@ ...@@ -142,7 +144,8 @@
$('#video_name').text(video.video_name); $('#video_name').text(video.video_name);
$('#video_duration').text(video.video_length); $('#video_duration').text(video.video_length);
$('#video_date').text(video.date); $('#video_date').text(video.date);
global_lecture_video_id = video.lecture_video_id; {#global_lecture_video_id = video.lecture_video_id;#}
global_lecture_video_id = video.id;
global_video_name = video.video_name; global_video_name = video.video_name;
...@@ -239,21 +242,28 @@ ...@@ -239,21 +242,28 @@
//define the student video src //define the student video src
let video_src = "{% static '' %}FirstApp/videos/" + global_video_name; let video_src = "{% static '' %}FirstApp/videos/" + global_video_name;
{#global_lecturer_video_name = "Test_1.mp4";#}
{#global_lecturer_video_name = "Test_2.mp4";#}
global_lecturer_video_name = "Test_3.mp4";
//define the lecturer video src
let lecturer_video_src = "{% static '' %}FirstApp/lecturer_videos/" + global_lecturer_video_name;
//assign the video src //assign the video src
$('#student_video').attr('src', video_src); $('#student_video').attr('src', video_src);
//assign the video src //fetch the lecture recorded video name
$('#lecturer_video').attr('src', lecturer_video_src); fetch('http://127.0.0.1:8000/get-lecture-recorded-video-name/?lecturer=' + global_lecturer + '&subject=' + global_subject + '&date=' + global_lecture_date)
.then((res) => res.json())
.then((out) => assignLecturerRecordedVideoName(out))
.catch((err) => alert('error: ' + err));
{#global_lecturer_video_name = "Test_1.mp4";#}
{#global_lecturer_video_name = "Test_2.mp4";#}
{#global_lecturer_video_name = "Test_3.mp4";#}
{#global_lecturer_video_name = "Lecturer_Video_4.mp4";#}
{##}
{#//define the lecturer video src#}
{#let lecturer_video_src = "{% static '' %}FirstApp/lecturer_videos/" + global_lecturer_video_name;#}
{##}
{#//assign the video src#}
{#$('#lecturer_video').attr('src', lecturer_video_src);#}
{##}
{#$('#integrate_modal').modal();#}
$('#integrate_modal').modal();
//fetch data from the API //fetch data from the API
fetch('http://127.0.0.1:8000/get-lecture-gaze-estimation-for-frame/?video_name=' + global_video_name) fetch('http://127.0.0.1:8000/get-lecture-gaze-estimation-for-frame/?video_name=' + global_video_name)
...@@ -264,6 +274,23 @@ ...@@ -264,6 +274,23 @@
}); });
//assign the lecturer recorded video name
function assignLecturerRecordedVideoName(res) {
global_lecturer_video_name = res.video_name;
//define the lecturer video src
let lecturer_video_src = "{% static '' %}FirstApp/lecturer_videos/" + global_lecturer_video_name;
{#alert('hello');#}
//assign the video src
$('#lecturer_video').attr('src', lecturer_video_src);
$('#integrate_modal').modal();
}
//this function will load the activity recognition for frames //this function will load the activity recognition for frames
function displayGazeEstimationForFrame(response) { function displayGazeEstimationForFrame(response) {
......
<!DOCTYPE html> <!DOCTYPE html>
<html lang="en"> <html lang="en">
{% block head %} {% block head %}
<head> <head>
<meta charset="utf-8"> <meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no"> <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<meta name="description" content=""> <meta name="description" content="">
<meta name="author" content=""> <meta name="author" content="">
<title>SLPES</title> <title>SLPES</title>
{% load static %} {% load static %}
<link rel="shortcut icon" href="{% static 'FirstApp/images/favicon.ico' %}" type="image/x-icon" /> <link rel="shortcut icon" href="{% static 'FirstApp/images/favicon.ico' %}" type="image/x-icon"/>
<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css" integrity="sha384-Vkoo8x4CGsO3+Hhxv8T/Q5PaXtkKtu6ug5TOeNV6gBiFeWPGFN9MuhOf23Q9Ifjh" crossorigin="anonymous"> <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css"
<link rel="stylesheet" href="{% static 'FirstApp/css/sb-admin-2.min.css' %}"> integrity="sha384-Vkoo8x4CGsO3+Hhxv8T/Q5PaXtkKtu6ug5TOeNV6gBiFeWPGFN9MuhOf23Q9Ifjh"
<link rel="stylesheet" href="{% static 'FirstApp/css/slider.css' %}"> crossorigin="anonymous">
<link href="{% static 'FirstApp/vendor/fontawesome-free/css/all.min.css' %}" rel="stylesheet" type="text/css"> <link rel="stylesheet" href="{% static 'FirstApp/css/sb-admin-2.min.css' %}">
<link rel="stylesheet" href="{% static 'FirstApp/css/slider.css' %}">
<link href="https://fonts.googleapis.com/css?family=Nunito:200,200i,300,300i,400,400i,600,600i,700,700i,800,800i,900,900i" rel="stylesheet"> <link href="{% static 'FirstApp/vendor/fontawesome-free/css/all.min.css' %}" rel="stylesheet" type="text/css">
<link rel="stylesheet" href="{% static 'FirstApp/css/all.min.css' %}"> <link href="{% static 'FirstApp/css/snackbar.css' %}" rel="stylesheet" type="text/css">
<link href="{% static 'FirstApp/vendor/datatables/dataTables.bootstrap4.min.css' %}" rel="stylesheet">
<link href="https://fonts.googleapis.com/css?family=Nunito:200,200i,300,300i,400,400i,600,600i,700,700i,800,800i,900,900i"
<!-- this link will import process workflow CSS --> rel="stylesheet">
<link href="{% static 'FirstApp/css/process-worflow.css' %}" rel="stylesheet" type="text/css"> <link rel="stylesheet" href="{% static 'FirstApp/css/all.min.css' %}">
<link href="{% static 'FirstApp/vendor/datatables/dataTables.bootstrap4.min.css' %}" rel="stylesheet">
</head>
<!-- this link will import process workflow CSS -->
<link href="{% static 'FirstApp/css/process-worflow.css' %}" rel="stylesheet" type="text/css">
</head>
{% endblock %} {% endblock %}
<body id="page-top"> <body id="page-top">
<!-- Page Wrapper --> <!-- Page Wrapper -->
{% block javascript %} {% block javascript %}
{% load static %} {% load static %}
<script type="text/javascript" src="{% static 'FirstApp/vendor/jquery/jquery.js' %}"></script> <script type="text/javascript" src="{% static 'FirstApp/vendor/jquery/jquery.js' %}"></script>
<script src="{% static 'FirstApp/vendor/jquery/jquery.js' %}"></script> <script src="{% static 'FirstApp/vendor/jquery/jquery.js' %}"></script>
<script src="{% static 'FirstApp/vendor/bootstrap/js/bootstrap.bundle.min.js' %}"></script> <script src="{% static 'FirstApp/vendor/bootstrap/js/bootstrap.bundle.min.js' %}"></script>
{% endblock %} {% endblock %}
<div id="wrapper"> <div id="wrapper">
<!-- Sidebar --> <!-- Sidebar -->
<ul class="navbar-nav bg-gradient-primary sidebar sidebar-dark accordion" id="accordionSidebar"> <ul class="navbar-nav bg-gradient-primary sidebar sidebar-dark accordion" id="accordionSidebar">
<!-- Sidebar - Brand --> <!-- Sidebar - Brand -->
<a class="sidebar-brand d-flex align-items-center justify-content-center" href="/"> <a class="sidebar-brand d-flex align-items-center justify-content-center" href="/">
<div class="sidebar-brand-icon rotate-n-15"> {# <div class="sidebar-brand-icon rotate-n-15">#}
<i class="fas fa-laugh-wink"></i> {# <i class="fas fa-laugh-wink"></i>#}
</div> {# </div>#}
{# <div class="sidebar-brand-icon">#}
{# <i class="fas fa-chalkboard-teacher"></i>#}
{# </div>#}
{% if request.session.user_type == "Lecturer" %} {% if request.session.user_type == "Lecturer" %}
<div class="sidebar-brand-text mx-3">SLPES Lecturer</div> <div class="sidebar-brand-icon">
{% endif %} <i class="fas fa-chalkboard-teacher"></i>
</div>
<div class="sidebar-brand-text mx-3">SLPES Lecturer</div>
{% endif %}
{% if request.session.user_type == "Admin" %} {% if request.session.user_type == "Admin" %}
<div class="sidebar-brand-text mx-3">SLPES Admin</div> <div class="sidebar-brand-icon">
{% endif %} <i class="fa fa-user" aria-hidden="true"></i>
</a> </div>
<div class="sidebar-brand-text mx-3">SLPES Admin</div>
{% endif %}
</a>
<!-- Divider --> <!-- Divider -->
<hr class="sidebar-divider my-0"> <hr class="sidebar-divider my-0">
<!-- Nav Item - Dashboard --> <!-- Nav Item - Dashboard -->
<li class="nav-item active"> <li class="nav-item active">
<a class="nav-link" href="/"> <a class="nav-link" href="/">
<i class="fas fa-fw fa-tachometer-alt"></i> <i class="fas fa-fw fa-tachometer-alt"></i>
<span>Dashboard</span></a> <span>Dashboard</span></a>
</li> </li>
<!-- Divider --> <!-- Divider -->
<hr class="sidebar-divider"> <hr class="sidebar-divider">
<!-- Heading --> <!-- Heading -->
<div class="sidebar-heading"> <div class="sidebar-heading">
Interface Interface
</div> </div>
{% if request.session.user_type == "Lecturer" %} {% if request.session.user_type == "Lecturer" %}
<!-- Nav Item - Pages Collapse Menu --> <!-- Nav Item - Pages Collapse Menu -->
<li class="nav-item"> <li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseTwo" aria-expanded="true" aria-controls="collapseTwo"> <a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseTwo"
<i class="fas fa-fw fa-cog"></i> aria-expanded="true" aria-controls="collapseTwo">
<span>Estimations</span> <i class="fa fa-calculator" aria-hidden="true"></i>
</a> <span>Estimations</span>
<div id="collapseTwo" class="collapse" aria-labelledby="headingTwo" data-parent="#accordionSidebar"> </a>
<div class="bg-white py-2 collapse-inner rounded"> <div id="collapseTwo" class="collapse" aria-labelledby="headingTwo" data-parent="#accordionSidebar">
<h6 class="collapse-header">Components:</h6> <div class="bg-white py-2 collapse-inner rounded">
{# <a class="collapse-item" href="/pose">Pose</a>#} <h6 class="collapse-header">Components:</h6>
<a class="collapse-item" href="/gaze">Gaze</a> {# <a class="collapse-item" href="/pose">Pose</a>#}
<a class="collapse-item" href="/emotion">Emotion</a> <a class="collapse-item" href="/gaze">Gaze</a>
<a class="collapse-item" href="/activity">Activity</a> <a class="collapse-item" href="/emotion">Emotion</a>
</div> <a class="collapse-item" href="/activity">Activity</a>
</div> </div>
</li>
</div>
</li>
<!-- Nav Item - Pages Collapse Menu --> <!-- Nav Item - Pages Collapse Menu -->
<li class="nav-item"> <li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseThree" aria-expanded="true" aria-controls="collapseThree"> <a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseThree"
<i class="fas fa-fw fa-cog"></i> aria-expanded="true" aria-controls="collapseThree">
<span>Lecture</span> <i class="fa fa-graduation-cap" aria-hidden="true"></i>
</a> <span>Lecture</span>
<div id="collapseThree" class="collapse" aria-labelledby="headingThree" data-parent="#accordionSidebar"> </a>
<div class="bg-white py-2 collapse-inner rounded"> <div id="collapseThree" class="collapse" aria-labelledby="headingThree" data-parent="#accordionSidebar">
<h6 class="collapse-header">Components:</h6> <div class="bg-white py-2 collapse-inner rounded">
<a class="collapse-item" href="/summary/lecture">Summarization</a> <h6 class="collapse-header">Components:</h6>
</div> <a class="collapse-item" href="/summary/lecture">Summarization</a>
</div> </div>
</li> </div>
</li>
<li class="nav-item"> <li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseFour" aria-expanded="true" aria-controls="collapseThree"> <a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseFour"
<i class="fas fa-fw fa-cog"></i> aria-expanded="true" aria-controls="collapseThree">
<span>Attendance</span> <i class="fa fa-eye" aria-hidden="true"></i>
</a> <span>Attendance</span>
<div id="collapseFour" class="collapse" aria-labelledby="headingThree" data-parent="#accordionSidebar"> </a>
<div class="bg-white py-2 collapse-inner rounded"> <div id="collapseFour" class="collapse" aria-labelledby="headingThree" data-parent="#accordionSidebar">
<h6 class="collapse-header">Components:</h6> <div class="bg-white py-2 collapse-inner rounded">
<a class="collapse-item" href="/attendance/initiate-lecture">initiate lecture</a> <h6 class="collapse-header">Components:</h6>
</div> <a class="collapse-item" href="/attendance/initiate-lecture">initiate lecture</a>
</div> </div>
</li> </div>
</li>
<!-- Nav Item - Utilities Collapse Menu --> <!-- Nav Item - Utilities Collapse Menu -->
<li class="nav-item"> <li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseUtilities" aria-expanded="true" aria-controls="collapseUtilities"> <a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseUtilities"
<i class="fas fa-fw fa-wrench"></i> aria-expanded="true" aria-controls="collapseUtilities">
<span>Utilities</span> <i class="fas fa-fw fa-wrench"></i>
</a> <span>Utilities</span>
<div id="collapseUtilities" class="collapse" aria-labelledby="headingUtilities" data-parent="#accordionSidebar"> </a>
<div class="bg-white py-2 collapse-inner rounded"> <div id="collapseUtilities" class="collapse" aria-labelledby="headingUtilities"
<h6 class="collapse-header">Custom Utilities:</h6> data-parent="#accordionSidebar">
{# <a class="collapse-item" href="/extract">Video Extractor</a>#} <div class="bg-white py-2 collapse-inner rounded">
<a class="collapse-item" href="/video_result">Video Results</a> <h6 class="collapse-header">Custom Utilities:</h6>
{# <a class="collapse-item" href="utilities-animation.html">Animations</a>#} {# <a class="collapse-item" href="/extract">Video Extractor</a>#}
{# <a class="collapse-item" href="utilities-other.html">Other</a>#} <a class="collapse-item" href="/video_result">Video Results</a>
</div> {# <a class="collapse-item" href="utilities-animation.html">Animations</a>#}
</div> {# <a class="collapse-item" href="utilities-other.html">Other</a>#}
</li> </div>
</div>
</li>
{% endif %} {% endif %}
{% if request.session.user_type == "Admin" %} {% if request.session.user_type == "Admin" %}
<!-- Nav Item - Pages Collapse Menu --> <!-- Nav Item - Pages Collapse Menu -->
<li class="nav-item"> <li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapsePages" <a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapsePages"
aria-expanded="true" aria-controls="collapsePages"> aria-expanded="true" aria-controls="collapsePages">
<i class="fas fa-fw fa-folder"></i> <i class="fas fa-fw fa-folder"></i>
<span>Pages</span> <span>Pages</span>
</a> </a>
<div id="collapsePages" class="collapse" aria-labelledby="headingPages" data-parent="#accordionSidebar"> <div id="collapsePages" class="collapse" aria-labelledby="headingPages" data-parent="#accordionSidebar">
<div class="bg-white py-2 collapse-inner rounded"> <div class="bg-white py-2 collapse-inner rounded">
<!-- <h6 class="collapse-header">Login Screens:</h6>--> <!-- <h6 class="collapse-header">Login Screens:</h6>-->
<a class="collapse-item" href="/lecturer">Dashboard</a> <a class="collapse-item" href="/lecturer">Dashboard</a>
<a class="collapse-item" href="/lecturer/lecture-video">Video Page</a> <a class="collapse-item" href="/lecturer/lecture-video">Video Page</a>
</div>
</div> </div>
</div> </li>
</li> {% endif %}
{% endif %}
<!-- Divider -->
<!-- Divider --> <hr class="sidebar-divider">
<hr class="sidebar-divider">
{# <!-- Heading -->#}
{# <!-- Heading -->#} {# <div class="sidebar-heading">#}
{# <div class="sidebar-heading">#} {# Addons#}
{# Addons#} {# </div>#}
{# </div>#}
<!-- Nav Item - Pages Collapse Menu -->
<!-- Nav Item - Pages Collapse Menu --> {# <li class="nav-item">#}
{# <li class="nav-item">#} {# <a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapsePages" aria-expanded="true" aria-controls="collapsePages">#}
{# <a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapsePages" aria-expanded="true" aria-controls="collapsePages">#} {# <i class="fas fa-fw fa-folder"></i>#}
{# <i class="fas fa-fw fa-folder"></i>#} {# <span>Pages</span>#}
{# <span>Pages</span>#} {# </a>#}
{# </a>#} {# <div id="collapsePages" class="collapse" aria-labelledby="headingPages" data-parent="#accordionSidebar">#}
{# <div id="collapsePages" class="collapse" aria-labelledby="headingPages" data-parent="#accordionSidebar">#} {# <div class="bg-white py-2 collapse-inner rounded">#}
{# <div class="bg-white py-2 collapse-inner rounded">#} {# <h6 class="collapse-header">Login Screens:</h6>#}
{# <h6 class="collapse-header">Login Screens:</h6>#} {# <a class="collapse-item" href="/login">Login</a>#}
{# <a class="collapse-item" href="/login">Login</a>#} {# <a class="collapse-item" href="/register">Register</a>#}
{# <a class="collapse-item" href="/register">Register</a>#} {# <a class="collapse-item" href="/forgot-password">Forgot Password</a>#}
{# <a class="collapse-item" href="/forgot-password">Forgot Password</a>#} {# <div class="collapse-divider"></div>#}
{# <div class="collapse-divider"></div>#} {# <h6 class="collapse-header">Other Pages:</h6>#}
{# <h6 class="collapse-header">Other Pages:</h6>#} {# <a class="collapse-item" href="/404">404 Page</a>#}
{# <a class="collapse-item" href="/404">404 Page</a>#} {# <a class="collapse-item" href="/blank">Blank Page</a>#}
{# <a class="collapse-item" href="/blank">Blank Page</a>#} {# </div>#}
{# </div>#} {# </div>#}
{# </div>#} {# </li>#}
{# </li>#}
{# <!-- Nav Item - Charts -->#}
{# <!-- Nav Item - Charts -->#} {# <li class="nav-item">#}
{# <li class="nav-item">#} {# <a class="nav-link" href="charts.html">#}
{# <a class="nav-link" href="charts.html">#} {# <i class="fas fa-fw fa-chart-area"></i>#}
{# <i class="fas fa-fw fa-chart-area"></i>#} {# <span>Charts</span></a>#}
{# <span>Charts</span></a>#} {# </li>#}
{# </li>#} {##}
{##} {# <!-- Nav Item - Tables -->#}
{# <!-- Nav Item - Tables -->#} {# <li class="nav-item">#}
{# <li class="nav-item">#} {# <a class="nav-link" href="/tables">#}
{# <a class="nav-link" href="/tables">#} {# <i class="fas fa-fw fa-table"></i>#}
{# <i class="fas fa-fw fa-table"></i>#} {# <span>Tables</span></a>#}
{# <span>Tables</span></a>#} {# </li>#}
{# </li>#}
<!-- Divider -->
<!-- Divider --> <hr class="sidebar-divider d-none d-md-block">
<hr class="sidebar-divider d-none d-md-block">
<!-- Sidebar Toggler (Sidebar) -->
<!-- Sidebar Toggler (Sidebar) --> <div class="text-center d-none d-md-inline">
<div class="text-center d-none d-md-inline"> <button class="rounded-circle border-0" id="sidebarToggle"></button>
<button class="rounded-circle border-0" id="sidebarToggle"></button> </div>
</div>
</ul> </ul>
...@@ -218,248 +237,257 @@ ...@@ -218,248 +237,257 @@
<div id="content-wrapper" class="d-flex flex-column"> <div id="content-wrapper" class="d-flex flex-column">
<!-- Main Content --> <!-- Main Content -->
<div id="content"> <div id="content">
<!-- Topbar --> <!-- Topbar -->
<nav class="navbar navbar-expand navbar-light bg-white topbar mb-4 static-top shadow"> <nav class="navbar navbar-expand navbar-light bg-white topbar mb-4 static-top shadow">
<!-- Sidebar Toggle (Topbar) --> <!-- Sidebar Toggle (Topbar) -->
<button id="sidebarToggleTop" class="btn btn-link d-md-none rounded-circle mr-3"> <button id="sidebarToggleTop" class="btn btn-link d-md-none rounded-circle mr-3">
<i class="fa fa-bars"></i> <i class="fa fa-bars"></i>
</button> </button>
<!-- Topbar Search --> <!-- Topbar Search -->
{# <form class="d-none d-sm-inline-block form-inline mr-auto ml-md-3 my-2 my-md-0 mw-100 navbar-search">#} {# <form class="d-none d-sm-inline-block form-inline mr-auto ml-md-3 my-2 my-md-0 mw-100 navbar-search">#}
{# <div class="input-group">#} {# <div class="input-group">#}
{# <input type="text" class="form-control bg-light border-0 small" placeholder="Search for..." aria-label="Search" aria-describedby="basic-addon2">#} {# <input type="text" class="form-control bg-light border-0 small" placeholder="Search for..." aria-label="Search" aria-describedby="basic-addon2">#}
{# <div class="input-group-append">#} {# <div class="input-group-append">#}
{# <button class="btn btn-primary" type="button">#} {# <button class="btn btn-primary" type="button">#}
{# <i class="fas fa-search fa-sm"></i>#} {# <i class="fas fa-search fa-sm"></i>#}
{# </button>#} {# </button>#}
{# </div>#} {# </div>#}
{# </div>#} {# </div>#}
{# </form>#} {# </form>#}
<!-- Topbar Navbar --> <!-- Topbar Navbar -->
<ul class="navbar-nav ml-auto"> <ul class="navbar-nav ml-auto">
<!-- Nav Item - Search Dropdown (Visible Only XS) --> <!-- Nav Item - Search Dropdown (Visible Only XS) -->
{# <li class="nav-item dropdown no-arrow d-sm-none">#} {# <li class="nav-item dropdown no-arrow d-sm-none">#}
{# <a class="nav-link dropdown-toggle" href="#" id="searchDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">#} {# <a class="nav-link dropdown-toggle" href="#" id="searchDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">#}
{# <i class="fas fa-search fa-fw"></i>#} {# <i class="fas fa-search fa-fw"></i>#}
{# </a>#} {# </a>#}
{# <!-- Dropdown - Messages -->#} {# <!-- Dropdown - Messages -->#}
{# <div class="dropdown-menu dropdown-menu-right p-3 shadow animated--grow-in" aria-labelledby="searchDropdown">#} {# <div class="dropdown-menu dropdown-menu-right p-3 shadow animated--grow-in" aria-labelledby="searchDropdown">#}
{# <form class="form-inline mr-auto w-100 navbar-search">#} {# <form class="form-inline mr-auto w-100 navbar-search">#}
{# <div class="input-group">#} {# <div class="input-group">#}
{# <input type="text" class="form-control bg-light border-0 small" placeholder="Search for..." aria-label="Search" aria-describedby="basic-addon2">#} {# <input type="text" class="form-control bg-light border-0 small" placeholder="Search for..." aria-label="Search" aria-describedby="basic-addon2">#}
{# <div class="input-group-append">#} {# <div class="input-group-append">#}
{# <button class="btn btn-primary" type="button">#} {# <button class="btn btn-primary" type="button">#}
{# <i class="fas fa-search fa-sm"></i>#} {# <i class="fas fa-search fa-sm"></i>#}
{# </button>#} {# </button>#}
{# </div>#} {# </div>#}
{# </div>#} {# </div>#}
{# </form>#} {# </form>#}
{# </div>#} {# </div>#}
{# </li>#} {# </li>#}
{##} {##}
{# <!-- Nav Item - Alerts -->#} {# <!-- Nav Item - Alerts -->#}
{# <li class="nav-item dropdown no-arrow mx-1">#} {# <li class="nav-item dropdown no-arrow mx-1">#}
{# <a class="nav-link dropdown-toggle" href="#" id="alertsDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">#} {# <a class="nav-link dropdown-toggle" href="#" id="alertsDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">#}
{# <i class="fas fa-bell fa-fw"></i>#} {# <i class="fas fa-bell fa-fw"></i>#}
{# <!-- Counter - Alerts -->#} {# <!-- Counter - Alerts -->#}
{# <span class="badge badge-danger badge-counter">3+</span>#} {# <span class="badge badge-danger badge-counter">3+</span>#}
{# </a>#} {# </a>#}
{# <!-- Dropdown - Alerts -->#} {# <!-- Dropdown - Alerts -->#}
{# <div class="dropdown-list dropdown-menu dropdown-menu-right shadow animated--grow-in" aria-labelledby="alertsDropdown">#} {# <div class="dropdown-list dropdown-menu dropdown-menu-right shadow animated--grow-in" aria-labelledby="alertsDropdown">#}
{# <h6 class="dropdown-header">#} {# <h6 class="dropdown-header">#}
{# Alerts Center#} {# Alerts Center#}
{# </h6>#} {# </h6>#}
{# <a class="dropdown-item d-flex align-items-center" href="#">#} {# <a class="dropdown-item d-flex align-items-center" href="#">#}
{# <div class="mr-3">#} {# <div class="mr-3">#}
{# <div class="icon-circle bg-primary">#} {# <div class="icon-circle bg-primary">#}
{# <i class="fas fa-file-alt text-white"></i>#} {# <i class="fas fa-file-alt text-white"></i>#}
{# </div>#} {# </div>#}
{# </div>#} {# </div>#}
{# <div>#} {# <div>#}
{# <div class="small text-gray-500">December 12, 2019</div>#} {# <div class="small text-gray-500">December 12, 2019</div>#}
{# <span class="font-weight-bold">A new monthly report is ready to download!</span>#} {# <span class="font-weight-bold">A new monthly report is ready to download!</span>#}
{# </div>#} {# </div>#}
{# </a>#} {# </a>#}
{# <a class="dropdown-item d-flex align-items-center" href="#">#} {# <a class="dropdown-item d-flex align-items-center" href="#">#}
{# <div class="mr-3">#} {# <div class="mr-3">#}
{# <div class="icon-circle bg-success">#} {# <div class="icon-circle bg-success">#}
{# <i class="fas fa-donate text-white"></i>#} {# <i class="fas fa-donate text-white"></i>#}
{# </div>#} {# </div>#}
{# </div>#} {# </div>#}
{# <div>#} {# <div>#}
{# <div class="small text-gray-500">December 7, 2019</div>#} {# <div class="small text-gray-500">December 7, 2019</div>#}
{# $290.29 has been deposited into your account!#} {# $290.29 has been deposited into your account!#}
{# </div>#} {# </div>#}
{# </a>#} {# </a>#}
{# <a class="dropdown-item d-flex align-items-center" href="#">#} {# <a class="dropdown-item d-flex align-items-center" href="#">#}
{# <div class="mr-3">#} {# <div class="mr-3">#}
{# <div class="icon-circle bg-warning">#} {# <div class="icon-circle bg-warning">#}
{# <i class="fas fa-exclamation-triangle text-white"></i>#} {# <i class="fas fa-exclamation-triangle text-white"></i>#}
{# </div>#} {# </div>#}
{# </div>#} {# </div>#}
{# <div>#} {# <div>#}
{# <div class="small text-gray-500">December 2, 2019</div>#} {# <div class="small text-gray-500">December 2, 2019</div>#}
{# Spending Alert: We've noticed unusually high spending for your account.#} {# Spending Alert: We've noticed unusually high spending for your account.#}
{# </div>#} {# </div>#}
{# </a>#} {# </a>#}
{# <a class="dropdown-item text-center small text-gray-500" href="#">Show All Alerts</a>#} {# <a class="dropdown-item text-center small text-gray-500" href="#">Show All Alerts</a>#}
{# </div>#} {# </div>#}
{# </li>#} {# </li>#}
<!-- Nav Item - Messages --> <!-- Nav Item - Messages -->
{# <li class="nav-item dropdown no-arrow mx-1">#} {# <li class="nav-item dropdown no-arrow mx-1">#}
{# <a class="nav-link dropdown-toggle" href="#" id="messagesDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">#} {# <a class="nav-link dropdown-toggle" href="#" id="messagesDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">#}
{# <i class="fas fa-envelope fa-fw"></i>#} {# <i class="fas fa-envelope fa-fw"></i>#}
{# <!-- Counter - Messages -->#} {# <!-- Counter - Messages -->#}
{# <span class="badge badge-danger badge-counter">7</span>#} {# <span class="badge badge-danger badge-counter">7</span>#}
{# </a>#} {# </a>#}
{# <!-- Dropdown - Messages -->#} {# <!-- Dropdown - Messages -->#}
{# <div class="dropdown-list dropdown-menu dropdown-menu-right shadow animated--grow-in" aria-labelledby="messagesDropdown">#} {# <div class="dropdown-list dropdown-menu dropdown-menu-right shadow animated--grow-in" aria-labelledby="messagesDropdown">#}
{# <h6 class="dropdown-header">#} {# <h6 class="dropdown-header">#}
{# Message Center#} {# Message Center#}
{# </h6>#} {# </h6>#}
{# <a class="dropdown-item d-flex align-items-center" href="#">#} {# <a class="dropdown-item d-flex align-items-center" href="#">#}
{# <div class="dropdown-list-image mr-3">#} {# <div class="dropdown-list-image mr-3">#}
{# <img class="rounded-circle" src="https://source.unsplash.com/fn_BT9fwg_E/60x60" alt="">#} {# <img class="rounded-circle" src="https://source.unsplash.com/fn_BT9fwg_E/60x60" alt="">#}
{# <div class="status-indicator bg-success"></div>#} {# <div class="status-indicator bg-success"></div>#}
{# </div>#} {# </div>#}
{# <div class="font-weight-bold">#} {# <div class="font-weight-bold">#}
{# <div class="text-truncate">Hi there! I am wondering if you can help me with a problem I've been having.</div>#} {# <div class="text-truncate">Hi there! I am wondering if you can help me with a problem I've been having.</div>#}
{# <div class="small text-gray-500">Emily Fowler · 58m</div>#} {# <div class="small text-gray-500">Emily Fowler · 58m</div>#}
{# </div>#} {# </div>#}
{# </a>#} {# </a>#}
{# <a class="dropdown-item d-flex align-items-center" href="#">#} {# <a class="dropdown-item d-flex align-items-center" href="#">#}
{# <div class="dropdown-list-image mr-3">#} {# <div class="dropdown-list-image mr-3">#}
{# <img class="rounded-circle" src="https://source.unsplash.com/AU4VPcFN4LE/60x60" alt="">#} {# <img class="rounded-circle" src="https://source.unsplash.com/AU4VPcFN4LE/60x60" alt="">#}
{# <div class="status-indicator"></div>#} {# <div class="status-indicator"></div>#}
{# </div>#} {# </div>#}
{# <div>#} {# <div>#}
{# <div class="text-truncate">I have the photos that you ordered last month, how would you like them sent to you?</div>#} {# <div class="text-truncate">I have the photos that you ordered last month, how would you like them sent to you?</div>#}
{# <div class="small text-gray-500">Jae Chun · 1d</div>#} {# <div class="small text-gray-500">Jae Chun · 1d</div>#}
{# </div>#} {# </div>#}
{# </a>#} {# </a>#}
{# <a class="dropdown-item d-flex align-items-center" href="#">#} {# <a class="dropdown-item d-flex align-items-center" href="#">#}
{# <div class="dropdown-list-image mr-3">#} {# <div class="dropdown-list-image mr-3">#}
{# <img class="rounded-circle" src="https://source.unsplash.com/CS2uCrpNzJY/60x60" alt="">#} {# <img class="rounded-circle" src="https://source.unsplash.com/CS2uCrpNzJY/60x60" alt="">#}
{# <div class="status-indicator bg-warning"></div>#} {# <div class="status-indicator bg-warning"></div>#}
{# </div>#} {# </div>#}
{# <div>#} {# <div>#}
{# <div class="text-truncate">Last month's report looks great, I am very happy with the progress so far, keep up the good work!</div>#} {# <div class="text-truncate">Last month's report looks great, I am very happy with the progress so far, keep up the good work!</div>#}
{# <div class="small text-gray-500">Morgan Alvarez · 2d</div>#} {# <div class="small text-gray-500">Morgan Alvarez · 2d</div>#}
{# </div>#} {# </div>#}
{# </a>#} {# </a>#}
{# <a class="dropdown-item d-flex align-items-center" href="#">#} {# <a class="dropdown-item d-flex align-items-center" href="#">#}
{# <div class="dropdown-list-image mr-3">#} {# <div class="dropdown-list-image mr-3">#}
{# <img class="rounded-circle" src="https://source.unsplash.com/Mv9hjnEUHR4/60x60" alt="">#} {# <img class="rounded-circle" src="https://source.unsplash.com/Mv9hjnEUHR4/60x60" alt="">#}
{# <div class="status-indicator bg-success"></div>#} {# <div class="status-indicator bg-success"></div>#}
{# </div>#} {# </div>#}
{# <div>#} {# <div>#}
{# <div class="text-truncate">Am I a good boy? The reason I ask is because someone told me that people say this to all dogs, even if they aren't good...</div>#} {# <div class="text-truncate">Am I a good boy? The reason I ask is because someone told me that people say this to all dogs, even if they aren't good...</div>#}
{# <div class="small text-gray-500">Chicken the Dog · 2w</div>#} {# <div class="small text-gray-500">Chicken the Dog · 2w</div>#}
{# </div>#} {# </div>#}
{# </a>#} {# </a>#}
{# <a class="dropdown-item text-center small text-gray-500" href="#">Read More Messages</a>#} {# <a class="dropdown-item text-center small text-gray-500" href="#">Read More Messages</a>#}
{# </div>#} {# </div>#}
{# </li>#} {# </li>#}
<div class="topbar-divider d-none d-sm-block"></div> <div class="topbar-divider d-none d-sm-block"></div>
<!-- Nav Item - User Information --> <!-- Nav Item - User Information -->
<li class="nav-item dropdown no-arrow"> <li class="nav-item dropdown no-arrow">
<a class="nav-link dropdown-toggle" href="#" id="userDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false"> <a class="nav-link dropdown-toggle" href="#" id="userDropdown" role="button"
<span class="mr-2 d-none d-lg-inline text-gray-600 small">{{ request.user.username }}</span> data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
{% load static %} <span class="mr-2 d-none d-lg-inline text-gray-600 small">{{ request.user.username }}</span>
<img class="img-profile rounded-circle" src="{% static 'FirstApp/images/user_profile.png' %}"> {% load static %}
</a> <img class="img-profile rounded-circle"
<!-- Dropdown - User Information --> src="{% static 'FirstApp/images/user_profile.png' %}">
<div class="dropdown-menu dropdown-menu-right shadow animated--grow-in" aria-labelledby="userDropdown"> </a>
<a class="dropdown-item" href="#"> <!-- Dropdown - User Information -->
<i class="fas fa-user fa-sm fa-fw mr-2 text-gray-400"></i> <div class="dropdown-menu dropdown-menu-right shadow animated--grow-in"
Profile aria-labelledby="userDropdown">
</a> {# <a class="dropdown-item" href="#">#}
<a class="dropdown-item" href="#"> {# <i class="fas fa-user fa-sm fa-fw mr-2 text-gray-400"></i>#}
<i class="fas fa-cogs fa-sm fa-fw mr-2 text-gray-400"></i> {# Profile#}
Settings {# </a>#}
</a> {# <a class="dropdown-item" href="#">#}
<a class="dropdown-item" href="#"> {# <i class="fas fa-cogs fa-sm fa-fw mr-2 text-gray-400"></i>#}
<i class="fas fa-list fa-sm fa-fw mr-2 text-gray-400"></i> {# Settings#}
Activity Log {# </a>#}
</a> {# <a class="dropdown-item" href="#">#}
<div class="dropdown-divider"></div> {# <i class="fas fa-list fa-sm fa-fw mr-2 text-gray-400"></i>#}
<a class="dropdown-item" href="#" data-toggle="modal" data-target="#logoutModal"> {# Activity Log#}
<i class="fas fa-sign-out-alt fa-sm fa-fw mr-2 text-gray-400"></i> {# </a>#}
Logout {# <div class="dropdown-divider"></div>#}
</a> <a class="dropdown-item" href="#" data-toggle="modal" data-target="#logoutModal">
</div> <i class="fas fa-sign-out-alt fa-sm fa-fw mr-2 text-gray-400"></i>
</li> Logout
</a>
</div>
</li>
</ul>
</nav>
<!-- End of Topbar -->
{% block 'container-fluid' %}
{% load static %}
{% endblock %}
</div>
{% block 'footer' %}
<footer class="sticky-footer bg-white">
<div class="container my-auto">
<div class="copyright text-center my-auto">
<span>Copyright &copy; Student and Lecturer Performance Enhancement System 2019</span>
</div>
</div>
</footer>
{% endblock %}
</div>
</div>
{% block 'modal' %}
{% load static %}
<script type="text/javascript" src="{% static 'FirstApp/vendor/jquery/jquery.js' %}"></script>
<script src="{% static 'FirstApp/vendor/jquery/jquery.js' %}"></script>
<script src="{% static 'FirstApp/vendor/bootstrap/js/bootstrap.bundle.min.js' %}"></script>
</ul>
</nav> <a class="scroll-to-top rounded" href="#page-top">
<!-- End of Topbar --> <i class="fas fa-angle-up"></i>
</a>
{% block 'container-fluid' %}
{% load static %} <div class="modal fade" id="logoutModal" tabindex="-1" role="dialog" aria-labelledby="exampleModalLabel"
{% endblock %} aria-hidden="true">
</div> <div class="modal-dialog" role="document">
<div class="modal-content">
{% block 'footer' %} <div class="modal-header">
<footer class="sticky-footer bg-white"> <h5 class="modal-title" id="exampleModalLabel">Ready to Leave?</h5>
<div class="container my-auto"> <button class="close" type="button" data-dismiss="modal" aria-label="Close">
<div class="copyright text-center my-auto"> <span aria-hidden="true">×</span>
<span>Copyright &copy; Student and Lecturer Performance Enhancement System 2019</span> </button>
</div> </div>
<div class="modal-body">Select "Logout" below if you are ready to end your current session.</div>
<div class="modal-footer">
<button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
<a class="btn btn-primary" href="/logout">Logout</a>
</div>
</div>
</div> </div>
</footer>
{% endblock %}
</div> </div>
</div>
{% block 'modal' %} {% endblock %}
{% load static %}
<script type="text/javascript" src="{% static 'FirstApp/vendor/jquery/jquery.js' %}"></script> {% block 'scripts' %}
<script src="{% static 'FirstApp/vendor/jquery/jquery.js' %}"></script>
<script src="{% static 'FirstApp/vendor/bootstrap/js/bootstrap.bundle.min.js' %}"></script>
<a class="scroll-to-top rounded" href="#page-top">
<i class="fas fa-angle-up"></i>
</a>
<div class="modal fade" id="logoutModal" tabindex="-1" role="dialog" aria-labelledby="exampleModalLabel"
aria-hidden="true">
<div class="modal-dialog" role="document">
<div class="modal-content">
<div class="modal-header">
<h5 class="modal-title" id="exampleModalLabel">Ready to Leave?</h5>
<button class="close" type="button" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body">Select "Logout" below if you are ready to end your current session.</div>
<div class="modal-footer">
<button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
<a class="btn btn-primary" href="/logout">Logout</a>
</div>
</div>
</div>
</div>
{% endblock %}
{% block 'scripts' %}
{% load static %} {% load static %}
<script src="https://code.jquery.com/jquery-3.4.1.slim.min.js" integrity="sha384-J6qa4849blE2+poT4WnyKhv5vZF5SrPo0iEjwBvKU7imGFAV0wwj1yYfoRSJoZ+n" crossorigin="anonymous"></script> <script src="https://code.jquery.com/jquery-3.4.1.slim.min.js"
<script src="https://cdn.jsdelivr.net/npm/popper.js@1.16.0/dist/umd/popper.min.js" integrity="sha384-Q6E9RHvbIyZFJoft+2mJbHaEWldlvI9IOYy5n3zV9zzTtmI3UksdQRVvoxMfooAo" crossorigin="anonymous"></script> integrity="sha384-J6qa4849blE2+poT4WnyKhv5vZF5SrPo0iEjwBvKU7imGFAV0wwj1yYfoRSJoZ+n"
<script src="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/js/bootstrap.min.js" integrity="sha384-wfSDF2E50Y2D1uUdj0O3uMBJnjuUD4Ih7YwaYd1iqfktj0Uod8GCExl3Og8ifwB6" crossorigin="anonymous"></script> crossorigin="anonymous"></script>
<script src="https://cdn.jsdelivr.net/npm/popper.js@1.16.0/dist/umd/popper.min.js"
integrity="sha384-Q6E9RHvbIyZFJoft+2mJbHaEWldlvI9IOYy5n3zV9zzTtmI3UksdQRVvoxMfooAo"
crossorigin="anonymous"></script>
<script src="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/js/bootstrap.min.js"
integrity="sha384-wfSDF2E50Y2D1uUdj0O3uMBJnjuUD4Ih7YwaYd1iqfktj0Uod8GCExl3Og8ifwB6"
crossorigin="anonymous"></script>
<script src="{% static 'FirstApp/vendor/jquery/jquery.js' %}"></script> <script src="{% static 'FirstApp/vendor/jquery/jquery.js' %}"></script>
<script src="{% static 'FirstApp/vendor/bootstrap/js/bootstrap.bundle.min.js' %}"></script> <script src="{% static 'FirstApp/vendor/bootstrap/js/bootstrap.bundle.min.js' %}"></script>
...@@ -476,7 +504,7 @@ ...@@ -476,7 +504,7 @@
<!-- Page level custom scripts --> <!-- Page level custom scripts -->
<script src="{% static 'FirstApp/js/demo/chart-area-demo.js' %}"></script> <script src="{% static 'FirstApp/js/demo/chart-area-demo.js' %}"></script>
<script src="{% static 'FirstApp/js/demo/chart-pie-demo.js' %}"></script> <script src="{% static 'FirstApp/js/demo/chart-pie-demo.js' %}"></script>
{% endblock %} {% endblock %}
</body> </body>
</html> </html>
\ No newline at end of file
...@@ -40,6 +40,10 @@ ...@@ -40,6 +40,10 @@
$(document).ready(function () { $(document).ready(function () {
let folder = ''; let folder = '';
{#$('#activity_loader').attr('hidden', false);#}
{#$('#emotion_loader').attr('hidden', false);#}
{#$('#gaze_loader').attr('hidden', false);#}
//select a particular subject //select a particular subject
//select a particular subject //select a particular subject
$('input[type=radio]').click(function () { $('input[type=radio]').click(function () {
...@@ -134,7 +138,6 @@ ...@@ -134,7 +138,6 @@
.catch((error) => alert('an error occurred: ' + error)); .catch((error) => alert('an error occurred: ' + error));
} }
}); });
}); });
...@@ -198,19 +201,19 @@ ...@@ -198,19 +201,19 @@
let classname = $('#step_1').attr('class'); let classname = $('#step_1').attr('class');
setTimeout(() => { setTimeout(() => {
$('#step_1').attr('class', class1) $('#step_1').attr('class', class1)
}, 2000); }, 2000);
setTimeout(() => { setTimeout(() => {
$('#step_2').attr('class', class1) $('#step_2').attr('class', class1)
}, 4000); }, 4000);
setTimeout(() => { setTimeout(() => {
$('#step_3').attr('class', class1) $('#step_3').attr('class', class1)
}, 6000); }, 6000);
setTimeout(() => { setTimeout(() => {
$('#step_4').attr('class', class1) $('#step_4').attr('class', class1)
}, 8000); }, 8000);
...@@ -228,7 +231,6 @@ ...@@ -228,7 +231,6 @@
global_video_name = video_name; global_video_name = video_name;
//perform activity recognition //perform activity recognition
fetch('http://127.0.0.1:8000/process-lecture-activity/?lecture_video_name=' + global_video_name + '&lecture_video_id=' + global_lecture_video_id) fetch('http://127.0.0.1:8000/process-lecture-activity/?lecture_video_name=' + global_video_name + '&lecture_video_id=' + global_lecture_video_id)
.then((res) => res.json()) .then((res) => res.json())
...@@ -251,7 +253,7 @@ ...@@ -251,7 +253,7 @@
//display the emotion loader //display the emotion loader
$('#emotion_loader').attr('hidden', false); $('#emotion_loader').attr('hidden', false);
//sending the request to process the lecture emotions //sending the request to process the lecture emotions
fetch('http://127.0.0.1:8000/process-lecture-emotion/?lecture_video_name=' + global_video_name + '&lecture_video_id=' + global_lecture_video_id) fetch('http://127.0.0.1:8000/process-lecture-emotion/?lecture_video_name=' + global_video_name + '&lecture_video_id=' + global_lecture_video_id)
.then((res) => res.json()) .then((res) => res.json())
.then((out) => handleEmotionResponse(out.response)) .then((out) => handleEmotionResponse(out.response))
...@@ -292,14 +294,69 @@ ...@@ -292,14 +294,69 @@
//hide the activity loader //hide the activity loader
$('#gaze_loader').hide(); $('#gaze_loader').hide();
alert('good');
} }
} }
//this is a test function (delete later)
/*
let interval = setInterval(() => {
{#let url = 'http://127.0.0.1:8000/get-random_number';#}
let url = 'http://127.0.0.1:8000/check-availability';
fetch(url)
.then((res) => res.json())
.then((out) => displayProcess(out))
.catch((err) => alert('error: ' + err))
}, 10000);
//this function will handle the displaying loaders and status in the workflow
function displayProcess(response) {
//if the lecture activity has completed processing
if (response.isActivityExist) {
$('#step_1').attr("class", class1);
$('#activity_loader').hide();
$('#emotion_loader').attr('hidden', false);
}
//if the lecture emotion has completed processing
if (response.isEmotionExist) {
$('#step_2').attr("class", class1);
$('#emotion_loader').hide();
$('#gaze_loader').attr('hidden', false);
}
//if the lecture gaze has completed processing
if (response.isGazeExist) {
$('#step_3').attr("class", class1);
$('#gaze_loader').hide();
}
//if all the processes are completed
if (response.isActivityExist && response.isEmotionExist && response.isGazeExist) {
var x = document.getElementById("snackbar");
x.className = "show";
setTimeout(function () {
x.className = x.className.replace("show", "");
}, 3000);
//clear the setInterval function
clearInterval(interval);
}
}
*/
}); });
...@@ -326,6 +383,7 @@ ...@@ -326,6 +383,7 @@
<!-- Page Heading --> <!-- Page Heading -->
<div class="d-sm-flex align-items-center justify-content-between mb-4"> <div class="d-sm-flex align-items-center justify-content-between mb-4">
<h1 class="h3 mb-0 text-gray-800">Lecture Video Results</h1> <h1 class="h3 mb-0 text-gray-800">Lecture Video Results</h1>
<h2><span id="time_display"></span></h2>
</div> </div>
<!--first row --> <!--first row -->
...@@ -344,7 +402,7 @@ ...@@ -344,7 +402,7 @@
<div class="card-body"> <div class="card-body">
<!--loading gif --> <!--loading gif -->
{% if due_lectures.count == 0 %} {% if due_lectures|length == 0 %}
<div class="text-center" id="no_subject_selected"> <div class="text-center" id="no_subject_selected">
<span class="font-italic">No lecture is to be processed</span> <span class="font-italic">No lecture is to be processed</span>
</div> </div>
...@@ -357,34 +415,43 @@ ...@@ -357,34 +415,43 @@
<div class="text-center" id="no_timetable_content" hidden> <div class="text-center" id="no_timetable_content" hidden>
<span class="font-italic">Not included in the timetable</span> <span class="font-italic">Not included in the timetable</span>
</div> </div>
<!--displaying the timetable -->
<table class="table table-striped" id="timetable"> <!-- if there are due lectures, display the table -->
{# <caption id="timetable_caption"></caption>#} {% if due_lectures %}
<thead>
<tr> <!--displaying the timetable -->
<th>Date</th> <table class="table table-striped" id="timetable">
<th>Subject</th> {# <caption id="timetable_caption"></caption>#}
<th>start time</th> <thead>
<th>end time</th> <tr>
<th></th> <th>Date</th>
</tr> <th>Subject</th>
</thead> <th>Start Time</th>
<tbody id="timetable_body"> <th>End Time</th>
<th></th>
</tr>
</thead>
<tbody id="timetable_body">
{% for lecture in due_lectures %} {% for lecture in due_lectures %}
<tr> <tr>
<td class="font-weight-bolder">{{ lecture.date }}</td> <td class="font-weight-bolder">{{ lecture.date }}</td>
{# <td>{{ lecture.subject }}</td>#} {# <td>{{ lecture.subject }}</td>#}
<td class="font-weight-bolder">{{ lecture.subject_name }}</td> <td class="font-weight-bolder">{{ lecture.subject_name }}</td>
<td class="font-weight-bolder">{{ lecture.start_time }}</td> <td class="font-weight-bolder">{{ lecture.start_time }}</td>
<td class="font-weight-bolder">{{ lecture.end_time }}</td> <td class="font-weight-bolder">{{ lecture.end_time }}</td>
<td> <td>
<button type="button" class="btn btn-success batch_process" data-video-id="{{ lecture.video_id }}" data-video-name="{{ lecture.video_name }}" id="{{ lecture.subject }}">Process</button> <button type="button" class="btn btn-success batch_process"
{# <span class="font-italic font-weight-bolder text-success">Processing</span>#} data-video-id="{{ lecture.video_id }}"
data-video-name="{{ lecture.video_name }}"
id="{{ lecture.subject }}">Process
</button>
{# <span class="font-italic font-weight-bolder text-success">Processing</span>#}
</td> </td>
</tr> </tr>
{% endfor %} {% endfor %}
</tbody> </tbody>
</table> </table>
{% endif %}
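<!-- A hedged sketch of the click handler assumed for the "Process" buttons above (the real
     ".batch_process" handler is collapsed out of this diff). It shows how the data-video-id and
     data-video-name attributes rendered by the due_lectures loop would feed the globals used by the
     processing fetch calls; the continuation after the fetch is not shown here. -->
<script>
    //delegated handler, since the table rows are rendered server-side per due lecture
    $(document).on('click', '.batch_process', function () {
        //copy the attributes into the globals used elsewhere on this page
        global_lecture_video_id = $(this).attr('data-video-id');
        global_video_name = $(this).attr('data-video-name');

        //show the activity loader and start activity recognition (endpoint as used elsewhere in this file)
        $('#activity_loader').attr('hidden', false);
        fetch('http://127.0.0.1:8000/process-lecture-activity/?lecture_video_name=' + global_video_name + '&lecture_video_id=' + global_lecture_video_id)
            .then((res) => res.json())
            //the real continuation (emotion and gaze steps) lives in the collapsed part of this template
            .then((out) => console.log(out))
            .catch((err) => alert('error: ' + err));
    });
</script>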
</div> </div>
...@@ -417,27 +484,29 @@ ...@@ -417,27 +484,29 @@
<div class="progress-bar"></div> <div class="progress-bar"></div>
</div> </div>
<a class="smpl-step-icon text-center"><i class="fa fa-chart-line" <a class="smpl-step-icon text-center"><i class="fa fa-chart-line"
style="font-size: 40px; padding-top: 10px; color: white"></i></a> style="font-size: 40px; padding-top: 10px; color: white"></i></a>
<div class="smpl-step-info text-center"> <div class="smpl-step-info text-center">
<span class="font-italic font-weight-bolder">Perform Activity Recognition</span> <span class="font-italic font-weight-bolder">Perform Activity Recognition</span>
<br /> <br/>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}" alt="Loader" class="mt-2" id="activity_loader" hidden> <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"
alt="Loader" class="mt-2" id="activity_loader" hidden>
</div> </div>
</div> </div>
<!-- end of step 1 --> <!-- end of step 1 -->
<!-- step 2 --> <!-- step 2 -->
<div class="col-4 smpl-step-step disabled" id="step_2"> <div class="col-4 smpl-step-step disabled" id="step_2">
<div class="text-center smpl-step-num font-weight-bolder">Step 3</div> <div class="text-center smpl-step-num font-weight-bolder">Step 2</div>
<div class="progress"> <div class="progress">
<div class="progress-bar"></div> <div class="progress-bar"></div>
</div> </div>
<a class="smpl-step-icon text-center"><i class="fa fa-user" <a class="smpl-step-icon text-center"><i class="fa fa-user"
style="font-size: 50px; padding-top: 10px; color: white"></i></a> style="font-size: 50px; padding-top: 10px; color: white"></i></a>
<div class="smpl-step-info text-center"> <div class="smpl-step-info text-center">
<span class="font-italic font-weight-bolder">Study Student Emotions</span> <span class="font-italic font-weight-bolder">Study Student Emotions</span>
<br /> <br/>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}" alt="Loader" class="mt-2" id="emotion_loader" hidden> <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"
alt="Loader" class="mt-2" id="emotion_loader" hidden>
</div> </div>
</div> </div>
<!-- end of step 2 --> <!-- end of step 2 -->
...@@ -454,18 +523,17 @@ ...@@ -454,18 +523,17 @@
</a> </a>
<div class="smpl-step-info text-center"> <div class="smpl-step-info text-center">
<span class="font-italic font-weight-bolder">See students' Gazes</span> <span class="font-italic font-weight-bolder">See students' Gazes</span>
<br /> <br/>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}" alt="Loader" class="mt-2" id="gaze_loader" hidden> <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"
alt="Loader" class="mt-2" id="gaze_loader" hidden>
</div> </div>
</div> </div>
<!-- end of step 3 --> <!-- end of step 3 -->
</div> </div>
<!-- end of progress row -->
{# <!-- simulation button row -->#}
{# <div class="row">#}
{# <button type="button" class="btn btn-outline-danger" id="simulate_process">Simulate</button>#}
{# </div>#}
</div> </div>
<!-- end of container --> <!-- end of container -->
...@@ -478,6 +546,10 @@ ...@@ -478,6 +546,10 @@
</div> </div>
<!-- end of progress row --> <!-- end of progress row -->
<!-- snackbar -->
<div id="snackbar">The lecture is completely processed.</div>
<!-- end of snackbar -->
</div> </div>
{% endblock %} {% endblock %}
<!-- End of container-fluid --> <!-- End of container-fluid -->
......
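For reference, a minimal sketch of the shape each entry in the "due_lectures" context list is expected to have, inferred only from the fields the template above reads and from the render call in FirstApp/views.py further down; the literal values are placeholders.

# Example entry in "due_lectures"; values are placeholders, keys come from the template.
due_lecture_example = {
    "date": "2020-10-12",
    "subject": "IT3040",            # used as the Process button's id attribute
    "subject_name": "IT Project",
    "start_time": "08:30:00",
    "end_time": "10:30:00",
    "video_id": 12,                 # data-video-id on the Process button
    "video_name": "Lecture01.mp4",  # data-video-name on the Process button
}

# The view renders the page with:
# render(request, "FirstApp/video_results.html",
#        {"lecturer": lecturer, "due_lectures": [due_lecture_example]})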
...@@ -151,21 +151,30 @@ urlpatterns = [ ...@@ -151,21 +151,30 @@ urlpatterns = [
# retrieves lecture activity summary # retrieves lecture activity summary
url(r'^get-lecture-activity-summary/$', api.GetLectureActivitySummary.as_view()), url(r'^get-lecture-activity-summary/$', api.GetLectureActivitySummary.as_view()),
# retrieves lecture activity summary # retrieves lecture emotion summary
url(r'^get-lecture-emotion-summary/$', api.GetLectureEmotionSummary.as_view()), url(r'^get-lecture-emotion-summary/$', api.GetLectureEmotionSummary.as_view()),
# retrieves lecture activity summary # retrieves lecture gaze estimation summary
url(r'^get-lecture-gaze-summary/$', api.GetLectureGazeSummary.as_view()), url(r'^get-lecture-gaze-summary/$', api.GetLectureGazeSummary.as_view()),
# retrieves lecture activity summary # retrieves student activity correlations with lecturer activity
url(r'^get-activity-correlations/$', api.GetLectureActivityCorrelations.as_view()), url(r'^get-activity-correlations/$', api.GetLectureActivityCorrelations.as_view()),
# retrieves lecture activity summary # retrieves student emotion correlations with lecturer activity
url(r'^get-emotion-correlations/$', api.GetLectureEmotionCorrelations.as_view()), url(r'^get-emotion-correlations/$', api.GetLectureEmotionCorrelations.as_view()),
# retrieves lecture activity summary # retrieves student gaze estimation correlations with lecturer activity
url(r'^get-gaze-correlations/$', api.GetLectureGazeCorrelations.as_view()), url(r'^get-gaze-correlations/$', api.GetLectureGazeCorrelations.as_view()),
# retrieves student activity-emotion correlations
url(r'^get-student-activity-emotion-correlations/$', api.GetStudentActivityEmotionCorrelations.as_view()),
# retrieves student activity-gaze correlations
url(r'^get-student-activity-gaze-correlations/$', api.GetStudentActivityGazeCorrelations.as_view()),
# retrieves student emotion-gaze correlations
url(r'^get-student-emotion-gaze-correlations/$', api.GetStudentEmotionGazeCorrelations.as_view()),
##### OTHERS ##### ##### OTHERS #####
...@@ -173,6 +182,19 @@ urlpatterns = [ ...@@ -173,6 +182,19 @@ urlpatterns = [
url(r'^get-lecture-recorded-video-name/$', api.GetLecturerRecordedVideo.as_view()), url(r'^get-lecture-recorded-video-name/$', api.GetLecturerRecordedVideo.as_view()),
##### BATCH PROCESS #####
# perform batch process for student behavior
url(r'^student-behavior-batch-process/$', api.BatchProcess.as_view()),
# check availability for student behavior components
url(r'^check-availability/$', api.CheckStudentBehaviorAvailability.as_view()),
# perform random task (delete later)
url(r'^get-random-number/$', api.TestRandom.as_view()),
# routers # routers
# path('', include(router.urls)), # path('', include(router.urls)),
path('api-auth/', include('rest_framework.urls', namespace='rest_framework')) path('api-auth/', include('rest_framework.urls', namespace='rest_framework'))
......
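A hedged sketch of how the new batch-process endpoints registered above might be exercised from a client. Only the trailing paths come from the urlpatterns; the host, URL prefix, and query parameters are assumptions for illustration, not the project's documented API.

import requests

BASE = "http://127.0.0.1:8000"  # assumed development host; prefix may differ per app

def start_batch_process(video_name, video_id):
    # Kick off the student-behaviour batch process for one recorded lecture.
    resp = requests.get(
        f"{BASE}/student-behavior-batch-process/",
        params={"video_name": video_name, "video_id": video_id},  # assumed params
    )
    resp.raise_for_status()
    return resp.json()

def check_availability(video_name):
    # Ask which behaviour components are already processed for this video.
    resp = requests.get(
        f"{BASE}/check-availability/",
        params={"video_name": video_name},  # assumed param
    )
    resp.raise_for_status()
    return resp.json()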
...@@ -189,6 +189,8 @@ def video_result(request): ...@@ -189,6 +189,8 @@ def video_result(request):
data = serializer.data data = serializer.data
print('data length: ', len(data))
# iterate through the existing lecture videos for the lecturer # iterate through the existing lecture videos for the lecturer
for video in data: for video in data:
video_id = video['id'] video_id = video['id']
...@@ -197,6 +199,8 @@ def video_result(request): ...@@ -197,6 +199,8 @@ def video_result(request):
# check whether the video id exist in the Activity Recognition table # check whether the video id exist in the Activity Recognition table
lec_activity = LectureActivity.objects.filter(lecture_video_id_id=video_id).exists() lec_activity = LectureActivity.objects.filter(lecture_video_id_id=video_id).exists()
print('lecture activity existence: ', lec_activity)
if lec_activity == False: if lec_activity == False:
to_do_lecture_list.append({ to_do_lecture_list.append({
"lecturer": lecturer, "lecturer": lecturer,
...@@ -227,11 +231,15 @@ def video_result(request): ...@@ -227,11 +231,15 @@ def video_result(request):
# loop through the to-do lecture list # loop through the to-do lecture list
for item in to_do_lecture_list: for item in to_do_lecture_list:
isDate = item['date'] == str(day_timetable['date']) isDate = item['date'] == str(day_timetable['date'])
print('item date: ', item['date'])
print('timetable date: ', str(day_timetable['date']))
# isLecturer = item['lecturer'] == # isLecturer = item['lecturer'] ==
# check for the particular lecture on the day # check for the particular lecture on the day
if isDate: if isDate:
slots = day_timetable['time_slots'] slots = day_timetable['time_slots']
# loop through the slots # loop through the slots
for slot in slots: for slot in slots:
# check for the lecturer and subject # check for the lecturer and subject
...@@ -260,6 +268,8 @@ def video_result(request): ...@@ -260,6 +268,8 @@ def video_result(request):
print('what is wrong?: ', exc) print('what is wrong?: ', exc)
return redirect('/500') return redirect('/500')
print('due lectures: ', due_lecture_list)
return render(request, "FirstApp/video_results.html", return render(request, "FirstApp/video_results.html",
{"lecturer": lecturer, "due_lectures": due_lecture_list}) {"lecturer": lecturer, "due_lectures": due_lecture_list})
......
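The print() calls added in video_result() are useful while debugging the due-lecture matching, but the same tracing can be routed through Python's standard logging module so it can be silenced per environment. A minimal sketch under that assumption, not the project's actual code:

import logging

logger = logging.getLogger("FirstApp.video_result")
logging.basicConfig(level=logging.DEBUG)  # in Django, prefer the LOGGING setting

def trace_due_lecture(item, day_timetable, due_lecture_list):
    # Mirrors the information the added print() calls expose.
    logger.debug("item date: %s", item["date"])
    logger.debug("timetable date: %s", day_timetable["date"])
    logger.debug("due lectures so far: %s", due_lecture_list)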
...@@ -15,20 +15,18 @@ import os ...@@ -15,20 +15,18 @@ import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...) # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production # Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/ # See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret! # SECURITY WARNING: keep the secret key used in production secret!
# SECRET_KEY = 'typgn#m(t#byxnp#ut@^gfqyh*1doa28gkqu(ap*k4s5!q&oyo' #original one # SECRET_KEY = 'typgn#m(t#byxnp#ut@^gfqyh*1doa28gkqu(ap*k4s5!q&oyo' #original one
SECRET_KEY = '!3-gwi-1#5-4**85xb#z(t-8#ayc#*gguw4v4+fkax4037sp=)' # exported one SECRET_KEY = '!3-gwi-1#5-4**85xb#z(t-8#ayc#*gguw4v4+fkax4037sp=)' # exported one
# SECURITY WARNING: don't run with debug turned on in production! # SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True DEBUG = True
ALLOWED_HOSTS = [] ALLOWED_HOSTS = []
# Application definition # Application definition
INSTALLED_APPS = [ INSTALLED_APPS = [
...@@ -36,6 +34,7 @@ INSTALLED_APPS = [ ...@@ -36,6 +34,7 @@ INSTALLED_APPS = [
'AttendanceApp.apps.AttendanceappConfig', 'AttendanceApp.apps.AttendanceappConfig',
'MonitorLecturerApp.apps.MonitorlecturerappConfig', 'MonitorLecturerApp.apps.MonitorlecturerappConfig',
'LectureSummarizingApp.apps.LectureSummarizingAppConfig', 'LectureSummarizingApp.apps.LectureSummarizingAppConfig',
'corsheaders',
'django.contrib.admin', 'django.contrib.admin',
'django.contrib.auth', 'django.contrib.auth',
'django.contrib.contenttypes', 'django.contrib.contenttypes',
...@@ -48,6 +47,7 @@ INSTALLED_APPS = [ ...@@ -48,6 +47,7 @@ INSTALLED_APPS = [
] ]
MIDDLEWARE = [ MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware', 'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware', 'django.middleware.common.CommonMiddleware',
...@@ -59,6 +59,9 @@ MIDDLEWARE = [ ...@@ -59,6 +59,9 @@ MIDDLEWARE = [
ROOT_URLCONF = 'integrated_slpes.urls' ROOT_URLCONF = 'integrated_slpes.urls'
# adding the CORS attributes
CORS_ALLOW_ALL_ORIGINS = True
TEMPLATES = [ TEMPLATES = [
{ {
'BACKEND': 'django.template.backends.django.DjangoTemplates', 'BACKEND': 'django.template.backends.django.DjangoTemplates',
...@@ -78,7 +81,6 @@ TEMPLATES = [ ...@@ -78,7 +81,6 @@ TEMPLATES = [
WSGI_APPLICATION = 'integrated_slpes.wsgi.application' WSGI_APPLICATION = 'integrated_slpes.wsgi.application'
# Database # Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases # https://docs.djangoproject.com/en/2.2/ref/settings/#databases
...@@ -93,7 +95,6 @@ DATABASES = { ...@@ -93,7 +95,6 @@ DATABASES = {
} }
} }
# Password validation # Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators # https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
...@@ -112,7 +113,6 @@ AUTH_PASSWORD_VALIDATORS = [ ...@@ -112,7 +113,6 @@ AUTH_PASSWORD_VALIDATORS = [
}, },
] ]
# Internationalization # Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/ # https://docs.djangoproject.com/en/2.2/topics/i18n/
...@@ -126,7 +126,6 @@ USE_L10N = True ...@@ -126,7 +126,6 @@ USE_L10N = True
USE_TZ = True USE_TZ = True
# Static files (CSS, JavaScript, Images) # Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/ # https://docs.djangoproject.com/en/2.2/howto/static-files/
...@@ -137,7 +136,6 @@ STATICFILES_DIRS = [ ...@@ -137,7 +136,6 @@ STATICFILES_DIRS = [
] ]
STATIC_ROOT = os.path.join(BASE_DIR, 'assets') STATIC_ROOT = os.path.join(BASE_DIR, 'assets')
# media files # media files
MEDIA_URL = '/media/' MEDIA_URL = '/media/'
...@@ -145,7 +143,9 @@ MEDIA_ROOT = os.path.join(BASE_DIR, 'media') ...@@ -145,7 +143,9 @@ MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# REST FRAMEWORK # REST FRAMEWORK
REST_FRAMEWORK = {
    # 'DEFAULT_PERMISSION_CLASSES': [
    #     'rest_framework.permissions.IsAuthenticated',
    # ]
    'DEFAULT_AUTHENTICATION_CLASSES': [],
    'DEFAULT_PERMISSION_CLASSES': []
}
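The settings changes above open things up for local development: corsheaders is installed with CORS_ALLOW_ALL_ORIGINS = True, and REST framework's default authentication and permission classes are emptied. A hedged sketch of how this could be tightened again outside development; the origin value is a placeholder, while the setting names come from django-cors-headers and Django REST framework.

# Restrict CORS to known front-ends instead of allowing every origin.
CORS_ALLOW_ALL_ORIGINS = False
CORS_ALLOWED_ORIGINS = [
    "http://127.0.0.1:8000",  # placeholder: list only the clients that need access
]

# Re-enable authentication and permission checks for the APIs.
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': [
        'rest_framework.authentication.SessionAuthentication',
    ],
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.IsAuthenticated',
    ],
}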