Commit d8f6824a authored by LiniEisha

Merge branch 'QA_RELEASE' into IT17100908

parents 51d22c43 a2d180df
@@ -40,6 +40,15 @@ class Lecturer(models.Model):
        return self.lecturer_id


# admin model
class Admin(models.Model):
    admin_id = models.CharField(max_length=10)
    name = models.CharField(max_length=20)
    email = models.EmailField()

    def __str__(self):
        return self.admin_id


# Lecturer_subject model
class LecturerSubject(models.Model):
    lec_subject_id = models.CharField(max_length=10)
@@ -56,6 +65,12 @@ class LecturerCredentials(models.Model):
    password = models.CharField(max_length=15)


# admin credential details
class AdminCredentialDetails(models.Model):
    username = models.ForeignKey(Admin, on_delete=models.CASCADE)
    password = models.CharField(max_length=15)


# timetable based on daily basis
class DailyTimeTable(models.Model):
    slot_id = models.AutoField(auto_created=True, primary_key=True)
@@ -285,7 +300,7 @@ class LectureEmotionFrameRecognitions(models.Model):
# POSE section

# lecture gaze estimation
class LectureGazeEstimation(models.Model):
    lecture_gaze_id = models.CharField(max_length=10)
    lecture_video_id = models.ForeignKey(LectureVideo, on_delete=models.CASCADE)
...
@@ -13,3 +13,5 @@ admin.site.register(FacultyTimetable)
admin.site.register(LectureVideo)
admin.site.register(LectureActivity)
admin.site.register(LectureGazeEstimation)
admin.site.register(Admin)
admin.site.register(AdminCredentialDetails)
\ No newline at end of file
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from MonitorLecturerApp.models import LectureRecordedVideo, LecturerVideoMetaData
from MonitorLecturerApp.serializers import LectureRecordedVideoSerializer, LecturerVideoMetaDataSerializer

from .MongoModels import *
from rest_framework.views import *
from .ImageOperations import saveImage
@@ -300,7 +302,6 @@ class LectureActivityProcess(APIView):
        LectureActivity(
            lecture_activity_id=new_lecture_activity_id,
            lecture_video_id_id=lec_video_id,
            talking_perct=percentages['talking_perct'],
            phone_perct=percentages['phone_perct'],
            listening_perct=percentages['listening_perct'],
            writing_perct=percentages['writing_perct']
@@ -473,16 +474,18 @@ class LectureEmotionProcess(APIView):
        pass

    def save_emotion_report(self, lec_video_id, percentages):
        lec_video = LectureVideo.objects.filter(lecture_video_id=lec_video_id)
        lec_video_serializer = LectureVideoSerializer(lec_video, many=True)
        lec_video_data = lec_video_serializer.data[0]

        last_lec_emotion = LectureEmotionReport.objects.order_by('lecture_emotion_id').last()
        new_lecture_emotion_id = ig.generate_new_id(last_lec_emotion.lecture_emotion_id)
        lecture_video_id = lec_video_data['id']

        # creating a new lecture emotion report
        LectureEmotionReport(
            lecture_emotion_id=new_lecture_emotion_id,
            lecture_video_id_id=lecture_video_id,
            happy_perct=percentages.happy_perct,
            sad_perct=percentages.sad_perct,
            angry_perct=percentages.angry_perct,
@@ -511,8 +514,6 @@ class GetLectureEmotionReportViewSet(APIView):
    def get(self, request):
        lecture_video_id = request.query_params.get('lecture_video_id')
        lecture_video_name = request.query_params.get('lecture_video_name')
        # retrieve the extracted frames
        extracted = ar.getExtractedFrames(lecture_video_name)
        lecture_emotions = LectureEmotionReport.objects.filter(lecture_video_id__lecture_video_id=lecture_video_id)
        serializer = LectureEmotionSerializer(lecture_emotions, many=True)
@@ -521,7 +522,6 @@ class GetLectureEmotionReportViewSet(APIView):
        return Response({
            "response": serializer.data,
            "extracted": extracted
        })
@@ -685,17 +685,23 @@ class ProcessLectureGazeEstimation(APIView):
        pass

    def estimate_gaze(self, lec_video_id, percentages):
        lec_video = LectureVideo.objects.filter(lecture_video_id=lec_video_id)
        last_lec_gaze = LectureGazeEstimation.objects.order_by('lecture_gaze_id').last()
        lec_video_serializer = LectureVideoSerializer(lec_video, many=True)
        lec_video_data = lec_video_serializer.data[0]

        new_lecture_gaze_id = "LG000001" if (last_lec_gaze is None) else ig.generate_new_id(
            last_lec_gaze.lecture_gaze_id)
        new_lecture_gaze_primary_id = 1 if (last_lec_gaze is None) else int(last_lec_gaze.id) + 1

        # get the video id
        lecture_video_id = lec_video_data['id']

        # creating a new lecture gaze estimation
        LectureGazeEstimation(
            id=new_lecture_gaze_primary_id,
            lecture_gaze_id=new_lecture_gaze_id,
            lecture_video_id_id=lecture_video_id,
            looking_up_and_right_perct=percentages['head_up_right_perct'],
            looking_up_and_left_perct=percentages['head_up_left_perct'],
            looking_down_and_right_perct=percentages['head_down_right_perct'],
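Editor's note: ig.generate_new_id is only referenced in this diff. Assuming it simply advances the running identifier ("LG000001" -> "LG000002", matching the default above), a minimal sketch could look like the following; it is not the project's actual implementation.

import re

def generate_new_id(prev_id):
    # split the previous id into its alphabetic prefix and zero-padded number,
    # e.g. "LG000001" -> ("LG", "000001")
    match = re.match(r"([A-Za-z]+)(\d+)$", prev_id)
    prefix, number = match.group(1), match.group(2)

    # increment the numeric part while keeping the original padding width
    next_number = str(int(number) + 1).zfill(len(number))
    return prefix + next_number

# e.g. generate_new_id("LG000001") == "LG000002"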
@@ -722,8 +728,6 @@ class GetLectureGazeEstimationViewSet(APIView):
    def get(self, request):
        lecture_video_id = request.query_params.get('lecture_video_id')
        lecture_video_name = request.query_params.get('lecture_video_name')
        # retrieve the extracted frames
        extracted = hge.getExtractedFrames(lecture_video_name)
        lecture_gaze_estimations = LectureGazeEstimation.objects.filter(
            lecture_video_id__lecture_video_id=lecture_video_id)
@@ -731,7 +735,6 @@ class GetLectureGazeEstimationViewSet(APIView):
        return Response({
            "response": serializer.data,
            "extracted": extracted
        })
@@ -1270,3 +1273,178 @@ class GetLectureGazeSummary(APIView):
            "frame_group_percentages": frame_group_percentages,
            "gaze_labels": gaze_labels
        })
# =====OTHERS=====

class GetLecturerRecordedVideo(APIView):

    def get(self, request):
        lecturer = request.query_params.get('lecturer')
        subject = request.query_params.get('subject')
        date = request.query_params.get('date')

        # retrieve data
        lec_recorded_video = LectureRecordedVideo.objects.filter(lecturer_id=lecturer, subject__subject_code=subject, lecturer_date=date)
        lec_recorded_video_ser = LectureRecordedVideoSerializer(lec_recorded_video, many=True)
        lec_recorded_video_data = lec_recorded_video_ser.data[0]

        video_name = lec_recorded_video_data['lecture_video_name']

        print('lecturer recorded video name: ', video_name)

        return Response({
            "video_name": video_name
        })
# this API will get lecture activity correlations
class GetLectureActivityCorrelations(APIView):

    def get(self, request):
        option = request.query_params.get('option')
        lecturer = request.query_params.get('lecturer')
        int_option = int(option)

        current_date = datetime.datetime.now().date()
        option_date = datetime.timedelta(days=int_option)
        previous_date = current_date - option_date

        individual_lec_activities = []
        activity_correlations = []

        # retrieving lecture activities
        lec_activity = LectureActivity.objects.filter(
            lecture_video_id__date__gte=previous_date,
            lecture_video_id__date__lte=current_date,
            lecture_video_id__lecturer=lecturer
        )

        if len(lec_activity) > 0:
            isRecordFound = True
            activity_serializer = LectureActivitySerializer(lec_activity, many=True)
            activity_data = activity_serializer.data
            _, individual_lec_activities, _ = ar.get_student_activity_summary_for_period(activity_data)

        # retrieving lecturer recorded activities
        lec_recorded_activity = LecturerVideoMetaData.objects.filter(
            lecturer_video_id__lecturer_date__gte=previous_date,
            lecturer_video_id__lecturer_date__lte=current_date,
            lecturer_video_id__lecturer=lecturer
        )

        if len(lec_recorded_activity) > 0:
            lec_recorded_activity_ser = LecturerVideoMetaDataSerializer(lec_recorded_activity, many=True)
            lec_recorded_activity_data = lec_recorded_activity_ser.data

            activity_correlations = ar.get_activity_correlations(individual_lec_activities, lec_recorded_activity_data)

        print('activity correlations: ', activity_correlations)

        return Response({
            "correlations": activity_correlations
        })
# this API will get lecture emotion correlations
class GetLectureEmotionCorrelations(APIView):

    def get(self, request):
        option = request.query_params.get('option')
        lecturer = request.query_params.get('lecturer')
        int_option = int(option)

        current_date = datetime.datetime.now().date()
        option_date = datetime.timedelta(days=int_option)
        previous_date = current_date - option_date

        individual_lec_emotions = []
        emotion_correlations = []

        # retrieving lecture emotions
        lec_emotion = LectureEmotionReport.objects.filter(
            lecture_video_id__date__gte=previous_date,
            lecture_video_id__date__lte=current_date,
            lecture_video_id__lecturer=lecturer
        )

        # if there are lecture emotions
        if len(lec_emotion) > 0:
            emotion_serializer = LectureEmotionSerializer(lec_emotion, many=True)
            emotion_data = emotion_serializer.data
            _, individual_lec_emotions, _ = ed.get_student_emotion_summary_for_period(emotion_data)

        # retrieving lecturer recorded activities
        lec_recorded_activity = LecturerVideoMetaData.objects.filter(
            lecturer_video_id__lecturer_date__gte=previous_date,
            lecturer_video_id__lecturer_date__lte=current_date,
            lecturer_video_id__lecturer=lecturer
        )

        # if there are any recorded lectures
        if len(lec_recorded_activity) > 0:
            lec_recorded_activity_ser = LecturerVideoMetaDataSerializer(lec_recorded_activity, many=True)
            lec_recorded_activity_data = lec_recorded_activity_ser.data

            emotion_correlations = ed.get_emotion_correlations(individual_lec_emotions, lec_recorded_activity_data)

        return Response({
            "correlations": emotion_correlations
        })
# this API will get lecture gaze correlations
class GetLectureGazeCorrelations(APIView):

    def get(self, request):
        option = request.query_params.get('option')
        lecturer = request.query_params.get('lecturer')
        int_option = int(option)

        current_date = datetime.datetime.now().date()
        option_date = datetime.timedelta(days=int_option)
        previous_date = current_date - option_date

        individual_lec_gaze = []
        gaze_correlations = []

        # retrieving lecture gaze estimations
        lec_gaze = LectureGazeEstimation.objects.filter(
            lecture_video_id__date__gte=previous_date,
            lecture_video_id__date__lte=current_date,
            lecture_video_id__lecturer=lecturer
        )

        # if there are gaze estimations
        if len(lec_gaze) > 0:
            gaze_serializer = LectureGazeEstimationSerializer(lec_gaze, many=True)
            gaze_data = gaze_serializer.data
            _, individual_lec_gaze, _ = hge.get_student_gaze_estimation_summary_for_period(gaze_data)

        # retrieving lecturer recorded activities
        lec_recorded_activity = LecturerVideoMetaData.objects.filter(
            lecturer_video_id__lecturer_date__gte=previous_date,
            lecturer_video_id__lecturer_date__lte=current_date,
            lecturer_video_id__lecturer=lecturer
        )

        # if there are any recorded lectures
        if len(lec_recorded_activity) > 0:
            lec_recorded_activity_ser = LecturerVideoMetaDataSerializer(lec_recorded_activity, many=True)
            lec_recorded_activity_data = lec_recorded_activity_ser.data

            # find the correlations between lecture gaze estimations and recorded lecture
            gaze_correlations = hge.get_gaze_correlations(individual_lec_gaze, lec_recorded_activity_data)

        return Response({
            "correlations": gaze_correlations
        })
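Editor's note: for orientation, a hypothetical client call against one of these correlation endpoints. The URL path and host are assumptions; only the 'option' (look-back window in days) and 'lecturer' query parameters come from the views above.

import requests

params = {"option": 30, "lecturer": 1}

# route name is assumed; the view answers with {"correlations": [{"index": [...], "value": ...}, ...]}
response = requests.get("http://localhost:8000/api/get-lecture-activity-correlations/", params=params)
print(response.json()["correlations"])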
@@ -10,12 +10,13 @@ from .MongoModels import *
from . models import VideoMeta
from . logic import custom_sorter as cs
from .logic import id_generator as ig
from .logic import activity_recognition as ar
from .logic import utilities as ut

from .serializers import LectureEmotionSerializer

import pandas as pd


# emotion recognition method
def emotion_recognition(classifier, face_classifier, image):
    label = ""
    class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
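Editor's note: the rest of this helper is cut off by the hunk. A sketch of the per-image flow it encapsulates, reconstructed from the inline code removed from detect_emotion below (cv2, np and img_to_array are assumed to be imported at module level), so treat the details as approximate.

def emotion_recognition_sketch(classifier, face_classifier, image):
    # default label when no face is found
    label = ""
    class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']

    # detect faces on the grayscale frame
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, 1.3, 5)

    for (x, y, w, h) in faces:
        # crop the face region and resize it to the classifier's 48x48 input
        roi_gray = cv2.resize(gray[y:y + h, x:x + w], (48, 48), interpolation=cv2.INTER_AREA)

        if np.sum([roi_gray]) != 0:
            roi = roi_gray.astype('float') / 255.0
            roi = img_to_array(roi)
            roi = np.expand_dims(roi, axis=0)

            # predict and map the highest-scoring class to its label
            preds = classifier.predict(roi)[0]
            label = class_labels[preds.argmax()]

    return label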
@@ -47,7 +48,6 @@ def detect_emotion(video):
    face_classifier = cv2.CascadeClassifier(os.path.join(BASE_DIR, 'FirstApp\classifiers\haarcascade_frontalface_default.xml'))
    classifier_path = os.path.join(BASE_DIR, 'FirstApp\classifiers\Emotion_little_vgg.h5')
    classifier = load_model(classifier_path)
    path = ''
    meta_data = VideoMeta()
    class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
@@ -65,6 +65,9 @@ def detect_emotion(video):
    count_neutral = 0
    count_surprise = 0

    # for testing purposes
    print('starting the emotion recognition process')

    while (count_frames < frame_count):
        # Grab a single frame of video
        ret, frame = cap.read()
@@ -72,23 +75,7 @@ def detect_emotion(video):
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_classifier.detectMultiScale(gray, 1.3, 5)

        label = emotion_recognition(classifier, face_classifier, frame)

        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
            roi_gray = gray[y:y + h, x:x + w]
            roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
            # rect, face, image = face_detector(frame)

            if np.sum([roi_gray]) != 0:
                roi = roi_gray.astype('float') / 255.0
                roi = img_to_array(roi)
                roi = np.expand_dims(roi, axis=0)

                # make a prediction on the ROI, then lookup the class
                preds = classifier.predict(roi)[0]
                label = class_labels[preds.argmax()]

        # counting the number of frames for each label, to calculate the percentage for each emotion later on...
@@ -113,11 +100,9 @@ def detect_emotion(video):
        elif (label == 'Surprise'):
            count_surprise += 1

        # for testing purposes
        print('emotion frame count: ', count_frames)

        count_frames += 1
@@ -132,6 +117,9 @@ def detect_emotion(video):
    cap.release()
    cv2.destroyAllWindows()

    # for testing purposes
    print('ending the emotion recognition process')

    return meta_data
@@ -263,11 +251,24 @@ def get_individual_student_evaluation(video_name, student_name):
# this method will
def get_frame_emotion_recognition(video_name):

    BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
    face_classifier = cv2.CascadeClassifier(
        os.path.join(BASE_DIR, 'FirstApp\classifiers\haarcascade_frontalface_default.xml'))
    classifier_path = os.path.join(BASE_DIR, 'FirstApp\classifiers\Emotion_little_vgg.h5')
    classifier = load_model(classifier_path)
    EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))

    # files required for person detection
    config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
    model_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.caffemodel")

    # load our serialized person detection model from disk
    print("[INFO] loading model...")
    net = cv2.dnn.readNetFromCaffe(config_file, model_file)

    cap = cv2.VideoCapture(VIDEO_DIR)
    no_of_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    # initializing the count variables
    frame_count = 0
@@ -276,16 +277,21 @@ def get_frame_emotion_recognition(video_name):
    # frame activity recognitions
    frame_emotion_recognitions = []

    # class labels
    class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']

    # for testing purposes
    print('starting the emotion frame recognition process')

    while (frame_count < no_of_frames):

        ret, image = cap.read()
        frame_name = "frame-{}".format(frame_count)

        frame_details = {}
        frame_details['frame_name'] = frame_name

        # initialize the count variables for a frame
        happy_count = 0
@@ -294,18 +300,19 @@ def get_frame_emotion_recognition(video_name):
        neutral_count = 0
        surprise_count = 0

        # get the detections
        detections = ar.person_detection(image, net)

        # to count the extracted detections for a frame
        detection_count = 0

        # if there are detections
        if (len(detections) > 0):

            # loop through the detections
            for detection in detections:

                label = emotion_recognition(classifier, face_classifier, detection)

                # checking for the label
                if label == class_labels[0]:
@@ -341,9 +348,23 @@ def get_frame_emotion_recognition(video_name):
            # push to all the frame details
            frame_emotion_recognitions.append(frame_details)

        else:
            break

        # for testing purposes
        print('emotion frame recognition count: ', frame_count)

        # increment the frame count
        frame_count += 1

    # sort the recognitions based on the frame number
    sorted_activity_frame_recognitions = cs.custom_object_sorter(frame_emotion_recognitions)

    # for testing purposes
    print('ending the emotion frame recognition process')

    # return the detected frame percentages
    return sorted_activity_frame_recognitions
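Editor's note: cs.custom_object_sorter is only referenced in this file. Assuming it orders the per-frame dictionaries by the numeric suffix of their frame_name ("frame-0", "frame-1", ..., so that "frame-10" follows "frame-9" rather than "frame-1"), a minimal equivalent would be:

def custom_object_sorter_sketch(frame_objects):
    # sort by the integer after "frame-" instead of lexicographically
    return sorted(frame_objects, key=lambda obj: int(obj['frame_name'].split('-')[-1]))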
@@ -409,15 +430,28 @@ def get_student_emotion_summary_for_period(emotions):
# this method will retrieve activity frame groupings for a lecture
def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):

    # load the models
    BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))

    face_classifier = cv2.CascadeClassifier(
        os.path.join(BASE_DIR, 'FirstApp\classifiers\haarcascade_frontalface_default.xml'))
    classifier_path = os.path.join(BASE_DIR, 'FirstApp\classifiers\Emotion_little_vgg.h5')
    classifier = load_model(classifier_path)

    # files required for person detection
    config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
    model_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.caffemodel")

    # load our serialized person detection model from disk
    print("[INFO] loading model...")
    net = cv2.dnn.readNetFromCaffe(config_file, model_file)

    cap = cv2.VideoCapture(VIDEO_DIR)
    no_of_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    # initializing the count variables
@@ -441,9 +475,11 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
    # looping through the frames
    while (frame_count < no_of_frames):

        # get the current frame
        ret, image = cap.read()

        # initializing the variables
        happy_count = 0
@@ -453,17 +489,17 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
        neutral_count = 0
        detection_count = 0

        detections = ar.person_detection(image, net)

        # if there are detections
        if (len(detections) > 0):

            # looping through the detections in each frame
            for detection in detections:

                # run the model and get the emotion label
                label = emotion_recognition(classifier, face_classifier, detection)

                # increment the count based on the label
                if label == class_labels[0]:
@@ -503,7 +539,11 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
            frame_group_dict[frame_name]['neutral_count'] += neutral_count
            frame_group_dict[frame_name]['detection_count'] += detection_count

        else:
            break

        # for testing purposes
        print('emotion frame groupings count: ', frame_count)

        # increment the frame count
        frame_count += 1
@@ -558,6 +598,10 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# this section will handle some database operations
def save_frame_recognitions(video_name):

    # for testing purposes
    print('starting the saving emotion frame recognition process')

    # retrieve the lecture emotion id
    lec_emotion = LectureEmotionReport.objects.filter(lecture_video_id__video_name=video_name)
    lec_emotion_ser = LectureEmotionSerializer(lec_emotion, many=True)
@@ -595,6 +639,9 @@ def save_frame_recognitions(video_name):
    lec_emotion_frame_recognitions.save()

    # for testing purposes
    print('ending the saving emotion frame recognition process')

    # now return the frame recognitions
    return frame_detections
@@ -602,6 +649,9 @@ def save_frame_recognitions(video_name):
# this method will save the emotion frame groupings to the database
def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):

    # for testing purposes
    print('starting the saving emotion frame groupings process')

    frame_group_percentages, emotion_labels = emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict)

    # save the frame group details into db
@@ -631,5 +681,84 @@ def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
    new_lec_emotion_frame_groupings.lecture_emotion_id_id = lec_emotion_id
    new_lec_emotion_frame_groupings.frame_group_details = frame_group_details

    # for testing purposes
    print('ending the saving emotion frame groupings process')

    # save
    new_lec_emotion_frame_groupings.save()
# this method will get emotion correlations
def get_emotion_correlations(individual_lec_emotions, lec_recorded_activity_data):

    # this variable will be used to store the correlations
    correlations = []

    limit = 10
    data_index = ['lecture-{}'.format(i + 1) for i in range(len(individual_lec_emotions))]

    # student emotion labels
    student_emotion_labels = ['Happy', 'Sad', 'Angry', 'Surprise', 'Neutral']
    lecturer_activity_labels = ['seated', 'standing', 'walking']

    # lecturer recorded data list (lecturer)
    sitting_perct_list = []
    standing_perct_list = []
    walking_perct_list = []

    # lecture emotion data list (student)
    happy_perct_list = []
    sad_perct_list = []
    angry_perct_list = []
    surprise_perct_list = []
    neutral_perct_list = []

    # loop through the lecturer recorded data (lecturer)
    for data in lec_recorded_activity_data:
        sitting_perct_list.append(int(data['seated_count']))
        standing_perct_list.append(int(data['standing_count']))
        walking_perct_list.append(int(data['walking_count']))

    # loop through the lecture emotion data (student)
    for data in individual_lec_emotions:
        happy_perct_list.append(int(data['happy_perct']))
        sad_perct_list.append(int(data['sad_perct']))
        angry_perct_list.append(int(data['angry_perct']))
        surprise_perct_list.append(int(data['surprise_perct']))
        neutral_perct_list.append(int(data['neutral_perct']))

    corr_data = {'Happy': happy_perct_list, 'Sad': sad_perct_list, 'Angry': angry_perct_list, 'Surprise': surprise_perct_list, 'Neutral': neutral_perct_list,
                 'seated': sitting_perct_list, 'standing': standing_perct_list, 'walking': walking_perct_list}

    # create the dataframe
    df = pd.DataFrame(corr_data, index=data_index)

    # calculate the correlation
    pd_series = ut.get_top_abs_correlations(df, limit)

    print('====correlated variables=====')
    print(pd_series)

    for i in range(limit):
        # this dictionary will get the pandas.Series object's indices and values separately
        corr_dict = {}

        index = pd_series.index[i]

        # check whether the first index is a student emotion
        isStudentEmotion = index[0] in student_emotion_labels

        # check whether the second index is a lecturer activity
        isLecturerAct = index[1] in lecturer_activity_labels

        # if one index is a student emotion and the other a lecturer activity, add to the dictionary
        if isStudentEmotion & isLecturerAct:
            corr_dict['index'] = index
            corr_dict['value'] = pd_series.values[i]

            # append the dictionary to the 'correlations' list
            correlations.append(corr_dict)

    # return the list
    return correlations
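Editor's note: ut.get_top_abs_correlations is not shown in this diff. A common pandas formulation of "top-n absolute pairwise correlations" looks like the sketch below; this is an assumption about its behaviour (a Series whose MultiIndex holds the variable pairs and whose values are the correlations), not the project's actual utilities module.

import numpy as np
import pandas as pd

def get_top_abs_correlations_sketch(df, n=10):
    # absolute pairwise correlations between all columns
    corr = df.corr().abs()

    # keep only the upper triangle so each pair appears once, dropping self-correlations
    mask = np.triu(np.ones(corr.shape), k=1).astype(bool)
    pairs = corr.where(mask).stack()

    # largest absolute correlations first; callers read .index[i] (a pair) and .values[i]
    return pairs.sort_values(ascending=False).head(n)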
@@ -58,3 +58,50 @@ class LecturerCredentialsForm(forms.ModelForm):
        widgets = {
            'password': forms.PasswordInput()
        }


# admin login form
class AdminLoginForm(forms.Form):
    # username = forms.CharField(max_length=100)
    email = forms.EmailField()
    password = forms.CharField(widget=forms.PasswordInput())

    def clean(self):
        # cleaned_username = self.cleaned_data.get('username')
        cleaned_email = self.cleaned_data.get('email')
        cleaned_password = self.cleaned_data.get('password')

        admin = Admin.objects.get(email=cleaned_email)

        # if an admin is already in the system
        if (admin):
            # retrieve the User object
            user = User.objects.get(email=cleaned_email)
            is_user = user.check_password(cleaned_password)

            # if the password is correct
            if (is_user):
                # lec_credentials = LecturerCredentials.objects.filter(username_id=lecturer.id)
                admin_credentials = AdminCredentialDetails.objects.get(username_id=admin.id)
                print('credentials: ', admin_credentials)

                # if admin credentials are already created
                if (admin_credentials):
                    admin_credentials.password = user.password
                    admin_credentials.save(force_update=True)
                else:
                    LecturerCredentials(
                        username_id=admin.id,
                        password=user.password
                    ).save()
            else:
                raise forms.ValidationError("Username or password is incorrect")

        else:
            print('the admin does not exist')
            raise forms.ValidationError("The admin does not exist")

        return super(AdminLoginForm, self).clean()
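Editor's note: a hypothetical view wiring for AdminLoginForm, for orientation only; the view name, template path and redirect target are assumptions, only the form itself comes from this commit.

from django.shortcuts import render, redirect

def admin_login(request):
    # bind the submitted credentials; form.is_valid() runs the clean() checks above
    if request.method == 'POST':
        form = AdminLoginForm(request.POST)
        if form.is_valid():
            # no ValidationError was raised, so the admin credentials matched
            return redirect('/')
    else:
        form = AdminLoginForm()

    return render(request, 'FirstApp/admin_login.html', {'form': form})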
@@ -9,6 +9,9 @@ from .custom_sorter import *
from ..MongoModels import *
from ..serializers import *
from . import id_generator as ig
from . import utilities as ut

import pandas as pd


def activity_recognition(video_path):
@@ -50,38 +53,21 @@ def activity_recognition(video_path):
    frame_count = 0
    total_detections = 0
    phone_checking_count = 0
    talking_count = 0
    note_taking_count = 0
    listening_count = 0

    # for testing purposes
    print('starting the activity recognition process')

    while (frame_count < no_of_frames):
        ret, image = video.read()

        image = cv2.resize(image, size)

        detections = person_detection(image, net)

        # this is for testing purposes
        print('frame count: ', frame_count)

        # if there are any person detections
        if (len(detections) > 0):
@@ -90,6 +76,7 @@ def activity_recognition(video_path):
            detection_count = 0

            # looping through the person detections of the frame
            for detection in detections:

                detection = cv2.resize(detection, size)
@@ -113,43 +100,33 @@ def activity_recognition(video_path):
                elif (label == class_labels[2]):
                    note_taking_count += 1

                detection_count += 1

        frame_count += 1

    # calculating the percentages for each label
    phone_perct = float(phone_checking_count / total_detections) * 100 if total_detections > 0 else 0
    # talking_perct = float(talking_count / total_detections) * 100 if total_detections > 0 else 0
    note_perct = float(note_taking_count / total_detections) * 100 if total_detections > 0 else 0
    listening_perct = float(listening_count / total_detections) * 100 if total_detections > 0 else 0

    # assigning the percentages to the dictionary
    percentages["phone_perct"] = phone_perct
    # percentages["talking_perct"] = talking_perct
    percentages["writing_perct"] = note_perct
    percentages["listening_perct"] = listening_perct

    # for testing purposes
    print('activity recognition process is over')

    return percentages
def person_detection(image, net):
    BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
    model_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.caffemodel")
    threshold = 0.2
    detected_person = []
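Editor's note: the remainder of person_detection is truncated in this hunk. A sketch of the usual MobileNet-SSD person-cropping flow the callers appear to rely on (class index 15 is 'person' in the standard MobileNetSSD_deploy model; the preprocessing constants are assumptions taken from the common OpenCV example, not from this repository):

def person_detection_sketch(image, net, threshold=0.2):
    detected_person = []
    (h, w) = image.shape[:2]

    # build a 300x300 blob and run one forward pass of the SSD
    blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 0.007843, (300, 300), 127.5)
    net.setInput(blob)
    detections = net.forward()

    for i in range(detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        class_id = int(detections[0, 0, i, 1])

        # keep confident detections of the 'person' class only
        if confidence > threshold and class_id == 15:
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (start_x, start_y, end_x, end_y) = box.astype("int")

            # crop the detected person out of the frame
            detected_person.append(image[start_y:end_y, start_x:end_x])

    return detected_person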
@@ -391,14 +368,27 @@ def get_student_activity_evaluation(video_name):
# recognize the activity for each frame
def get_frame_activity_recognition(video_name):

    BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
    # CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
    # CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_03.h5")
    CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_04.h5")
    ACTIVITY_DIR = os.path.join(BASE_DIR, "static\\FirstApp\\activity")

    # files required for person detection
    config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
    model_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.caffemodel")

    # load our serialized person detection model from disk
    print("[INFO] loading model...")
    net = cv2.dnn.readNetFromCaffe(config_file, model_file)

    np.set_printoptions(suppress=True)

    # class_labels = ['Phone checking', 'Talking with friends', 'note taking']

    # class labels
    class_labels = ['Phone checking', 'Listening', 'Note taking']

    model = tensorflow.keras.models.load_model(CLASSIFIER_DIR)
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
@@ -407,45 +397,54 @@ def get_frame_activity_recognition(video_name):
    data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
    size = (224, 224)

    # iteration
    video = cv2.VideoCapture(VIDEO_DIR)
    no_of_frames = video.get(cv2.CAP_PROP_FRAME_COUNT)

    frame_count = 0
    # total_detections = 10

    # frame activity recognitions
    frame_activity_recognitions = []

    # for testing purposes
    print('starting the frame activity recognition process')

    # looping through the frames
    while (frame_count < no_of_frames):

        # define the count variables for each frame
        phone_checking_count = 0
        listening_count = 0
        note_taking_count = 0

        ret, image = video.read()

        # derive the frame folder path
        # FRAME_FOLDER = os.path.join(EXTRACTED_DIR, frame)
        frame_name = "frame-{}".format(frame_count)

        frame_details = {}
        frame_details['frame_name'] = frame_name

        # to count the extracted detections for a frame
        detection_count = 0
        detected_percentages = []

        detections = person_detection(image, net)

        # if there are detections
        if (len(detections) > 0):

            # loop through each detection in the frame
            for detection in detections:

                detection = cv2.resize(detection, size)

                image_array = np.asarray(detection)
                normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1

                # Load the image into the array
@@ -467,6 +466,7 @@ def get_frame_activity_recognition(video_name):
                # increment the detection count
                detection_count += 1

            # calculating the percentages for the frame
            phone_checking_perct = float(phone_checking_count / detection_count) * 100 if detection_count > 0 else 0
            listening_perct = float(listening_count / detection_count) * 100 if detection_count > 0 else 0
@@ -480,13 +480,26 @@ def get_frame_activity_recognition(video_name):
            # push to all the frame details
            frame_activity_recognitions.append(frame_details)

        else:
            break

        print('current frame: ', frame_count)

        # increment frame count
        frame_count += 1

    # sort the recognitions based on the frame number
    sorted_activity_frame_recognitions = custom_object_sorter(frame_activity_recognitions)

    # for testing purposes
    print('ending the frame activity recognition process')

    # return the detected frame percentages
    return sorted_activity_frame_recognitions
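Editor's note: pulling the per-detection steps above into one place, each person crop is resized to the classifier's 224x224 input, scaled to roughly [-1, 1] with the same (x / 127.0) - 1 normalisation used in the loop, and labelled by the highest-scoring class. A small sketch under those assumptions (model and class_labels as defined earlier in the function):

def classify_detection_sketch(model, detection, class_labels, size=(224, 224)):
    # resize the person crop to the classifier's input size
    detection = cv2.resize(detection, size)

    # scale pixel values to roughly [-1, 1], matching the preprocessing in the loop above
    image_array = np.asarray(detection)
    normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1

    # single-image batch for the Keras model
    data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
    data[0] = normalized_image_array

    # highest-scoring class wins
    prediction = model.predict(data)
    return class_labels[np.argmax(prediction[0])]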
# this method will retrieve individual student evaluation
def get_individual_student_evaluation(video_name, student_name):

    BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
@@ -753,6 +766,10 @@ def activity_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# this section will handle saving activity entities to the database
def save_frame_recognition(video_name):

    # for testing purposes
    print('starting the saving activity frame recognition process')

    # retrieve the lecture activity id
    lec_activity = LectureActivity.objects.filter(lecture_video_id__video_name=video_name)
    lec_activity_ser = LectureActivitySerializer(lec_activity, many=True)
@@ -787,6 +804,9 @@ def save_frame_recognition(video_name):
    lec_activity_frame_recognitions.save()

    # for testing purposes
    print('ending the saving activity frame recognition process')

    # now return the frame detections
    return frame_detections
@@ -794,6 +814,8 @@ def save_frame_recognition(video_name):
# this method will save the activity frame groupings to the database
def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):

    # for testing purposes
    print('starting the saving activity frame groupings process')

    frame_group_percentages, activity_labels = activity_frame_groupings(video_name, frame_landmarks,
                                                                        frame_group_dict)
@@ -825,5 +847,82 @@ def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
    new_lec_activity_frame_groupings.lecture_activity_id_id = lec_activity_id
    new_lec_activity_frame_groupings.frame_group_details = frame_group_details

    # for testing purposes
    print('ending the saving activity frame groupings process')

    # save
    new_lec_activity_frame_groupings.save()
# this method will get activity correlations
def get_activity_correlations(individual_lec_activities, lec_recorded_activity_data):

    # this variable will be used to store the correlations
    correlations = []

    limit = 10
    data_index = ['lecture-{}'.format(i + 1) for i in range(len(individual_lec_activities))]

    # student activity labels
    student_activity_labels = ['phone checking', 'listening', 'note taking']
    lecturer_activity_labels = ['seated', 'standing', 'walking']

    # lecturer recorded data list (lecturer)
    sitting_perct_list = []
    standing_perct_list = []
    walking_perct_list = []

    # lecture activity data list (student)
    phone_perct_list = []
    listen_perct_list = []
    note_perct_list = []

    # loop through the lecturer recorded data (lecturer)
    for data in lec_recorded_activity_data:
        sitting_perct_list.append(int(data['seated_count']))
        standing_perct_list.append(int(data['standing_count']))
        walking_perct_list.append(int(data['walking_count']))

    # loop through the lecture activity data (student)
    for data in individual_lec_activities:
        phone_perct_list.append(int(data['phone_perct']))
        listen_perct_list.append(int(data['listening_perct']))
        note_perct_list.append(int(data['writing_perct']))

    corr_data = {'phone checking': phone_perct_list, 'listening': listen_perct_list, 'note taking': note_perct_list,
                 'seated': sitting_perct_list, 'standing': standing_perct_list, 'walking': walking_perct_list}

    # create the dataframe
    df = pd.DataFrame(corr_data, index=data_index)

    # calculate the correlation
    pd_series = ut.get_top_abs_correlations(df, limit)

    print('====correlated variables=====')
    print(pd_series)

    for i in range(limit):
        # this dictionary will get the pandas.Series object's indices and values separately
        corr_dict = {}

        index = pd_series.index[i]

        # check whether the first index is a student activity
        isStudentAct = index[0] in student_activity_labels

        # check whether the second index is a lecturer activity
        isLecturerAct = index[1] in lecturer_activity_labels

        # if one index is a student activity and the other a lecturer activity, add to the dictionary
        if isStudentAct & isLecturerAct:
            corr_dict['index'] = index
            corr_dict['value'] = pd_series.values[i]

            # append the dictionary to the 'correlations' list
            correlations.append(corr_dict)

    # return the list
    return correlations
@@ -15,10 +15,12 @@ from . face_landmarks import get_landmark_model, detect_marks
import os
import shutil
import math
import pandas as pd

from ..MongoModels import *
from ..serializers import *
from . import id_generator as ig
from . import utilities as ut


def get_2d_points(img, rotation_vector, translation_vector, camera_matrix, val):
@@ -144,18 +146,10 @@ def process_gaze_estimation(video_path):
    VIDEO_PATH = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_path))
    GAZE_DIR = os.path.join(BASE_DIR, "static\\FirstApp\\gaze")

    # create a folder with the same name as the video
    VIDEO_DIR = os.path.join(GAZE_DIR, video_path)

    # define a dictionary to return the percentage values
    percentages = {}

    # checking whether the video directory exist
    if os.path.isdir(VIDEO_DIR):
        shutil.rmtree(VIDEO_DIR)

    # create the new directory
    os.mkdir(VIDEO_DIR)

    # load the face detection model
    face_model = get_face_detector()
@@ -202,6 +196,9 @@ def process_gaze_estimation(video_path):
        [0, 0, 1]], dtype="double"
    )

    # for testing purposes
    print('starting the gaze estimation process')

    # iterate the video frames
    while True:
        ret, img = cap.read()
@@ -285,35 +282,39 @@ def process_gaze_estimation(video_path):
            # checking for vertical and horizontal directions
            if isLookingDown & isLookingRight:
                # cv2.putText(img, 'looking down and right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
                head_down_right_count += 1
            elif isLookingDown & isLookingLeft:
                # cv2.putText(img, 'looking down and left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
                head_down_left_count += 1
            elif isLookingUp & isLookingRight:
                # cv2.putText(img, 'looking up and right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
                head_up_right_count += 1
            elif isLookingUp & isLookingLeft:
                # cv2.putText(img, 'looking up and left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
                head_up_left_count += 1
            elif isLookingFront:
                # cv2.putText(img, 'Head front', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
                head_front_count += 1

            # indicate the student name
            # cv2.putText(img, student_name, (facebox[2], facebox[3]), font, 2, (255, 255, 128), 3)

            # increment the face count
            face_count += 1

        # naming the new image
        # image_name = "frame-{}.png".format(frame_count)
        #
        # # new image path
        # image_path = os.path.join(VIDEO_DIR, image_name)

        # save the new image
        # cv2.imwrite(image_path, img)

        # for testing purposes
        print('gaze estimation count: ', frame_count)

        # increment the frame count
        frame_count += 1
@@ -323,8 +324,8 @@ def process_gaze_estimation(video_path):
    # after extracting the frames, save the changes to static content
    # p = os.popen("python manage.py collectstatic", "w")
    # p.write("yes")

    # calculate percentages
    head_up_right_perct = (Decimal(head_up_right_count) / Decimal(face_count)) * 100
@@ -346,6 +347,9 @@ def process_gaze_estimation(video_path):
    cv2.destroyAllWindows()
    cap.release()

    # for testing purposes
    print('ending the gaze estimation process')

    # return the dictionary
    return percentages
@@ -370,7 +374,7 @@ def getExtractedFrames(lecture_video_name):
# this method will retrieve lecture gaze estimation for each frame
def get_lecture_gaze_estimation_for_frames(video_name):

    # get the base directory
    BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
@@ -422,6 +426,10 @@ def get_lecture_gaze_esrimation_for_frames(video_name):
        [0, 0, 1]], dtype="double"
    )

    # for testing purposes
    print('starting the gaze estimation for frames process')

    # iterate the video frames
    while True:
        ret, img = cap.read()
@@ -551,6 +559,9 @@ def get_lecture_gaze_esrimation_for_frames(video_name):
            # append the calculated percentages to the frame_detections
            frame_detections.append(percentages)

            # for testing purposes
            print('gaze estimation frame recognition count: ', frame_count)

            frame_count += 1

        else:
@@ -558,16 +569,17 @@ def get_lecture_gaze_esrimation_for_frames(video_name):
    # for testing purposes
    print('ending the gaze estimation for frames process')

    # return the details
    return frame_detections, frame_rate
# this method will get the student gaze estimation summary for period # this method will get the student gaze estimation summary for period
def get_student_gaze_estimation_summary_for_period(gaze_estimation_data): def get_student_gaze_estimation_summary_for_period(gaze_estimation_data):
# declare variables to add percentage values
phone_checking_perct_combined = 0.0
listening_perct_combined = 0.0
note_taking_perct_combined = 0.0
# declare variables to add percentage values
looking_up_right_perct_combined = 0.0 looking_up_right_perct_combined = 0.0
looking_up_left_perct_combined = 0.0 looking_up_left_perct_combined = 0.0
looking_down_right_perct_combined = 0.0 looking_down_right_perct_combined = 0.0
...@@ -601,16 +613,16 @@ def get_student_gaze_estimation_summary_for_period(gaze_estimation_data): ...@@ -601,16 +613,16 @@ def get_student_gaze_estimation_summary_for_period(gaze_estimation_data):
# calculate the average percentages # calculate the average percentages
looking_up_right_average_perct = round((looking_up_right_perct_combined / no_of_gaze_estimations), 1) looking_up_right_average_perct = round((looking_up_right_perct_combined / no_of_gaze_estimations), 1)
looking_up_left_perct = round((looking_up_left_perct_combined / no_of_gaze_estimations), 1) looking_up_left_average_perct = round((looking_up_left_perct_combined / no_of_gaze_estimations), 1)
looking_down_right_average_perct = round((looking_down_right_perct_combined / no_of_gaze_estimations), 1) looking_down_right_average_perct = round((looking_down_right_perct_combined / no_of_gaze_estimations), 1)
looking_down_left_average_perct = round((looking_down_left_perct_combined / no_of_gaze_estimations), 1) looking_down_left_average_perct = round((looking_down_left_perct_combined / no_of_gaze_estimations), 1)
looking_front_average_perct = round((looking_front_perct_combined / no_of_gaze_estimations), 1) looking_front_average_perct = round((looking_front_perct_combined / no_of_gaze_estimations), 1)
percentages = {} percentages = {}
percentages["looking_up_and_right_perct"] = looking_up_right_average_perct percentages["looking_up_and_right_perct"] = looking_up_right_average_perct
percentages["looking_up_and_left_perct"] = looking_up_left_perct_combined percentages["looking_up_and_left_perct"] = looking_up_left_average_perct
percentages["looking_down_and_right_perct"] = looking_down_right_perct_combined percentages["looking_down_and_right_perct"] = looking_down_right_average_perct
percentages["looking_down_and_left_perct"] = looking_down_left_perct_combined percentages["looking_down_and_left_perct"] = looking_down_left_average_perct
percentages["looking_front_perct"] = looking_front_average_perct percentages["looking_front_perct"] = looking_front_average_perct
return percentages, individual_lec_gaze_estimations, gaze_estimation_labels return percentages, individual_lec_gaze_estimations, gaze_estimation_labels
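# Illustrative sketch (not part of the commit): the summary is a plain mean of each gaze
# percentage over the lectures in the selected period. The example_* names are placeholders;
# with two lectures reporting looking_front_perct values of 40.0 and 60.0:
example_front_combined = 40.0 + 60.0
example_no_of_gaze_estimations = 2
example_front_average = round(example_front_combined / example_no_of_gaze_estimations, 1)   # 50.0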
...@@ -677,6 +689,8 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic ...@@ -677,6 +689,8 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic
# assign the difference # assign the difference
frame_group_diff[key] = diff if diff > 0 else 1 frame_group_diff[key] = diff if diff > 0 else 1
# for testing purposes
print('starting gaze frame grouping process')
# looping through the frames # looping through the frames
while True: while True:
...@@ -802,6 +816,9 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic ...@@ -802,6 +816,9 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic
frame_group_dict[frame_name]['detection_count'] += detection_count frame_group_dict[frame_name]['detection_count'] += detection_count
# for testing purposes
print('gaze frame groupings count: ', frame_count)
# increment the frame count # increment the frame count
frame_count += 1 frame_count += 1
...@@ -848,12 +865,20 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic ...@@ -848,12 +865,20 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic
# define the labels # define the labels
labels = ['upright_perct', 'upleft_perct', 'downright_perct', 'downleft_perct', 'front_perct'] labels = ['upright_perct', 'upleft_perct', 'downright_perct', 'downleft_perct', 'front_perct']
# for testing purposes
print('ending gaze frame grouping process')
# return the dictionary # return the dictionary
return frame_group_dict, labels return frame_group_dict, labels
# this section will handle some database operations # this section will handle some database operations
def save_frame_detections(video_name): def save_frame_detections(video_name):
# for testing purposes
print('starting the saving gaze frame recognition process')
# retrieve the lecture gaze estimation id # retrieve the lecture gaze estimation id
lec_gaze = LectureGazeEstimation.objects.filter(lecture_video_id__video_name=video_name) lec_gaze = LectureGazeEstimation.objects.filter(lecture_video_id__video_name=video_name)
lec_gaze_ser = LectureGazeEstimationSerializer(lec_gaze, many=True) lec_gaze_ser = LectureGazeEstimationSerializer(lec_gaze, many=True)
...@@ -868,7 +893,7 @@ def save_frame_detections(video_name): ...@@ -868,7 +893,7 @@ def save_frame_detections(video_name):
ig.generate_new_id(last_lec_gaze_frame_recognitions.lecture_gaze_frame_recognition_id) ig.generate_new_id(last_lec_gaze_frame_recognitions.lecture_gaze_frame_recognition_id)
# calculate the frame detections # calculate the frame detections
frame_detections, frame_rate = get_lecture_gaze_esrimation_for_frames(video_name) frame_detections, frame_rate = get_lecture_gaze_estimation_for_frames(video_name)
# to be added to the field 'frame_recognition_details' in the Lecture Gaze Frame Recordings # to be added to the field 'frame_recognition_details' in the Lecture Gaze Frame Recordings
frame_recognition_details = [] frame_recognition_details = []
...@@ -892,6 +917,9 @@ def save_frame_detections(video_name): ...@@ -892,6 +917,9 @@ def save_frame_detections(video_name):
lec_gaze_frame_recognitions.save() lec_gaze_frame_recognitions.save()
# for testing purposes
print('ending the saving gaze frame recognition process')
# now return the frame recognitions # now return the frame recognitions
return frame_detections return frame_detections
...@@ -899,6 +927,10 @@ def save_frame_detections(video_name): ...@@ -899,6 +927,10 @@ def save_frame_detections(video_name):
# this method will save gaze frame groupings to the database # this method will save gaze frame groupings to the database
def save_frame_groupings(video_name, frame_landmarks, frame_group_dict): def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# for testing purposes
print('starting the saving gaze frame groupings process')
frame_group_percentages, gaze_labels = gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_percentages, gaze_labels = gaze_estimation_frame_groupings(video_name, frame_landmarks,
frame_group_dict) frame_group_dict)
...@@ -928,6 +960,83 @@ def save_frame_groupings(video_name, frame_landmarks, frame_group_dict): ...@@ -928,6 +960,83 @@ def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
new_lec_gaze_frame_groupings.lecture_gaze_id_id = lec_gaze_id new_lec_gaze_frame_groupings.lecture_gaze_id_id = lec_gaze_id
new_lec_gaze_frame_groupings.frame_group_details = frame_group_details new_lec_gaze_frame_groupings.frame_group_details = frame_group_details
# for testing purposes
print('ending the saving gaze frame groupings process')
# save # save
new_lec_gaze_frame_groupings.save() new_lec_gaze_frame_groupings.save()
# this method will get gaze estimation correlations
def get_gaze_correlations(individual_lec_gaze, lec_recorded_activity_data):
# this variable will be used to store the correlations
correlations = []
limit = 10
data_index = ['lecture-{}'.format(i + 1) for i in range(len(individual_lec_gaze))]
# student gaze labels
student_gaze_labels = ['Up and Right', 'Up and Left', 'Down and Right', 'Down and Left', 'Front']
lecturer_activity_labels = ['seated', 'standing', 'walking']
# lecturer recorded data list (lecturer)
sitting_perct_list = []
standing_perct_list = []
walking_perct_list = []
# lecture gaze data list (student)
upright_perct_list = []
upleft_perct_list = []
downright_perct_list = []
downleft_perct_list = []
front_perct_list = []
# loop through the lecturer recorded data (lecturer)
for data in lec_recorded_activity_data:
sitting_perct_list.append(int(data['seated_count']))
standing_perct_list.append(int(data['standing_count']))
walking_perct_list.append(int(data['walking_count']))
# loop through the student gaze estimation data (student)
for data in individual_lec_gaze:
upright_perct_list.append(int(data['looking_up_and_right_perct']))
upleft_perct_list.append(int(data['looking_up_and_left_perct']))
downright_perct_list.append(int(data['looking_down_and_right_perct']))
downleft_perct_list.append(int(data['looking_down_and_left_perct']))
front_perct_list.append(int(data['looking_front_perct']))
corr_data = {'Up and Right': upright_perct_list, 'Up and Left': upleft_perct_list, 'Down and Right': downright_perct_list,
'Down and Left': downleft_perct_list, 'Front': front_perct_list,
'seated': sitting_perct_list, 'standing': standing_perct_list, 'walking': walking_perct_list}
# create the dataframe
df = pd.DataFrame(corr_data, index=data_index)
# calculate the correlation
pd_series = ut.get_top_abs_correlations(df, limit)
print('==== correlated variables ====')
print(pd_series)
for i in range(limit):
# this dictionary will get the pandas.Series object's indices and values separately
corr_dict = {}
index = pd_series.index[i]
# check whether the first index is a student gaze label
isStudentGaze = index[0] in student_gaze_labels
# check whether the second index is a lecturer activity
isLecturerAct = index[1] in lecturer_activity_labels
# if the pair combines a student gaze label with a lecturer activity, add it to the dictionary
if isStudentGaze and isLecturerAct:
corr_dict['index'] = index
corr_dict['value'] = pd_series.values[i]
# append the dictionary to the 'correlations' list
correlations.append(corr_dict)
# return the list
return correlations
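# Illustrative sketch (not part of the commit): judging by the keys read above, each argument
# is a list of per-lecture dictionaries along these lines (values are made-up placeholders):
#
#   lec_recorded_activity_data entry (lecturer):
#       {'seated_count': 60, 'standing_count': 25, 'walking_count': 15}
#   individual_lec_gaze entry (student):
#       {'looking_up_and_right_perct': 10, 'looking_up_and_left_perct': 5,
#        'looking_down_and_right_perct': 20, 'looking_down_and_left_perct': 15,
#        'looking_front_perct': 50}
#
# Each returned item pairs one student gaze label with one lecturer activity label, e.g.
#   {'index': ('Front', 'seated'), 'value': 0.83}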
def get_redundant_pairs(df):
'''Get diagonal and lower triangular pairs of correlation matrix'''
pairs_to_drop = set()
cols = df.columns
for i in range(0, df.shape[1]):
for j in range(0, i+1):
pairs_to_drop.add((cols[i], cols[j]))
return pairs_to_drop
def get_top_abs_correlations(df, n):
au_corr = df.corr().abs().unstack()
labels_to_drop = get_redundant_pairs(df)
au_corr = au_corr.drop(labels=labels_to_drop).sort_values(ascending=False)
return au_corr[0:n]
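# Illustrative sketch (not part of the commit): on a toy DataFrame the helper returns the n
# strongest absolute pairwise correlations, after get_redundant_pairs() drops the diagonal
# and mirrored pairs. The demo_df name and values are placeholders:
#
#   import pandas as pd
#   demo_df = pd.DataFrame({'Front': [50, 60, 40],
#                           'seated': [55, 65, 45],
#                           'walking': [20, 10, 30]})
#   print(get_top_abs_correlations(demo_df, 2))
#   # prints a pandas Series indexed by variable pairs, e.g. (Front, seated)  1.0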
import os import os
import cv2 import cv2
import shutil import shutil
import datetime # import datetime
from datetime import timedelta
from FirstApp.MongoModels import * from FirstApp.MongoModels import *
from FirstApp.serializers import * from FirstApp.serializers import *
...@@ -94,7 +95,7 @@ def getTimeLandmarks(video_name): ...@@ -94,7 +95,7 @@ def getTimeLandmarks(video_name):
THRESHOLD_GAP = 5 THRESHOLD_GAP = 5
# calculating the real duration # calculating the real duration
real_duration = datetime.timedelta(seconds=(duration+THRESHOLD_GAP)) real_duration = timedelta(seconds=(duration))
# defines the number of seconds included for a frame group # defines the number of seconds included for a frame group
THRESHOLD_TIME = 10 THRESHOLD_TIME = 10
...@@ -112,7 +113,7 @@ def getTimeLandmarks(video_name): ...@@ -112,7 +113,7 @@ def getTimeLandmarks(video_name):
# loop through the threshold gap limit to define the time landmarks # loop through the threshold gap limit to define the time landmarks
for i in range(THRESHOLD_GAP): for i in range(THRESHOLD_GAP):
initial_landmark += unit_gap initial_landmark += unit_gap
time_landmark = str(datetime.timedelta(seconds=initial_landmark)) time_landmark = str(timedelta(seconds=initial_landmark))
time_landmark_value = initial_landmark time_landmark_value = initial_landmark
time_landmarks.append(time_landmark) time_landmarks.append(time_landmark)
time_landmarks_values.append(time_landmark_value) time_landmarks_values.append(time_landmark_value)
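# Illustrative sketch (not part of the commit): assuming unit_gap holds the video duration
# split evenly into THRESHOLD_GAP parts (its assignment sits above this hunk), a 50-second
# video with THRESHOLD_GAP = 5 yields a landmark every 10 seconds:
#
#   from datetime import timedelta
#   example_landmarks = [str(timedelta(seconds=10 * (i + 1))) for i in range(5)]
#   # ['0:00:10', '0:00:20', '0:00:30', '0:00:40', '0:00:50']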
...@@ -204,6 +205,9 @@ def getFrameLandmarks(video_name, category): ...@@ -204,6 +205,9 @@ def getFrameLandmarks(video_name, category):
# this section will handle some database operations # this section will handle some database operations
def save_time_landmarks(video_name): def save_time_landmarks(video_name):
# for testing purposes
print('starting the saving time landmarks process')
last_lec_video_time_landmarks = LectureVideoTimeLandmarks.objects.order_by('lecture_video_time_landmarks_id').last() last_lec_video_time_landmarks = LectureVideoTimeLandmarks.objects.order_by('lecture_video_time_landmarks_id').last()
new_lecture_video_time_landmarks_id = "LVTL00001" if (last_lec_video_time_landmarks is None) else \ new_lecture_video_time_landmarks_id = "LVTL00001" if (last_lec_video_time_landmarks is None) else \
ig.generate_new_id(last_lec_video_time_landmarks.lecture_video_time_landmarks_id) ig.generate_new_id(last_lec_video_time_landmarks.lecture_video_time_landmarks_id)
...@@ -233,12 +237,18 @@ def save_time_landmarks(video_name): ...@@ -233,12 +237,18 @@ def save_time_landmarks(video_name):
new_lec_video_time_landmarks.lecture_video_id_id = lec_video_id new_lec_video_time_landmarks.lecture_video_id_id = lec_video_id
new_lec_video_time_landmarks.time_landmarks = db_time_landmarks new_lec_video_time_landmarks.time_landmarks = db_time_landmarks
# for testing purposes
print('ending the saving time landmarks process')
new_lec_video_time_landmarks.save() new_lec_video_time_landmarks.save()
# this method will save frame landmarks to the database # this method will save frame landmarks to the database
def save_frame_landmarks(video_name): def save_frame_landmarks(video_name):
# for testing purposes
print('starting the saving frame landmarks process')
# retrieve the previous lecture video frame landmarks details # retrieve the previous lecture video frame landmarks details
last_lec_video_frame_landmarks = LectureVideoFrameLandmarks.objects.order_by( last_lec_video_frame_landmarks = LectureVideoFrameLandmarks.objects.order_by(
'lecture_video_frame_landmarks_id').last() 'lecture_video_frame_landmarks_id').last()
...@@ -271,6 +281,9 @@ def save_frame_landmarks(video_name): ...@@ -271,6 +281,9 @@ def save_frame_landmarks(video_name):
new_lec_video_frame_landmarks.save() new_lec_video_frame_landmarks.save()
# for testing purposes
print('ending the saving frame landmarks process')
# now return the frame landmarks and the frame group dictionary # now return the frame landmarks and the frame group dictionary
return frame_landmarks, frame_group_dict return frame_landmarks, frame_group_dict
......
# Generated by Django 2.2.11 on 2020-10-20 16:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('FirstApp', '0014_lecturegazeframerecognitions'),
]
operations = [
migrations.CreateModel(
name='Admin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('admin_id', models.CharField(max_length=10)),
('name', models.CharField(max_length=20)),
('email', models.EmailField(max_length=254)),
],
),
migrations.CreateModel(
name='AdminCredentialDetails',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=15)),
('username', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='FirstApp.Admin')),
],
),
migrations.DeleteModel(
name='LecturePoseEstimation',
),
]
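# Note (not part of the commit): a migration like this is what "python manage.py makemigrations FirstApp"
# generates for the new Admin and AdminCredentialDetails models; the DeleteModel operation drops the
# old LecturePoseEstimation model in the same step.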
{% extends 'FirstApp/template.html' %}
<!DOCTYPE html>
<html lang="en">
<body id="page-top">
<!-- Page Wrapper -->
<div id="wrapper">
<!-- Content Wrapper -->
<div id="content-wrapper" class="d-flex flex-column">
<!-- Main Content -->
<div id="content">
<!-- Begin Page Content -->
{% block 'container-fluid' %}
<div class="container-fluid">
{% load static %}
<!-- 401 Error Text -->
<div class="text-center">
<div class="error mx-auto" data-text="401">401</div>
<p class="lead text-gray-800 mb-5">Unauthorized access</p>
<p class="text-gray-500 mb-0">It looks like you do not have access to this URL</p>
<p class="text-gray-500 mb-0">Please log in with the correct user type</p>
<a href="/logout">&larr; Back to Login Page</a>
</div>
</div>
{% endblock %}
<!--end of container-fluid -->
</div>
<!-- End of Main Content -->
<!-- Footer -->
<footer class="sticky-footer bg-white">
<div class="container my-auto">
<div class="copyright text-center my-auto">
<span>Copyright &copy; Your Website 2019</span>
</div>
</div>
</footer>
<!-- End of Footer -->
</div>
<!-- End of Content Wrapper -->
</div>
<!-- End of Page Wrapper -->
<!-- Scroll to Top Button-->
<a class="scroll-to-top rounded" href="#page-top">
<i class="fas fa-angle-up"></i>
</a>
<!-- Logout Modal-->
<div class="modal fade" id="logoutModal" tabindex="-1" role="dialog" aria-labelledby="exampleModalLabel" aria-hidden="true">
<div class="modal-dialog" role="document">
<div class="modal-content">
<div class="modal-header">
<h5 class="modal-title" id="exampleModalLabel">Ready to Leave?</h5>
<button class="close" type="button" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body">Select "Logout" below if you are ready to end your current session.</div>
<div class="modal-footer">
<button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
<a class="btn btn-primary" href="login.html">Logout</a>
</div>
</div>
</div>
</div>
<!-- Bootstrap core JavaScript-->
<script src="vendor/jquery/jquery.min.js"></script>
<script src="vendor/bootstrap/js/bootstrap.bundle.min.js"></script>
<!-- Core plugin JavaScript-->
<script src="vendor/jquery-easing/jquery.easing.min.js"></script>
<!-- Custom scripts for all pages-->
<script src="js/sb-admin-2.min.js"></script>
</body>
</html>
...@@ -251,7 +251,6 @@ ...@@ -251,7 +251,6 @@
} }
//this function will handle the activity 'summary' button //this function will handle the activity 'summary' button
$('#activity_summary_btn').click(function (e) { $('#activity_summary_btn').click(function (e) {
...@@ -294,7 +293,6 @@ ...@@ -294,7 +293,6 @@
}); });
//this function will handle the retrieved activity frame group percentages //this function will handle the retrieved activity frame group percentages
function activityFrameGroupPercentages(response, e) { function activityFrameGroupPercentages(response, e) {
...@@ -357,7 +355,6 @@ ...@@ -357,7 +355,6 @@
} }
//this function will call the activity chart function //this function will call the activity chart function
function renderActivityChart(activity_labels) { function renderActivityChart(activity_labels) {
...@@ -486,7 +483,6 @@ ...@@ -486,7 +483,6 @@
} }
var chart = new CanvasJS.Chart("EmotionChartContainer", { var chart = new CanvasJS.Chart("EmotionChartContainer", {
animationEnabled: true, animationEnabled: true,
theme: "light2", theme: "light2",
...@@ -570,7 +566,6 @@ ...@@ -570,7 +566,6 @@
} }
var chart = new CanvasJS.Chart("GazeChartContainer", { var chart = new CanvasJS.Chart("GazeChartContainer", {
animationEnabled: true, animationEnabled: true,
theme: "light2", theme: "light2",
...@@ -609,7 +604,6 @@ ...@@ -609,7 +604,6 @@
} }
//this function will render the chart for Activity statistics //this function will render the chart for Activity statistics
function renderActivityStatistics() { function renderActivityStatistics() {
...@@ -626,7 +620,6 @@ ...@@ -626,7 +620,6 @@
]; ];
for (let i = 0; i < label_length; i++) { for (let i = 0; i < label_length; i++) {
let label = activity_labels[i]; let label = activity_labels[i];
...@@ -634,7 +627,7 @@ ...@@ -634,7 +627,7 @@
for (let j = 0; j < activity_length; j++) { for (let j = 0; j < activity_length; j++) {
let activity = individual_activities[j]; let activity = individual_activities[j];
datapoints.push({label: "lecture " + (j+1), y: activity[label]}); datapoints.push({label: "lecture " + (j + 1), y: activity[label]});
} }
...@@ -644,7 +637,7 @@ ...@@ -644,7 +637,7 @@
name: label, name: label,
markerType: "square", markerType: "square",
{#xValueFormatString: "DD MMM, YYYY",#} {#xValueFormatString: "DD MMM, YYYY",#}
xValueFormatString: "lec " + (i+1), xValueFormatString: "lec " + (i + 1),
color: getRandomColor(), color: getRandomColor(),
dataPoints: datapoints dataPoints: datapoints
}; };
...@@ -714,7 +707,7 @@ ...@@ -714,7 +707,7 @@
for (let j = 0; j < emotion_length; j++) { for (let j = 0; j < emotion_length; j++) {
let emotion = individual_emotions[j]; let emotion = individual_emotions[j];
datapoints.push({label: "lecture " + (j+1), y: emotion[label]}); datapoints.push({label: "lecture " + (j + 1), y: emotion[label]});
} }
let obj = { let obj = {
...@@ -723,7 +716,7 @@ ...@@ -723,7 +716,7 @@
name: label, name: label,
markerType: "square", markerType: "square",
{#xValueFormatString: "DD MMM, YYYY",#} {#xValueFormatString: "DD MMM, YYYY",#}
xValueFormatString: "Lec " + (i+1), xValueFormatString: "Lec " + (i + 1),
color: colors[i - 1], color: colors[i - 1],
dataPoints: datapoints dataPoints: datapoints
}; };
...@@ -740,7 +733,7 @@ ...@@ -740,7 +733,7 @@
axisX: { axisX: {
title: "Lecture", title: "Lecture",
{#valueFormatString: "DD MMM",#} {#valueFormatString: "DD MMM",#}
valueFormatString: "lec" , valueFormatString: "lec",
crosshair: { crosshair: {
enabled: true, enabled: true,
snapToDataPoint: true snapToDataPoint: true
...@@ -792,7 +785,7 @@ ...@@ -792,7 +785,7 @@
for (let j = 0; j < gaze_estimation_length; j++) { for (let j = 0; j < gaze_estimation_length; j++) {
let gaze_estimation = individual_gaze_estimations[j]; let gaze_estimation = individual_gaze_estimations[j];
datapoints.push({label: "lecture " + (j+1), y: gaze_estimation[label]}); datapoints.push({label: "lecture " + (j + 1), y: gaze_estimation[label]});
} }
let obj = { let obj = {
...@@ -801,7 +794,7 @@ ...@@ -801,7 +794,7 @@
name: label, name: label,
markerType: "square", markerType: "square",
{#xValueFormatString: "DD MMM, YYYY",#} {#xValueFormatString: "DD MMM, YYYY",#}
xValueFormatString: "Lec " + (i+1), xValueFormatString: "Lec " + (i + 1),
color: colors[i - 1], color: colors[i - 1],
dataPoints: datapoints dataPoints: datapoints
}; };
...@@ -818,7 +811,7 @@ ...@@ -818,7 +811,7 @@
axisX: { axisX: {
title: "Lecture", title: "Lecture",
{#valueFormatString: "DD MMM",#} {#valueFormatString: "DD MMM",#}
valueFormatString: "lec" , valueFormatString: "lec",
crosshair: { crosshair: {
enabled: true, enabled: true,
snapToDataPoint: true snapToDataPoint: true
...@@ -965,6 +958,235 @@ ...@@ -965,6 +958,235 @@
} }
//this function will handle the advanced analysis for activity
$('#activity_advanced_btn').click(function () {
$('#activity_advanced_modal').modal();
//enable the loader
$('#activity_corr_loader').attr('hidden', false);
let lecturer = "{{ lecturer }}";
let option = $("input[name='option']:checked").val();
//fetch the correlation data
fetch('http://127.0.0.1:8000/get-activity-correlations/?lecturer=' + lecturer + '&option=' + option)
.then((res) => res.json())
.then((out) => displayActivityCorrelations(out.correlations))
.catch((err) => alert('error: ' + err));
});
//this function will handle the advanced analysis for emotion
$('#emotion_advanced_btn').click(function () {
$('#emotion_advanced_modal').modal();
//enable the loader
$('#emotion_corr_loader').attr('hidden', false);
let lecturer = "{{ lecturer }}";
let option = $("input[name='option']:checked").val();
//fetch the correlation data
fetch('http://127.0.0.1:8000/get-emotion-correlations/?lecturer=' + lecturer + "&option=" + option)
.then((res) => res.json())
.then((out) => displayEmotionCorrelations(out.correlations))
.catch((err) => alert('err: ' + err));
});
//this function will handle the advanced analysis for gaze
$('#gaze_advanced_btn').click(function () {
$('#gaze_advanced_modal').modal();
//enable the loader
$('#gaze_corr_loader').attr('hidden', false);
let lecturer = "{{ lecturer }}";
let option = $("input[name='option']:checked").val();
//fetch the correlation data
fetch('http://127.0.0.1:8000/get-gaze-correlations/?lecturer=' + lecturer + "&option=" + option)
.then((res) => res.json())
.then((out) => displayGazeCorrelations(out.correlations))
.catch((err) => alert('err: ' + err));
});
//this method will display the activity correlations in a table
function displayActivityCorrelations(correlations) {
let htmlString = "";
//create the html content for the activity correlation table
for (let i = 0; i < correlations.length; i++) {
let corr = correlations[i];
let indices = corr.index;
let value = corr.value;
value = Math.round(value * 100);
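//the rounded score (0-100) picks the Bootstrap row colour below: >80 success, >60 primary, >40 warning, >20 danger, >0 dark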
if (value <= 100 && value > 80) {
htmlString += "<tr class='bg-success text-white'>";
}
else if (value <= 80 && value > 60) {
htmlString += "<tr class='bg-primary text-white'>";
}
else if (value <= 60 && value > 40) {
htmlString += "<tr class='bg-warning text-white'>";
}
else if (value <= 40 && value > 20) {
htmlString += "<tr class='bg-danger text-white'>";
}
else if (value <= 20 && value > 0) {
htmlString += "<tr class='bg-dark text-white'>";
}
//create a <tr> to be inserted
htmlString += "<td>";
htmlString += indices[0];
htmlString += "</td>";
htmlString += "<td>";
htmlString += indices[1];
htmlString += "</td>";
htmlString += "<td>";
htmlString += value;
htmlString += "</td>";
htmlString += "</tr>";
}
//append to the <tbody>
$('#activity_corr_tbody').append(htmlString);
//hide the loader
$('#activity_corr_loader').hide();
//show the table
$('#activity_corr_table').attr('hidden', false);
}
//this method will display the emotion correlations in a table
function displayEmotionCorrelations(correlations) {
let htmlString = "";
//create the html content for the activity correlation table
for (let i = 0; i < correlations.length; i++) {
let corr = correlations[i];
let indices = corr.index;
let value = corr.value;
value = Math.round(value * 100);
if (value <= 100 && value > 80) {
htmlString += "<tr class='bg-success text-white'>";
}
else if (value <= 80 && value > 60) {
htmlString += "<tr class='bg-primary text-white'>";
}
else if (value <= 60 && value > 40) {
htmlString += "<tr class='bg-warning text-white'>";
}
else if (value <= 40 && value > 20) {
htmlString += "<tr class='bg-danger text-white'>";
}
else if (value <= 20 && value > 0) {
htmlString += "<tr class='bg-dark text-white'>";
}
//create a <tr> to be inserted
htmlString += "<td>";
htmlString += indices[0];
htmlString += "</td>";
htmlString += "<td>";
htmlString += indices[1];
htmlString += "</td>";
htmlString += "<td>";
htmlString += value;
htmlString += "</td>";
htmlString += "</tr>";
}
//append to the <tbody>
$('#emotion_corr_tbody').append(htmlString);
//hide the loader
$('#emotion_corr_loader').hide();
//show the table
$('#emotion_corr_table').attr('hidden', false);
}
//this method will display the activity correlations in a table
function displayGazeCorrelations(correlations) {
let htmlString = "";
//create the html content for the activity correlation table
for (let i = 0; i < correlations.length; i++) {
let corr = correlations[i];
let indices = corr.index;
let value = corr.value;
value = Math.round(value * 100);
if (value <= 100 && value > 80) {
htmlString += "<tr class='bg-success text-white'>";
}
else if (value <= 80 && value > 60) {
htmlString += "<tr class='bg-primary text-white'>";
}
else if (value <= 60 && value > 40) {
htmlString += "<tr class='bg-warning text-white'>";
}
else if (value <= 40 && value > 20) {
htmlString += "<tr class='bg-danger text-white'>";
}
else if (value <= 20 && value > 0) {
htmlString += "<tr class='bg-dark text-white'>";
}
//create a <tr> to be inserted
htmlString += "<td>";
htmlString += indices[0];
htmlString += "</td>";
htmlString += "<td>";
htmlString += indices[1];
htmlString += "</td>";
htmlString += "<td>";
htmlString += value;
htmlString += "</td>";
htmlString += "</tr>";
}
//append to the <tbody>
$('#gaze_corr_tbody').append(htmlString);
//hide the loader
$('#gaze_corr_loader').hide();
//show the table
$('#gaze_corr_table').attr('hidden', false);
}
}); });
</script> </script>
...@@ -1189,6 +1411,13 @@ ...@@ -1189,6 +1411,13 @@
</button> </button>
</div> </div>
<!-- end of stats button --> <!-- end of stats button -->
<!-- button to view advanced analysis -->
<button type="button" class="btn btn-danger float-right mr-2"
id="activity_advanced_btn">
Advanced Analysis
</button>
<!-- end of button to view advanced analysis -->
</div> </div>
</div> </div>
<!-- end of Activity card --> <!-- end of Activity card -->
...@@ -1264,6 +1493,14 @@ ...@@ -1264,6 +1493,14 @@
</button> </button>
</div> </div>
<!-- end of stats button --> <!-- end of stats button -->
<!-- button to view advanced analysis -->
<button type="button" class="btn btn-danger float-right mr-2"
id="emotion_advanced_btn">
Advanced Analysis
</button>
<!-- end of button to view advanced analysis -->
</div> </div>
</div> </div>
...@@ -1333,6 +1570,14 @@ ...@@ -1333,6 +1570,14 @@
</button> </button>
</div> </div>
<!-- end of stats button --> <!-- end of stats button -->
<!-- button to view advanced analysis -->
<button type="button" class="btn btn-danger float-right mr-2"
id="gaze_advanced_btn">
Advanced Analysis
</button>
<!-- end of button to view advanced analysis -->
</div> </div>
</div> </div>
...@@ -1400,10 +1645,13 @@ ...@@ -1400,10 +1645,13 @@
<hr> <hr>
<!-- button to view activity summary --> <!-- button to view activity summary -->
<button type="button" class="btn btn-primary float-right" id="activity_summary_btn"> <button type="button" class="btn btn-primary float-right"
id="activity_summary_btn">
Summary Summary
</button> </button>
<!-- end of button to view activity summary --> <!-- end of button to view activity summary -->
</li> </li>
<!-- end of the activity list item --> <!-- end of the activity list item -->
...@@ -1469,10 +1717,13 @@ ...@@ -1469,10 +1717,13 @@
<hr> <hr>
<!-- button to view emotion summary --> <!-- button to view emotion summary -->
<button type="button" class="btn btn-primary float-right" id="emotion_summary_btn"> <button type="button" class="btn btn-primary float-right"
id="emotion_summary_btn">
Summary Summary
</button> </button>
<!-- end of button to view emotion summary --> <!-- end of button to view emotion summary -->
</li> </li>
<!-- end of the emotion list item --> <!-- end of the emotion list item -->
...@@ -1513,7 +1764,8 @@ ...@@ -1513,7 +1764,8 @@
<span class="float-right" id="looking_down_right_perct">50%</span> <span class="float-right" id="looking_down_right_perct">50%</span>
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar bg-success" role="progressbar" id="looking_down_right_width" <div class="progress-bar bg-success" role="progressbar"
id="looking_down_right_width"
style="width: 60%" style="width: 60%"
aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div>
</div> </div>
...@@ -1544,7 +1796,8 @@ ...@@ -1544,7 +1796,8 @@
<!-- button to view gaze summary --> <!-- button to view gaze summary -->
<button type="button" class="btn btn-primary float-right" id="gaze_summary_btn"> <button type="button" class="btn btn-primary float-right"
id="gaze_summary_btn">
Summary Summary
</button> </button>
<!-- end of button to view gaze summary --> <!-- end of button to view gaze summary -->
...@@ -1554,7 +1807,6 @@ ...@@ -1554,7 +1807,6 @@
<!-- end of the gaze list item --> <!-- end of the gaze list item -->
</ul> </ul>
...@@ -1884,7 +2136,8 @@ ...@@ -1884,7 +2136,8 @@
</div> </div>
<div class="custom-control custom-radio mt-2"> <div class="custom-control custom-radio mt-2">
<input type="radio" class="custom-control-input" id="customRadio3" name="option" value="10000"> <input type="radio" class="custom-control-input" id="customRadio3" name="option"
value="10000">
<label class="custom-control-label" for="customRadio3">All</label> <label class="custom-control-label" for="customRadio3">All</label>
</div> </div>
...@@ -1955,7 +2208,8 @@ ...@@ -1955,7 +2208,8 @@
<!-- gaze estimation Modal--> <!-- gaze estimation Modal-->
<div class="modal fade" id="gaze_estimation_stats_modal" tabindex="-1" role="dialog" aria-labelledby="exampleModalLabel" <div class="modal fade" id="gaze_estimation_stats_modal" tabindex="-1" role="dialog"
aria-labelledby="exampleModalLabel"
aria-hidden="true"> aria-hidden="true">
<div class="modal-dialog" role="document" style="max-width: 1400px"> <div class="modal-dialog" role="document" style="max-width: 1400px">
<div class="modal-content"> <div class="modal-content">
...@@ -1977,6 +2231,139 @@ ...@@ -1977,6 +2231,139 @@
<!-- end of activity statistics modal --> <!-- end of activity statistics modal -->
<!-- activity advanced analysis modal -->
<div class="modal fade" id="activity_advanced_modal" tabindex="-1" role="dialog" aria-labelledby="exampleModalLabel"
aria-hidden="true">
<div class="modal-dialog" role="document" style="max-width: 700px">
<div class="modal-content">
<div class="modal-header">
<h5 class="modal-title" id="exampleModalLabel">Activity Advanced Analysis</h5>
<button class="close" type="button" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body text-center">
<h3 class="font-weight-bold">Student Activity VS. Lecturer Activity</h3>
<!-- ajax loader -->
<div class="text-center" id="activity_corr_loader" hidden>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}" alt="Loader">
</div>
<!-- correlation table -->
<table class="table table-striped" id="activity_corr_table" hidden>
<thead>
<tr>
<th>Student Activity</th>
<th>Lecturer Activity</th>
<th>Correlation Score</th>
</tr>
</thead>
<tbody id="activity_corr_tbody">
</tbody>
</table>
<!-- end of correlation table -->
</div>
<div class="modal-footer">
<button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
</div>
</div>
</div>
</div>
<!-- end of activity advanced analysis modal -->
<!-- emotion advanced analysis modal -->
<div class="modal fade" id="emotion_advanced_modal" tabindex="-1" role="dialog" aria-labelledby="exampleModalLabel"
aria-hidden="true">
<div class="modal-dialog" role="document" style="max-width: 700px">
<div class="modal-content">
<div class="modal-header">
<h5 class="modal-title" id="exampleModalLabel">Emotion Advanced Analysis</h5>
<button class="close" type="button" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body text-center">
<h3 class="font-weight-bold">Student Emotions VS. Lecturer Activity</h3>
<!-- ajax loader -->
<div class="text-center" id="emotion_corr_loader" hidden>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}" alt="Loader">
</div>
<!-- correlation table -->
<table class="table table-striped" id="emotion_corr_table" hidden>
<thead>
<tr>
<th>Student Emotion</th>
<th>Lecturer Activity</th>
<th>Correlation Score</th>
</tr>
</thead>
<tbody id="emotion_corr_tbody">
</tbody>
</table>
<!-- end of correlation table -->
</div>
<div class="modal-footer">
<button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
</div>
</div>
</div>
</div>
<!-- end of emotion advanced analysis modal -->
<!-- gaze advanced analysis modal -->
<div class="modal fade" id="gaze_advanced_modal" tabindex="-1" role="dialog" aria-labelledby="exampleModalLabel"
aria-hidden="true">
<div class="modal-dialog" role="document" style="max-width: 700px">
<div class="modal-content">
<div class="modal-header">
<h5 class="modal-title" id="exampleModalLabel">Gaze Advanced Analysis</h5>
<button class="close" type="button" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body text-center">
<h3 class="font-weight-bold">Student Gaze estimation VS. Lecturer Activity</h3>
<!-- ajax loader -->
<div class="text-center" id="gaze_corr_loader" hidden>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}" alt="Loader">
</div>
<!-- correlation table -->
<table class="table table-striped" id="gaze_corr_table" hidden>
<thead>
<tr>
<th>Student Gaze estimation</th>
<th>Lecturer Activity</th>
<th>Correlation Score</th>
</tr>
</thead>
<tbody id="gaze_corr_tbody">
</tbody>
</table>
<!-- end of correlation table -->
</div>
<div class="modal-footer">
<button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
</div>
</div>
</div>
</div>
<!-- end of gaze advanced analysis modal -->
{% endblock %} {% endblock %}
<!--scripts--> <!--scripts-->
{% block 'scripts' %} {% block 'scripts' %}
......
...@@ -30,7 +30,11 @@ ...@@ -30,7 +30,11 @@
var global_video_name = ''; var global_video_name = '';
var global_lecturer_subject_index = 0; var global_lecturer_subject_index = 0;
var global_lecture_date = ''; var global_lecture_date = '';
var global_lecturer_video_name = '';
var lecturer_fps = 0;
//jquery //jquery
$(document).ready(function () { $(document).ready(function () {
...@@ -147,7 +151,6 @@ ...@@ -147,7 +151,6 @@
global_video_name = video.video_name; global_video_name = video.video_name;
if (lectureVideo.isActivityFound) { if (lectureVideo.isActivityFound) {
e.target.parentNode.parentNode.lastChild.innerHTML = '<button type="button" class="btn btn-primary" id="result_btn">Results</button>'; e.target.parentNode.parentNode.lastChild.innerHTML = '<button type="button" class="btn btn-primary" id="result_btn">Results</button>';
} else { } else {
...@@ -174,13 +177,14 @@ ...@@ -174,13 +177,14 @@
fetch('http://127.0.0.1:8000/get-lecture-activity/?lecture_video_id=' + global_lecture_video_id + '&lecture_video_name=' + global_video_name) fetch('http://127.0.0.1:8000/get-lecture-activity/?lecture_video_id=' + global_lecture_video_id + '&lecture_video_name=' + global_video_name)
.then((res) => res.json()) .then((res) => res.json())
.then((out) => { .then((out) => {
let frames = createFrames(out); {#let frames = createFrames(out);#}
return frames {#return frames#}
displayActivity(out);
}) })
.then((obj) => { .then((obj) => {
$('#video_frames').prepend(obj); {#$('#video_frames').prepend(obj);#}
$('#frame_loader').attr('hidden', true); $('#frame_loader').attr('hidden', true);
$('#slidecontainer').attr('hidden', false); {#$('#slidecontainer').attr('hidden', false);#}
}) })
.catch((error) => alert('this is the error: ' + error)); .catch((error) => alert('this is the error: ' + error));
}); });
...@@ -234,571 +238,153 @@ ...@@ -234,571 +238,153 @@
} }
//this section is responsible for displaying the frames as video //to handle the 'integrate' modal
//creating the frame content $('#integrate_activity').click(function () {
function createFrames(res) { //define the student video src
let main_frame_content = "<div class='row' id='main_frames'>"; let video_src = "{% static '' %}FirstApp/videos/" + global_video_name;
main_frame_content += "<ul class='list-group list-group-horizontal'>";
let count = 0;
//loop through the frames
res.extracted.map((image) => {
let img_src = "";
let len = image.detections.length;
if (count === 0) {
main_frame_content += "<li class='list-group-item text-center' id='image_0'>";
img_src = "<img src='{% static '' %}FirstApp/activity/" + global_video_name + "/" + res.extracted[0].frame + "/" + res.extracted[0].detections[0] + "' width='400' height='400'>";
} else {
main_frame_content += "<li class='list-group-item other-frames' id='image_" + count + "' hidden>";
img_src = "<img src='{% static '' %}FirstApp/activity/" + global_video_name + "/" + image.frame + "/" + image.detections[len - 1] + "' class='img-link' width='400' height='400'>";
}
main_frame_content += img_src;
main_frame_content += "</li>";
count++;
});
main_frame_content += "</ul>";
main_frame_content += "</div>";
//setting the min, max values of the slider
$('#myActivityRange').attr({'min': 0, 'max': count});
//display the progress bars
displayActivity(res);
return main_frame_content;
}
//declaring the variable for setInterval function
let timeVar = null;
//handling the play button
$('#play_pause_icon_activity').click(function () {
//defining the two possible classes
let play_class = "fas fa-play";
let pause_class = "fas fa-pause";
//retrieving the current icon class
let current_class = $(this).attr('class');
//assigning the correct class based on the icon clicked
let new_class = (current_class === play_class) ? pause_class : play_class;
//setting the new class
$(this).attr('class', new_class);
//handling the slider
let slider = document.getElementById("myActivityRange");
let output = document.getElementById("demo");
//when the button is playing
if (current_class === play_class) {
timeVar = setInterval(() => {
let value = slider.value;
let new_slider_value = Number(value) + 1;
slider.value = new_slider_value;
output.innerHTML = new_slider_value.toString();
let selectedImage = '#image_' + Number(value);
//displaying the relevant image
$('#image_0').html($(selectedImage).html());
}, 50);
}
//when the button is paused
else if (current_class === pause_class) {
clearInterval(timeVar);
}
});
//handling the slider
let slider = document.getElementById("myActivityRange");
let output = document.getElementById("demo");
output.innerHTML = slider.value;
slider.oninput = function () {
output.innerHTML = this.value;
let selectedImage = '#image_' + Number(this.value);
//hide
{#$('#image_0').attr('hidden', true);#}
$('#image_0').html($(selectedImage).html());
//setting the selected image
{#$(selectedImage).attr('hidden', false);#}
};
$(document).on('click', '.img-link', function (e) {
//removing previously displayed detections
$('.detections').remove();
//removing the no-content message
$('#no_detection_message_content').hide();
//appearing the loader
$('#detection_loader').attr('hidden', false);
let img_src_arr = e.target.src.split('/'); //assign the video src
let len = img_src_arr.length; $('#student_video').attr('src', video_src);
let src = img_src_arr[len - 1];
let frame_name_arr = src.split('.');
let frame_name = frame_name_arr[0];
//fetching the detection for the selected frame //fetch the lecture recorded video name
fetch('http://127.0.0.1:8000/get-lecture-activity-frame-detection/?video_name=' + global_video_name + "&frame_name=" + frame_name) fetch('http://127.0.0.1:8000/get-lecture-recorded-video-name/?lecturer=' + global_lecturer + '&subject=' + global_subject + '&date=' + global_lecture_date)
.then((res) => res.json()) .then((res) => res.json())
.then((out) => displayDetections(out.detections, frame_name)) .then((out) => assignLecturerRecordedVideoName(out))
.catch((error) => alert('this is an error')); .catch((err) => alert('error: ' + err));
});
//the function to display detections
function displayDetections(detections, frame_name) {
let img_string = '';
let no_of_detections = detections.length;
//disabling the loader
$('#detection_loader').attr('hidden', true);
//appearing the no of detections number area
$('#detection_number_area').attr('hidden', false);
$('#no_of_detections').text(no_of_detections);
detections.map((detection) => {
img_string += "<img src='{% static '' %}FirstApp/activity/" + global_video_name + "/" + frame_name + "/" + detection + "' class='detections m-2' width='100' height='100' >"
});
$('#detection_frames').prepend(img_string);
}
//listening for click events in labels
$('.labels').click(function () {
let label = Number($(this).attr('data-number')); {#global_lecturer_video_name = "Test_1.mp4";#}
let label_name = $(this).attr('data-label'); {#global_lecturer_video_name = "Test_2.mp4";#}
{#global_lecturer_video_name = "Test_3.mp4";#}
//removing the previous student detection lists
$('.student_detection_lists').remove();
//appearing the loader
$('#detection_student_loader').attr('hidden', false);
//appearing the loader
$('#activity_type').attr('hidden', false);
$('#activity_type_text').text(label_name);
//disappearing the no content message
$('#no_detection_student_content').attr('hidden', true);
//fetching from the api //fetch data from the API
fetch('http://127.0.0.1:8000/get-lecture-activity-detection-for-label/?video_name=' + global_video_name + '&label=' + label) fetch('http://127.0.0.1:8000/get-lecture-activity-for-frame?video_name=' + global_video_name)
.then((res) => res.json()) .then((res) => res.json())
.then((out) => createDetectedStudentFrames(out)) .then((out) => displayActivityRecognitionForFrame(out.response))
.catch((error) => alert('this is the error: ' + error)) .catch((err) => alert('error: ' + err));
});
//creating the detected students frames
function createDetectedStudentFrames(detections) {
let htmlString = "";
//iterating through the student
detections.people.map((student) => {
let title = student.split('.')[0];
let images = "";
htmlString += "<div class='row p-3 student-detection-rows'>";
let student_count = 0;
//iterating through the frames
detections.response.map((frame) => {
let frame_detections = frame.detections;
if (frame_detections.includes(student)) {
if (student_count === 0) {
images += "<li class='list-group-item frame-0' id='image_0_" + title + "'>";
} else {
images += "<li class='list-group-item other-student-frames' id='image_" + student_count + "_" + title + "' hidden>";
}
images += "<img src='{% static '' %}FirstApp/Activity/" + global_video_name + "/" + frame.frame + "/" + student + "' width='200' height='200'>";
images += "</li>";
//increment the student count
student_count++;
}
});
htmlString += "<ul class='list-group'>";
htmlString += "<li class='list-group-item'>";
htmlString += "<div class='row m-3'>";
htmlString += "<h4 class='font-weight-bold'>Student ID: <span>" + title + "</span></h4>";
htmlString += "</div>";
htmlString += "</li>";
{#htmlString += "<div class='row m-3'></div>";#}
htmlString += "<li class='list-group-item'>";
htmlString += "<div class='row'>";
htmlString += "<ul class='list-group list-group-horizontal student_detection_lists' style='overflow-x: scroll'>";
htmlString += images;
htmlString += "</ul>";
htmlString += "</div>";
htmlString += "</li>";
htmlString += "<li class='list-group-item'>";
htmlString += "<div class='slidecontainer'>";
htmlString += "<div class='row m-3'></div>";
htmlString += "<div class='row'>";
htmlString += "<span><i class='fas fa-play play-pause-icon-student-frames' id='icon_" + title + "'></i></span>";
htmlString += "</div>";
htmlString += "<input type='range' min='1' max='100' value='0' class='slider' id='slider_" + title + "'>";
htmlString += "<p>No of frames: <span id='demo_" + title + "'></span></p>";
htmlString += "</div>";
htmlString += "</div>";
htmlString += "</li>";
htmlString += "</ul>";
}); });
//disappearing the loader //assign the lecturer recorded video name
$('#detection_student_loader').attr('hidden', true); function assignLecturerRecordedVideoName(res) {
//append to the relevant html card content
$('#detection_students').append(htmlString);
}
let studentTimeVar = null;
//playing the frames for each student detection
$(document).on('click', '.play-pause-icon-student-frames', function (e) {
//defining the two possible classes
let play_class = "fas fa-play play-pause-icon-student-frames";
let pause_class = "fas fa-pause play-pause-icon-student-frames";
//retrieving the current icon class
let current_class = $(this).attr('class');
//assigning the correct class based on the icon clicked
let new_class = (current_class === play_class) ? pause_class : play_class;
//setting the new class
$(this).attr('class', new_class);
//extracting the title pf the clicked icon
let title_part = $(this).attr('id');
let title = title_part.split("_")[1];
//handling the slider
let slider = document.getElementById("slider_" + title);
let output = document.getElementById("demo_" + title);
//when the button is playing global_lecturer_video_name = res.video_name;
if (current_class === play_class) {
studentTimeVar = setInterval(() => {
let value = slider.value;
let new_slider_value = Number(value) + 1;
slider.value = new_slider_value;
output.innerHTML = new_slider_value.toString();
//define the lecturer video src
let lecturer_video_src = "{% static '' %}FirstApp/lecturer_videos/" + global_lecturer_video_name;
let selectedImage = '#image_' + Number(value) + '_' + title;
//displaying the relevant image //assign the video src
$('#image_0_' + title).html($(selectedImage).html()); $('#lecturer_video').attr('src', lecturer_video_src);
}, 100);
}
//when the button is paused $('#integrate_modal').modal();
else if (current_class === pause_class) {
clearInterval(studentTimeVar);
} }
}); //this function will load the activity recognition for frames
function displayActivityRecognitionForFrame(response) {
//this is to handle the 'evaluate' button //hide the loader
$('#evaluate_button').click(function () { $('#student_video_progress_loader').attr('hidden', true);
//show the progress bars
//hide the message $('#student_video_progress').attr('hidden', false);
$('#no_evaluated_student_content').attr('hidden', true);
//show the loader
$('#evaluate_student_loader').attr('hidden', false);
//using the fetch api
fetch('http://127.0.0.1:8000/get-lecture-activity-student-evaluation/?video_name=' + global_video_name)
.then((res) => res.json())
.then((out) => evaluate_student(out))
.catch((error) => alert('this is the error: ' + error))
});
//to create html for evaluate function //creating the html string
function evaluate_student(response) {
let htmlString = ""; let htmlString = "";
//iterating through the student
response.people.map((student) => {
let title = student.split('.')[0];
let images = "";
htmlString += "<div class='row p-3 student-evaluation-rows'>";
let student_count = 0; //creating the html string, iteratively
response.map((frame) => {
//iterating through the frames let frame_name = frame.frame_name;
response.response.map((frame) => { let phone_perct = Math.round(frame.phone_perct, 0);
let frame_detections = frame.detections; let listen_perct = Math.round(frame.listen_perct, 0);
let frame_detection_length = frame_detections.length; {#let listen_perct = Math.round(frame.listening_perct, 0);#}
let note_perct = Math.round(frame.note_perct, 0);
if (frame_detections.includes(student)) {
if (student_count === 0) {
images += "<li class='list-group-item frame-0' id='image_0_evaluation" + title + "'>";
} else {
images += "<li class='list-group-item other-student-frames' id='image_evaluation" + student_count + "_" + title + "' hidden>";
}
images += "<img src='{% static '' %}FirstApp/Activity/" + global_video_name + "/" + frame.frame + "/" + student + "' width='200' height='200'>";
images += "</li>";
if (student_count === (frame_detection_length)) {
images += "<li class='list-group-item'>";
images += "<button type='button' class='btn btn-dark individual-evaluation' id='evaluate_student_" + title + "'>evaluate</button>";
images += "</li>";
}
//increment the student count
student_count++;
}
});
htmlString += "<ul class='list-group'>"; //append to the html string
htmlString += "<li class='list-group-item'>"; //phone checking
htmlString += "<div class='row m-3'>"; htmlString += "<div class='progress_area' id='progress_" + frame_name + "' hidden>";
htmlString += "<h4 class='font-weight-bold'>Student ID: <span>" + title + "</span></h4>"; htmlString += "<h4 class='small font-weight-bold'>Phone checking</h4>";
htmlString += "</div>"; htmlString += "<span class='float-right' id='phone_checking_instant_" + frame_name + "'>" + phone_perct + "%</span>";
htmlString += "</li>"; htmlString += "<div class='progress mb-4'>";
{#htmlString += "<div class='row m-3'></div>";#} htmlString += "<div class='progress-bar bg-warning' role='progressbar' id='phone_checking_instant_value_" + frame_name + "' style='width: " + phone_perct + "%' aria-valuenow='40' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "<li class='list-group-item'>";
htmlString += "<div class='row'>";
htmlString += "<ul class='list-group list-group-horizontal student_detection_lists' style='overflow-x: scroll'>";
htmlString += images;
htmlString += "</ul>";
htmlString += "</div>";
htmlString += "</li>";
htmlString += "<li class='list-group-item'>";
htmlString += "<div class='slidecontainer'>";
htmlString += "<div class='row m-3'></div>";
htmlString += "<div class='row'>";
htmlString += "<span><i class='fas fa-play play-pause-icon-student-evaluations' id='icon_" + title + "'></i></span>";
htmlString += "</div>";
htmlString += "<input type='range' min='1' max='100' value='0' class='slider' id='slider_evaluation" + title + "'>";
htmlString += "<p>No of frames: <span id='demo_evaluation" + title + "'></span></p>";
htmlString += "</div>";
htmlString += "</div>"; htmlString += "</div>";
htmlString += "</li>";
htmlString += "</ul>";
});
//disappearing the loader
$('#evaluate_student_loader').attr('hidden', true);
//append to the relevant html card content
$('#evaluation_students').append(htmlString);
}
//interval variable for individual students
let studentEvaluationVar = null;
//playing the frames for each student evaluation
$(document).on('click', '.play-pause-icon-student-evaluations', function (e) {
//defining the two possible classes
let play_class = "fas fa-play play-pause-icon-student-evaluations";
let pause_class = "fas fa-pause play-pause-icon-student-evaluations";
//retrieving the current icon class
let current_class = $(this).attr('class');
//assigning the correct class based on the icon clicked
let new_class = (current_class === play_class) ? pause_class : play_class;
//setting the new class
$(this).attr('class', new_class);
//extracting the title pf the clicked icon //note taking
let title_part = $(this).attr('id'); htmlString += "<h4 class='small font-weight-bold'>Writing</h4>";
let title = title_part.split("_")[1]; htmlString += "<span class='float-right' id='note_taking_instant_" + frame_name + "'>" + note_perct + "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar' role='progressbar' id='note_taking_instant_value_" + frame_name + "' style='width: " + note_perct + "%' aria-valuenow='0' aria-valuemin='0' aria-valuemax='100'></div>";
//handling the slider htmlString += "</div>";
let slider = document.getElementById("slider_evaluation" + title);
let output = document.getElementById("demo_evaluation" + title);
//when the button is playing
if (current_class === play_class) {
studentEvaluationVar = setInterval(() => {
let value = slider.value;
let new_slider_value = Number(value) + 1;
slider.value = new_slider_value;
output.innerHTML = new_slider_value.toString();
let selectedImage = '#image_evaluation' + Number(value) + '_' + title;
//displaying the relevant image
$('#image_0_evaluation' + title).html($(selectedImage).html());
}, 100); //listening
} htmlString += "<h4 class='small font-weight-bold'>Listening</h4>";
htmlString += "<span class='float-right' id='listening_instant_" + frame_name + "'>" + listen_perct + "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-info' role='progressbar' id='listening_instant_value_" + frame_name + "' style='width: " + listen_perct + "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>";
//when the button is paused //ending the progress area
else if (current_class === pause_class) { htmlString += "</div>";
clearInterval(studentEvaluationVar);
}
}); });
//end of student evaluation video frame
//to evaluate the individual student
$(document).on('click', '.individual-evaluation', function (e) {
let individual_id = $(this).attr('id');
let student_name = individual_id.split('_')[2];
student_name += ".png";
let html = $(this).html();
//after clicking, change the html
$(this).html("<span class='font-italic'>loading...</span>");
//fetching from the API
fetch('http://127.0.0.1:8000/get-lecture-activity-individual-student-evaluation/?video_name=' + global_video_name + '&student_name=' + student_name)
.then((res) => res.json())
.then((out) => displayIndividualStudentActivity(out.response, e, student_name))
.catch((error) => alert('something went wrong'));
});

//append the html
$('#student_video_column').append(htmlString);
//start retrieving lecturer activity frame recognition
fetch('http://127.0.0.1:8000/lecturer/get-lecturer-video-frame-recognitions/?video_name=' + global_lecturer_video_name)
.then((res) => res.json())
.then((out) => displayLecturerActivityRecognitionForFrame(out))
.catch((err) => alert('error: ' + err))
//this function will display the individual student emotions
function displayIndividualStudentActivity(res, e, title) {
let phone_perct = Math.round(res.phone_perct, 1);
let writing_perct = Math.round(res.writing_perct, 1);
let listening_perct = Math.round(res.listening_perct, 1);
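//note: Math.round() in JavaScript accepts a single argument, so the extra '1' above is ignored and these percentages are rounded to whole numbers rather than one decimal place.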
//set the percentage values
//$('#talking_individual_perct').text(res.talking_perct + '%');
$('#phone_individual_perct').text(phone_perct + '%');
$('#writing_individual_perct').text(writing_perct + '%');
$('#listening_individual_perct').text(listening_perct + '%');
//set the width
//$('#talking_individual_width').width(res.talking_perct + '%');
$('#phone_individual_width').width(phone_perct + '%');
$('#writing_individual_width').width(writing_perct + '%');
$('#listening_individual_width').width(listening_perct + '%');
//open the student individual modal
$('#student_individual_modal').modal();
//set the button to default
e.target.innerHTML = "<span>evaluate</span>";
}
//to handle the 'integrate' modal
$('#integrate_gaze').click(function () {
//define the student video src
let video_src = "{% static '' %}FirstApp/videos/" + global_video_name;
//assign the video src
$('#student_video').attr('src', video_src);
$('#integrate_modal').modal();
//fetch data from the API
fetch('http://127.0.0.1:8000/get-lecture-activity-for-frame?video_name=' + global_video_name)
.then((res) => res.json())
.then((out) => displayActivityRecognitionForFrame(out.response))
.catch((err) => alert('error: ' + err));
});
//this function will load the activity recognition for frames
function displayActivityRecognitionForFrame(response) {
//hide the loader
$('#student_video_progress_loader').attr('hidden', true);
//show the progress bars
$('#student_video_progress').attr('hidden', false);
//creating the html string
let htmlString = "";
//creating the html string, iteratively
response.map((frame) => {
let frame_name = frame.frame_name;
let phone_perct = Math.round(frame.phone_perct, 0);
let listen_perct = Math.round(frame.listen_perct, 0);
{#let listen_perct = Math.round(frame.listening_perct, 0);#}
let note_perct = Math.round(frame.note_perct, 0);
//append to the html string
//phone checking
htmlString += "<div class='progress_area' id='progress_" + frame_name + "' hidden>";
htmlString += "<h4 class='small font-weight-bold'>Phone checking</h4>";
htmlString += "<span class='float-right' id='phone_checking_instant_" + frame_name + "'>" + phone_perct + "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-warning' role='progressbar' id='phone_checking_instant_value_" + frame_name + "' style='width: " + phone_perct + "%' aria-valuenow='40' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>";
//note taking
htmlString += "<h4 class='small font-weight-bold'>Writing</h4>";
htmlString += "<span class='float-right' id='note_taking_instant_" + frame_name + "'>" + note_perct + "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar' role='progressbar' id='note_taking_instant_value_" + frame_name + "' style='width: " + note_perct + "%' aria-valuenow='0' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>";
//listening
htmlString += "<h4 class='small font-weight-bold'>Listening</h4>";
htmlString += "<span class='float-right' id='listening_instant_" + frame_name + "'>" + listen_perct + "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-info' role='progressbar' id='listening_instant_value_" + frame_name + "' style='width: " + listen_perct + "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>";
//ending the progress area
...@@ -807,7 +393,8 @@
});
//append the html
$('#student_video_column').append(htmlString);
}

//this function will load the lecturer activity recognition for frames
function displayLecturerActivityRecognitionForFrame(response) {
//hide the loader
$('#lecturer_video_progress_loader').attr('hidden', true);
//show the progress bars
$('#lecturer_video_progress').attr('hidden', false);
//creating the html string
let htmlString = "";
let duration = 1000 / response.fps;
lecturer_fps = Math.round(duration, 0);
console.log('lecturer fps: ', lecturer_fps);
//creating the html string, iteratively
response.frame_recognitions.map((frame) => {
let frame_name = frame.frame_name;
let sitting_perct = Math.round(frame.sitting_perct, 0);
let standing_perct = Math.round(frame.standing_perct, 0);
{#let listen_perct = Math.round(frame.listening_perct, 0);#}
let walking_perct = Math.round(frame.walking_perct, 0);
//append to the html string
//sitting
htmlString += "<div class='progress_area' id='progress_lecturer_" + frame_name + "' hidden>";
htmlString += "<h4 class='small font-weight-bold'>Sitting</h4>";
htmlString += "<span class='float-right' id='sitting_instant_" + frame_name + "'>" + sitting_perct + "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-warning' role='progressbar' id='sitting_instant_value_" + frame_name + "' style='width: " + sitting_perct + "%' aria-valuenow='40' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>";
//standing
htmlString += "<h4 class='small font-weight-bold'>Standing</h4>";
htmlString += "<span class='float-right' id='standing_instant_" + frame_name + "'>" + standing_perct + "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar' role='progressbar' id='standing_instant_value_" + frame_name + "' style='width: " + standing_perct + "%' aria-valuenow='0' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>";
//walking
htmlString += "<h4 class='small font-weight-bold'>Walking</h4>";
htmlString += "<span class='float-right' id='walking_instant_" + frame_name + "'>" + walking_perct + "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-info' role='progressbar' id='walking_instant_value_" + frame_name + "' style='width: " + walking_perct + "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>";
//ending the progress area
...@@ -807,7 +393,8 @@
});
//append the html
$('#lecturer_video_column').append(htmlString);
}
...@@ -815,19 +402,19 @@ ...@@ -815,19 +402,19 @@
//to handle the 'integrate' play button
$('#play_integrate_button').click(function () {
let video = $('video')[0];
let video1 = $('video')[1];
let test_video = document.getElementsByTagName('video')[0];
let play_class = 'btn btn-outline-danger play';
let pause_class = 'btn btn-outline-danger pause';
let count = 0;
let count_lecturer = 0;
let classes = $(this).attr('class');
let video_interval = setInterval(() => {
let talking_number = Math.round(Math.random() * 100, 0);
let phone_number = Math.round(Math.random() * 100, 0);
let note_number = Math.round(Math.random() * 100, 0);
let listening_number = Math.round(Math.random() * 100, 0);

//=====STUDENTS COLUMN=====

//get the relevant progress area
let progress_area = "progress_frame-" + count;
...@@ -842,35 +429,53 @@ ...@@ -842,35 +429,53 @@
//replace the current progress area with the selected one
$('#student_video_progress').html(progress_area_html);
//increment the count
count++;
console.log('current frame (student): ', count);

//setting the values
/*
$('#talking_instant').text(talking_number + '%');
$('#phone_checking_instant').text(phone_number + '%');
$('#note_taking_instant').text(note_number + '%');
$('#listening_instant').text(listening_number + '%');
//setting the width
$('#talking_instant_value').width(talking_number + '%');
$('#phone_checking_instant_value').width(phone_number + '%');
$('#note_taking_instant_value').width(note_number + '%');
$('#listening_instant_value').width(listening_number + '%');
*/
}, 33);
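//note: the 33 ms delay assumes roughly 30 student frames per second (1000 / 30 ≈ 33);
//the lecturer column below steps at 'lecturer_fps', the per-frame delay in milliseconds derived from the lecturer video's frame rate.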
let video_interval_lecturer = setInterval(() => {
//=====LECTURER COLUMN=====
//get the relevant progress area
let progress_area_lecturer = "progress_lecturer_frame-" + count_lecturer;
let progress_area_id_lecturer = "#" + progress_area_lecturer;
//find the corresponding progress area
let progress_area_html_lecturer = document.getElementById(progress_area_lecturer);
//display the retrieved progress area
$(progress_area_id_lecturer).attr('hidden', false);
//replace the current progress area with the selected one
$('#lecturer_video_progress').html(progress_area_html_lecturer);
//increment the count
count_lecturer++;
console.log('current frame (lecturer): ', count_lecturer);
}, lecturer_fps);
//check for the current class
if (classes === play_class) {
$(this).text('Pause');
$(this).attr('class', pause_class);
video.play();
video1.play();
} else if (classes === pause_class) {
$(this).text('Play');
$(this).attr('class', play_class);
video.pause();
video1.pause();
}
//function to do when the video is paused //function to do when the video is paused
...@@ -880,7 +485,13 @@ ...@@ -880,7 +485,13 @@
video.onended = function (e) {
//stop changing the activity values
clearInterval(video_interval);
};
//function to do when the lecturer video is ended
video1.onended = function (e) {
//stop changing the activity values
clearInterval(video_interval_lecturer);
};
}); });
...@@ -893,7 +504,6 @@ ...@@ -893,7 +504,6 @@
$('#generate_report_message').hide(); $('#generate_report_message').hide();
fetch('http://127.0.0.1:8000/lecture-activity-report-generation/?lecturer=' + global_lecturer + '&subject=' + global_subject + '&date=' + global_lecture_date) fetch('http://127.0.0.1:8000/lecture-activity-report-generation/?lecturer=' + global_lecturer + '&subject=' + global_subject + '&date=' + global_lecture_date)
.then((res) => res.json()) .then((res) => res.json())
.then((out) => { .then((out) => {
...@@ -908,7 +518,6 @@ ...@@ -908,7 +518,6 @@
}); });
}); });
</script> </script>
...@@ -931,11 +540,11 @@ ...@@ -931,11 +540,11 @@
{% load static %}
<!-- Page Heading -->
<div class="d-sm-flex align-items-center justify-content-between mb-4">
<h1 class="h3 mb-0 text-gray-800">Student Activity Recognition</h1>
{# <button type="button" data-target="#generateReportModal" data-toggle="modal" class="d-none d-sm-inline-block btn btn-sm btn-primary shadow-sm" id="generate_report_before" disabled><i#}
{# class="fas fa-download fa-sm text-white-50"></i> Generate Report</button>#}
</div>
<!--first row -->
...@@ -1084,25 +693,8 @@ ...@@ -1084,25 +693,8 @@
alt="Loader"> alt="Loader">
</div> </div>
<!--frames -->.
<div class="text-center p-4" id="video_frames">
<!-- slide container -->
<div id="slidecontainer" hidden>
<div class="row m-3"></div>
<!-- play/pause icon -->
<div class="row">
<span><i class="fas fa-play"
id="play_pause_icon_activity"></i></span>
</div>
<input type="range" min="1" max="100" value="0" class="slider"
id="myActivityRange">
<p>No of frames: <span id="demo"></span></p>
</div>
</div>
</div>
<!--this area will display the progress bars -->
<div class="progress_area mt-4" hidden>
<!--talking with friends --> <!--talking with friends -->
<a href="#" class="btn btn-link labels" data-number="1" <a href="#" class="btn btn-link labels" data-number="1"
data-label="talking-with-friends"> data-label="talking-with-friends">
...@@ -1113,7 +705,8 @@ ...@@ -1113,7 +705,8 @@
<div class="progress-bar bg-danger" role="progressbar" <div class="progress-bar bg-danger" role="progressbar"
id="talking_width" id="talking_width"
style="width: 20%" style="width: 20%"
aria-valuenow="20" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="20" aria-valuemin="0"
aria-valuemax="100"></div>
</div> </div>
<!--phone checking --> <!--phone checking -->
...@@ -1126,7 +719,8 @@ ...@@ -1126,7 +719,8 @@
<div class="progress-bar bg-warning" role="progressbar" <div class="progress-bar bg-warning" role="progressbar"
id="phone_width" id="phone_width"
style="width: 40%" style="width: 40%"
aria-valuenow="40" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="40" aria-valuemin="0"
aria-valuemax="100"></div>
</div> </div>
<!--note taking --> <!--note taking -->
...@@ -1138,7 +732,8 @@ ...@@ -1138,7 +732,8 @@
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar" role="progressbar" id="writing_width" <div class="progress-bar" role="progressbar" id="writing_width"
style="width: 60%" style="width: 60%"
aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="60" aria-valuemin="0"
aria-valuemax="100"></div>
</div> </div>
<!--listening--> <!--listening-->
...@@ -1149,16 +744,16 @@ ...@@ -1149,16 +744,16 @@
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar bg-info" role="progressbar" <div class="progress-bar bg-info" role="progressbar"
id="listening_width" style="width: 80%" id="listening_width" style="width: 80%"
aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="80" aria-valuemin="0"
aria-valuemax="100"></div>
</div>
</div> </div>
<!--evaluate button -->
<button type="button" class="btn btn-danger float-right"
id="evaluate_button">Evaluate
</button>
</div> </div>
</div> </div>
<!--graph tab --> <!--graph tab -->
...@@ -1215,66 +810,6 @@ ...@@ -1215,66 +810,6 @@
</div> </div>
<!--2nd column -->
{# <div class="col-lg-6">#}
{# <!--card content -->#}
{# <div class="card shadow mb-4">#}
{# <!--card header -->#}
{# <div class="card-header py-3">#}
{# <h5 class="m-0 font-weight-bold text-primary">Frame Detections</h5>#}
{# </div>#}
{##}
{# <!--card body -->#}
{# <div class="text-center p-4" id="detection_frames">#}
{##}
{# <!--no content message-->#}
{# <div class="text-center p-2" id="no_detection_message_content">#}
{# <span class="font-italic">No frame is selected</span>#}
{# </div>#}
{##}
{# <div class="text-left m-3" id="detection_number_area" hidden>#}
{# <p>No of detections: <span id="no_of_detections"></span></p>#}
{# </div>#}
{# <!--the detection loader -->#}
{# <div class="text-center p-2" id="detection_loader" hidden>#}
{# <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"#}
{# alt="Loader">#}
{# </div>#}
{# </div>#}
{# </div>#}
<!--detection person card -->
{# <div class="card shadow mb-4">#}
{# <!--card header -->#}
{# <div class="card-header py-3">#}
{# <h5 class="m-0 font-weight-bold text-primary">Detected Students (by activity#}
{# type)</h5>#}
{# </div>#}
{##}
{# <!--card body -->#}
{# <div class="text-center p-4" id="detection_students">#}
{# <!--activity type line -->#}
{# <div class="text-center p-2" id="activity_type" hidden>#}
{# <p>Activity Type: <span class="font-weight-bold" id="activity_type_text"></span>#}
{# </p>#}
{# </div>#}
{##}
{# <!--no content message-->#}
{# <div class="text-center p-2" id="no_detection_student_content">#}
{# <span class="font-italic">No activity type is selected</span>#}
{# </div>#}
{##}
{# <!--the detection student loader -->#}
{# <div class="text-center p-2" id="detection_student_loader" hidden>#}
{# <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"#}
{# alt="Loader">#}
{# </div>#}
{##}
{# </div>#}
{# </div>#}
{# </div>#}
<!--2nd column --> <!--2nd column -->
<div class="col-lg-6"> <div class="col-lg-6">
<!--card --> <!--card -->
...@@ -1292,7 +827,7 @@ ...@@ -1292,7 +827,7 @@
<!--button --> <!--button -->
<div class="text-right m-4"> <div class="text-right m-4">
<button type="button" class="btn btn-outline-success" id="integrate_gaze"> <button type="button" class="btn btn-outline-success" id="integrate_activity">
Process Process
</button> </button>
</div> </div>
...@@ -1310,32 +845,32 @@ ...@@ -1310,32 +845,32 @@
<!--1st column --> <!--1st column -->
<div class="col-lg-6"> <div class="col-lg-6">
{# <!--card -->#} {# <!--card -->#}
{# <div class="card shadow mb-4">#} {# <div class="card shadow mb-4">#}
{# <!--card header -->#} {# <!--card header -->#}
{# <div class="card-header">#} {# <div class="card-header">#}
{# <h5 class="m-0 font-weight-bold text-primary">Evaluated Students</h5>#} {# <h5 class="m-0 font-weight-bold text-primary">Evaluated Students</h5>#}
{# </div>#} {# </div>#}
{##} {##}
{# <!--card body -->#} {# <!--card body -->#}
{# <div class="card-body" id="evaluation_students">#} {# <div class="card-body" id="evaluation_students">#}
{##} {##}
{# <!--no content message-->#} {# <!--no content message-->#}
{# <div class="text-center p-2" id="no_evaluated_student_content">#} {# <div class="text-center p-2" id="no_evaluated_student_content">#}
{# <span class="font-italic">Press 'Evaluate' button to evaluate students</span>#} {# <span class="font-italic">Press 'Evaluate' button to evaluate students</span>#}
{# </div>#} {# </div>#}
{##} {##}
{# <!--the detection student loader -->#} {# <!--the detection student loader -->#}
{# <div class="text-center p-2" id="evaluate_student_loader" hidden>#} {# <div class="text-center p-2" id="evaluate_student_loader" hidden>#}
{# <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"#} {# <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"#}
{# alt="Loader">#} {# alt="Loader">#}
{# </div>#} {# </div>#}
{# <!--end of student detection loader -->#} {# <!--end of student detection loader -->#}
{##} {##}
{##} {##}
{# </div>#} {# </div>#}
{##} {##}
{# </div>#} {# </div>#}
</div> </div>
...@@ -1508,7 +1043,8 @@ ...@@ -1508,7 +1043,8 @@
</div> </div>
</div> </div>
<div class="modal-footer"> <div class="modal-footer">
<button type="button" class="btn btn-primary" data-dismiss="modal" id="generate_report_btn">Yes</button> <button type="button" class="btn btn-primary" data-dismiss="modal" id="generate_report_btn">Yes
</button>
<button type="button" class="btn btn-danger" data-dismiss="modal">No</button> <button type="button" class="btn btn-danger" data-dismiss="modal">No</button>
</div> </div>
</div> </div>
...@@ -1558,7 +1094,7 @@ ...@@ -1558,7 +1094,7 @@
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar bg-danger" role="progressbar" <div class="progress-bar bg-danger" role="progressbar"
id="talking_instant_value" id="talking_instant_value"
{# style="width: 0%"#} {# style="width: 0%"#}
aria-valuenow="20" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="20" aria-valuemin="0" aria-valuemax="100"></div>
</div> </div>
...@@ -1569,7 +1105,7 @@ ...@@ -1569,7 +1105,7 @@
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar bg-warning" role="progressbar" <div class="progress-bar bg-warning" role="progressbar"
id="phone_checking_instant_value" id="phone_checking_instant_value"
{# style="width: 0%"#} {# style="width: 0%"#}
aria-valuenow="40" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="40" aria-valuemin="0" aria-valuemax="100"></div>
</div> </div>
...@@ -1580,7 +1116,7 @@ ...@@ -1580,7 +1116,7 @@
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar" role="progressbar" <div class="progress-bar" role="progressbar"
id="note_taking_instant_value" id="note_taking_instant_value"
{# style="width: 0%"#} {# style="width: 0%"#}
aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div>
</div> </div>
...@@ -1591,7 +1127,7 @@ ...@@ -1591,7 +1127,7 @@
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar bg-info" role="progressbar" <div class="progress-bar bg-info" role="progressbar"
id="listening_instant_value" id="listening_instant_value"
{# style="width: 80%"#} {# style="width: 80%"#}
aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div>
</div> </div>
...@@ -1601,24 +1137,74 @@ ...@@ -1601,24 +1137,74 @@
<!--end of 1st column --> <!--end of 1st column -->
<!--2nd column --> <!--2nd column -->
<div class="col-md-6"> <div class="col-md-6" id="lecturer_video_column">
<div class="text-center"> <div class="text-center">
<span class="h3 font-italic font-weight-bold">Lecturer Performance</span> <span class="h3 font-italic font-weight-bold">Lecturer Performance</span>
</div> </div>
<!--display lecture video --> <!--display lecture video -->
<div class="text-center m-3" id="lecturer_video_section"> <div class="text-center m-3" id="lecturer_video_section">
{# <!--temporary text -->#}
{# <div class="text-center" id="temp_lecturer_text">#}
{# <span class="font-italic">No video was found</span>#}
{# </div>#}
<!--display lecturer video -->
<div class="text-center m-3" id="lecturer_video_section">
<video width="500" height="300" id="lecturer_video" controls>
<source src="#"
type="video/mp4">
Your browser does not support the video tag.
</video>
</div> </div>
<!--end of lecturer video section -->
<!-- ajax loader section -->
<div class="text-center mt-3" id="lecturer_video_progress_loader">
<img src="{% static 'FirstApp/images/ajax-loader-1.gif' %}" alt="loader">
</div>
<!--progress bar section -->
<div class="progress_area" id="lecturer_video_progress" hidden>
<!--sitting -->
<h4 class="small font-weight-bold">Sitting</h4>
<span class="float-right" id="sitting_instant">0%</span>
<div class="progress mb-4">
<div class="progress-bar bg-warning" role="progressbar"
id="sitting_instant_value"
{# style="width: 0%"#}
aria-valuenow="40" aria-valuemin="0" aria-valuemax="100"></div>
</div>
<!--standing -->
<h4 class="small font-weight-bold">Standing</h4>
<span class="float-right" id="standing_instant">0%</span>
<div class="progress mb-4">
<div class="progress-bar" role="progressbar"
id="standing_instant_value"
{# style="width: 0%"#}
aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div>
</div>
<!--walking-->
<h4 class="small font-weight-bold">Walking</h4>
<span class="float-right" id="walking_instant">0%</span>
<div class="progress mb-4">
<div class="progress-bar bg-info" role="progressbar"
id="walking_instant_value"
{# style="width: 80%"#}
aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div>
</div>
</div>
<!--end of progress bar section -->
<!-- video -->
{# <video width="500" height="300" id="lecturer_video" controls>#}
{# <source src="#"#}
{# type="video/mp4">#}
{# Your browser does not support the video tag.#}
{# </video>#}
</div> </div>
<!--end of lecture video section --> <!--end of lecture video section -->
......
{% load static %}
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<meta name="description" content="">
<meta name="author" content="">
<title>SLPES</title>
<!-- Custom fonts for this template-->
<link href="{% static 'FirstApp/vendor/fontawesome-free/css/all.min.css' %}" rel="stylesheet" type="text/css">
<link href="https://fonts.googleapis.com/css?family=Nunito:200,200i,300,300i,400,400i,600,600i,700,700i,800,800i,900,900i"
rel="stylesheet">
<!-- Custom styles for this template-->
<link href="{% static 'FirstApp/css/sb-admin-2.min.css' %}" rel="stylesheet">
</head>
<body class="bg-gradient-primary">
<div class="container">
<!-- Outer Row -->
<div class="row justify-content-center">
<div class="col-xl-10 col-lg-12 col-md-9">
<div class="card o-hidden border-0 shadow-lg my-5">
<div class="card-body p-0">
<!-- Nested Row within Card Body -->
<div class="row">
<div class="col-lg-6 d-none d-lg-block">
<img src="{% static 'FirstApp/images/admin.jpg' %}" width="400" height="600"
alt="No image">
</div>
<div class="col-lg-6">
<div class="p-5">
<div class="text-center">
<h1 class="h4 text-gray-900 mb-4">Welcome Back!</h1>
</div>
<!--form -->
<form action="/process-admin-login" method="POST" name="loginForm" class="user">
{% csrf_token %}
<div class="form-group">
<input type="email" name="email" class="form-control form-control-user"
id="exampleInputEmail" aria-describedby="emailHelp"
placeholder="Enter Email Address...">
</div>
<div class="form-group">
<input type="password" name="password" class="form-control form-control-user"
id="exampleInputPassword" placeholder="Password">
<div class="alert alert-danger m-4">{{ message }}</div>
</div>
<div class="form-group">
<div class="custom-control custom-checkbox small">
<input type="checkbox" class="custom-control-input" id="customCheck">
<label class="custom-control-label" for="customCheck">Remember Me</label>
</div>
</div>
<button type="submit" class="btn btn-primary btn-user btn-block">Login</button>
<hr>
</form>
<hr>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
<!-- Bootstrap core JavaScript-->
<script src="{% static 'FirstApp/vendor/jquery/jquery.min.js' %}"></script>
<script src="{% static 'FirstApp/vendor/bootstrap/js/bootstrap.bundle.min.js' %}"></script>
<!-- Core plugin JavaScript-->
<script src="{% static 'FirstApp/vendor/jquery-easing/jquery.easing.min.js' %}"></script>
<!-- Custom scripts for all pages-->
<script src="{% static 'FirstApp/js/sb-admin-2.min.js' %}"></script>
</body>
</html>
...@@ -29,6 +29,8 @@ ...@@ -29,6 +29,8 @@
var global_lecture_video_id = '';
var global_video_name = '';
var global_lecturer_subject_index = 0;
var global_lecturer_video_name = '';
var lecturer_fps = 0;
//jquery //jquery
$(document).ready(function () { $(document).ready(function () {
...@@ -170,13 +172,14 @@ ...@@ -170,13 +172,14 @@
fetch('http://127.0.0.1:8000/get-lecture-emotion/?lecture_video_id=' + global_lecture_video_id + '&lecture_video_name=' + global_video_name)
.then((res) => res.json())
.then((out) => {
{#let frames = createFrames(out);#}
{#return frames#}
displayActivity(out)
})
.then((obj) => {
{#$('#video_frames').prepend(obj);#}
$('#frame_loader').attr('hidden', true);
{#$('#slidecontainer').attr('hidden', false);#}
})
.catch((error) => alert('this is the error: ' + error));
});
...@@ -216,6 +219,7 @@ ...@@ -216,6 +219,7 @@
//to handle the 'btn-success' (process) button
$(document).on('click', '.btn-success', function (e) {
//sending the POST request to process the lecture activities
fetch('http://127.0.0.1:8000/process-lecture-emotion/?lecture_video_name=' + global_video_name + '&lecture_video_id=' + global_lecture_video_id)
.then((res) => res.json())
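//note: fetch() defaults to the GET method, so this request is actually sent as a GET with query parameters, despite the comment above mentioning a POST request.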
...@@ -232,516 +236,25 @@ ...@@ -232,516 +236,25 @@
}
//this section is responsible for displaying the frames as video
//creating the frame content
function createFrames(res) {
let main_frame_content = "<div class='row' id='main_frames'>";
main_frame_content += "<ul class='list-group list-group-horizontal'>";
let count = 0;
//loop through the frames
res.extracted.map((image) => {
let img_src = "";
let len = image.detections.length;
if (count === 0) {
main_frame_content += "<li class='list-group-item text-center' id='image_0'>";
img_src = "<img src='{% static '' %}FirstApp/activity/" + global_video_name + "/" + res.extracted[0].frame + "/" + res.extracted[0].detections[0] + "' width='400' height='400'>";
} else {
main_frame_content += "<li class='list-group-item other-frames' id='image_" + count + "' hidden>";
img_src = "<img src='{% static '' %}FirstApp/activity/" + global_video_name + "/" + image.frame + "/" + image.detections[len - 1] + "' class='img-link' width='400' height='400'>";
}
main_frame_content += img_src;
main_frame_content += "</li>";
count++;
});
main_frame_content += "</ul>";
main_frame_content += "</div>";
//setting the min, max values of the slider
$('#myActivityRange').attr({'min': 0, 'max': count});
//display the progress bars
displayActivity(res);
return main_frame_content;
}
//declaring the variable for setInterval function
let timeVar = null;
//handling the play button
$('#play_pause_icon_activity').click(function () {
//defining the two possible classes
let play_class = "fas fa-play";
let pause_class = "fas fa-pause";
//retrieving the current icon class
let current_class = $(this).attr('class');
//assigning the correct class based on the icon clicked
let new_class = (current_class === play_class) ? pause_class : play_class;
//setting the new class
$(this).attr('class', new_class);
//handling the slider
let slider = document.getElementById("myActivityRange");
let output = document.getElementById("demo");
//when the button is playing
if (current_class === play_class) {
timeVar = setInterval(() => {
let value = slider.value;
let new_slider_value = Number(value) + 1;
slider.value = new_slider_value;
output.innerHTML = new_slider_value.toString();
let selectedImage = '#image_' + Number(value);
//displaying the relevant image
$('#image_0').html($(selectedImage).html());
}, 50);
}
//when the button is paused
else if (current_class === pause_class) {
clearInterval(timeVar);
}
});
//handling the slider
let slider = document.getElementById("myActivityRange");
let output = document.getElementById("demo");
output.innerHTML = slider.value;
slider.oninput = function () {
output.innerHTML = this.value;
let selectedImage = '#image_' + Number(this.value);
//hide
{#$('#image_0').attr('hidden', true);#}
$('#image_0').html($(selectedImage).html());
//setting the selected image
{#$(selectedImage).attr('hidden', false);#}
};
$(document).on('click', '.img-link', function (e) {
//removing previously displayed detections
$('.detections').remove();
//removing the no-content message
$('#no_detection_message_content').hide();
//appearing the loader
$('#detection_loader').attr('hidden', false);
let img_src_arr = e.target.src.split('/');
let len = img_src_arr.length;
let src = img_src_arr[len - 1];
let frame_name_arr = src.split('.');
let frame_name = frame_name_arr[0];
//fetching the detection for the selected frame
fetch('http://127.0.0.1:8000/get-lecture-activity-frame-detection/?video_name=' + global_video_name + "&frame_name=" + frame_name)
.then((res) => res.json())
.then((out) => displayDetections(out.detections, frame_name))
.catch((error) => alert('this is an error'));
});
//the function to display detections
function displayDetections(detections, frame_name) {
let img_string = '';
let no_of_detections = detections.length;
//disabling the loader
$('#detection_loader').attr('hidden', true);
//appearing the no of detections number area
$('#detection_number_area').attr('hidden', false);
$('#no_of_detections').text(no_of_detections);
detections.map((detection) => {
img_string += "<img src='{% static '' %}FirstApp/activity/" + global_video_name + "/" + frame_name + "/" + detection + "' class='detections m-2' width='100' height='100' >"
});
$('#detection_frames').prepend(img_string);
}
//listening for click events in labels
$('.labels').click(function () {
let label = Number($(this).attr('data-number'));
let label_name = $(this).attr('data-label');
//removing the previous student detection lists
$('.student_detection_lists').remove();
//appearing the loader
$('#detection_student_loader').attr('hidden', false);
//appearing the loader
$('#activity_type').attr('hidden', false);
$('#activity_type_text').text(label_name);
//disappearing the no content message
$('#no_detection_student_content').attr('hidden', true);
//fetching from the api
fetch('http://127.0.0.1:8000/get-lecture-activity-detection-for-label/?video_name=' + global_video_name + '&label=' + label)
.then((res) => res.json())
.then((out) => createDetectedStudentFrames(out))
.catch((error) => alert('this is the error: ' + error))
});
//creating the detected students frames
function createDetectedStudentFrames(detections) {
let htmlString = "";
//iterating through the student
detections.people.map((student) => {
let title = student.split('.')[0];
let images = "";
htmlString += "<div class='row p-3 student-detection-rows'>";
let student_count = 0;
//iterating through the frames
detections.response.map((frame) => {
let frame_detections = frame.detections;
if (frame_detections.includes(student)) {
if (student_count === 0) {
images += "<li class='list-group-item frame-0' id='image_0_" + title + "'>";
} else {
images += "<li class='list-group-item other-student-frames' id='image_" + student_count + "_" + title + "' hidden>";
}
images += "<img src='{% static '' %}FirstApp/Activity/" + global_video_name + "/" + frame.frame + "/" + student + "' width='200' height='200'>";
images += "</li>";
//increment the student count
student_count++;
}
});
htmlString += "<h6 class='font-italic'>" + title + "</h6>";
htmlString += "<ul class='list-group list-group-horizontal student_detection_lists' style='overflow-x: scroll'>";
htmlString += images;
htmlString += "</ul>";
htmlString += "<div class='slidecontainer'>";
htmlString += "<div class='row m-3'></div>";
htmlString += "<div class='row'>";
htmlString += "<span><i class='fas fa-play play-pause-icon-student-frames' id='icon_" + title + "'></i></span>";
htmlString += "</div>";
htmlString += "<input type='range' min='1' max='100' value='0' class='slider' id='slider_" + title + "'>";
htmlString += "<p>No of frames: <span id='demo_" + title + "'></span></p>";
htmlString += "</div>";
htmlString += "</div>";
});
//disappearing the loader
$('#detection_student_loader').attr('hidden', true);
//append to the relevant html card content
$('#detection_students').append(htmlString);
}
let studentTimeVar = null;
//playing the frames for each student detection (by label)
$(document).on('click', '.play-pause-icon-student-frames', function (e) {
//defining the two possible classes
let play_class = "fas fa-play play-pause-icon-student-frames";
let pause_class = "fas fa-pause play-pause-icon-student-frames";
//retrieving the current icon class
let current_class = $(this).attr('class');
//assigning the correct class based on the icon clicked
let new_class = (current_class === play_class) ? pause_class : play_class;
//setting the new class
$(this).attr('class', new_class);
//extracting the title of the clicked icon
let title_part = $(this).attr('id');
let title = title_part.split("_")[1];
//handling the slider
let slider = document.getElementById("slider_" + title);
let output = document.getElementById("demo_" + title);
//when the button is playing
if (current_class === play_class) {
studentTimeVar = setInterval(() => {
let value = slider.value;
let new_slider_value = Number(value) + 1;
slider.value = new_slider_value;
output.innerHTML = new_slider_value.toString();
let selectedImage = '#image_' + Number(value) + '_' + title;
//displaying the relevant image
$('#image_0_' + title).html($(selectedImage).html());
}, 100);
}
//when the button is paused
else if (current_class === pause_class) {
clearInterval(studentTimeVar);
}
});
//this is to handle the 'evaluate' button
$('#evaluate_button').click(function () {
//hide the message
$('#no_evaluated_student_content').attr('hidden', true);
//show the loader
$('#evaluate_student_loader').attr('hidden', false);
//using the fetch api
fetch('http://127.0.0.1:8000/get-lecture-emotion-student-evaluation/?video_name=' + global_video_name)
.then((res) => res.json())
.then((out) => evaluate_student(out))
.catch((error) => alert('this is the error: ' + error))
});
//to create html for evaluate function
function evaluate_student(response) {
let htmlString = "";
//iterating through the student
response.people.map((student) => {
let title = student.split('.')[0];
let images = "";
htmlString += "<div class='row p-3 student-evaluation-rows'>";
let student_count = 0;
//iterating through the frames
response.response.map((frame) => {
let frame_detections = frame.detections;
let frame_detection_length = frame_detections.length;
if (frame_detections.includes(student)) {
if (student_count === 0) {
images += "<li class='list-group-item frame-0' id='image_0_evaluation" + title + "'>";
} else {
images += "<li class='list-group-item other-student-frames' id='image_evaluation" + student_count + "_" + title + "' hidden>";
}
images += "<img src='{% static '' %}FirstApp/Activity/" + global_video_name + "/" + frame.frame + "/" + student + "' width='200' height='200'>";
images += "</li>";
if (student_count === (frame_detection_length)) {
images += "<li class='list-group-item'>";
images += "<button type='button' class='btn btn-dark individual-evaluation' id='evaluate_student_" + title + "'>evaluate</button>";
images += "</li>";
}
//increment the student count
student_count++;
}
});
htmlString += "<ul class='list-group'>";
htmlString += "<li class='list-group-item'>";
htmlString += "<div class='row m-3'>";
htmlString += "<h4 class='font-weight-bold'>Student ID: <span>" + title + "</span></h4>";
htmlString += "</div>";
htmlString += "</li>";
{#htmlString += "<div class='row m-3'></div>";#}
htmlString += "<li class='list-group-item'>";
htmlString += "<div class='row'>";
htmlString += "<ul class='list-group list-group-horizontal student_detection_lists' style='overflow-x: scroll'>";
htmlString += images;
htmlString += "</ul>";
htmlString += "</div>";
htmlString += "</li>";
htmlString += "<li class='list-group-item'>";
htmlString += "<div class='slidecontainer'>";
htmlString += "<div class='row m-3'></div>";
htmlString += "<div class='row'>";
htmlString += "<span><i class='fas fa-play play-pause-icon-student-evaluations' id='icon_" + title + "'></i></span>";
htmlString += "</div>";
htmlString += "<input type='range' min='1' max='100' value='0' class='slider' id='slider_evaluation" + title + "'>";
htmlString += "<p>No of frames: <span id='demo_evaluation" + title + "'></span></p>";
htmlString += "</div>";
htmlString += "</div>";
htmlString += "</li>";
htmlString += "</ul>";
});
//disappearing the loader
$('#evaluate_student_loader').attr('hidden', true);
//append to the relevant html card content
$('#evaluation_students').append(htmlString);
}
let studentEvaluationVar = null;
//playing the frames for each student evaluation
$(document).on('click', '.play-pause-icon-student-evaluations', function (e) {
//defining the two possible classes
let play_class = "fas fa-play play-pause-icon-student-evaluations";
let pause_class = "fas fa-pause play-pause-icon-student-evaluations";
//retrieving the current icon class
let current_class = $(this).attr('class');
//assigning the correct class based on the icon clicked
let new_class = (current_class === play_class) ? pause_class : play_class;
//setting the new class
$(this).attr('class', new_class);
//extracting the title of the clicked icon
let title_part = $(this).attr('id');
let title = title_part.split("_")[1];
//handling the slider
let slider = document.getElementById("slider_evaluation" + title);
let output = document.getElementById("demo_evaluation" + title);
//when the button is playing
if (current_class === play_class) {
studentEvaluationVar = setInterval(() => {
let value = slider.value;
let new_slider_value = Number(value) + 1;
slider.value = new_slider_value;
output.innerHTML = new_slider_value.toString();
let selectedImage = '#image_evaluation' + Number(value) + '_' + title;
//displaying the relevant image
$('#image_0_evaluation' + title).html($(selectedImage).html());
}, 100);
}
//when the button is paused
else if (current_class === pause_class) {
clearInterval(studentEvaluationVar);
}
});
//to evaluate the individual student
$(document).on('click', '.individual-evaluation', function (e) {
let individual_id = $(this).attr('id');
let student_name = individual_id.split('_')[2];
student_name += ".png";
let html = $(this).html();
//after clicking, change the html
$(this).html("<span class='font-italic'>loading...</span>");
//fetching from the API
fetch('http://127.0.0.1:8000/get-lecture-emotion-individual-student-evaluation/?video_name=' + global_video_name + '&student_name=' + student_name)
.then((res) => res.json())
.then((out) => displayIndividualStudentEmotion(out.response, e, student_name))
.catch((error) => alert('something went wrong'));
//after 5 seconds, replace with the original html
/*
setTimeout(() => {
$(this).html(html);
//open the student individual modal
$('#student_individual_modal').modal();
}, 5000);
*/
});
//this function will display the individual student emotions
function displayIndividualStudentEmotion(res, e, title) {
//set the percentage values
$('#happy_individual_perct').text(res.happy_perct + '%');
$('#sad_individual_perct').text(res.sad_perct + '%');
$('#anger_individual_perct').text(res.angry_perct + '%');
$('#surprise_individual_perct').text(res.surprise_perct + '%');
$('#neutral_individual_perct').text(res.neutral_perct + '%');
//set the width
$('#happy_individual_width').width(res.happy_perct + '%');
$('#sad_individual_width').width(res.sad_perct + '%');
$('#anger_individual_width').width(res.angry_perct + '%');
$('#surprise_individual_width').width(res.surprise_perct + '%');
$('#neutral_individual_width').width(res.neutral_perct + '%');
//open the student individual modal
$('#student_individual_modal').modal();
//set the button to default
e.target.innerHTML = "<span>evaluate</span>";
}
//to handle the 'integrate' modal
$('#integrate_emotion').click(function () {
//define the student video src
let video_src = "{% static '' %}FirstApp/videos/" + global_video_name;
{#global_lecturer_video_name = "Test_1.mp4";#}
{#global_lecturer_video_name = "Test_2.mp4";#}
global_lecturer_video_name = "Test_3.mp4";
//define the lecturer video src
let lecturer_video_src = "{% static '' %}FirstApp/lecturer_videos/" + global_lecturer_video_name;
//assign the student video src
$('#student_video').attr('src', video_src);
//assign the lecturer video src
$('#lecturer_video').attr('src', lecturer_video_src);
$('#integrate_modal').modal();
...@@ -753,7 +266,6 @@ ...@@ -753,7 +266,6 @@
.catch((err) => alert('error: ' + err)); .catch((err) => alert('error: ' + err));
}); });
...@@ -779,39 +291,39 @@ ...@@ -779,39 +291,39 @@
//append to the html string
//Happy
htmlString += "<div class='progress_area' id='progress_" + frame_name + "' hidden>";
htmlString += "<h4 class='small font-weight-bold'>Happy</h4>";
htmlString += "<span class='float-right' id='happy_instant_" + frame_name + "'>" + happy_perct + "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-warning' role='progressbar' id='phone_checking_instant_value_" + frame_name + "' style='width: " + happy_perct + "%' aria-valuenow='40' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>";
//Sad
htmlString += "<h4 class='small font-weight-bold'>Sad</h4>";
htmlString += "<span class='float-right' id='note_taking_instant_" + frame_name + "'>" + sad_perct + "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar' role='progressbar' id='note_taking_instant_value_" + frame_name + "' style='width: " + sad_perct + "%' aria-valuenow='0' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>";
//Angry
htmlString += "<h4 class='small font-weight-bold'>Angry</h4>";
htmlString += "<span class='float-right' id='listening_instant_" + frame_name + "'>" + angry_perct + "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-info' role='progressbar' id='listening_instant_value_" + frame_name + "' style='width: " + angry_perct + "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>";
//Neutral
htmlString += "<h4 class='small font-weight-bold'>Neutral</h4>";
htmlString += "<span class='float-right' id='note_taking_instant_" + frame_name + "'>" + neutral_perct + "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar' role='progressbar' id='note_taking_instant_value_" + frame_name + "' style='width: " + neutral_perct + "%' aria-valuenow='0' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>";
//Surprise
htmlString += "<h4 class='small font-weight-bold'>Surprise</h4>";
htmlString += "<span class='float-right' id='listening_instant_" + frame_name + "'>" + surprise_perct + "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-info' role='progressbar' id='listening_instant_value_" + frame_name + "' style='width: " + surprise_perct + "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>";
//ending the progress area
...@@ -821,25 +333,92 @@ ...@@ -821,25 +333,92 @@
//append the html
$('#student_video_column').append(htmlString);
//start retrieving lecturer activity frame recognition
fetch('http://127.0.0.1:8000/lecturer/get-lecturer-video-frame-recognitions/?video_name=' + global_lecturer_video_name)
.then((res) => res.json())
.then((out) => displayLecturerEmotionRecognitionForFrame(out))
.catch((err) => alert('error: ' + err))
}
//this function will load the activity recognition for frames
function displayLecturerEmotionRecognitionForFrame(response) {
//hide the loader
$('#lecturer_video_progress_loader').attr('hidden', true);
//show the progress bars
$('#lecturer_video_progress').attr('hidden', false);
//creating the html string
let htmlString = "";
let duration = 1000 / response.fps;
lecturer_fps = Math.round(duration, 0);
console.log('lecturer fps: ', lecturer_fps);
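//note: despite its name, 'lecturer_fps' holds the per-frame delay in milliseconds (1000 / fps),
//which is used below as the setInterval() delay when stepping through the lecturer progress bars; Math.round() only uses its first argument.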
//creating the html string, iteratively
response.frame_recognitions.map((frame) => {
let frame_name = frame.frame_name;
let sitting_perct = Math.round(frame.sitting_perct, 0);
let standing_perct = Math.round(frame.standing_perct, 0);
{#let listen_perct = Math.round(frame.listening_perct, 0);#}
let walking_perct = Math.round(frame.walking_perct, 0);
//append to the html string
//sitting
htmlString += "<div class='progress_area' id='progress_lecturer_" + frame_name + "' hidden>";
htmlString += "<h4 class='small font-weight-bold'>Sitting</h4>";
htmlString += "<span class='float-right' id='sitting_instant_" + frame_name + "'>" + sitting_perct + "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-warning' role='progressbar' id='sitting_instant_value_" + frame_name + "' style='width: " + sitting_perct + "%' aria-valuenow='40' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>";
//standing
htmlString += "<h4 class='small font-weight-bold'>Standing</h4>";
htmlString += "<span class='float-right' id='standing_instant_" + frame_name + "'>" + standing_perct + "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar' role='progressbar' id='standing_instant_value_" + frame_name + "' style='width: " + standing_perct + "%' aria-valuenow='0' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>";
//walking
htmlString += "<h4 class='small font-weight-bold'>Walking</h4>";
htmlString += "<span class='float-right' id='walking_instant_" + frame_name + "'>" + walking_perct + "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-info' role='progressbar' id='walking_instant_value_" + frame_name + "' style='width: " + walking_perct + "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>";
//ending the progress area
htmlString += "</div>";
});
//append the html
$('#lecturer_video_column').append(htmlString);
} }
//to handle the 'integrate' play button
$('#play_integrate_button').click(function () {
let video = $('video')[0];
let video1 = $('video')[1];
let test_video = document.getElementsByTagName('video')[0];
let play_class = 'btn btn-outline-danger play';
let pause_class = 'btn btn-outline-danger pause';
let count = 0;
let count_lecturer = 0;
let classes = $(this).attr('class');

let video_interval = setInterval(() => {
let talking_number = Math.round(Math.random() * 100, 0);
let phone_number = Math.round(Math.random() * 100, 0);
let note_number = Math.round(Math.random() * 100, 0);
let listening_number = Math.round(Math.random() * 100, 0);

//=====STUDENTS COLUMN=====

//get the relevant progress area
let progress_area = "progress_frame-" + count;
...@@ -857,32 +436,49 @@
//increment the count
count++;
//setting the values
/*
$('#talking_instant').text(talking_number + '%');
$('#phone_checking_instant').text(phone_number + '%');
$('#note_taking_instant').text(note_number + '%');
$('#listening_instant').text(listening_number + '%');
//setting the width
$('#talking_instant_value').width(talking_number + '%');
$('#phone_checking_instant_value').width(phone_number + '%');
$('#note_taking_instant_value').width(note_number + '%');
$('#listening_instant_value').width(listening_number + '%');
*/
}, 33);

let video_interval_lecturer = setInterval(() => {

//=====LECTURER COLUMN=====

//get the relevant progress area
let progress_area_lecturer = "progress_lecturer_frame-" + count_lecturer;
let progress_area_id_lecturer = "#" + progress_area_lecturer;
//find the corresponding progress area
let progress_area_html_lecturer = document.getElementById(progress_area_lecturer);
//display the retrieved progress area
$(progress_area_id_lecturer).attr('hidden', false);
//replace the current progress area with the selected one
$('#lecturer_video_progress').html(progress_area_html_lecturer);
//increment the count
count_lecturer++;
console.log('current frame (lecturer): ', count_lecturer);
}, lecturer_fps);
//check for the current class
if (classes === play_class) {
$(this).text('Pause');
$(this).attr('class', pause_class);
video.play();
video1.play();
} else if (classes === pause_class) {
$(this).text('Play');
$(this).attr('class', play_class);
video.pause();
video1.pause();
}
//function to do when the video is paused //function to do when the video is paused
...@@ -892,6 +488,12 @@ ...@@ -892,6 +488,12 @@
video.onended = function (e) {
//stop changing the activity values
clearInterval(video_interval);
};
//function to do when the video is ended
video1.onended = function (e) {
//stop changing the activity values
clearInterval(video_interval_lecturer);
}
});
...@@ -1063,31 +665,15 @@ ...@@ -1063,31 +665,15 @@
<!--temporary text --> <!--temporary text -->
<span class="font-italic" id="temporary_text">Frame will be displayed here</span> <span class="font-italic" id="temporary_text">Frame will be displayed here</span>
<!--loading buffer area--> <!--loading buffer area-->
<div class="text-center" id="frame_loader" hidden> <div class="text-center" id="frame_loader" hidden>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}" <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"
alt="Loader"> alt="Loader">
</div> </div>
<!--frames -->.
<div class="text-center p-4" id="video_frames">
<!-- slide container -->
<div id="slidecontainer" hidden>
<div class="row m-3"></div>
<!-- play/pause icon -->
<div class="row">
<span><i class="fas fa-play"
id="play_pause_icon_activity"></i></span>
</div>
<input type="range" min="1" max="100" value="0" class="slider"
id="myActivityRange">
<p>No of frames: <span id="demo"></span></p>
</div>
</div>
</div>
<!--this area will display the progress bars --> <!--this area will display the progress bars -->
<div class="progress_area" hidden> <div class="progress_area mt-4" hidden>
<!--Happy --> <!--Happy -->
<a href="#" class="btn btn-link labels" data-number="1" <a href="#" class="btn btn-link labels" data-number="1"
data-label="Happy"> data-label="Happy">
...@@ -1098,7 +684,8 @@ ...@@ -1098,7 +684,8 @@
<div class="progress-bar bg-danger" role="progressbar" <div class="progress-bar bg-danger" role="progressbar"
id="happy_width" id="happy_width"
style="width: 20%" style="width: 20%"
aria-valuenow="20" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="20" aria-valuemin="0"
aria-valuemax="100"></div>
</div> </div>
<!--sad --> <!--sad -->
...@@ -1111,7 +698,8 @@ ...@@ -1111,7 +698,8 @@
<div class="progress-bar bg-warning" role="progressbar" <div class="progress-bar bg-warning" role="progressbar"
id="sad_width" id="sad_width"
style="width: 40%" style="width: 40%"
aria-valuenow="40" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="40" aria-valuemin="0"
aria-valuemax="100"></div>
</div> </div>
<!--anger --> <!--anger -->
...@@ -1123,7 +711,8 @@ ...@@ -1123,7 +711,8 @@
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar" role="progressbar" id="anger_width" <div class="progress-bar" role="progressbar" id="anger_width"
style="width: 60%" style="width: 60%"
aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="60" aria-valuemin="0"
aria-valuemax="100"></div>
</div> </div>
<!--surprise--> <!--surprise-->
...@@ -1135,7 +724,8 @@ ...@@ -1135,7 +724,8 @@
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar bg-info" role="progressbar" <div class="progress-bar bg-info" role="progressbar"
id="surprise_width" style="width: 80%" id="surprise_width" style="width: 80%"
aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="80" aria-valuemin="0"
aria-valuemax="100"></div>
</div> </div>
<!--neutral--> <!--neutral-->
...@@ -1147,16 +737,17 @@ ...@@ -1147,16 +737,17 @@
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar bg-info" role="progressbar" <div class="progress-bar bg-info" role="progressbar"
id="neutral_width" style="width: 80%" id="neutral_width" style="width: 80%"
aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="80" aria-valuemin="0"
aria-valuemax="100"></div>
</div> </div>
{# <!--evaluate button -->#}
{# <button type="button" class="btn btn-danger float-right"#}
{# id="evaluate_button">Evaluate#}
{# </button>#}
</div> </div>
</div>
</div> </div>
<!--graph tab --> <!--graph tab -->
...@@ -1214,63 +805,63 @@ ...@@ -1214,63 +805,63 @@
<!--2nd column --> <!--2nd column -->
{# <div class="col-lg-6">#} {# <div class="col-lg-6">#}
{# <!--card content -->#} {# <!--card content -->#}
{# <div class="card shadow mb-4">#} {# <div class="card shadow mb-4">#}
{# <!--card header -->#} {# <!--card header -->#}
{# <div class="card-header py-3">#} {# <div class="card-header py-3">#}
{# <h5 class="m-0 font-weight-bold text-primary">Frame Detections</h5>#} {# <h5 class="m-0 font-weight-bold text-primary">Frame Detections</h5>#}
{# </div>#} {# </div>#}
{##} {##}
{# <!--card body -->#} {# <!--card body -->#}
{# <div class="text-center p-4" id="detection_frames">#} {# <div class="text-center p-4" id="detection_frames">#}
{##} {##}
{# <!--no content message-->#} {# <!--no content message-->#}
{# <div class="text-center p-2" id="no_detection_message_content">#} {# <div class="text-center p-2" id="no_detection_message_content">#}
{# <span class="font-italic">No frame is selected</span>#} {# <span class="font-italic">No frame is selected</span>#}
{# </div>#} {# </div>#}
{##} {##}
{# <div class="text-left m-3" id="detection_number_area" hidden>#} {# <div class="text-left m-3" id="detection_number_area" hidden>#}
{# <p>No of detections: <span id="no_of_detections"></span></p>#} {# <p>No of detections: <span id="no_of_detections"></span></p>#}
{# </div>#} {# </div>#}
{# <!--the detection loader -->#} {# <!--the detection loader -->#}
{# <div class="text-center p-2" id="detection_loader" hidden>#} {# <div class="text-center p-2" id="detection_loader" hidden>#}
{# <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"#} {# <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"#}
{# alt="Loader">#} {# alt="Loader">#}
{# </div>#} {# </div>#}
{# </div>#} {# </div>#}
{# </div>#} {# </div>#}
{##} {##}
{# <!--detection person card -->#} {# <!--detection person card -->#}
{# <div class="card shadow mb-4">#} {# <div class="card shadow mb-4">#}
{# <!--card header -->#} {# <!--card header -->#}
{# <div class="card-header py-3">#} {# <div class="card-header py-3">#}
{# <h5 class="m-0 font-weight-bold text-primary">Detected Students (by emotion#} {# <h5 class="m-0 font-weight-bold text-primary">Detected Students (by emotion#}
{# type)</h5>#} {# type)</h5>#}
{# </div>#} {# </div>#}
{##} {##}
{# <!--card body -->#} {# <!--card body -->#}
{# <div class="text-center p-4" id="detection_students">#} {# <div class="text-center p-4" id="detection_students">#}
{# <!--activity type line -->#} {# <!--activity type line -->#}
{# <div class="text-center p-2" id="activity_type" hidden>#} {# <div class="text-center p-2" id="activity_type" hidden>#}
{# <p>Activity Type: <span class="font-weight-bold" id="activity_type_text"></span>#} {# <p>Activity Type: <span class="font-weight-bold" id="activity_type_text"></span>#}
{# </p>#} {# </p>#}
{# </div>#} {# </div>#}
{##} {##}
{# <!--no content message-->#} {# <!--no content message-->#}
{# <div class="text-center p-2" id="no_detection_student_content">#} {# <div class="text-center p-2" id="no_detection_student_content">#}
{# <span class="font-italic">No activity type is selected</span>#} {# <span class="font-italic">No activity type is selected</span>#}
{# </div>#} {# </div>#}
{##} {##}
{# <!--the detection student loader -->#} {# <!--the detection student loader -->#}
{# <div class="text-center p-2" id="detection_student_loader" hidden>#} {# <div class="text-center p-2" id="detection_student_loader" hidden>#}
{# <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"#} {# <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"#}
{# alt="Loader">#} {# alt="Loader">#}
{# </div>#} {# </div>#}
{##} {##}
{# </div>#} {# </div>#}
{# </div>#} {# </div>#}
{# </div>#} {# </div>#}
</div> </div>
...@@ -1280,36 +871,36 @@ ...@@ -1280,36 +871,36 @@
<div class="row p-2"> <div class="row p-2">
<!--1st column --> <!--1st column -->
{# <div class="col-lg-6">#} {# <div class="col-lg-6">#}
{# <!--card -->#} {# <!--card -->#}
{# <div class="card shadow mb-4">#} {# <div class="card shadow mb-4">#}
{# <!--card header -->#} {# <!--card header -->#}
{# <div class="card-header">#} {# <div class="card-header">#}
{# <h5 class="m-0 font-weight-bold text-primary">Evaluated Students</h5>#} {# <h5 class="m-0 font-weight-bold text-primary">Evaluated Students</h5>#}
{# </div>#} {# </div>#}
{##} {##}
{# <!--card body -->#} {# <!--card body -->#}
{# <div class="card-body" id="evaluation_students">#} {# <div class="card-body" id="evaluation_students">#}
{##} {##}
{# <!--no content message-->#} {# <!--no content message-->#}
{# <div class="text-center p-2" id="no_evaluated_student_content">#} {# <div class="text-center p-2" id="no_evaluated_student_content">#}
{# <span class="font-italic">Press 'Evaluate' button to evaluate students</span>#} {# <span class="font-italic">Press 'Evaluate' button to evaluate students</span>#}
{# </div>#} {# </div>#}
{##} {##}
{# <!--the detection student loader -->#} {# <!--the detection student loader -->#}
{# <div class="text-center p-2" id="evaluate_student_loader" hidden>#} {# <div class="text-center p-2" id="evaluate_student_loader" hidden>#}
{# <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"#} {# <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"#}
{# alt="Loader">#} {# alt="Loader">#}
{# </div>#} {# </div>#}
{# <!--end of student detection loader -->#} {# <!--end of student detection loader -->#}
{##} {##}
{##} {##}
{# </div>#} {# </div>#}
{##} {##}
{# </div>#} {# </div>#}
{##} {##}
{##} {##}
{# </div>#} {# </div>#}
<!--end of 1st column --> <!--end of 1st column -->
...@@ -1330,7 +921,7 @@ ...@@ -1330,7 +921,7 @@
<!--button --> <!--button -->
<div class="text-right m-4"> <div class="text-right m-4">
<button type="button" class="btn btn-outline-success" id="integrate_activity"> <button type="button" class="btn btn-outline-success" id="integrate_emotion">
Process Process
</button> </button>
</div> </div>
...@@ -1529,7 +1120,7 @@ ...@@ -1529,7 +1120,7 @@
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar bg-danger" role="progressbar" <div class="progress-bar bg-danger" role="progressbar"
id="happy_instant_value" id="happy_instant_value"
{# style="width: 0%"#} {# style="width: 0%"#}
aria-valuenow="20" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="20" aria-valuemin="0" aria-valuemax="100"></div>
</div> </div>
...@@ -1540,7 +1131,7 @@ ...@@ -1540,7 +1131,7 @@
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar bg-warning" role="progressbar" <div class="progress-bar bg-warning" role="progressbar"
id="sad_instant_value" id="sad_instant_value"
{# style="width: 0%"#} {# style="width: 0%"#}
aria-valuenow="40" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="40" aria-valuemin="0" aria-valuemax="100"></div>
</div> </div>
...@@ -1551,7 +1142,7 @@ ...@@ -1551,7 +1142,7 @@
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar" role="progressbar" <div class="progress-bar" role="progressbar"
id="angry_instant_value" id="angry_instant_value"
{# style="width: 0%"#} {# style="width: 0%"#}
aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div>
</div> </div>
...@@ -1562,7 +1153,7 @@ ...@@ -1562,7 +1153,7 @@
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar bg-info" role="progressbar" <div class="progress-bar bg-info" role="progressbar"
id="neutral_instant_value" id="neutral_instant_value"
{# style="width: 80%"#} {# style="width: 80%"#}
aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div>
</div> </div>
...@@ -1574,7 +1165,7 @@ ...@@ -1574,7 +1165,7 @@
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar bg-info" role="progressbar" <div class="progress-bar bg-info" role="progressbar"
id="surprise_instant_value" id="surprise_instant_value"
{# style="width: 80%"#} {# style="width: 80%"#}
aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div>
</div> </div>
...@@ -1583,25 +1174,71 @@ ...@@ -1583,25 +1174,71 @@
</div> </div>
<!--end of 1st column --> <!--end of 1st column -->
<!--2nd column -->
<div class="col-md-6"> <!-- 2nd column -->
<div class="col-md-6" id="lecturer_video_column">
<div class="text-center"> <div class="text-center">
<span class="h3 font-italic font-weight-bold">Lecturer Performance</span> <span class="h3 font-italic font-weight-bold">Lecturer Performance</span>
</div> </div>
<!--display lecture video --> <!--display lecture video -->
<div class="text-center m-3" id="lecturer_video_section"> <div class="text-center m-3" id="lecturer_video_section">
<!--temporary text -->
<div class="text-center" id="temp_lecturer_text">
<span class="font-italic">No video was found</span>
</div>
<!--display lecturer video -->
<div class="text-center m-3" id="lecturer_video_section">
<video width="500" height="300" id="lecturer_video" controls> <video width="500" height="300" id="lecturer_video" controls>
<source src="#" <source src="#"
type="video/mp4"> type="video/mp4">
Your browser does not support the video tag. Your browser does not support the video tag.
</video> </video>
</div>
<!--end of lecturer video section -->
<!-- ajax loader section -->
<div class="text-center mt-3" id="lecturer_video_progress_loader">
<img src="{% static 'FirstApp/images/ajax-loader-1.gif' %}" alt="loader">
</div>
<!--progress bar section -->
<div class="progress_area" id="lecturer_video_progress" hidden>
<!--sitting -->
<h4 class="small font-weight-bold">Sitting</h4>
<span class="float-right" id="sitting_instant">0%</span>
<div class="progress mb-4">
<div class="progress-bar bg-warning" role="progressbar"
id="sitting_instant_value"
{# style="width: 0%"#}
aria-valuenow="40" aria-valuemin="0" aria-valuemax="100"></div>
</div>
<!--standing -->
<h4 class="small font-weight-bold">Standing</h4>
<span class="float-right" id="standing_instant">0%</span>
<div class="progress mb-4">
<div class="progress-bar" role="progressbar"
id="standing_instant_value"
{# style="width: 0%"#}
aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div>
</div>
<!--walking-->
<h4 class="small font-weight-bold">Walking</h4>
<span class="float-right" id="walking_instant">0%</span>
<div class="progress mb-4">
<div class="progress-bar bg-info" role="progressbar"
id="walking_instant_value"
{# style="width: 80%"#}
aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div>
</div>
</div>
<!--end of progress bar section -->
</div> </div>
<!--end of lecture video section --> <!--end of lecture video section -->
......
...@@ -29,6 +29,8 @@ ...@@ -29,6 +29,8 @@
var global_lecture_video_id = ''; var global_lecture_video_id = '';
var global_video_name = ''; var global_video_name = '';
var global_lecturer_subject_index = 0; var global_lecturer_subject_index = 0;
var global_lecturer_video_name = '';
var lecturer_fps = 0;
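    //lecturer_fps will hold the per-frame display interval (in milliseconds) for the lecturer progress bars, derived from the lecturer video's frame rate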
//jquery //jquery
$(document).ready(function () { $(document).ready(function () {
...@@ -169,13 +171,14 @@ ...@@ -169,13 +171,14 @@
fetch('http://127.0.0.1:8000/get-lecture-gaze-estimation/?lecture_video_id=' + global_lecture_video_id + '&lecture_video_name=' + global_video_name) fetch('http://127.0.0.1:8000/get-lecture-gaze-estimation/?lecture_video_id=' + global_lecture_video_id + '&lecture_video_name=' + global_video_name)
.then((res) => res.json()) .then((res) => res.json())
.then((out) => { .then((out) => {
let frames = createFrames(out); {#let frames = createFrames(out);#}
return frames {#return frames#}
displayGazeEstimation(out)
}) })
.then((obj) => { .then((obj) => {
$('#video_frames').prepend(obj); {#$('#video_frames').prepend(obj);#}
$('#frame_loader').attr('hidden', true); $('#frame_loader').attr('hidden', true);
$('#slidecontainer').attr('hidden', false); {#$('#slidecontainer').attr('hidden', false);#}
}) })
.catch((error) => alert('this is the error: ' + error)); .catch((error) => alert('this is the error: ' + error));
}); });
...@@ -231,55 +234,26 @@ ...@@ -231,55 +234,26 @@
} }
//this section is responsible for displaying the frames as video
//creating the frame content
function createFrames(res) {
let main_frame_content = "<div class='row' id='main_frames'>";
main_frame_content += "<ul class='list-group list-group-horizontal'>";
let count = 0;
//loop through the frames
res.extracted.map((image) => {
let img_src = "";
if (count === 0) {
main_frame_content += "<li class='list-group-item text-center' id='image_0'>";
img_src = "<img src='{% static '' %}FirstApp/gaze/" + global_video_name + "/" + res.extracted[0] + "' width='400' height='400'>";
} else {
main_frame_content += "<li class='list-group-item other-frames' id='image_" + count + "' hidden>";
img_src = "<img src='{% static '' %}FirstApp/gaze/" + global_video_name + "/" + image + "' class='img-link' width='400' height='400'>";
}
main_frame_content += img_src;
main_frame_content += "</li>";
count++;
});
main_frame_content += "</ul>";
main_frame_content += "</div>";
//setting the min, max values of the slider
$('#myActivityRange').attr({'min': 0, 'max': count});
//display the progress bars
displayGazeEstimation(res);
return main_frame_content;
}
//to handle the 'integrate' modal //to handle the 'integrate' modal
$('#integrate_activity').click(function () { $('#integrate_gaze').click(function () {
//define the student video src //define the student video src
let video_src = "{% static '' %}FirstApp/videos/" + global_video_name; let video_src = "{% static '' %}FirstApp/videos/" + global_video_name;
{#global_lecturer_video_name = "Test_1.mp4";#}
{#global_lecturer_video_name = "Test_2.mp4";#}
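                //NOTE: the lecturer video name is hard-coded to a sample clip for now; the commented lines above are alternative test clips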
global_lecturer_video_name = "Test_3.mp4";
//define the lecturer video src
let lecturer_video_src = "{% static '' %}FirstApp/lecturer_videos/" + global_lecturer_video_name;
//assign the video src //assign the video src
$('#student_video').attr('src', video_src); $('#student_video').attr('src', video_src);
$('#integrate_modal').modal(); //assign the video src
$('#lecturer_video').attr('src', lecturer_video_src);
$('#integrate_modal').modal();
//fetch data from the API //fetch data from the API
fetch('http://127.0.0.1:8000/get-lecture-gaze-estimation-for-frame/?video_name=' + global_video_name) fetch('http://127.0.0.1:8000/get-lecture-gaze-estimation-for-frame/?video_name=' + global_video_name)
...@@ -312,43 +286,42 @@ ...@@ -312,43 +286,42 @@
//append to the html string //append to the html string
//looking up and right //looking up and right
htmlString += "<div class='progress_area' id='progress_" +frame_name+ "' hidden>"; htmlString += "<div class='progress_area' id='progress_" + frame_name + "' hidden>";
htmlString += "<h4 class='small font-weight-bold'>Looking up and right</h4>"; htmlString += "<h4 class='small font-weight-bold'>Looking up and right</h4>";
htmlString += "<span class='float-right' id='look_up_right_instant_" +frame_name+ "'>" +look_up_right+ "%</span>"; htmlString += "<span class='float-right' id='look_up_right_instant_" + frame_name + "'>" + look_up_right + "%</span>";
htmlString += "<div class='progress mb-4'>"; htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-warning' role='progressbar' id='look_up_right_instant_value_" +frame_name+ "' style='width: " +look_up_right+ "%' aria-valuenow='40' aria-valuemin='0' aria-valuemax='100'></div>"; htmlString += "<div class='progress-bar bg-warning' role='progressbar' id='look_up_right_instant_value_" + frame_name + "' style='width: " + look_up_right + "%' aria-valuenow='40' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>"; htmlString += "</div>";
//looking up and left //looking up and left
htmlString += "<h4 class='small font-weight-bold'>Looking up and left</h4>"; htmlString += "<h4 class='small font-weight-bold'>Looking up and left</h4>";
htmlString += "<span class='float-right' id='look_up_left_instant_" +frame_name+ "'>" +look_up_left+ "%</span>"; htmlString += "<span class='float-right' id='look_up_left_instant_" + frame_name + "'>" + look_up_left + "%</span>";
htmlString += "<div class='progress mb-4'>"; htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar' role='progressbar' id='look_up_left_instant_value_" +frame_name+ "' style='width: " +look_up_left+ "%' aria-valuenow='0' aria-valuemin='0' aria-valuemax='100'></div>"; htmlString += "<div class='progress-bar' role='progressbar' id='look_up_left_instant_value_" + frame_name + "' style='width: " + look_up_left + "%' aria-valuenow='0' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>"; htmlString += "</div>";
//looking down and right //looking down and right
htmlString += "<h4 class='small font-weight-bold'>Looking down and right</h4>"; htmlString += "<h4 class='small font-weight-bold'>Looking down and right</h4>";
htmlString += "<span class='float-right' id='look_down_right_instant_" +frame_name+ "'>" +look_down_right+ "%</span>"; htmlString += "<span class='float-right' id='look_down_right_instant_" + frame_name + "'>" + look_down_right + "%</span>";
htmlString += "<div class='progress mb-4'>"; htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-info' role='progressbar' id='look_down_right_instant_value_" +frame_name+ "' style='width: " +look_down_right+ "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>"; htmlString += "<div class='progress-bar bg-info' role='progressbar' id='look_down_right_instant_value_" + frame_name + "' style='width: " + look_down_right + "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>"; htmlString += "</div>";
//looking down and left //looking down and left
htmlString += "<h4 class='small font-weight-bold'>Looking down and left</h4>"; htmlString += "<h4 class='small font-weight-bold'>Looking down and left</h4>";
htmlString += "<span class='float-right' id='look_down_left_instant_" +frame_name+ "'>" +look_down_left+ "%</span>"; htmlString += "<span class='float-right' id='look_down_left_instant_" + frame_name + "'>" + look_down_left + "%</span>";
htmlString += "<div class='progress mb-4'>"; htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-info' role='progressbar' id='look_down_left_instant_value_" +frame_name+ "' style='width: " +look_down_left+ "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>"; htmlString += "<div class='progress-bar bg-info' role='progressbar' id='look_down_left_instant_value_" + frame_name + "' style='width: " + look_down_left + "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>"; htmlString += "</div>";
//looking front //looking front
htmlString += "<h4 class='small font-weight-bold'>Looking front</h4>"; htmlString += "<h4 class='small font-weight-bold'>Looking front</h4>";
htmlString += "<span class='float-right' id='look_front_instant_" +frame_name+ "'>" +look_front+ "%</span>"; htmlString += "<span class='float-right' id='look_front_instant_" + frame_name + "'>" + look_front + "%</span>";
htmlString += "<div class='progress mb-4'>"; htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-info' role='progressbar' id='look_front_instant_value_" +frame_name+ "' style='width: " +look_front+ "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>"; htmlString += "<div class='progress-bar bg-info' role='progressbar' id='look_front_instant_value_" + frame_name + "' style='width: " + look_front + "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>"; htmlString += "</div>";
//ending the progress area //ending the progress area
htmlString += "</div>"; htmlString += "</div>";
...@@ -357,25 +330,89 @@ ...@@ -357,25 +330,89 @@
//append the html //append the html
$('#student_video_column').append(htmlString); $('#student_video_column').append(htmlString);
//start retrieving lecturer activity frame recognition
fetch('http://127.0.0.1:8000/lecturer/get-lecturer-video-frame-recognitions/?video_name=' + global_lecturer_video_name)
.then((res) => res.json())
.then((out) => displayLecturerEmotionRecognitionForFrame(out))
.catch((err) => alert('error: ' + err))
}
        //this function will display the lecturer activity recognitions for each frame
function displayLecturerEmotionRecognitionForFrame(response) {
//hide the loader
$('#lecturer_video_progress_loader').attr('hidden', true);
//show the progress bars
$('#lecturer_video_progress').attr('hidden', false);
//creating the html string
let htmlString = "";
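            //derive the per-frame display duration (ms) from the lecturer video's frame rate; this value is used as the interval for the lecturer progress bars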
let duration = 1000 / response.fps;
lecturer_fps = Math.round(duration, 0);
console.log('lecturer fps: ', lecturer_fps);
//creating the html string, iteratively
response.frame_recognitions.map((frame) => {
let frame_name = frame.frame_name;
let sitting_perct = Math.round(frame.sitting_perct, 0);
let standing_perct = Math.round(frame.standing_perct, 0);
{#let listen_perct = Math.round(frame.listening_perct, 0);#}
let walking_perct = Math.round(frame.walking_perct, 0);
//append to the html string
//sitting
htmlString += "<div class='progress_area' id='progress_lecturer_" + frame_name + "' hidden>";
htmlString += "<h4 class='small font-weight-bold'>Sitting</h4>";
htmlString += "<span class='float-right' id='sitting_instant_" + frame_name + "'>" + sitting_perct + "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-warning' role='progressbar' id='sitting_instant_value_" + frame_name + "' style='width: " + sitting_perct + "%' aria-valuenow='40' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>";
//standing
htmlString += "<h4 class='small font-weight-bold'>Standing</h4>";
htmlString += "<span class='float-right' id='standing_instant_" + frame_name + "'>" + standing_perct + "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar' role='progressbar' id='standing_instant_value_" + frame_name + "' style='width: " + standing_perct + "%' aria-valuenow='0' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>";
//walking
htmlString += "<h4 class='small font-weight-bold'>Walking</h4>";
htmlString += "<span class='float-right' id='walking_instant_" + frame_name + "'>" + walking_perct + "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-info' role='progressbar' id='walking_instant_value_" + frame_name + "' style='width: " + walking_perct + "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>";
//ending the progress area
htmlString += "</div>";
});
//append the html
$('#lecturer_video_column').append(htmlString);
} }
//to handle the 'integrate' play button //to handle the 'integrate' play button
$('#play_integrate_button').click(function () { $('#play_integrate_button').click(function () {
let video = $('video')[0]; let video = $('video')[0];
let video1 = $('video')[1];
let test_video = document.getElementsByTagName('video')[0]; let test_video = document.getElementsByTagName('video')[0];
let play_class = 'btn btn-outline-danger play'; let play_class = 'btn btn-outline-danger play';
let pause_class = 'btn btn-outline-danger pause'; let pause_class = 'btn btn-outline-danger pause';
let count = 0; let count = 0;
let count_lecturer = 0;
let classes = $(this).attr('class'); let classes = $(this).attr('class');
let video_interval = setInterval(() => { let video_interval = setInterval(() => {
{#let talking_number = Math.round(Math.random() * 100, 0);#}
{#let phone_number = Math.round(Math.random() * 100, 0);#} //=====STUDENTS COLUMN=====
{#let note_number = Math.round(Math.random() * 100, 0);#}
{#let listening_number = Math.round(Math.random() * 100, 0);#}
//get the relevant progress area //get the relevant progress area
let progress_area = "progress_frame-" + count; let progress_area = "progress_frame-" + count;
...@@ -393,33 +430,49 @@ ...@@ -393,33 +430,49 @@
//increment the count //increment the count
count++; count++;
//setting the values
{#$('#looking_up_right_instant_perct').text(talking_number + '%');#} }, 33);
{#$('#looking_up_left_instant_perct').text(phone_number + '%');#}
{#$('#looking_down_right_instant_perct').text(note_number + '%');#}
{#$('#looking_down_left_instant_perct').text(listening_number + '%');#}
{#$('#looking_front_instant_perct').text(listening_number + '%');#}
{##}
{#//setting the width#}
{#$('#talking_instant_value').width(talking_number + '%');#}
{#$('#phone_checking_instant_value').width(phone_number + '%');#}
{#$('#note_taking_instant_value').width(note_number + '%');#}
{#$('#listening_instant_value').width(listening_number + '%');#}
}, 33); let video_interval_lecturer = setInterval(() => {
//=====LECTURER COLUMN=====
//get the relevant progress area
let progress_area_lecturer = "progress_lecturer_frame-" + count_lecturer;
let progress_area_id_lecturer = "#" + progress_area_lecturer;
//find the corresponding progress area
let progress_area_html_lecturer = document.getElementById(progress_area_lecturer);
//display the retrieved progress area
$(progress_area_id_lecturer).attr('hidden', false);
//replace the current progress area with the selected one
$('#lecturer_video_progress').html(progress_area_html_lecturer);
//increment the count
count_lecturer++;
console.log('current frame (lecturer): ', count_lecturer);
}, lecturer_fps);
//check for the current class //check for the current class
if (classes === play_class) { if (classes === play_class) {
$(this).text('Pause'); $(this).text('Pause');
$(this).attr('class', pause_class); $(this).attr('class', pause_class);
video.play(); video.play();
video1.play();
} else if (classes === pause_class) { } else if (classes === pause_class) {
$(this).text('Play'); $(this).text('Play');
$(this).attr('class', play_class); $(this).attr('class', play_class);
video.pause(); video.pause();
video1.pause();
} }
//function to do when the video is paused //function to do when the video is paused
...@@ -429,259 +482,17 @@ ...@@ -429,259 +482,17 @@
video.onended = function (e) { video.onended = function (e) {
//stop changing the activity values //stop changing the activity values
clearInterval(video_interval); clearInterval(video_interval);
}
});
//declaring the variable for setInterval function
let timeVar = null;
//handling the play button
$('#play_pause_icon_activity').click(function () {
//defining the two possible classes
let play_class = "fas fa-play";
let pause_class = "fas fa-pause";
//retrieving the current icon class
let current_class = $(this).attr('class');
//assigning the correct class based on the icon clicked
let new_class = (current_class === play_class) ? pause_class : play_class;
//setting the new class
$(this).attr('class', new_class);
//handling the slider
let slider = document.getElementById("myActivityRange");
let output = document.getElementById("demo");
//when the button is playing
if (current_class === play_class) {
timeVar = setInterval(() => {
let value = slider.value;
let new_slider_value = Number(value) + 1;
slider.value = new_slider_value;
output.innerHTML = new_slider_value.toString();
let selectedImage = '#image_' + Number(value);
//displaying the relevant image
$('#image_0').html($(selectedImage).html());
}, 100);
}
//when the button is paused
else if (current_class === pause_class) {
clearInterval(timeVar);
}
});
//handling the slider
let slider = document.getElementById("myActivityRange");
let output = document.getElementById("demo");
output.innerHTML = slider.value;
slider.oninput = function () {
output.innerHTML = this.value;
let selectedImage = '#image_' + Number(this.value);
//hide
{#$('#image_0').attr('hidden', true);#}
$('#image_0').html($(selectedImage).html());
//setting the selected image
{#$(selectedImage).attr('hidden', false);#}
}; };
$(document).on('click', '.img-link', function (e) { //function to do when the video is ended
video1.onended = function (e) {
//removing previously displayed detections //stop changing the activity values
$('.detections').remove(); clearInterval(video_interval_lecturer);
//removing the no-content message
$('#no_detection_message_content').hide();
//appearing the loader
$('#detection_loader').attr('hidden', false);
let img_src_arr = e.target.src.split('/');
let len = img_src_arr.length;
let src = img_src_arr[len - 1];
let frame_name_arr = src.split('.');
let frame_name = frame_name_arr[0];
//fetching the detection for the selected frame
fetch('http://127.0.0.1:8000/get-lecture-activity-frame-detection/?video_name=' + global_video_name + "&frame_name=" + frame_name)
.then((res) => res.json())
.then((out) => displayDetections(out.detections, frame_name))
.catch((error) => alert('this is an error'));
});
//the function to display detections
function displayDetections(detections, frame_name) {
let img_string = '';
let no_of_detections = detections.length;
//disabling the loader
$('#detection_loader').attr('hidden', true);
//appearing the no of detections number area
$('#detection_number_area').attr('hidden', false);
$('#no_of_detections').text(no_of_detections);
detections.map((detection) => {
img_string += "<img src='{% static '' %}FirstApp/activity/" + global_video_name + "/" + frame_name + "/" + detection + "' class='detections m-2' width='100' height='100' >"
});
$('#detection_frames').prepend(img_string);
} }
//listening for click events in labels
$('.labels').click(function () {
let label = Number($(this).attr('data-number'));
//removing the previous student detection lists
$('.student_detection_lists').remove();
//appearing the loader
$('#detection_student_loader').attr('hidden', false);
//disappearing the no content message
$('#no_detection_student_content').attr('hidden', true);
//fetching from the api
fetch('http://127.0.0.1:8000/get-lecture-activity-detection-for-label/?video_name=' + global_video_name + '&label=' + label)
.then((res) => res.json())
.then((out) => createDetectedStudentFrames(out))
.catch((error) => alert('this is the error: ' + error))
}); });
//creating the detected students frames
function createDetectedStudentFrames(detections) {
let htmlString = "";
//iterating through the student
detections.people.map((student) => {
let title = student.split('.')[0];
let images = "";
htmlString += "<div class='row p-3 student-detection-rows'>";
let student_count = 0;
//iterating through the frames
detections.response.map((frame) => {
let frame_detections = frame.detections;
if (frame_detections.includes(student)) {
if (student_count === 0) {
images += "<li class='list-group-item frame-0' id='image_0_" + title + "'>";
} else {
images += "<li class='list-group-item other-student-frames' id='image_" + student_count + "_" + title + "' hidden>";
}
images += "<img src='{% static '' %}FirstApp/Activity/" + global_video_name + "/" + frame.frame + "/" + student + "' width='200' height='200'>";
images += "</li>";
//increment the student count
student_count++;
}
});
htmlString += "<h6 class='font-italic'>" + title + "</h6>";
htmlString += "<ul class='list-group list-group-horizontal student_detection_lists' style='overflow-x: scroll'>";
htmlString += images;
htmlString += "</ul>";
htmlString += "<div class='slidecontainer'>";
htmlString += "<div class='row m-3'></div>";
htmlString += "<div class='row'>";
htmlString += "<span><i class='fas fa-play play-pause-icon-student-frames' id='icon_" + title + "'></i></span>";
htmlString += "</div>";
htmlString += "<input type='range' min='1' max='100' value='0' class='slider' id='slider_" + title + "'>";
htmlString += "<p>No of frames: <span id='demo_" + title + "'></span></p>";
htmlString += "</div>";
htmlString += "</div>";
});
//disappearing the loader
$('#detection_student_loader').attr('hidden', true);
//append to the relevant html card content
$('#detection_students').append(htmlString);
}
let studentTimeVar = null;
//playing the frames for each student detection
$(document).on('click', '.play-pause-icon-student-frames', function (e) {
//defining the two possible classes
let play_class = "fas fa-play play-pause-icon-student-frames";
let pause_class = "fas fa-pause play-pause-icon-student-frames";
//retrieving the current icon class
let current_class = $(this).attr('class');
//assigning the correct class based on the icon clicked
let new_class = (current_class === play_class) ? pause_class : play_class;
//setting the new class
$(this).attr('class', new_class);
//extracting the title pf the clicked icon
let title_part = $(this).attr('id');
let title = title_part.split("_")[1];
//handling the slider
let slider = document.getElementById("slider_" + title);
let output = document.getElementById("demo_" + title);
//when the button is playing
if (current_class === play_class) {
studentTimeVar = setInterval(() => {
let value = slider.value;
let new_slider_value = Number(value) + 1;
slider.value = new_slider_value;
output.innerHTML = new_slider_value.toString();
let selectedImage = '#image_' + Number(value) + '_' + title;
//displaying the relevant image
$('#image_0_' + title).html($(selectedImage).html());
}, 100);
}
//when the button is paused
else if (current_class === pause_class) {
clearInterval(studentTimeVar);
}
})
}); });
</script> </script>
...@@ -854,22 +665,6 @@ ...@@ -854,22 +665,6 @@
alt="Loader"> alt="Loader">
</div> </div>
<!--frames -->.
<div class="text-center p-4" id="video_frames">
<!-- slide container -->
<div id="slidecontainer" hidden>
<div class="row m-3"></div>
<!-- play/pause icon -->
<div class="row">
<span><i class="fas fa-play"
id="play_pause_icon_activity"></i></span>
</div>
<input type="range" min="1" max="100" value="0" class="slider"
id="myActivityRange">
<p>No of frames: <span id="demo"></span></p>
</div>
</div>
</div>
<!--this area will display the progress bars --> <!--this area will display the progress bars -->
<div class="progress_area" hidden> <div class="progress_area" hidden>
...@@ -882,7 +677,8 @@ ...@@ -882,7 +677,8 @@
<div class="progress-bar bg-danger" role="progressbar" <div class="progress-bar bg-danger" role="progressbar"
id="looking_up_right_width" id="looking_up_right_width"
style="width: 20%" style="width: 20%"
aria-valuenow="20" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="20" aria-valuemin="0"
aria-valuemax="100"></div>
</div> </div>
<!--looking up and left --> <!--looking up and left -->
...@@ -894,7 +690,8 @@ ...@@ -894,7 +690,8 @@
<div class="progress-bar bg-warning" role="progressbar" <div class="progress-bar bg-warning" role="progressbar"
id="looking_up_left_width" id="looking_up_left_width"
style="width: 40%" style="width: 40%"
aria-valuenow="40" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="40" aria-valuemin="0"
aria-valuemax="100"></div>
</div> </div>
<!--looking down and right --> <!--looking down and right -->
...@@ -906,7 +703,8 @@ ...@@ -906,7 +703,8 @@
<div class="progress-bar" role="progressbar" <div class="progress-bar" role="progressbar"
id="looking_down_right_width" id="looking_down_right_width"
style="width: 60%" style="width: 60%"
aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="60" aria-valuemin="0"
aria-valuemax="100"></div>
</div> </div>
<!--Looking down and left--> <!--Looking down and left-->
...@@ -917,7 +715,8 @@ ...@@ -917,7 +715,8 @@
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar bg-info" role="progressbar" <div class="progress-bar bg-info" role="progressbar"
id="looking_down_left_width" style="width: 80%" id="looking_down_left_width" style="width: 80%"
aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="80" aria-valuemin="0"
aria-valuemax="100"></div>
</div> </div>
<!--Looking front--> <!--Looking front-->
...@@ -926,9 +725,10 @@ ...@@ -926,9 +725,10 @@
</a> </a>
<span class="float-right" id="looking_front_perct">60%</span> <span class="float-right" id="looking_front_perct">60%</span>
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar bg-info" role="progressbar" <div class="progress-bar bg-gradient-dark" role="progressbar"
id="looking_front_width" style="width: 80%" id="looking_front_width" style="width: 80%"
aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="80" aria-valuemin="0"
aria-valuemax="100"></div>
</div> </div>
</div> </div>
...@@ -936,6 +736,9 @@ ...@@ -936,6 +736,9 @@
</div> </div>
</div>
<!--graph tab --> <!--graph tab -->
<div class="tab-pane fade" id="graph" role="tabpanel" <div class="tab-pane fade" id="graph" role="tabpanel"
aria-labelledby="profile-tab"> aria-labelledby="profile-tab">
...@@ -1007,10 +810,11 @@ ...@@ -1007,10 +810,11 @@
<!--button --> <!--button -->
<div class="text-right m-4"> <div class="text-right m-4">
<button type="button" class="btn btn-outline-success" id="integrate_activity"> <button type="button" class="btn btn-outline-success" id="integrate_gaze">
Process Process
</button> </button>
</div> </div>
</div> </div>
</div> </div>
</div> </div>
...@@ -1164,7 +968,8 @@ ...@@ -1164,7 +968,8 @@
</a> </a>
<span class="float-right" id="looking_down_right_instant_perct">50%</span> <span class="float-right" id="looking_down_right_instant_perct">50%</span>
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar" role="progressbar" id="looking_down_right_instant_width" <div class="progress-bar" role="progressbar"
id="looking_down_right_instant_width"
style="width: 60%" style="width: 60%"
aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div>
</div> </div>
...@@ -1196,30 +1001,76 @@ ...@@ -1196,30 +1001,76 @@
</div> </div>
<!--end of 1st column --> <!--end of 1st column -->
<!--2nd column --> <!-- 2nd column -->
<div class="col-md-6"> <div class="col-md-6" id="lecturer_video_column">
<div class="text-center"> <div class="text-center">
<span class="h3 font-italic font-weight-bold">Lecturer Performance</span> <span class="h3 font-italic font-weight-bold">Lecturer Performance</span>
</div> </div>
<!--display lecture video --> <!--display lecture video -->
<div class="text-center m-3" id="lecturer_video_section"> <div class="text-center m-3" id="lecturer_video_section">
<!--temporary text -->
<div class="text-center" id="temp_lecturer_text"> <!--display lecturer video -->
<span class="font-italic">No video was found</span> <div class="text-center m-3" id="lecturer_video_section">
<video width="500" height="300" id="lecturer_video" controls>
<source src="#"
type="video/mp4">
Your browser does not support the video tag.
</video>
</div> </div>
<!--end of lecturer video section -->
{# <video width="500" height="300" id="lecturer_video" controls>#} <!-- ajax loader section -->
{# <source src="#"#} <div class="text-center mt-3" id="lecturer_video_progress_loader">
{# type="video/mp4">#} <img src="{% static 'FirstApp/images/ajax-loader-1.gif' %}" alt="loader">
{# Your browser does not support the video tag.#} </div>
{# </video>#}
<!--progress bar section -->
<div class="progress_area" id="lecturer_video_progress" hidden>
<!--sitting -->
<h4 class="small font-weight-bold">Sitting</h4>
<span class="float-right" id="sitting_instant">0%</span>
<div class="progress mb-4">
<div class="progress-bar bg-warning" role="progressbar"
id="sitting_instant_value"
{# style="width: 0%"#}
aria-valuenow="40" aria-valuemin="0" aria-valuemax="100"></div>
</div>
<!--standing -->
<h4 class="small font-weight-bold">Standing</h4>
<span class="float-right" id="standing_instant">0%</span>
<div class="progress mb-4">
<div class="progress-bar" role="progressbar"
id="standing_instant_value"
{# style="width: 0%"#}
aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div>
</div>
<!--walking-->
<h4 class="small font-weight-bold">Walking</h4>
<span class="float-right" id="walking_instant">0%</span>
<div class="progress mb-4">
<div class="progress-bar bg-info" role="progressbar"
id="walking_instant_value"
{# style="width: 80%"#}
aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div>
</div>
</div>
<!--end of progress bar section -->
</div> </div>
<!--end of lecture video section --> <!--end of lecture video section -->
</div> </div>
<!--end of 2nd column --> <!--end of 2nd column -->
</div> </div>
<!--end of 1st row --> <!--end of 1st row -->
......
...@@ -41,11 +41,18 @@ ...@@ -41,11 +41,18 @@
<ul class="navbar-nav bg-gradient-primary sidebar sidebar-dark accordion" id="accordionSidebar"> <ul class="navbar-nav bg-gradient-primary sidebar sidebar-dark accordion" id="accordionSidebar">
<!-- Sidebar - Brand --> <!-- Sidebar - Brand -->
<a class="sidebar-brand d-flex align-items-center justify-content-center" href="index.html"> <a class="sidebar-brand d-flex align-items-center justify-content-center" href="/">
<div class="sidebar-brand-icon rotate-n-15"> <div class="sidebar-brand-icon rotate-n-15">
<i class="fas fa-laugh-wink"></i> <i class="fas fa-laugh-wink"></i>
</div> </div>
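            <!-- show the brand text that matches the logged-in user type -->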
{% if request.session.user_type == "Lecturer" %}
<div class="sidebar-brand-text mx-3">SLPES Lecturer</div> <div class="sidebar-brand-text mx-3">SLPES Lecturer</div>
{% endif %}
{% if request.session.user_type == "Admin" %}
<div class="sidebar-brand-text mx-3">SLPES Admin</div>
{% endif %}
</a> </a>
<!-- Divider --> <!-- Divider -->
...@@ -66,6 +73,8 @@ ...@@ -66,6 +73,8 @@
Interface Interface
</div> </div>
{% if request.session.user_type == "Lecturer" %}
<!-- Nav Item - Pages Collapse Menu --> <!-- Nav Item - Pages Collapse Menu -->
<li class="nav-item"> <li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseTwo" aria-expanded="true" aria-controls="collapseTwo"> <a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseTwo" aria-expanded="true" aria-controls="collapseTwo">
...@@ -83,6 +92,7 @@ ...@@ -83,6 +92,7 @@
</div> </div>
</li> </li>
<!-- Nav Item - Pages Collapse Menu --> <!-- Nav Item - Pages Collapse Menu -->
<li class="nav-item"> <li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseThree" aria-expanded="true" aria-controls="collapseThree"> <a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseThree" aria-expanded="true" aria-controls="collapseThree">
...@@ -97,6 +107,8 @@ ...@@ -97,6 +107,8 @@
</div> </div>
</li> </li>
<li class="nav-item"> <li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseFour" aria-expanded="true" aria-controls="collapseThree"> <a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseFour" aria-expanded="true" aria-controls="collapseThree">
<i class="fas fa-fw fa-cog"></i> <i class="fas fa-fw fa-cog"></i>
...@@ -127,6 +139,28 @@ ...@@ -127,6 +139,28 @@
</div> </div>
</li> </li>
{% endif %}
{% if request.session.user_type == "Admin" %}
<!-- Nav Item - Pages Collapse Menu -->
<li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapsePages"
aria-expanded="true" aria-controls="collapsePages">
<i class="fas fa-fw fa-folder"></i>
<span>Pages</span>
</a>
<div id="collapsePages" class="collapse" aria-labelledby="headingPages" data-parent="#accordionSidebar">
<div class="bg-white py-2 collapse-inner rounded">
<!-- <h6 class="collapse-header">Login Screens:</h6>-->
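                    <!-- NOTE: the admin menu currently reuses the lecturer dashboard and video-page routes -->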
<a class="collapse-item" href="/lecturer">Dashboard</a>
<a class="collapse-item" href="/lecturer/lecture-video">Video Page</a>
</div>
</div>
</li>
{% endif %}
<!-- Divider --> <!-- Divider -->
<hr class="sidebar-divider"> <hr class="sidebar-divider">
...@@ -178,6 +212,8 @@ ...@@ -178,6 +212,8 @@
</div> </div>
</ul> </ul>
<!-- End of Sidebar --> <!-- End of Sidebar -->
<div id="content-wrapper" class="d-flex flex-column"> <div id="content-wrapper" class="d-flex flex-column">
......
{% load static %}
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<meta name="description" content="">
<meta name="author" content="">
<title>SLPES</title>
<!-- Custom fonts for this template-->
<link href="{% static 'FirstApp/vendor/fontawesome-free/css/all.min.css' %}" rel="stylesheet" type="text/css">
<link href="https://fonts.googleapis.com/css?family=Nunito:200,200i,300,300i,400,400i,600,600i,700,700i,800,800i,900,900i"
rel="stylesheet">
<!-- Custom styles for this template-->
<link href="{% static 'FirstApp/css/sb-admin-2.min.css' %}" rel="stylesheet">
</head>
<body class="bg-gradient-primary">
<div class="container">
<!-- Outer Row -->
<div class="row justify-content-center">
<div class="col-xl-10 col-lg-12 col-md-9">
<div class="card o-hidden border-0 shadow-lg my-5">
<div class="card-body p-0">
<!-- Nested Row within Card Body -->
<div class="row">
<div class="col-lg-6 d-none d-lg-block">
<img src="{% static 'FirstApp/images/user_redirect.png' %}" width="400" height="500"
alt="No image">
</div>
<div class="col-lg-6">
<div class="p-5">
<div class="text-center">
<h1 class="h4 text-gray-900 mb-4">Select the user type</h1>
</div>
<!--form -->
<form action="/process-user-redirect" method="POST" name="loginForm" class="user">
{% csrf_token %}
<div class="form-check mx-3">
<input class="form-check-input" type="radio" name="user_type"
id="admin" value="admin" checked>
<label class="form-check-label" for="admin">
Admin
</label>
</div>
<div style="padding-top: 20px">
<div class="form-check mx-3">
<input class="form-check-input" type="radio" name="user_type"
id="lecturer" value="lecturer">
<label class="form-check-label" for="lecturer">
Lecturer
</label>
</div>
<div style="padding-top: 20px">
    <button type="submit" class="btn btn-primary btn-user btn-block">Proceed</button>
</div>
</div>
</form>
<hr>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
<!-- Bootstrap core JavaScript-->
<script src="{% static 'FirstApp/vendor/jquery/jquery.min.js' %}"></script>
<script src="{% static 'FirstApp/vendor/bootstrap/js/bootstrap.bundle.min.js' %}"></script>
<!-- Core plugin JavaScript-->
<script src="{% static 'FirstApp/vendor/jquery-easing/jquery.easing.min.js' %}"></script>
<!-- Custom scripts for all pages-->
<script src="{% static 'FirstApp/js/sb-admin-2.min.js' %}"></script>
</body>
</html>
...@@ -14,6 +14,7 @@ urlpatterns = [ ...@@ -14,6 +14,7 @@ urlpatterns = [
path('logout', views.logoutView), path('logout', views.logoutView),
path('register-user', views.register), path('register-user', views.register),
path('404', views.view404), path('404', views.view404),
path('401', views.view401),
path('500', views.view500), path('500', views.view500),
path('blank', views.blank), path('blank', views.blank),
path('gaze', views.gaze), path('gaze', views.gaze),
...@@ -32,10 +33,20 @@ urlpatterns = [ ...@@ -32,10 +33,20 @@ urlpatterns = [
# video results # video results
path('video_result', views.video_result), path('video_result', views.video_result),
# this is used for login # this is used to process login
path('process-login', views.loggedInView), path('process-login', views.loggedInView),
# this is used for login # this is used to process admin login
path('process-admin-login', views.processAdminLogin),
# this is used for user-redirect processing
path('process-user-redirect', views.processUserRedirect),
# this is used for admin login page
path('admin-login', views.adminLogin),
# this is used for activity
path('activity', views.activity), path('activity', views.activity),
# tables view # tables view
...@@ -44,6 +55,10 @@ urlpatterns = [ ...@@ -44,6 +55,10 @@ urlpatterns = [
# test view (delete later) # test view (delete later)
path('test', views.test), path('test', views.test),
# user direct view
path('user-direct', views.userDirect),
url(r'^register', views.RegisterViewSet), url(r'^register', views.RegisterViewSet),
# re_path('video/?video_name<str:video_name>', views.video), # re_path('video/?video_name<str:video_name>', views.video),
url(r'^teachers/', views.teachersList.as_view()), url(r'^teachers/', views.teachersList.as_view()),
...@@ -140,6 +155,7 @@ urlpatterns = [ ...@@ -140,6 +155,7 @@ urlpatterns = [
# lecture emotion detection for frames API (to retrieve detections for each frame in lecture video) # lecture emotion detection for frames API (to retrieve detections for each frame in lecture video)
url(r'^get-lecture-emotion-for-frame/$', api.GetLectureEmotionRecognitionsForFrames.as_view()), url(r'^get-lecture-emotion-for-frame/$', api.GetLectureEmotionRecognitionsForFrames.as_view()),
###### POSE Section ##### ###### POSE Section #####
# lecture video API (for Pose estimation) # lecture video API (for Pose estimation)
url(r'^get-lecture-video-for-pose/$', api.GetLectureVideoForPose.as_view()), url(r'^get-lecture-video-for-pose/$', api.GetLectureVideoForPose.as_view()),
...@@ -187,6 +203,21 @@ urlpatterns = [ ...@@ -187,6 +203,21 @@ urlpatterns = [
# retrieves lecture gaze summary # retrieves lecture gaze summary
url(r'^get-lecture-gaze-summary/$', api.GetLectureGazeSummary.as_view()), url(r'^get-lecture-gaze-summary/$', api.GetLectureGazeSummary.as_view()),
# retrieves lecture activity correlations
url(r'^get-activity-correlations/$', api.GetLectureActivityCorrelations.as_view()),
# retrieves lecture emotion correlations
url(r'^get-emotion-correlations/$', api.GetLectureEmotionCorrelations.as_view()),
# retrieves lecture gaze correlations
url(r'^get-gaze-correlations/$', api.GetLectureGazeCorrelations.as_view()),
##### OTHERS #####
# retrieves lecture recorded video name
url(r'^get-lecture-recorded-video-name/$', api.GetLecturerRecordedVideo.as_view()),
# routers # routers
# path('', include(router.urls)), # path('', include(router.urls)),
......
...@@ -109,13 +109,18 @@ class LectureViewSet(APIView): ...@@ -109,13 +109,18 @@ class LectureViewSet(APIView):
####### VIEWS ###### ####### VIEWS ######
@login_required(login_url='/login') @login_required(login_url='/user-direct')
def hello(request): def hello(request):
try:
username = request.user.username username = request.user.username
# retrieve the lecturer # retrieve the lecturer
lecturer = request.session['lecturer'] lecturer = request.session['lecturer']
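# retrieve the user type stored in the session at login (used to switch the sidebar between lecturer and admin)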
user_type = request.session['user_type']
print('user_type: ', user_type)
# retrieve the lecturer's timetable slots # retrieve the lecturer's timetable slots
lecturer_timetable = FacultyTimetable.objects.filter() lecturer_timetable = FacultyTimetable.objects.filter()
...@@ -194,15 +199,27 @@ def hello(request): ...@@ -194,15 +199,27 @@ def hello(request):
context = {'object': obj, 'Videos': videos, 'durations': durations, 'template_name': 'FirstApp/template.html', 'lecturer_details': lecturer_details, "lecturer": lecturer} context = {'object': obj, 'Videos': videos, 'durations': durations, 'template_name': 'FirstApp/template.html', 'lecturer_details': lecturer_details, "lecturer": lecturer}
return render(request, 'FirstApp/Home.html', context) return render(request, 'FirstApp/Home.html', context)
# in case of a KeyError exception (missing session data)
except KeyError as exc:
return redirect('/401')
except Exception as exc:
return redirect('/500')
# this method will handle the 404 error page
def view404(request): def view404(request):
return render(request, 'FirstApp/404.html') return render(request, 'FirstApp/404.html')
# this method will handle the 401 error page
def view401(request):
return render(request, 'FirstApp/401.html')
# querying the database # querying the database
def blank(request): def blank(request):
emotions = LectureEmotionReport.objects.all().order_by('lecture_id') emotions = LectureEmotionReport.objects.all().order_by('lecture_id')
return render(request, 'FirstApp/blank.html', {'details': emotions}) return render(request, 'FirstApp/blank.html', {'details': emotions})
@login_required(login_url='/login') @login_required(login_url='/user-direct')
def gaze(request): def gaze(request):
try: try:
...@@ -221,6 +238,11 @@ def gaze(request): ...@@ -221,6 +238,11 @@ def gaze(request):
subject_list.append(subject_serialized.data) subject_list.append(subject_serialized.data)
# handling the keyError
except KeyError as exc:
return redirect('/401')
# handling the general exceptions
except Exception as exc: except Exception as exc:
return redirect('/500') return redirect('/500')
...@@ -240,7 +262,7 @@ def processGaze(request): ...@@ -240,7 +262,7 @@ def processGaze(request):
# the corresponding view for pose estimation # the corresponding view for pose estimation
@login_required(login_url='/login') @login_required(login_url='/user-direct')
def pose(request): def pose(request):
try: try:
...@@ -295,7 +317,7 @@ def webcam(request): ...@@ -295,7 +317,7 @@ def webcam(request):
return redirect('/') return redirect('/')
# to process video for emotion detection # to process video for emotion detection
@login_required(login_url='/login') @login_required(login_url='/user-direct')
def video(request): def video(request):
title = 'Student and Lecturer Performance Enhancement System' title = 'Student and Lecturer Performance Enhancement System'
video_name = request.GET.get('video_name') video_name = request.GET.get('video_name')
...@@ -310,7 +332,7 @@ def video(request): ...@@ -310,7 +332,7 @@ def video(request):
# extractor view # extractor view
@login_required(login_url='/login') @login_required(login_url='/user-direct')
def extractor(request): def extractor(request):
folder = os.path.join(BASE_DIR, os.path.join('static\\FirstApp\\videos')) folder = os.path.join(BASE_DIR, os.path.join('static\\FirstApp\\videos'))
videoPaths = [os.path.join(folder, file) for file in os.listdir(folder)] videoPaths = [os.path.join(folder, file) for file in os.listdir(folder)]
...@@ -358,7 +380,7 @@ def child(request): ...@@ -358,7 +380,7 @@ def child(request):
return render(request, 'FirstApp/child.html', {'template_name': 'FirstApp/base.html'}) return render(request, 'FirstApp/child.html', {'template_name': 'FirstApp/base.html'})
# displaying video results # displaying video results
@login_required(login_url='/login') @login_required(login_url='/user-direct')
def video_result(request): def video_result(request):
try: try:
...@@ -434,7 +456,11 @@ def video_result(request): ...@@ -434,7 +456,11 @@ def video_result(request):
# append to the list # append to the list
due_lecture_list.append(obj) due_lecture_list.append(obj)
# handling the KeyError
except KeyError as exc:
return redirect('/401')
# handling the general exceptions
except Exception as exc: except Exception as exc:
print('what is wrong?: ', exc) print('what is wrong?: ', exc)
return redirect('/500') return redirect('/500')
...@@ -444,7 +470,7 @@ def video_result(request): ...@@ -444,7 +470,7 @@ def video_result(request):
# view for emotion page # view for emotion page
@login_required(login_url='/login') @login_required(login_url='/user-direct')
def emotion_view(request): def emotion_view(request):
try: try:
...@@ -463,6 +489,11 @@ def emotion_view(request): ...@@ -463,6 +489,11 @@ def emotion_view(request):
subject_list.append(subject_serialized.data) subject_list.append(subject_serialized.data)
# handling the KeyError
except KeyError as exc:
return redirect('/401')
# handling the general exceptions
except Exception as exc: except Exception as exc:
return redirect('/500') return redirect('/500')
...@@ -490,6 +521,7 @@ def loggedInView(request): ...@@ -490,6 +521,7 @@ def loggedInView(request):
login(request, user) login(request, user)
# setting up the session # setting up the session
request.session['lecturer'] = lecturer.id request.session['lecturer'] = lecturer.id
request.session['user_type'] = "Lecturer"
return redirect('/') return redirect('/')
...@@ -506,7 +538,7 @@ def logoutView(request): ...@@ -506,7 +538,7 @@ def logoutView(request):
logout(request) logout(request)
return redirect('/login') return redirect('/user-direct')
# 500 error page # 500 error page
...@@ -519,7 +551,7 @@ def tables(request): ...@@ -519,7 +551,7 @@ def tables(request):
return render(request, "FirstApp/tables.html") return render(request, "FirstApp/tables.html")
@login_required(login_url='/login') @login_required(login_url='/user-direct')
def activity(request): def activity(request):
try: try:
...@@ -538,6 +570,11 @@ def activity(request): ...@@ -538,6 +570,11 @@ def activity(request):
subject_list.append(subject_serialized.data) subject_list.append(subject_serialized.data)
# handling the KeyError
except KeyError as exc:
return redirect('/401')
# handling the general exception
except Exception as exc: except Exception as exc:
return redirect('/500') return redirect('/500')
...@@ -546,3 +583,60 @@ def activity(request): ...@@ -546,3 +583,60 @@ def activity(request):
def test(request): def test(request):
return render(request, "FirstApp/pdf_template.html") return render(request, "FirstApp/pdf_template.html")
# this method will render the user-direct page
def userDirect(request):
return render(request, "FirstApp/user_direct.html")
# this method will handle user redirection process
def processUserRedirect(request):
if request.POST:
user_type = request.POST.get('user_type')
if user_type == 'admin':
return redirect('/admin-login')
elif user_type == 'lecturer':
return redirect('/login')
return redirect('/500')
# admin login page
def adminLogin(request):
return render(request, "FirstApp/admin_login.html")
# this method will process admin login
def processAdminLogin(request):
username = "not logged in"
message = "Invalid Username or Password"
adminLoginForm = AdminLoginForm(request.POST)
print('message: ', message)
try:
# if the details are valid, let the user log in
if adminLoginForm.is_valid():
email = adminLoginForm.cleaned_data.get('email')
user = User.objects.get(email=email)
admin = Admin.objects.get(email=email)
login(request, user)
# setting up the session
request.session['admin'] = admin.id
request.session['user_type'] = "Admin"
return redirect('/lecturer')
else:
message = "Please provide correct credntials"
except Exception as exc:
print('exception: ', exc)
return render(request, 'FirstApp/admin_login.html', {'message': message})
\ No newline at end of file
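processAdminLogin above relies on an AdminLoginForm that is defined elsewhere and not shown in this diff; a minimal sketch of what such a form presumably looks like, with field names inferred from the usage above (the password field is an assumption, and note that the view as written never verifies it).

# minimal sketch of the form assumed by processAdminLogin; the real
# AdminLoginForm lives elsewhere in FirstApp and may differ
from django import forms

class AdminLoginForm(forms.Form):
    email = forms.EmailField()
    password = forms.CharField(max_length=15, widget=forms.PasswordInput)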
...@@ -210,3 +210,44 @@ class LecturerAudioSummaryPeriodAPI(APIView): ...@@ -210,3 +210,44 @@ class LecturerAudioSummaryPeriodAPI(APIView):
}) })
# this section is for student and lecturer behavior integration
class StudentLecturerIntegratedAPI(APIView):
def get(self, request):
video_name = request.query_params.get('video_name')
# check whether a lecturer activity frame recognitions record already exists
isExist = LecturerActivityFrameRecognitions.objects.filter(
lecturer_meta_id__lecturer_video_id__lecture_video_name=video_name).exists()
if (isExist):
lecture_activity_frame_recognitions = LecturerActivityFrameRecognitions.objects.filter(
lecturer_meta_id__lecturer_video_id__lecture_video_name=video_name)
lecture_activity_frame_recognitions_ser = LecturerActivityFrameRecognitionsSerializer(
lecture_activity_frame_recognitions, many=True)
lecture_activity_frame_recognitions_data = lecture_activity_frame_recognitions_ser.data[0]
frame_detections = lecture_activity_frame_recognitions_data['frame_recognition_details']
fps = lecture_activity_frame_recognitions_data['fps']
int_fps = int(fps)
return Response({
"frame_recognitions": frame_detections,
"fps": fps
})
else:
# frame_recognitions = classroom_activity.get_lecturer_activity_for_frames(video_name)
frame_recognitions, fps = classroom_activity.save_frame_recognition(video_name)
int_fps = int(fps)
# print('frame recognitions: ', frame_recognitions)
return Response({
"frame_recognitions": frame_recognitions,
"fps": fps
})
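A rough client-side sketch of how this endpoint could be exercised once it is registered under get-lecturer-video-frame-recognitions/ (see the URL patterns later in this diff); the host, port, URL prefix and video name are assumptions.

# illustrative call only; host, '/lecturer/' prefix and the video name are assumptions
import requests

resp = requests.get(
    'http://127.0.0.1:8000/lecturer/get-lecturer-video-frame-recognitions/',
    params={'video_name': 'Video_test_1.mp4'},
)
data = resp.json()
print('fps:', data['fps'])
for frame in data['frame_recognitions']:
    print(frame['frame_name'], frame['sitting_perct'],
          frame['standing_perct'], frame['walking_perct'])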
...@@ -5,6 +5,13 @@ import numpy as np ...@@ -5,6 +5,13 @@ import numpy as np
import cv2 import cv2
import os import os
from FirstApp.logic.custom_sorter import custom_object_sorter
from FirstApp.logic.id_generator import generate_new_id
from MonitorLecturerApp.models import LecturerVideoMetaData, LecturerActivityFrameRecognitions, \
LecturerActivityFrameRecognitionDetails
from MonitorLecturerApp.serializers import LecturerVideoMetaDataSerializer
def activity_recognition(video_name): def activity_recognition(video_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
CLASSIFIER_DIR = os.path.join(BASE_DIR, "MonitorLecturerApp\\models") CLASSIFIER_DIR = os.path.join(BASE_DIR, "MonitorLecturerApp\\models")
...@@ -108,3 +115,162 @@ def activity_recognition(video_name): ...@@ -108,3 +115,162 @@ def activity_recognition(video_name):
# this method will calculate the lecturer activity for each frame
def get_lecturer_activity_for_frames(video_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\lecturer_videos\\{}".format(video_name))
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_03.h5")
CLASSIFIER_DIR = os.path.join(BASE_DIR, "MonitorLecturerApp\\models")
CLASSIFIER_PATH = os.path.join(CLASSIFIER_DIR, "keras_model_updated.h5")
# load the serialized lecturer activity classification model from disk
print("[INFO] loading model...")
np.set_printoptions(suppress=True)
class_labels = ['Seated Teaching', 'Teaching by Standing', 'Teaching by Walking']
model = tensorflow.keras.models.load_model(CLASSIFIER_PATH)
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
size = (224, 224)
# open the video and iterate over its frames
video = cv2.VideoCapture(VIDEO_DIR)
no_of_frames = video.get(cv2.CAP_PROP_FRAME_COUNT)
fps = video.get(cv2.CAP_PROP_FPS)
print('fps: ', fps)
frame_count = 0
# frame activity recognitions
frame_activity_recognitions = []
# for testing purposes
print('starting the frame activity recognition process')
# looping through the frames
while (frame_count < no_of_frames):
# define the count variables for each frame
sitting_count = 0
standing_count = 0
walking_count = 0
ret, image = video.read()
# derive the frame name
frame_name = "frame-{}".format(frame_count)
frame_details = {}
frame_details['frame_name'] = frame_name
detection = cv2.resize(image, size)
image_array = np.asarray(detection)
normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
# Load the image into the array
data[0] = normalized_image_array
# run the inference
prediction = model.predict(data)
label = class_labels[prediction.argmax()]
# increment the relevant count, based on the label
if (label == class_labels[0]):
sitting_count += 1
elif (label == class_labels[1]):
standing_count += 1
elif (label == class_labels[2]):
walking_count += 1
print('current frame: ', frame_count)
# increment frame count
frame_count += 1
# calculating the percentages for the frame
sitting_perct = float(sitting_count) * 100
standing_perct = float(standing_count) * 100
walking_perct = float(walking_count) * 100
# adding the percentage values to the frame details
frame_details['sitting_perct'] = sitting_perct
frame_details['standing_perct'] = standing_perct
frame_details['walking_perct'] = walking_perct
# push to all the frame details
frame_activity_recognitions.append(frame_details)
# sort the recognitions based on the frame number
sorted_activity_frame_recognitions = custom_object_sorter(frame_activity_recognitions)
# for testing purposes
print('ending the frame activity recognition process')
# return the detected frame percentages
return sorted_activity_frame_recognitions, fps
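Because only one prediction is made per frame, each per-frame percentage above is either 0 or 100. A small sketch of how the per-frame output could be rolled up into overall percentages for the whole video; the helper name is an assumption and this aggregation is not part of this commit.

# hypothetical aggregation over the per-frame output of get_lecturer_activity_for_frames
def summarise_lecturer_activity(frame_recognitions):
    total = len(frame_recognitions)
    if total == 0:
        return {'sitting_perct': 0.0, 'standing_perct': 0.0, 'walking_perct': 0.0}
    return {
        # each frame contributes either 0 or 100 for a given label
        key: sum(frame[key] for frame in frame_recognitions) / total
        for key in ('sitting_perct', 'standing_perct', 'walking_perct')
    }

# usage: recognitions, fps = get_lecturer_activity_for_frames(video_name)
#        print(summarise_lecturer_activity(recognitions))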
# this section will handle saving activity entities to the database
def save_frame_recognition(video_name):
# for testing purposes
print('starting the saving activity frame recognition process')
# retrieve the lecture activity id
lec_activity = LecturerVideoMetaData.objects.filter(lecturer_video_id__lecture_video_name=video_name)
lec_activity_ser = LecturerVideoMetaDataSerializer(lec_activity, many=True)
lec_activity_data = lec_activity_ser.data[0]
lec_activity_id = lec_activity_data['id']
# create a new lecture activity frame detections id
last_lec_activity_frame_recognitions = LecturerActivityFrameRecognitions.objects.order_by(
'lecturer_activity_frame_recognition_id').last()
new_lecture_activity_frame_recognitions_id = "LLAFR00001" if (last_lec_activity_frame_recognitions is None) else \
generate_new_id(last_lec_activity_frame_recognitions.lecturer_activity_frame_recognition_id)
# calculate the frame detections
frame_detections, fps = get_lecturer_activity_for_frames(video_name)
frame_recognition_details = []
# save the new lecture activity frame recognitions
for detection in frame_detections:
lec_activity_frame_recognition_details = LecturerActivityFrameRecognitionDetails()
lec_activity_frame_recognition_details.frame_name = detection['frame_name']
lec_activity_frame_recognition_details.sitting_perct = detection['sitting_perct']
lec_activity_frame_recognition_details.standing_perct = detection['standing_perct']
lec_activity_frame_recognition_details.walking_perct = detection['walking_perct']
frame_recognition_details.append(lec_activity_frame_recognition_details)
lec_activity_frame_recognitions = LecturerActivityFrameRecognitions()
lec_activity_frame_recognitions.lecturer_activity_frame_recognition_id = new_lecture_activity_frame_recognitions_id
lec_activity_frame_recognitions.lecturer_meta_id_id = lec_activity_id
lec_activity_frame_recognitions.frame_recognition_details = frame_recognition_details
lec_activity_frame_recognitions.fps = float(fps)
lec_activity_frame_recognitions.save()
# for testing purposes
print('ending the saving activity frame recognition process')
# now return the frame detections
return frame_detections, fps
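generate_new_id is imported from FirstApp.logic.id_generator and is not shown in this diff; judging by the 'LLAFR00001' seed used above, it presumably increments the numeric suffix while keeping the prefix and zero padding, roughly as sketched here (an assumption, not the actual implementation).

# assumed behaviour of FirstApp.logic.id_generator.generate_new_id; the real code may differ
import re

def generate_new_id(previous_id):
    prefix, number = re.match(r'([A-Za-z]+)(\d+)$', previous_id).groups()
    return '{}{}'.format(prefix, str(int(number) + 1).zfill(len(number)))

# e.g. generate_new_id('LLAFR00001') -> 'LLAFR00002'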
# Generated by Django 2.2.11 on 2020-10-25 10:09
import MonitorLecturerApp.models
from django.db import migrations, models
import django.db.models.deletion
import djongo.models.fields
class Migration(migrations.Migration):
dependencies = [
('MonitorLecturerApp', '0004_lecturervideometadata_lecturer_video_id'),
]
operations = [
migrations.CreateModel(
name='LecturerActivityFrameRecognitions',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lecturer_activity_frame_recognition_id', models.CharField(max_length=15)),
('frame_recognition_details', djongo.models.fields.ArrayField(model_container=MonitorLecturerApp.models.LecturerActivityFrameRecognitionDetails)),
('lecturer_meta_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='MonitorLecturerApp.LecturerVideoMetaData')),
],
),
]
# Generated by Django 2.2.11 on 2020-10-25 10:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('MonitorLecturerApp', '0005_lectureractivityframerecognitions'),
]
operations = [
migrations.AddField(
model_name='lectureractivityframerecognitions',
name='fps',
field=models.FloatField(default=30.0),
),
]
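The two generated migrations above are applied with the usual manage.py migrate step; a minimal programmatic equivalent, assuming it runs in a context where the Django settings are already configured (for example from manage.py shell):

# equivalent of running: python manage.py migrate MonitorLecturerApp
from django.core.management import call_command

call_command('migrate', 'MonitorLecturerApp')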
...@@ -87,3 +87,27 @@ class LecturerAudioText (models.Model): ...@@ -87,3 +87,27 @@ class LecturerAudioText (models.Model):
def __str__(self): def __str__(self):
return self.lecturer_audio_text_id return self.lecturer_audio_text_id
# this abstract class will contain lecturer activity frame recognition details
class LecturerActivityFrameRecognitionDetails(models.Model):
frame_name = models.CharField(max_length=15)
sitting_perct = models.FloatField()
standing_perct = models.FloatField()
walking_perct = models.FloatField()
class Meta:
abstract = True
# this class will contain lecturer activity frame recognitions
class LecturerActivityFrameRecognitions(models.Model):
lecturer_activity_frame_recognition_id = models.CharField(max_length=15)
lecturer_meta_id = models.ForeignKey(LecturerVideoMetaData, on_delete=models.CASCADE)
frame_recognition_details = models.ArrayField(LecturerActivityFrameRecognitionDetails)
fps = models.FloatField(default=30.0)
def __str__(self):
return self.lecturer_activity_frame_recognition_id
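A short sketch of how the embedded frame details read back once a LecturerActivityFrameRecognitions document has been saved, assuming djongo returns the ArrayField entries as model instances (as the serializer below does); the video name is a placeholder.

# reading the embedded frame details back; 'Video_test_1.mp4' is a placeholder video name
record = LecturerActivityFrameRecognitions.objects.get(
    lecturer_meta_id__lecturer_video_id__lecture_video_name='Video_test_1.mp4')

print(record.lecturer_activity_frame_recognition_id, record.fps)
for detail in record.frame_recognition_details:
    # each entry is a LecturerActivityFrameRecognitionDetails instance
    print(detail.frame_name, detail.sitting_perct, detail.standing_perct, detail.walking_perct)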
...@@ -2,7 +2,7 @@ from rest_framework import serializers ...@@ -2,7 +2,7 @@ from rest_framework import serializers
from FirstApp.serializers import LecturerSerializer, SubjectSerializer from FirstApp.serializers import LecturerSerializer, SubjectSerializer
from LectureSummarizingApp.models import LectureAudioSummary from LectureSummarizingApp.models import LectureAudioSummary
from .models import RegisterTeacher from .models import RegisterTeacher, LecturerActivityFrameRecognitions
from .models import LecturerAudioText, LecturerVideoMetaData, LecturerVideo, LectureRecordedVideo from .models import LecturerAudioText, LecturerVideoMetaData, LecturerVideo, LectureRecordedVideo
...@@ -44,3 +44,35 @@ class LecturerVideoMetaDataSerializer(serializers.ModelSerializer): ...@@ -44,3 +44,35 @@ class LecturerVideoMetaDataSerializer(serializers.ModelSerializer):
class Meta: class Meta:
model = LecturerVideoMetaData model = LecturerVideoMetaData
fields = '__all__' fields = '__all__'
# lecturer activity frame recognition serializer
class LecturerActivityFrameRecognitionsSerializer(serializers.ModelSerializer):
lecturer_meta_id = LecturerVideoMetaDataSerializer()
frame_recognition_details = serializers.SerializerMethodField()
# this method will be used to serialize the 'frame_recognition_details' field
def get_frame_recognition_details(self, obj):
return_data = []
for frame_recognition in obj.frame_recognition_details:
recognition = {}
recognition["frame_name"] = frame_recognition.frame_name
recognition["sitting_perct"] = frame_recognition.sitting_perct
recognition["standing_perct"] = frame_recognition.standing_perct
recognition["walking_perct"] = frame_recognition.walking_perct
return_data.append(recognition)
# return the data
return return_data
class Meta:
model = LecturerActivityFrameRecognitions
fields = '__all__'
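A rough usage sketch for the serializer, mirroring how StudentLecturerIntegratedAPI uses it earlier in this diff; the video name is again a placeholder.

# illustrative serializer usage; the video name is a placeholder
records = LecturerActivityFrameRecognitions.objects.filter(
    lecturer_meta_id__lecturer_video_id__lecture_video_name='Video_test_1.mp4')
serialized = LecturerActivityFrameRecognitionsSerializer(records, many=True)

data = serialized.data[0]
print(data['fps'])
print(data['frame_recognition_details'][0])  # {'frame_name': ..., 'sitting_perct': ..., ...}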
{% extends 'FirstApp/template.html' %}
<!DOCTYPE html> <!DOCTYPE html>
<html lang="en"> <html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<meta name="description" content="">
<meta name="author" content="">
<title>SLPES</title>
{% load static %}
<!-- Custom fonts for this template-->
<link rel="shortcut icon" href="{% static 'FirstApp/images/favicon.ico' %}" type="image/x-icon"/>
<link href="{% static 'FirstApp/css/all.min.css' %}" rel="stylesheet" type="text/css">
<link href="https://fonts.googleapis.com/css?family=Nunito:200,200i,300,300i,400,400i,600,600i,700,700i,800,800i,900,900i"
rel="stylesheet">
<!-- Custom styles for this template-->
<link href="{% static 'FirstApp/css/sb-admin-2.min.css' %}" rel="stylesheet">
</head>
<body id="page-top"> <body id="page-top">
{% block javascript %}
{% load static %}
<!-- Bootstrap core JavaScript--> <!-- Bootstrap core JavaScript-->
<script src="{% static 'FirstApp/vendor/jquery/jquery.min.js' %}"></script> <script src="{% static 'FirstApp/vendor/jquery/jquery.min.js' %}"></script>
<script src="{% static 'FirstApp/vendor/bootstrap/js/bootstrap.bundle.min.js' %}"></script> <script src="{% static 'FirstApp/vendor/bootstrap/js/bootstrap.bundle.min.js' %}"></script>
...@@ -321,6 +302,8 @@ ...@@ -321,6 +302,8 @@
}); });
</script> </script>
{% endblock %}
<!-- Page Wrapper --> <!-- Page Wrapper -->
<div id="wrapper"> <div id="wrapper">
...@@ -353,23 +336,23 @@ ...@@ -353,23 +336,23 @@
<div class="sidebar-heading"> <div class="sidebar-heading">
</div> </div>
{##}
<!-- Nav Item - Pages Collapse Menu --> {# <!-- Nav Item - Pages Collapse Menu -->#}
<li class="nav-item"> {# <li class="nav-item">#}
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapsePages" {# <a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapsePages"#}
aria-expanded="true" aria-controls="collapsePages"> {# aria-expanded="true" aria-controls="collapsePages">#}
<i class="fas fa-fw fa-folder"></i> {# <i class="fas fa-fw fa-folder"></i>#}
<span>Pages</span> {# <span>Pages</span>#}
</a> {# </a>#}
<div id="collapsePages" class="collapse" aria-labelledby="headingPages" data-parent="#accordionSidebar"> {# <div id="collapsePages" class="collapse" aria-labelledby="headingPages" data-parent="#accordionSidebar">#}
<div class="bg-white py-2 collapse-inner rounded"> {# <div class="bg-white py-2 collapse-inner rounded">#}
<!-- <h6 class="collapse-header">Login Screens:</h6>--> {# <!-- <h6 class="collapse-header">Login Screens:</h6>-->#}
<a class="collapse-item" href="index.html">Dashboard</a> {# <a class="collapse-item" href="index.html">Dashboard</a>#}
<a class="collapse-item" href="/lecturer/lecture-video">Video Page</a> {# <a class="collapse-item" href="/lecturer/lecture-video">Video Page</a>#}
{##}
</div> {# </div>#}
</div> {# </div>#}
</li> {# </li>#}
<!-- Divider --> <!-- Divider -->
<hr class="sidebar-divider d-none d-md-block"> <hr class="sidebar-divider d-none d-md-block">
...@@ -392,7 +375,8 @@ ...@@ -392,7 +375,8 @@
<!-- End of Topbar --> <!-- End of Topbar -->
{% block 'container-fluid' %}
{% load static %}
<!-- Begin Page Content --> <!-- Begin Page Content -->
<div class="container-fluid"> <div class="container-fluid">
...@@ -647,6 +631,7 @@ ...@@ -647,6 +631,7 @@
</div> </div>
<!-- /.container-fluid --> <!-- /.container-fluid -->
{% endblock %}
</div> </div>
<!-- End of Main Content --> <!-- End of Main Content -->
...@@ -667,6 +652,8 @@ ...@@ -667,6 +652,8 @@
</div> </div>
<!-- End of Page Wrapper --> <!-- End of Page Wrapper -->
{% block 'modal' %}
<!-- Scroll to Top Button--> <!-- Scroll to Top Button-->
<a class="scroll-to-top rounded" href="#page-top"> <a class="scroll-to-top rounded" href="#page-top">
<i class="fas fa-angle-up"></i> <i class="fas fa-angle-up"></i>
...@@ -686,7 +673,7 @@ ...@@ -686,7 +673,7 @@
<div class="modal-body">Select "Logout" below if you are ready to end your current session.</div> <div class="modal-body">Select "Logout" below if you are ready to end your current session.</div>
<div class="modal-footer"> <div class="modal-footer">
<button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button> <button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
<a class="btn btn-primary" href="login.html">Logout</a> <a class="btn btn-primary" href="/logout">Logout</a>
</div> </div>
</div> </div>
</div> </div>
...@@ -870,6 +857,9 @@ ...@@ -870,6 +857,9 @@
<script src="{% static 'FirstApp/js/demo/chart-area-demo.js' %}"></script> <script src="{% static 'FirstApp/js/demo/chart-area-demo.js' %}"></script>
<script src="{% static 'FirstApp/js/demo/chart-pie-demo.js' %}"></script> <script src="{% static 'FirstApp/js/demo/chart-pie-demo.js' %}"></script>
{% endblock %}
</body> </body>
</html> </html>
{% extends 'MonitorLecturerApp/template.html' %} {% extends 'FirstApp/template.html' %}
<!DOCTYPE html> <!DOCTYPE html>
<html lang="en"> <html lang="en">
<body id="page-top"> <body id="page-top">
...@@ -154,6 +154,8 @@ ...@@ -154,6 +154,8 @@
<tbody> <tbody>
{% for video in Videos %} {% for video in Videos %}
{# {% for video in lecturer_videos %} #}
<tr> <tr>
<td>{{video.name}}</td> <td>{{video.name}}</td>
<td>{{video.duration}}</td> <td>{{video.duration}}</td>
......
...@@ -24,6 +24,9 @@ urlpatterns = [ ...@@ -24,6 +24,9 @@ urlpatterns = [
path('lecture-video', views.lecVideo), path('lecture-video', views.lecVideo),
# path('Video', views.hello) # path('Video', views.hello)
# delete this path later
path('test-frame-recognitions', views.testFrameRecognitions),
##### LECTURER ACTIVITY SECTION ##### ##### LECTURER ACTIVITY SECTION #####
# API to retrieve activity recognition # API to retrieve activity recognition
url(r'^activities/$', api.ActivityRecognitionAPI.as_view()), url(r'^activities/$', api.ActivityRecognitionAPI.as_view()),
...@@ -31,6 +34,9 @@ urlpatterns = [ ...@@ -31,6 +34,9 @@ urlpatterns = [
# API to retrieve lecturer video meta data results # API to retrieve lecturer video meta data results
url(r'^get-lecturer-video-results/$', api.GetLectureVideoResultsAPI.as_view()), url(r'^get-lecturer-video-results/$', api.GetLectureVideoResultsAPI.as_view()),
# API to retrieve lecturer video frame recognitions
url(r'^get-lecturer-video-frame-recognitions/$', api.StudentLecturerIntegratedAPI.as_view()),
##### END OF LECTURER ACTIVITY SECTION ##### ##### END OF LECTURER ACTIVITY SECTION #####
......
from django.shortcuts import render from django.shortcuts import render, redirect
from django.http import HttpResponse from django.http import HttpResponse
from django.conf.urls import url from django.conf.urls import url
from rest_framework import routers from rest_framework import routers
...@@ -43,6 +43,11 @@ def startup (request) : ...@@ -43,6 +43,11 @@ def startup (request) :
def hello(request): def hello(request):
# page = '<h1>THIS IS MY HOME</h1>' + '<h2> Hello Ishan</h2>' + '<button>Click Me</button>' # page = '<h1>THIS IS MY HOME</h1>' + '<h2> Hello Ishan</h2>' + '<button>Click Me</button>'
try:
admin = request.session['admin']
obj = {'Message': 'Student and Lecturer Performance Enhancement System'} obj = {'Message': 'Student and Lecturer Performance Enhancement System'}
folder = os.path.join(BASE_DIR, os.path.join('static\\FirstApp\\lecturer_videos')) folder = os.path.join(BASE_DIR, os.path.join('static\\FirstApp\\lecturer_videos'))
videoPaths = [os.path.join(folder, file) for file in os.listdir(folder)] videoPaths = [os.path.join(folder, file) for file in os.listdir(folder)]
...@@ -106,6 +111,15 @@ def hello(request): ...@@ -106,6 +111,15 @@ def hello(request):
context = {'object': obj, 'Videos': videos, 'durations': durations, 'template_name': 'MonitorLecturerApp/template.html', 'lec_list': lec_list} context = {'object': obj, 'Videos': videos, 'durations': durations, 'template_name': 'MonitorLecturerApp/template.html', 'lec_list': lec_list}
return render(request, 'MonitorLecturerApp/index.html', context) return render(request, 'MonitorLecturerApp/index.html', context)
# in case the 'admin' session is not there
except KeyError as exc:
return redirect('/401')
# in case of general exceptions
except Exception as exc:
print('exception: ', exc)
return redirect('/500')
def view404(request): def view404(request):
return render(request, 'MonitorLecturerApp/404.html') return render(request, 'MonitorLecturerApp/404.html')
...@@ -173,3 +187,6 @@ def lecVideo(request): ...@@ -173,3 +187,6 @@ def lecVideo(request):
# for audioPath in audiopaths: # for audioPath in audiopaths:
# audio = tAudio() # audio = tAudio()
def testFrameRecognitions(request):
return render(request, "MonitorLecturerApp/test_frame_recognitions.html")
\ No newline at end of file