Commit 4558dc0a authored by SohanDanushka

Merge remote-tracking branch 'origin/QA_RELEASE' into db_and_monitoring_IT17097284

parents ea2061f2 65f5c691
@@ -40,6 +40,15 @@ class Lecturer(models.Model):
         return self.lecturer_id

+# admin model
+class Admin(models.Model):
+    admin_id = models.CharField(max_length=10)
+    name = models.CharField(max_length=20)
+    email = models.EmailField()
+
+    def __str__(self):
+        return self.admin_id
+
 # Lecturer_subject model
 class LecturerSubject(models.Model):
     lec_subject_id = models.CharField(max_length=10)
@@ -56,6 +65,12 @@ class LecturerCredentials(models.Model):
     password = models.CharField(max_length=15)

+# admin credential details
+class AdminCredentialDetails(models.Model):
+    username = models.ForeignKey(Admin, on_delete=models.CASCADE)
+    password = models.CharField(max_length=15)
+
 # timetable based on daily basis
 class DailyTimeTable(models.Model):
     slot_id = models.AutoField(auto_created=True, primary_key=True)
@@ -285,7 +300,7 @@ class LectureEmotionFrameRecognitions(models.Model):
 # POSE section
-# lecture pose estimation
+# lecture gaze estimation
 class LectureGazeEstimation(models.Model):
     lecture_gaze_id = models.CharField(max_length=10)
     lecture_video_id = models.ForeignKey(LectureVideo, on_delete=models.CASCADE)
...
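For orientation, the hunks above introduce an Admin model plus an AdminCredentialDetails model that points back to it through a ForeignKey. A minimal usage sketch, not part of this commit: the import path follows the FirstApp.MongoModels module referenced later in the diff, and the helper name is hypothetical.

# illustrative sketch: create an admin together with its credential row
from FirstApp.MongoModels import Admin, AdminCredentialDetails

def create_admin(admin_id, name, email, raw_password):
    # the Admin row must exist first, since AdminCredentialDetails.username is a FK to it
    admin = Admin(admin_id=admin_id, name=name, email=email)
    admin.save()

    # the password is stored in a plain CharField, exactly as the model above defines it
    AdminCredentialDetails(username=admin, password=raw_password).save()
    return admin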
@@ -13,3 +13,5 @@ admin.site.register(FacultyTimetable)
 admin.site.register(LectureVideo)
 admin.site.register(LectureActivity)
 admin.site.register(LectureGazeEstimation)
+admin.site.register(Admin)
+admin.site.register(AdminCredentialDetails)
\ No newline at end of file
@@ -300,7 +300,6 @@ class LectureActivityProcess(APIView):
         LectureActivity(
             lecture_activity_id=new_lecture_activity_id,
             lecture_video_id_id=lec_video_id,
-            talking_perct=percentages['talking_perct'],
             phone_perct=percentages['phone_perct'],
             listening_perct=percentages['listening_perct'],
             writing_perct=percentages['writing_perct']
@@ -473,16 +472,18 @@ class LectureEmotionProcess(APIView):
         pass

     def save_emotion_report(self, lec_video_id, percentages):
-        lec_video = LectureVideo.objects.get(lecture_video_id=lec_video_id)
+        lec_video = LectureVideo.objects.filter(lecture_video_id=lec_video_id)
         lec_video_serializer = LectureVideoSerializer(lec_video, many=True)
         lec_video_data = lec_video_serializer.data[0]

         last_lec_emotion = LectureEmotionReport.objects.order_by('lecture_emotion_id').last()
         new_lecture_emotion_id = ig.generate_new_id(last_lec_emotion.lecture_emotion_id)

+        lecture_video_id = lec_video_data['id']
+
         # creating a new lecture emotion report
         LectureEmotionReport(
             lecture_emotion_id=new_lecture_emotion_id,
-            lecture_video_id=lec_video,
+            lecture_video_id_id=lecture_video_id,
             happy_perct=percentages.happy_perct,
             sad_perct=percentages.sad_perct,
             angry_perct=percentages.angry_perct,
@@ -511,8 +512,6 @@ class GetLectureEmotionReportViewSet(APIView):
     def get(self, request):
         lecture_video_id = request.query_params.get('lecture_video_id')
         lecture_video_name = request.query_params.get('lecture_video_name')
-        # retrieve the extracted frames
-        extracted = ar.getExtractedFrames(lecture_video_name)

         lecture_emotions = LectureEmotionReport.objects.filter(lecture_video_id__lecture_video_id=lecture_video_id)
         serializer = LectureEmotionSerializer(lecture_emotions, many=True)
@@ -521,7 +520,6 @@ class GetLectureEmotionReportViewSet(APIView):
         return Response({
             "response": serializer.data,
-            "extracted": extracted
         })
@@ -685,17 +683,23 @@ class ProcessLectureGazeEstimation(APIView):
         pass

     def estimate_gaze(self, lec_video_id, percentages):
-        lec_video = LectureVideo.objects.get(lecture_video_id=lec_video_id)
+        lec_video = LectureVideo.objects.filter(lecture_video_id=lec_video_id)
         last_lec_gaze = LectureGazeEstimation.objects.order_by('lecture_gaze_id').last()
         lec_video_serializer = LectureVideoSerializer(lec_video, many=True)
         lec_video_data = lec_video_serializer.data[0]
         new_lecture_gaze_id = "LG000001" if (last_lec_gaze is None) else ig.generate_new_id(
             last_lec_gaze.lecture_gaze_id)
+        new_lecture_gaze_primary_id = 1 if (last_lec_gaze is None) else int(last_lec_gaze.id) + 1
+
+        # get the video id
+        lecture_video_id = lec_video_data['id']
+
         # creating a new lecture gaze estimation
         LectureGazeEstimation(
+            id=new_lecture_gaze_primary_id,
             lecture_gaze_id=new_lecture_gaze_id,
-            lecture_video_id=lec_video,
+            lecture_video_id_id=lecture_video_id,
             looking_up_and_right_perct=percentages['head_up_right_perct'],
             looking_up_and_left_perct=percentages['head_up_left_perct'],
             looking_down_and_right_perct=percentages['head_down_right_perct'],
@@ -722,8 +726,6 @@ class GetLectureGazeEstimationViewSet(APIView):
     def get(self, request):
         lecture_video_id = request.query_params.get('lecture_video_id')
         lecture_video_name = request.query_params.get('lecture_video_name')
-        # retrieve the extracted frames
-        extracted = hge.getExtractedFrames(lecture_video_name)

         lecture_gaze_estimations = LectureGazeEstimation.objects.filter(
             lecture_video_id__lecture_video_id=lecture_video_id)
@@ -731,7 +733,6 @@ class GetLectureGazeEstimationViewSet(APIView):
         return Response({
             "response": serializer.data,
-            "extracted": extracted
         })
...
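The recurring change in these hunks is swapping lecture_video_id=lec_video for lecture_video_id_id=...: in Django, a ForeignKey declared as lecture_video_id exposes a raw column attribute named lecture_video_id_id that accepts the related row's primary key directly, whereas handing the relation a QuerySet (the result of .filter()) would fail. A condensed sketch of the save pattern the diff converges on; the model and serializer names come from the diff, the wrapper function itself is illustrative.

# condensed sketch of the save pattern used above (not verbatim from the commit)
def save_report(lec_video_id, new_report_id, percentages):
    # resolve the LectureVideo row and read its primary key through the serializer
    lec_video = LectureVideo.objects.filter(lecture_video_id=lec_video_id)
    lec_video_data = LectureVideoSerializer(lec_video, many=True).data[0]
    lecture_video_pk = lec_video_data['id']

    # assign the FK through its raw column (<field_name>_id) instead of the object
    LectureEmotionReport(
        lecture_emotion_id=new_report_id,
        lecture_video_id_id=lecture_video_pk,
        happy_perct=percentages.happy_perct,
    ).save()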
@@ -10,6 +10,7 @@ from .MongoModels import *
 from . models import VideoMeta
 from . logic import custom_sorter as cs
 from .logic import id_generator as ig
+from .logic import activity_recognition as ar

 # emotion recognition method
@@ -47,7 +48,6 @@ def detect_emotion(video):
     face_classifier = cv2.CascadeClassifier(os.path.join(BASE_DIR, 'FirstApp\classifiers\haarcascade_frontalface_default.xml'))
     classifier_path = os.path.join(BASE_DIR, 'FirstApp\classifiers\Emotion_little_vgg.h5')
     classifier = load_model(classifier_path)
-    path = ''
     meta_data = VideoMeta()
     class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
@@ -65,6 +65,9 @@ def detect_emotion(video):
     count_neutral = 0
     count_surprise = 0

+    # for testing purposes
+    print('starting the emotion recognition process')
+
     while (count_frames < frame_count):
         # Grab a single frame of video
         ret, frame = cap.read()
@@ -72,23 +75,7 @@ def detect_emotion(video):
         gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
         faces = face_classifier.detectMultiScale(gray,1.3,5)

-        for (x, y, w, h) in faces:
-            cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
-            roi_gray = gray[y:y+h, x:x+w]
-            roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
-            # rect,face,image = face_detector(frame)
-            if np.sum([roi_gray])!=0:
-                roi = roi_gray.astype('float')/255.0
-                roi = img_to_array(roi)
-                roi = np.expand_dims(roi, axis=0)
-
-                # make a prediction on the ROI, then lookup the class
-                preds = classifier.predict(roi)[0]
-                label = class_labels[preds.argmax()]
+        label = emotion_recognition(classifier, face_classifier, frame)

         # counting the number of frames for each label, to calculate the percentage for each emotion later on...
@@ -113,11 +100,9 @@ def detect_emotion(video):
         elif (label == 'Surprise'):
             count_surprise += 1

-        label_position = (x, y)
-        # cv2.putText(frame, label, label_position, cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 3)
-        # cv2.imwrite("".format(label, count), frame)
-        else:
-            cv2.putText(frame, 'No Face Found', (20, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 3)
+        # for testing purposes
+        print('emotion frame count: ', count_frames)

         count_frames += 1
@@ -132,6 +117,9 @@ def detect_emotion(video):
     cap.release()
     cv2.destroyAllWindows()

+    # for testing purposes
+    print('ending the emotion recognition process')
+
     return meta_data
@@ -263,11 +251,24 @@ def get_individual_student_evaluation(video_name, student_name):

 # this method will
 def get_frame_emotion_recognition(video_name):

     BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+    VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
     face_classifier = cv2.CascadeClassifier(
         os.path.join(BASE_DIR, 'FirstApp\classifiers\haarcascade_frontalface_default.xml'))
     classifier_path = os.path.join(BASE_DIR, 'FirstApp\classifiers\Emotion_little_vgg.h5')
     classifier = load_model(classifier_path)
-    EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
+
+    # files required for person detection
+    config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
+    model_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.caffemodel")
+
+    # load our serialized person detection model from disk
+    print("[INFO] loading model...")
+    net = cv2.dnn.readNetFromCaffe(config_file, model_file)
+
+    cap = cv2.VideoCapture(VIDEO_DIR)
+    no_of_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

     # initializing the count variables
     frame_count = 0
@@ -276,16 +277,21 @@ def get_frame_emotion_recognition(video_name):

     # frame activity recognitions
     frame_emotion_recognitions = []

     # # class labels
     class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']

-    for frame in os.listdir(EXTRACTED_DIR):
-        # derive the frame folder path
-        FRAME_FOLDER = os.path.join(EXTRACTED_DIR, frame)
+    # for testing purposes
+    print('starting the emotion frame recognition process')
+
+    while (frame_count < no_of_frames):
+        ret, image = cap.read()
+
+        frame_name = "frame-{}".format(frame_count)

         frame_details = {}
-        frame_details['frame_name'] = frame
+        frame_details['frame_name'] = frame_name

         # initialize the count variables for a frame
         happy_count = 0
@@ -294,18 +300,19 @@ def get_frame_emotion_recognition(video_name):
         neutral_count = 0
         surprise_count = 0

+        # get the detections
+        detections = ar.person_detection(image, net)
+
         # to count the extracted detections for a frame
         detection_count = 0

-        for detections in os.listdir(FRAME_FOLDER):
-            # only take the images with the student name
-            if "frame" not in detections:
-                # get the label for this image
-                IMAGE_PATH = os.path.join(FRAME_FOLDER, detections)
-                image = cv2.imread(IMAGE_PATH)
-                label = emotion_recognition(classifier, face_classifier, image)
+        # if there are detections
+        if (len(detections) > 0):
+
+            # loop through the detections
+            for detection in detections:
+
+                label = emotion_recognition(classifier, face_classifier, detection)

                 # checking for the label
                 if label == class_labels[0]:
@@ -341,9 +348,23 @@ def get_frame_emotion_recognition(video_name):

             # push to all the frame details
             frame_emotion_recognitions.append(frame_details)
+        else:
+            break
+
+        # for testing purposes
+        print('emotion frame recognition count: ', frame_count)
+
+        # increment the frame count
+        frame_count += 1

     # sort the recognitions based on the frame number
     sorted_activity_frame_recognitions = cs.custom_object_sorter(frame_emotion_recognitions)

+    # for testing purposes
+    print('ending the emotion frame recognition process')
+
     # return the detected frame percentages
     return sorted_activity_frame_recognitions
@@ -409,15 +430,28 @@ def get_student_emotion_summary_for_period(emotions):

 # this method will retrieve activity frame groupings for a lecture
 def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):

-    BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-    EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
+    # load the models
+    BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+    VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
     face_classifier = cv2.CascadeClassifier(
         os.path.join(BASE_DIR, 'FirstApp\classifiers\haarcascade_frontalface_default.xml'))
     classifier_path = os.path.join(BASE_DIR, 'FirstApp\classifiers\Emotion_little_vgg.h5')
     classifier = load_model(classifier_path)

+    # files required for person detection
+    config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
+    model_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.caffemodel")
+
+    # load our serialized person detection model from disk
+    print("[INFO] loading model...")
+    net = cv2.dnn.readNetFromCaffe(config_file, model_file)
+
+    cap = cv2.VideoCapture(VIDEO_DIR)
+    no_of_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

     # initializing the count variables
@@ -441,9 +475,11 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):

     # looping through the frames
-    for frame in os.listdir(EXTRACTED_DIR):
-        # getting the frame folder
-        FRAME_FOLDER = os.path.join(EXTRACTED_DIR, frame)
+    while (frame_count < no_of_frames):
+
+        # get the current frame
+        ret, image = cap.read()

         # initializing the variables
         happy_count = 0
@@ -453,17 +489,17 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
         neutral_count = 0
         detection_count = 0

+        detections = ar.person_detection(image, net)
+
+        # if there are detections
+        if (len(detections) > 0):

             # looping through the detections in each frame
-            for detections in os.listdir(FRAME_FOLDER):
-                # checking whether the image contains only one person
-                if "frame" not in detections:
-                    # get the label for this image
-                    IMAGE_PATH = os.path.join(FRAME_FOLDER, detections)
-                    image = cv2.imread(IMAGE_PATH)
+            for detection in detections:

                 # run the model and get the emotion label
-                label = emotion_recognition(classifier, face_classifier, image)
+                label = emotion_recognition(classifier, face_classifier, detection)

                 # increment the count based on the label
                 if label == class_labels[0]:
@@ -503,7 +539,11 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
             frame_group_dict[frame_name]['neutral_count'] += neutral_count
             frame_group_dict[frame_name]['detection_count'] += detection_count
+        else:
+            break

+        # for testing purposes
+        print('emotion frame groupings count: ', frame_count)

         # increment the frame count
         frame_count += 1
@@ -558,6 +598,10 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):

 # this section will handle some database operations
 def save_frame_recognitions(video_name):

+    # for testing purposes
+    print('starting the saving emotion frame recognition process')
+
     # retrieve the lecture emotion id
     lec_emotion = LectureEmotionReport.objects.filter(lecture_video_id__video_name=video_name)
     lec_emotion_ser = LectureEmotionSerializer(lec_emotion, many=True)
@@ -595,6 +639,9 @@ def save_frame_recognitions(video_name):

     lec_emotion_frame_recognitions.save()

+    # for testing purposes
+    print('ending the saving emotion frame recognition process')
+
     # now return the frame recognitions
     return frame_detections
@@ -602,6 +649,9 @@ def save_frame_recognitions(video_name):

 # this method will save the emotion frame groupings to the database
 def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):

+    # for testing purposes
+    print('starting the saving emotion frame groupings process')
+
     frame_group_percentages, emotion_labels = emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict)

     # save the frame group details into db
@@ -631,5 +681,8 @@ def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
     new_lec_emotion_frame_groupings.lecture_emotion_id_id = lec_emotion_id
     new_lec_emotion_frame_groupings.frame_group_details = frame_group_details

+    # for testing purposes
+    print('ending the saving emotion frame groupings process')
+
     # save
     new_lec_emotion_frame_groupings.save()
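The emotion functions above now read frames straight from the video and classify each detected person crop, instead of walking a pre-extracted frame directory. A stripped-down sketch of that loop follows; it assumes the person_detection and emotion_recognition helpers the diff already calls (via the new activity_recognition import) are in scope, and the function name is hypothetical.

# stripped-down sketch of the per-frame loop introduced above
import cv2

def per_frame_emotions(video_path, net, classifier, face_classifier):
    cap = cv2.VideoCapture(video_path)
    no_of_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    results = []

    frame_count = 0
    while frame_count < no_of_frames:
        ret, image = cap.read()
        if not ret:
            break

        # person crops for this frame, then one emotion label per crop
        detections = ar.person_detection(image, net)
        labels = [emotion_recognition(classifier, face_classifier, d) for d in detections]

        results.append({'frame_name': 'frame-{}'.format(frame_count), 'labels': labels})
        frame_count += 1

    cap.release()
    return results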
@@ -58,3 +58,50 @@ class LecturerCredentialsForm(forms.ModelForm):
         widgets = {
             'password': forms.PasswordInput()
         }

+# admin login form
+class AdminLoginForm(forms.Form):
+    # username = forms.CharField(max_length=100)
+    email = forms.EmailField()
+    password = forms.CharField(widget=forms.PasswordInput())
+
+    def clean(self):
+        # cleaned_username = self.cleaned_data.get('username')
+        cleaned_email = self.cleaned_data.get('email')
+        cleaned_password = self.cleaned_data.get('password')
+
+        admin = Admin.objects.get(email=cleaned_email)
+
+        # if an admin is already in the system
+        if (admin):
+            # retrieve the User object
+            user = User.objects.get(email=cleaned_email)
+            is_user = user.check_password(cleaned_password)
+
+            # if the password is correct
+            if (is_user):
+                # lec_credentials = LecturerCredentials.objects.filter(username_id=lecturer.id)
+                admin_credentials = AdminCredentialDetails.objects.get(username_id=admin.id)
+                print('credentials: ', admin_credentials)
+
+                # if lecture credentials are already created
+                if (admin_credentials):
+                    admin_credentials.password = user.password
+                    admin_credentials.save(force_update=True)
+                else:
+                    LecturerCredentials(
+                        username_id=admin.id,
+                        password=user.password
+                    ).save()
+            else:
+                raise forms.ValidationError("Username or password is incorrect")
+        else:
+            print('the admin does not exist')
+            raise forms.ValidationError("The admin does not exist")
+
+        return super(AdminLoginForm, self).clean()
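The AdminLoginForm added above does its checks inside clean() and raises forms.ValidationError, so a view would surface those errors through the usual is_valid() flow. A hypothetical view sketch; the redirect target and template path are assumptions, not taken from this commit.

# hypothetical view using the form above
from django.shortcuts import render, redirect

def admin_login(request):
    form = AdminLoginForm(request.POST or None)

    # clean() runs inside is_valid(); any ValidationError ends up in form.errors
    if request.method == 'POST' and form.is_valid():
        return redirect('/')

    return render(request, 'FirstApp/admin_login.html', {'form': form})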
@@ -50,38 +50,21 @@ def activity_recognition(video_path):
     frame_count = 0
     total_detections = 0
     phone_checking_count = 0
-    talking_count = 0
     note_taking_count = 0
     listening_count = 0

-    # video activity directory
-    VIDEO_ACTIVITY_DIR = os.path.join(ACTIVITY_DIR, video_path)
-
-    # creating the directory for the video
-    # if (os.path.isdir(VIDEO_ACTIVITY_DIR)):
-    #     shutil.rmtree(VIDEO_ACTIVITY_DIR)
-    #
-    # # create the video directory
-    # os.mkdir(VIDEO_ACTIVITY_DIR)
+    # for testing purposes
+    print('starting the activity recognition process')

     while (frame_count < no_of_frames):
         ret, image = video.read()

-        FRAME_DIR = os.path.join(VIDEO_ACTIVITY_DIR, "frame-{}".format(frame_count))
-        # frame_name = "frame-{}.png".format(frame_count)
-        #
-        # FRAME_IMG = os.path.join(FRAME_DIR, frame_name)
-        #
-        # if (os.path.isdir(FRAME_DIR)):
-        #     shutil.rmtree(FRAME_DIR)
-
-        # create the new frame directory
-        # os.mkdir(FRAME_DIR)

         image = cv2.resize(image, size)

         detections = person_detection(image, net)

-        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
-        # cv2.imwrite(FRAME_IMG, image)
+        # this is for testing purposes
+        print('frame count: ', frame_count)

         # if there are any person detections
         if (len(detections) > 0):
@@ -90,6 +73,7 @@ def activity_recognition(video_path):

             detection_count = 0

+            # looping through the person detections of the frame
             for detection in detections:
                 detection = cv2.resize(detection, size)
@@ -113,43 +97,33 @@ def activity_recognition(video_path):
                 elif (label == class_labels[2]):
                     note_taking_count += 1

-                # saving the detection for the particular frame
-                # detection_name = "detection-{}.png".format(detection_count)
-                # detection_image_path = os.path.join(FRAME_DIR, detection_name)
-                #
-                # # converting detected image into grey-scale
-                # detection = cv2.cvtColor(detection, cv2.COLOR_BGR2GRAY)
-                #
-                # cv2.imwrite(detection_image_path, detection)

                 detection_count += 1

         frame_count += 1

-    # after extracting the frames, save the changes to static content
-    # p = os.popen("python manage.py collectstatic", "w")
-    # p.write("yes")

     # calculating the percentages for each label
     phone_perct = float(phone_checking_count / total_detections) * 100 if total_detections > 0 else 0
-    talking_perct = float(talking_count / total_detections) * 100 if total_detections > 0 else 0
+    # talking_perct = float(talking_count / total_detections) * 100 if total_detections > 0 else 0
     note_perct = float(note_taking_count / total_detections) * 100 if total_detections > 0 else 0
     listening_perct = float(listening_count / total_detections) * 100 if total_detections > 0 else 0

     # assigning the percentages to the dictionary
     percentages["phone_perct"] = phone_perct
-    percentages["talking_perct"] = talking_perct
+    # percentages["talking_perct"] = talking_perct
     percentages["writing_perct"] = note_perct
     percentages["listening_perct"] = listening_perct

+    # for testing purposes
+    print('activity recognition process is over')
+
     return percentages

 def person_detection(image, net):
     BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-    config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
-    model_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.caffemodel")
     threshold = 0.2

     detected_person = []
@@ -391,14 +365,27 @@ def get_student_activity_evaluation(video_name):

 # recognize the activity for each frame
 def get_frame_activity_recognition(video_name):

     BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-    EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
-    # CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_03.h5")
+    VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
     # CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
+    # CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_03.h5")
     CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_04.h5")
-    ACTIVITY_DIR = os.path.join(BASE_DIR, "static\\FirstApp\\activity")
+
+    # files required for person detection
+    config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
+    model_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.caffemodel")
+
+    # load our serialized person detection model from disk
+    print("[INFO] loading model...")
+    net = cv2.dnn.readNetFromCaffe(config_file, model_file)

     np.set_printoptions(suppress=True)

-    # load the model
+    # class_labels = ['Phone checking', 'Talking with friends', 'note taking']
+
+    # class labels
+    class_labels = ['Phone checking', 'Listening', 'Note taking']
+
     model = tensorflow.keras.models.load_model(CLASSIFIER_DIR)
     model.compile(optimizer='adam',
                   loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
@@ -407,45 +394,54 @@ def get_frame_activity_recognition(video_name):
     data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
     size = (224, 224)

-    # class labels
-    class_labels = ['Phone checking', 'Listening', 'Note taking']
+    # iteration
+    video = cv2.VideoCapture(VIDEO_DIR)
+    no_of_frames = video.get(cv2.CAP_PROP_FRAME_COUNT)

     frame_count = 0
-    # total_detections = 10

     # frame activity recognitions
     frame_activity_recognitions = []

+    # for testing purposes
+    print('starting the frame activity recognition process')
+
     # looping through the frames
-    for frame in os.listdir(EXTRACTED_DIR):
+    while (frame_count < no_of_frames):

         # define the count variables for each frame
         phone_checking_count = 0
         listening_count = 0
         note_taking_count = 0

+        ret, image = video.read()
+
         # derive the frame folder path
-        FRAME_FOLDER = os.path.join(EXTRACTED_DIR, frame)
+        # FRAME_FOLDER = os.path.join(EXTRACTED_DIR, frame)
+
+        frame_name = "frame-{}".format(frame_count)

         frame_details = {}
-        frame_details['frame_name'] = frame
+        frame_details['frame_name'] = frame_name

         # to count the extracted detections for a frame
         detection_count = 0
         detected_percentages = []

-        # loop through each detection in the frame
-        for detection in os.listdir(FRAME_FOLDER):
-            DETECTION_PATH = os.path.join(FRAME_FOLDER, detection)
-
-            # check whether the image is not the frame itself
-            if "frame" not in detection:
-                image = cv2.imread(DETECTION_PATH)
-                image = cv2.resize(image, size)
-
-                image_array = np.asarray(image)
+        detections = person_detection(image, net)
+
+        # if there are detections
+        if (len(detections) > 0):
+
+            # loop through each detection in the frame
+            for detection in detections:
+                detection = cv2.resize(detection, size)
+
+                image_array = np.asarray(detection)

                 normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1

                 # Load the image into the array
@@ -467,6 +463,7 @@ def get_frame_activity_recognition(video_name):

                 # increment the detection count
                 detection_count += 1

             # calculating the percentages for the frame
             phone_checking_perct = float(phone_checking_count / detection_count) * 100 if detection_count > 0 else 0
             listening_perct = float(listening_count / detection_count) * 100 if detection_count > 0 else 0
@@ -480,13 +477,26 @@ def get_frame_activity_recognition(video_name):

             # push to all the frame details
             frame_activity_recognitions.append(frame_details)
+        else:
+            break
+
+        print('current frame: ', frame_count)
+
+        # increment frame count
+        frame_count += 1

     # sort the recognitions based on the frame number
     sorted_activity_frame_recognitions = custom_object_sorter(frame_activity_recognitions)

+    # for testing purposes
+    print('ending the frame activity recognition process')
+
     # return the detected frame percentages
     return sorted_activity_frame_recognitions
 # this method will retrieve individual student evaluation
 def get_individual_student_evaluation(video_name, student_name):
     BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
@@ -753,6 +763,10 @@ def activity_frame_groupings(video_name, frame_landmarks, frame_group_dict):

 # this section will handle saving activity entities to the database
 def save_frame_recognition(video_name):

+    # for testing purposes
+    print('starting the saving activity frame recognition process')
+
     # retrieve the lecture activity id
     lec_activity = LectureActivity.objects.filter(lecture_video_id__video_name=video_name)
     lec_activity_ser = LectureActivitySerializer(lec_activity, many=True)
@@ -787,6 +801,9 @@ def save_frame_recognition(video_name):

     lec_activity_frame_recognitions.save()

+    # for testing purposes
+    print('ending the saving activity frame recognition process')
+
     # now return the frame detections
     return frame_detections
@@ -794,6 +811,8 @@ def save_frame_recognition(video_name):

 # this method will save the activity frame groupings to the database
 def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):

+    # for testing purposes
+    print('starting the saving activity frame groupings process')
+
     frame_group_percentages, activity_labels = activity_frame_groupings(video_name, frame_landmarks,
                                                                         frame_group_dict)
@@ -825,5 +844,8 @@ def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
     new_lec_activity_frame_groupings.lecture_activity_id_id = lec_activity_id
     new_lec_activity_frame_groupings.frame_group_details = frame_group_details

+    # for testing purposes
+    print('ending the saving activity frame groupings process')
+
     # save
     new_lec_activity_frame_groupings.save()
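Several functions above now load the MobileNet-SSD person detector once with cv2.dnn.readNetFromCaffe and hand the resulting net to person_detection for every frame. A reduced sketch of what such a detector pass looks like, assuming the standard MobileNet-SSD input size and 20-class label map; the project's person_detection presumably also crops the boxes out of the frame, which is omitted here.

# reduced sketch of a MobileNet-SSD person pass (not the project's exact person_detection)
import cv2
import numpy as np

def detect_person_boxes(image, net, threshold=0.2):
    (h, w) = image.shape[:2]

    # MobileNet-SSD expects a 300x300 blob with 1/127.5 scaling and a 127.5 mean
    blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 0.007843, (300, 300), 127.5)
    net.setInput(blob)
    detections = net.forward()

    boxes = []
    for i in range(detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        class_id = int(detections[0, 0, i, 1])

        # class 15 is 'person' in the common MobileNet-SSD label map
        if confidence > threshold and class_id == 15:
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            boxes.append(box.astype("int"))

    return boxes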
@@ -144,18 +144,10 @@ def process_gaze_estimation(video_path):
     VIDEO_PATH = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_path))
     GAZE_DIR = os.path.join(BASE_DIR, "static\\FirstApp\\gaze")

-    # create a folder with the same name as the video
-    VIDEO_DIR = os.path.join(GAZE_DIR, video_path)

     # define a dictionary to return the percentage values
     percentages = {}

-    # checking whether the video directory exist
-    if os.path.isdir(VIDEO_DIR):
-        shutil.rmtree(VIDEO_DIR)
-
-    # create the new directory
-    os.mkdir(VIDEO_DIR)

     # load the face detection model
     face_model = get_face_detector()
@@ -202,6 +194,9 @@ def process_gaze_estimation(video_path):
         [0, 0, 1]], dtype="double"
     )

+    # for testing purposes
+    print('starting the gaze estimation process')
+
     # iterate the video frames
     while True:
         ret, img = cap.read()
@@ -285,35 +280,39 @@ def process_gaze_estimation(video_path):

                 # checking for vertical and horizontal directions
                 if isLookingDown & isLookingRight:
-                    cv2.putText(img, 'looking down and right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
+                    # cv2.putText(img, 'looking down and right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
                     head_down_right_count += 1
                 elif isLookingDown & isLookingLeft:
-                    cv2.putText(img, 'looking down and left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
+                    # cv2.putText(img, 'looking down and left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
                     head_down_left_count += 1
                 elif isLookingUp & isLookingRight:
-                    cv2.putText(img, 'looking up and right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
+                    # cv2.putText(img, 'looking up and right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
                     head_up_right_count += 1
                 elif isLookingUp & isLookingLeft:
-                    cv2.putText(img, 'looking up and left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
+                    # cv2.putText(img, 'looking up and left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
                     head_up_left_count += 1
                 elif isLookingFront:
-                    cv2.putText(img, 'Head front', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
+                    # cv2.putText(img, 'Head front', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
                     head_front_count += 1

                 # indicate the student name
-                cv2.putText(img, student_name, (facebox[2], facebox[3]), font, 2, (255, 255, 128), 3)
+                # cv2.putText(img, student_name, (facebox[2], facebox[3]), font, 2, (255, 255, 128), 3)

                 # increment the face count
                 face_count += 1

             # naming the new image
-            image_name = "frame-{}.png".format(frame_count)
-
-            # new image path
-            image_path = os.path.join(VIDEO_DIR, image_name)
+            # image_name = "frame-{}.png".format(frame_count)
+            #
+            # # new image path
+            # image_path = os.path.join(VIDEO_DIR, image_name)

             # save the new image
-            cv2.imwrite(image_path, img)
+            # cv2.imwrite(image_path, img)
+
+            # for testing purposes
+            print('gaze estimation count: ', frame_count)

             # increment the frame count
             frame_count += 1
@@ -323,8 +322,8 @@ def process_gaze_estimation(video_path):

     # after extracting the frames, save the changes to static content
-    p = os.popen("python manage.py collectstatic", "w")
-    p.write("yes")
+    # p = os.popen("python manage.py collectstatic", "w")
+    # p.write("yes")

     # calculate percentages
     head_up_right_perct = (Decimal(head_up_right_count) / Decimal(face_count)) * 100
@@ -346,6 +345,9 @@ def process_gaze_estimation(video_path):
     cv2.destroyAllWindows()
     cap.release()

+    # for testing purposes
+    print('ending the gaze estimation process')
+
     # return the dictionary
     return percentages
@@ -370,7 +372,7 @@ def getExtractedFrames(lecture_video_name):

 # this method will retrieve lecture gaze estimation for each frame
-def get_lecture_gaze_esrimation_for_frames(video_name):
+def get_lecture_gaze_estimation_for_frames(video_name):

     # get the base directory
     BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
@@ -422,6 +424,10 @@ def get_lecture_gaze_estimation_for_frames(video_name):
         [0, 0, 1]], dtype="double"
     )

+    # for testing purposes
+    print('starting the gaze estimation for frames process')
+
     # iterate the video frames
     while True:
         ret, img = cap.read()
@@ -551,6 +557,9 @@ def get_lecture_gaze_estimation_for_frames(video_name):

             # append the calculated percentages to the frame_detections
             frame_detections.append(percentages)

+            # for testing purposes
+            print('gaze estimation frame recognition count: ', frame_count)
+
             frame_count += 1

         else:
@@ -558,16 +567,17 @@ def get_lecture_gaze_estimation_for_frames(video_name):

+    # for testing purposes
+    print('ending the gaze estimation for frames process')
+
+    # return the details
     return frame_detections, frame_rate

 # this method will get the student gaze estimation summary for period
 def get_student_gaze_estimation_summary_for_period(gaze_estimation_data):

-    # declare variables to add percentage values
-    phone_checking_perct_combined = 0.0
-    listening_perct_combined = 0.0
-    note_taking_perct_combined = 0.0
+    # declare variables to add percentage values
     looking_up_right_perct_combined = 0.0
     looking_up_left_perct_combined = 0.0
     looking_down_right_perct_combined = 0.0
@@ -601,16 +611,16 @@ def get_student_gaze_estimation_summary_for_period(gaze_estimation_data):

     # calculate the average percentages
     looking_up_right_average_perct = round((looking_up_right_perct_combined / no_of_gaze_estimations), 1)
-    looking_up_left_perct = round((looking_up_left_perct_combined / no_of_gaze_estimations), 1)
+    looking_up_left_average_perct = round((looking_up_left_perct_combined / no_of_gaze_estimations), 1)
     looking_down_right_average_perct = round((looking_down_right_perct_combined / no_of_gaze_estimations), 1)
     looking_down_left_average_perct = round((looking_down_left_perct_combined / no_of_gaze_estimations), 1)
     looking_front_average_perct = round((looking_front_perct_combined / no_of_gaze_estimations), 1)

     percentages = {}
     percentages["looking_up_and_right_perct"] = looking_up_right_average_perct
-    percentages["looking_up_and_left_perct"] = looking_up_left_perct_combined
-    percentages["looking_down_and_right_perct"] = looking_down_right_perct_combined
-    percentages["looking_down_and_left_perct"] = looking_down_left_perct_combined
+    percentages["looking_up_and_left_perct"] = looking_up_left_average_perct
+    percentages["looking_down_and_right_perct"] = looking_down_right_average_perct
+    percentages["looking_down_and_left_perct"] = looking_down_left_average_perct
     percentages["looking_front_perct"] = looking_front_average_perct

     return percentages, individual_lec_gaze_estimations, gaze_estimation_labels
@@ -677,6 +687,8 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic

         # assign the difference
         frame_group_diff[key] = diff if diff > 0 else 1

+    # for testing purposes
+    print('starting gaze frame grouping process')

     # looping through the frames
     while True:
@@ -802,6 +814,9 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic

             frame_group_dict[frame_name]['detection_count'] += detection_count

+        # for testing purposes
+        print('gaze frame groupings count: ', frame_count)
+
         # increment the frame count
         frame_count += 1
@@ -848,12 +863,20 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic

     # define the labels
     labels = ['upright_perct', 'upleft_perct', 'downright_perct', 'downleft_perct', 'front_perct']

+    # for testing purposes
+    print('ending gaze frame grouping process')
+
     # return the dictionary
     return frame_group_dict, labels

 # this section will handle some database operations
 def save_frame_detections(video_name):

+    # for testing purposes
+    print('starting the saving gaze frame recognition process')
+
     # retrieve the lecture emotion id
     lec_gaze = LectureGazeEstimation.objects.filter(lecture_video_id__video_name=video_name)
     lec_gaze_ser = LectureGazeEstimationSerializer(lec_gaze, many=True)
@@ -868,7 +891,7 @@ def save_frame_detections(video_name):
         ig.generate_new_id(last_lec_gaze_frame_recognitions.lecture_gaze_frame_recognition_id)

     # calculate the frame detections
-    frame_detections, frame_rate = get_lecture_gaze_esrimation_for_frames(video_name)
+    frame_detections, frame_rate = get_lecture_gaze_estimation_for_frames(video_name)

     # to be added to the field 'frame_recognition_details' in the Lecture Gaze Frame Recordings
     frame_recognition_details = []
@@ -892,6 +915,9 @@ def save_frame_detections(video_name):

     lec_gaze_frame_recognitions.save()

+    # for testing purposes
+    print('ending the saving gaze frame recognition process')
+
     # now return the frame recognitions
     return frame_detections
@@ -899,6 +925,10 @@ def save_frame_detections(video_name):

 # this method will save gaze frame groupings to the database
 def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):

+    # for testing purposes
+    print('starting the saving gaze frame groupings process')
+
     frame_group_percentages, gaze_labels = gaze_estimation_frame_groupings(video_name, frame_landmarks,
                                                                            frame_group_dict)
@@ -928,6 +958,9 @@ def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):

     new_lec_gaze_frame_groupings.lecture_gaze_id_id = lec_gaze_id
     new_lec_gaze_frame_groupings.frame_group_details = frame_group_details

+    # for testing purposes
+    print('ending the saving gaze frame groupings process')
+
     # save
     new_lec_gaze_frame_groupings.save()
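The summary fix above replaces the accumulated totals that were previously written into the response with proper per-field averages. The same computation in isolation, as a small sketch; the field names follow the diff, the record list and helper name are illustrative.

# illustrative sketch of averaging gaze percentages across several estimation records
def average_gaze_percentages(records):
    if not records:
        return {}

    keys = ['looking_up_and_right_perct', 'looking_up_and_left_perct',
            'looking_down_and_right_perct', 'looking_down_and_left_perct',
            'looking_front_perct']

    # sum each field over the records, then divide by the record count
    return {key: round(sum(r[key] for r in records) / len(records), 1) for key in keys}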
 import os
 import cv2
 import shutil
-import datetime
+# import datetime
+from datetime import timedelta

 from FirstApp.MongoModels import *
 from FirstApp.serializers import *
@@ -94,7 +95,7 @@ def getTimeLandmarks(video_name):
     THRESHOLD_GAP = 5

     # calculating the real duration
-    real_duration = datetime.timedelta(seconds=(duration+THRESHOLD_GAP))
+    real_duration = timedelta(seconds=(duration))

     # defines the number of seconds included for a frame group
     THRESHOLD_TIME = 10
@@ -112,7 +113,7 @@ def getTimeLandmarks(video_name):
     # loop through the threshold gap limit to define the time landmarks
     for i in range(THRESHOLD_GAP):
         initial_landmark += unit_gap
-        time_landmark = str(datetime.timedelta(seconds=initial_landmark))
+        time_landmark = str(timedelta(seconds=initial_landmark))
         time_landmark_value = initial_landmark
         time_landmarks.append(time_landmark)
         time_landmarks_values.append(time_landmark_value)
@@ -204,6 +205,9 @@ def getFrameLandmarks(video_name, category):

 # this section will handle some database operations
 def save_time_landmarks(video_name):

+    # for testing purposes
+    print('starting the saving time landmarks process')
+
     last_lec_video_time_landmarks = LectureVideoTimeLandmarks.objects.order_by('lecture_video_time_landmarks_id').last()

     new_lecture_video_time_landmarks_id = "LVTL00001" if (last_lec_video_time_landmarks is None) else \
         ig.generate_new_id(last_lec_video_time_landmarks.lecture_video_time_landmarks_id)
@@ -233,12 +237,18 @@ def save_time_landmarks(video_name):

     new_lec_video_time_landmarks.lecture_video_id_id = lec_video_id
     new_lec_video_time_landmarks.time_landmarks = db_time_landmarks

+    # for testing purposes
+    print('ending the saving time landmarks process')
+
     new_lec_video_time_landmarks.save()

 # this method will save frame landmarks to the database
 def save_frame_landmarks(video_name):

+    # for testing purposes
+    print('starting the saving frame landmarks process')
+
     # retrieve the previous lecture video frame landmarks details
     last_lec_video_frame_landmarks = LectureVideoFrameLandmarks.objects.order_by(
         'lecture_video_frame_landmarks_id').last()
@@ -271,6 +281,9 @@ def save_frame_landmarks(video_name):

     new_lec_video_frame_landmarks.save()

+    # for testing purposes
+    print('ending the saving frame landmarks process')
+
     # now return the frame landmarks and the frame group dictionary
     return frame_landmarks, frame_group_dict
...
# Generated by Django 2.2.11 on 2020-10-20 16:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('FirstApp', '0014_lecturegazeframerecognitions'),
]
operations = [
migrations.CreateModel(
name='Admin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('admin_id', models.CharField(max_length=10)),
('name', models.CharField(max_length=20)),
('email', models.EmailField(max_length=254)),
],
),
migrations.CreateModel(
name='AdminCredentialDetails',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=15)),
('username', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='FirstApp.Admin')),
],
),
migrations.DeleteModel(
name='LecturePoseEstimation',
),
]
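# The Admin and AdminCredentialDetails tables created above only exist once the migration is
# applied; a sketch of doing that from Python inside the project (equivalent to running
# `python manage.py migrate FirstApp`):
from django.core.management import call_command

call_command("migrate", "FirstApp")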
{% extends 'FirstApp/template.html' %}
<!DOCTYPE html>
<html lang="en">
<body id="page-top">
<!-- Page Wrapper -->
<div id="wrapper">
<!-- Content Wrapper -->
<div id="content-wrapper" class="d-flex flex-column">
<!-- Main Content -->
<div id="content">
<!-- Begin Page Content -->
{% block 'container-fluid' %}
<div class="container-fluid">
{% load static %}
<!-- 404 Error Text -->
<div class="text-center">
<div class="error mx-auto" data-text="404">401</div>
<p class="lead text-gray-800 mb-5">Unauthorized access</p>
<p class="text-gray-500 mb-0">It looks like you do not have access to this url</p>
<p class="text-gray-500 mb-0">Please login with the correct user type</p>
<a href="/logout">&larr; Back to Login Page</a>
</div>
</div>
{% endblock %}
<!--end of container-fluid -->
</div>
<!-- End of Main Content -->
<!-- Footer -->
<footer class="sticky-footer bg-white">
<div class="container my-auto">
<div class="copyright text-center my-auto">
<span>Copyright &copy; Your Website 2019</span>
</div>
</div>
</footer>
<!-- End of Footer -->
</div>
<!-- End of Content Wrapper -->
</div>
<!-- End of Page Wrapper -->
<!-- Scroll to Top Button-->
<a class="scroll-to-top rounded" href="#page-top">
<i class="fas fa-angle-up"></i>
</a>
<!-- Logout Modal-->
<div class="modal fade" id="logoutModal" tabindex="-1" role="dialog" aria-labelledby="exampleModalLabel" aria-hidden="true">
<div class="modal-dialog" role="document">
<div class="modal-content">
<div class="modal-header">
<h5 class="modal-title" id="exampleModalLabel">Ready to Leave?</h5>
<button class="close" type="button" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body">Select "Logout" below if you are ready to end your current session.</div>
<div class="modal-footer">
<button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
<a class="btn btn-primary" href="login.html">Logout</a>
</div>
</div>
</div>
</div>
<!-- Bootstrap core JavaScript-->
<script src="vendor/jquery/jquery.min.js"></script>
<script src="vendor/bootstrap/js/bootstrap.bundle.min.js"></script>
<!-- Core plugin JavaScript-->
<script src="vendor/jquery-easing/jquery.easing.min.js"></script>
<!-- Custom scripts for all pages-->
<script src="js/sb-admin-2.min.js"></script>
</body>
</html>
...@@ -147,7 +147,6 @@ ...@@ -147,7 +147,6 @@
global_video_name = video.video_name; global_video_name = video.video_name;
if (lectureVideo.isActivityFound) { if (lectureVideo.isActivityFound) {
e.target.parentNode.parentNode.lastChild.innerHTML = '<button type="button" class="btn btn-primary" id="result_btn">Results</button>'; e.target.parentNode.parentNode.lastChild.innerHTML = '<button type="button" class="btn btn-primary" id="result_btn">Results</button>';
} else { } else {
...@@ -174,13 +173,14 @@ ...@@ -174,13 +173,14 @@
fetch('http://127.0.0.1:8000/get-lecture-activity/?lecture_video_id=' + global_lecture_video_id + '&lecture_video_name=' + global_video_name) fetch('http://127.0.0.1:8000/get-lecture-activity/?lecture_video_id=' + global_lecture_video_id + '&lecture_video_name=' + global_video_name)
.then((res) => res.json()) .then((res) => res.json())
.then((out) => { .then((out) => {
let frames = createFrames(out); {#let frames = createFrames(out);#}
return frames {#return frames#}
displayActivity(out);
}) })
.then((obj) => { .then((obj) => {
$('#video_frames').prepend(obj); {#$('#video_frames').prepend(obj);#}
$('#frame_loader').attr('hidden', true); $('#frame_loader').attr('hidden', true);
$('#slidecontainer').attr('hidden', false); {#$('#slidecontainer').attr('hidden', false);#}
}) })
.catch((error) => alert('this is the error: ' + error)); .catch((error) => alert('this is the error: ' + error));
}); });
...@@ -234,515 +234,8 @@ ...@@ -234,515 +234,8 @@
} }
//this section is responsible for displaying the frames as video
//creating the frame content
function createFrames(res) {
let main_frame_content = "<div class='row' id='main_frames'>";
main_frame_content += "<ul class='list-group list-group-horizontal'>";
let count = 0;
//loop through the frames
res.extracted.map((image) => {
let img_src = "";
let len = image.detections.length;
if (count === 0) {
main_frame_content += "<li class='list-group-item text-center' id='image_0'>";
img_src = "<img src='{% static '' %}FirstApp/activity/" + global_video_name + "/" + res.extracted[0].frame + "/" + res.extracted[0].detections[0] + "' width='400' height='400'>";
} else {
main_frame_content += "<li class='list-group-item other-frames' id='image_" + count + "' hidden>";
img_src = "<img src='{% static '' %}FirstApp/activity/" + global_video_name + "/" + image.frame + "/" + image.detections[len - 1] + "' class='img-link' width='400' height='400'>";
}
main_frame_content += img_src;
main_frame_content += "</li>";
count++;
});
main_frame_content += "</ul>";
main_frame_content += "</div>";
//setting the min, max values of the slider
$('#myActivityRange').attr({'min': 0, 'max': count});
//display the progress bars
displayActivity(res);
return main_frame_content;
}
//declaring the variable for setInterval function
let timeVar = null;
//handling the play button
$('#play_pause_icon_activity').click(function () {
//defining the two possible classes
let play_class = "fas fa-play";
let pause_class = "fas fa-pause";
//retrieving the current icon class
let current_class = $(this).attr('class');
//assigning the correct class based on the icon clicked
let new_class = (current_class === play_class) ? pause_class : play_class;
//setting the new class
$(this).attr('class', new_class);
//handling the slider
let slider = document.getElementById("myActivityRange");
let output = document.getElementById("demo");
//when the button is playing
if (current_class === play_class) {
timeVar = setInterval(() => {
let value = slider.value;
let new_slider_value = Number(value) + 1;
slider.value = new_slider_value;
output.innerHTML = new_slider_value.toString();
let selectedImage = '#image_' + Number(value);
//displaying the relevant image
$('#image_0').html($(selectedImage).html());
}, 50);
}
//when the button is paused
else if (current_class === pause_class) {
clearInterval(timeVar);
}
});
//handling the slider
let slider = document.getElementById("myActivityRange");
let output = document.getElementById("demo");
output.innerHTML = slider.value;
slider.oninput = function () {
output.innerHTML = this.value;
let selectedImage = '#image_' + Number(this.value);
//hide
{#$('#image_0').attr('hidden', true);#}
$('#image_0').html($(selectedImage).html());
//setting the selected image
{#$(selectedImage).attr('hidden', false);#}
};
$(document).on('click', '.img-link', function (e) {
//removing previously displayed detections
$('.detections').remove();
//removing the no-content message
$('#no_detection_message_content').hide();
//appearing the loader
$('#detection_loader').attr('hidden', false);
let img_src_arr = e.target.src.split('/');
let len = img_src_arr.length;
let src = img_src_arr[len - 1];
let frame_name_arr = src.split('.');
let frame_name = frame_name_arr[0];
//fetching the detection for the selected frame
fetch('http://127.0.0.1:8000/get-lecture-activity-frame-detection/?video_name=' + global_video_name + "&frame_name=" + frame_name)
.then((res) => res.json())
.then((out) => displayDetections(out.detections, frame_name))
.catch((error) => alert('this is an error'));
});
//the function to display detections
function displayDetections(detections, frame_name) {
let img_string = '';
let no_of_detections = detections.length;
//disabling the loader
$('#detection_loader').attr('hidden', true);
//appearing the no of detections number area
$('#detection_number_area').attr('hidden', false);
$('#no_of_detections').text(no_of_detections);
detections.map((detection) => {
img_string += "<img src='{% static '' %}FirstApp/activity/" + global_video_name + "/" + frame_name + "/" + detection + "' class='detections m-2' width='100' height='100' >"
});
$('#detection_frames').prepend(img_string);
}
//listening for click events in labels
$('.labels').click(function () {
let label = Number($(this).attr('data-number'));
let label_name = $(this).attr('data-label');
//removing the previous student detection lists
$('.student_detection_lists').remove();
//appearing the loader
$('#detection_student_loader').attr('hidden', false);
//appearing the loader
$('#activity_type').attr('hidden', false);
$('#activity_type_text').text(label_name);
//disappearing the no content message
$('#no_detection_student_content').attr('hidden', true);
//fetching from the api
fetch('http://127.0.0.1:8000/get-lecture-activity-detection-for-label/?video_name=' + global_video_name + '&label=' + label)
.then((res) => res.json())
.then((out) => createDetectedStudentFrames(out))
.catch((error) => alert('this is the error: ' + error))
});
//creating the detected students frames
function createDetectedStudentFrames(detections) {
let htmlString = "";
//iterating through the student
detections.people.map((student) => {
let title = student.split('.')[0];
let images = "";
htmlString += "<div class='row p-3 student-detection-rows'>";
let student_count = 0;
//iterating through the frames
detections.response.map((frame) => {
let frame_detections = frame.detections;
if (frame_detections.includes(student)) {
if (student_count === 0) {
images += "<li class='list-group-item frame-0' id='image_0_" + title + "'>";
} else {
images += "<li class='list-group-item other-student-frames' id='image_" + student_count + "_" + title + "' hidden>";
}
images += "<img src='{% static '' %}FirstApp/Activity/" + global_video_name + "/" + frame.frame + "/" + student + "' width='200' height='200'>";
images += "</li>";
//increment the student count
student_count++;
}
});
htmlString += "<ul class='list-group'>";
htmlString += "<li class='list-group-item'>";
htmlString += "<div class='row m-3'>";
htmlString += "<h4 class='font-weight-bold'>Student ID: <span>" + title + "</span></h4>";
htmlString += "</div>";
htmlString += "</li>";
{#htmlString += "<div class='row m-3'></div>";#}
htmlString += "<li class='list-group-item'>";
htmlString += "<div class='row'>";
htmlString += "<ul class='list-group list-group-horizontal student_detection_lists' style='overflow-x: scroll'>";
htmlString += images;
htmlString += "</ul>";
htmlString += "</div>";
htmlString += "</li>";
htmlString += "<li class='list-group-item'>";
htmlString += "<div class='slidecontainer'>";
htmlString += "<div class='row m-3'></div>";
htmlString += "<div class='row'>";
htmlString += "<span><i class='fas fa-play play-pause-icon-student-frames' id='icon_" + title + "'></i></span>";
htmlString += "</div>";
htmlString += "<input type='range' min='1' max='100' value='0' class='slider' id='slider_" + title + "'>";
htmlString += "<p>No of frames: <span id='demo_" + title + "'></span></p>";
htmlString += "</div>";
htmlString += "</div>";
htmlString += "</li>";
htmlString += "</ul>";
});
//disappearing the loader
$('#detection_student_loader').attr('hidden', true);
//append to the relevant html card content
$('#detection_students').append(htmlString);
}
let studentTimeVar = null;
//playing the frames for each student detection
$(document).on('click', '.play-pause-icon-student-frames', function (e) {
//defining the two possible classes
let play_class = "fas fa-play play-pause-icon-student-frames";
let pause_class = "fas fa-pause play-pause-icon-student-frames";
//retrieving the current icon class
let current_class = $(this).attr('class');
//assigning the correct class based on the icon clicked
let new_class = (current_class === play_class) ? pause_class : play_class;
//setting the new class
$(this).attr('class', new_class);
//extracting the title of the clicked icon
let title_part = $(this).attr('id');
let title = title_part.split("_")[1];
//handling the slider
let slider = document.getElementById("slider_" + title);
let output = document.getElementById("demo_" + title);
//when the button is playing
if (current_class === play_class) {
studentTimeVar = setInterval(() => {
let value = slider.value;
let new_slider_value = Number(value) + 1;
slider.value = new_slider_value;
output.innerHTML = new_slider_value.toString();
let selectedImage = '#image_' + Number(value) + '_' + title;
//displaying the relevant image
$('#image_0_' + title).html($(selectedImage).html());
}, 100);
}
//when the button is paused
else if (current_class === pause_class) {
clearInterval(studentTimeVar);
}
});
//this is to handle the 'evaluate' button
$('#evaluate_button').click(function () {
//hide the message
$('#no_evaluated_student_content').attr('hidden', true);
//show the loader
$('#evaluate_student_loader').attr('hidden', false);
//using the fetch api
fetch('http://127.0.0.1:8000/get-lecture-activity-student-evaluation/?video_name=' + global_video_name)
.then((res) => res.json())
.then((out) => evaluate_student(out))
.catch((error) => alert('this is the error: ' + error))
});
//to create html for evaluate function
function evaluate_student(response) {
let htmlString = "";
//iterating through the student
response.people.map((student) => {
let title = student.split('.')[0];
let images = "";
htmlString += "<div class='row p-3 student-evaluation-rows'>";
let student_count = 0;
//iterating through the frames
response.response.map((frame) => {
let frame_detections = frame.detections;
let frame_detection_length = frame_detections.length;
if (frame_detections.includes(student)) {
if (student_count === 0) {
images += "<li class='list-group-item frame-0' id='image_0_evaluation" + title + "'>";
} else {
images += "<li class='list-group-item other-student-frames' id='image_evaluation" + student_count + "_" + title + "' hidden>";
}
images += "<img src='{% static '' %}FirstApp/Activity/" + global_video_name + "/" + frame.frame + "/" + student + "' width='200' height='200'>";
images += "</li>";
if (student_count === (frame_detection_length)) {
images += "<li class='list-group-item'>";
images += "<button type='button' class='btn btn-dark individual-evaluation' id='evaluate_student_" + title + "'>evaluate</button>";
images += "</li>";
}
//increment the student count
student_count++;
}
});
htmlString += "<ul class='list-group'>";
htmlString += "<li class='list-group-item'>";
htmlString += "<div class='row m-3'>";
htmlString += "<h4 class='font-weight-bold'>Student ID: <span>" + title + "</span></h4>";
htmlString += "</div>";
htmlString += "</li>";
{#htmlString += "<div class='row m-3'></div>";#}
htmlString += "<li class='list-group-item'>";
htmlString += "<div class='row'>";
htmlString += "<ul class='list-group list-group-horizontal student_detection_lists' style='overflow-x: scroll'>";
htmlString += images;
htmlString += "</ul>";
htmlString += "</div>";
htmlString += "</li>";
htmlString += "<li class='list-group-item'>";
htmlString += "<div class='slidecontainer'>";
htmlString += "<div class='row m-3'></div>";
htmlString += "<div class='row'>";
htmlString += "<span><i class='fas fa-play play-pause-icon-student-evaluations' id='icon_" + title + "'></i></span>";
htmlString += "</div>";
htmlString += "<input type='range' min='1' max='100' value='0' class='slider' id='slider_evaluation" + title + "'>";
htmlString += "<p>No of frames: <span id='demo_evaluation" + title + "'></span></p>";
htmlString += "</div>";
htmlString += "</div>";
htmlString += "</li>";
htmlString += "</ul>";
});
//disappearing the loader
$('#evaluate_student_loader').attr('hidden', true);
//append to the relevant html card content
$('#evaluation_students').append(htmlString);
}
//interval variable for individual students
let studentEvaluationVar = null;
//playing the frames for each student evaluation
$(document).on('click', '.play-pause-icon-student-evaluations', function (e) {
//defining the two possible classes
let play_class = "fas fa-play play-pause-icon-student-evaluations";
let pause_class = "fas fa-pause play-pause-icon-student-evaluations";
//retrieving the current icon class
let current_class = $(this).attr('class');
//assigning the correct class based on the icon clicked
let new_class = (current_class === play_class) ? pause_class : play_class;
//setting the new class
$(this).attr('class', new_class);
//extracting the title of the clicked icon
let title_part = $(this).attr('id');
let title = title_part.split("_")[1];
//handling the slider
let slider = document.getElementById("slider_evaluation" + title);
let output = document.getElementById("demo_evaluation" + title);
//when the button is playing
if (current_class === play_class) {
studentEvaluationVar = setInterval(() => {
let value = slider.value;
let new_slider_value = Number(value) + 1;
slider.value = new_slider_value;
output.innerHTML = new_slider_value.toString();
let selectedImage = '#image_evaluation' + Number(value) + '_' + title;
//displaying the relevant image
$('#image_0_evaluation' + title).html($(selectedImage).html());
}, 100);
}
//when the button is paused
else if (current_class === pause_class) {
clearInterval(studentEvaluationVar);
}
});
//end of student evaluation video frame
//to evaluate the individual student
$(document).on('click', '.individual-evaluation', function (e) {
let individual_id = $(this).attr('id');
let student_name = individual_id.split('_')[2];
student_name += ".png";
let html = $(this).html();
//after clicking, change the html
$(this).html("<span class='font-italic'>loading...</span>");
//fetching from the API
fetch('http://127.0.0.1:8000/get-lecture-activity-individual-student-evaluation/?video_name=' + global_video_name + '&student_name=' + student_name)
.then((res) => res.json())
.then((out) => displayIndividualStudentActivity(out.response, e, student_name))
.catch((error) => alert('something went wrong'));
});
//this function will display the individual student emotions
function displayIndividualStudentActivity(res, e, title) {
let phone_perct = Math.round(res.phone_perct, 1);
let writing_perct = Math.round(res.writing_perct, 1);
let listening_perct = Math.round(res.listening_perct, 1);
//set the percentage values
//$('#talking_individual_perct').text(res.talking_perct + '%');
$('#phone_individual_perct').text(phone_perct + '%');
$('#writing_individual_perct').text(writing_perct + '%');
$('#listening_individual_perct').text(listening_perct + '%');
//set the width
//$('#talking_individual_width').width(res.talking_perct + '%');
$('#phone_individual_width').width(phone_perct + '%');
$('#writing_individual_width').width(writing_perct + '%');
$('#listening_individual_width').width(listening_perct + '%');
//open the student individual modal
$('#student_individual_modal').modal();
//set the button to default
e.target.innerHTML = "<span>evaluate</span>";
}
//to handle the 'integrate' modal //to handle the 'integrate' modal
$('#integrate_gaze').click(function () { $('#integrate_activity').click(function () {
//define the student video src //define the student video src
let video_src = "{% static '' %}FirstApp/videos/" + global_video_name; let video_src = "{% static '' %}FirstApp/videos/" + global_video_name;
...@@ -780,25 +273,25 @@ ...@@ -780,25 +273,25 @@
//append to the html string //append to the html string
//phone checking //phone checking
htmlString += "<div class='progress_area' id='progress_" +frame_name+ "' hidden>"; htmlString += "<div class='progress_area' id='progress_" + frame_name + "' hidden>";
htmlString += "<h4 class='small font-weight-bold'>Phone checking</h4>"; htmlString += "<h4 class='small font-weight-bold'>Phone checking</h4>";
htmlString += "<span class='float-right' id='phone_checking_instant_" +frame_name+ "'>" +phone_perct+ "%</span>"; htmlString += "<span class='float-right' id='phone_checking_instant_" + frame_name + "'>" + phone_perct + "%</span>";
htmlString += "<div class='progress mb-4'>"; htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-warning' role='progressbar' id='phone_checking_instant_value_" +frame_name+ "' style='width: " +phone_perct+ "%' aria-valuenow='40' aria-valuemin='0' aria-valuemax='100'></div>"; htmlString += "<div class='progress-bar bg-warning' role='progressbar' id='phone_checking_instant_value_" + frame_name + "' style='width: " + phone_perct + "%' aria-valuenow='40' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>"; htmlString += "</div>";
//note taking //note taking
htmlString += "<h4 class='small font-weight-bold'>Writing</h4>"; htmlString += "<h4 class='small font-weight-bold'>Writing</h4>";
htmlString += "<span class='float-right' id='note_taking_instant_" +frame_name+ "'>" +note_perct+ "%</span>"; htmlString += "<span class='float-right' id='note_taking_instant_" + frame_name + "'>" + note_perct + "%</span>";
htmlString += "<div class='progress mb-4'>"; htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar' role='progressbar' id='note_taking_instant_value_" +frame_name+ "' style='width: " +note_perct+ "%' aria-valuenow='0' aria-valuemin='0' aria-valuemax='100'></div>"; htmlString += "<div class='progress-bar' role='progressbar' id='note_taking_instant_value_" + frame_name + "' style='width: " + note_perct + "%' aria-valuenow='0' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>"; htmlString += "</div>";
//listening //listening
htmlString += "<h4 class='small font-weight-bold'>Listening</h4>"; htmlString += "<h4 class='small font-weight-bold'>Listening</h4>";
htmlString += "<span class='float-right' id='listening_instant_" +frame_name+ "'>" +listen_perct+ "%</span>"; htmlString += "<span class='float-right' id='listening_instant_" + frame_name + "'>" + listen_perct + "%</span>";
htmlString += "<div class='progress mb-4'>"; htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-info' role='progressbar' id='listening_instant_value_" +frame_name+ "' style='width: " +listen_perct+ "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>"; htmlString += "<div class='progress-bar bg-info' role='progressbar' id='listening_instant_value_" + frame_name + "' style='width: " + listen_perct + "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>"; htmlString += "</div>";
//ending the progress area //ending the progress area
...@@ -893,7 +386,6 @@ ...@@ -893,7 +386,6 @@
$('#generate_report_message').hide(); $('#generate_report_message').hide();
fetch('http://127.0.0.1:8000/lecture-activity-report-generation/?lecturer=' + global_lecturer + '&subject=' + global_subject + '&date=' + global_lecture_date) fetch('http://127.0.0.1:8000/lecture-activity-report-generation/?lecturer=' + global_lecturer + '&subject=' + global_subject + '&date=' + global_lecture_date)
.then((res) => res.json()) .then((res) => res.json())
.then((out) => { .then((out) => {
...@@ -908,7 +400,6 @@ ...@@ -908,7 +400,6 @@
}); });
}); });
</script> </script>
...@@ -931,11 +422,11 @@ ...@@ -931,11 +422,11 @@
{% load static %} {% load static %}
<!-- Page Heading --> <!-- Page Heading -->
{# <div class="d-sm-flex align-items-center justify-content-between mb-4">#} {# <div class="d-sm-flex align-items-center justify-content-between mb-4">#}
{# <h1 class="h3 mb-0 text-gray-800">Student Activity Recognition</h1>#} {# <h1 class="h3 mb-0 text-gray-800">Student Activity Recognition</h1>#}
{# <button type="button" data-target="#generateReportModal" data-toggle="modal" class="d-none d-sm-inline-block btn btn-sm btn-primary shadow-sm" id="generate_report_before" disabled><i#} {# <button type="button" data-target="#generateReportModal" data-toggle="modal" class="d-none d-sm-inline-block btn btn-sm btn-primary shadow-sm" id="generate_report_before" disabled><i#}
{# class="fas fa-download fa-sm text-white-50"></i> Generate Report</button>#} {# class="fas fa-download fa-sm text-white-50"></i> Generate Report</button>#}
{# </div>#} {# </div>#}
<!--first row --> <!--first row -->
...@@ -1084,25 +575,8 @@ ...@@ -1084,25 +575,8 @@
alt="Loader"> alt="Loader">
</div> </div>
<!--frames -->
<div class="text-center p-4" id="video_frames">
<!-- slide container -->
<div id="slidecontainer" hidden>
<div class="row m-3"></div>
<!-- play/pause icon -->
<div class="row">
<span><i class="fas fa-play"
id="play_pause_icon_activity"></i></span>
</div>
<input type="range" min="1" max="100" value="0" class="slider"
id="myActivityRange">
<p>No of frames: <span id="demo"></span></p>
</div>
</div>
</div>
<!--this area will display the progress bars --> <!--this area will display the progress bars -->
<div class="progress_area" hidden> <div class="progress_area mt-4" hidden>
<!--talking with friends --> <!--talking with friends -->
<a href="#" class="btn btn-link labels" data-number="1" <a href="#" class="btn btn-link labels" data-number="1"
data-label="talking-with-friends"> data-label="talking-with-friends">
...@@ -1113,7 +587,8 @@ ...@@ -1113,7 +587,8 @@
<div class="progress-bar bg-danger" role="progressbar" <div class="progress-bar bg-danger" role="progressbar"
id="talking_width" id="talking_width"
style="width: 20%" style="width: 20%"
aria-valuenow="20" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="20" aria-valuemin="0"
aria-valuemax="100"></div>
</div> </div>
<!--phone checking --> <!--phone checking -->
...@@ -1126,7 +601,8 @@ ...@@ -1126,7 +601,8 @@
<div class="progress-bar bg-warning" role="progressbar" <div class="progress-bar bg-warning" role="progressbar"
id="phone_width" id="phone_width"
style="width: 40%" style="width: 40%"
aria-valuenow="40" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="40" aria-valuemin="0"
aria-valuemax="100"></div>
</div> </div>
<!--note taking --> <!--note taking -->
...@@ -1138,7 +614,8 @@ ...@@ -1138,7 +614,8 @@
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar" role="progressbar" id="writing_width" <div class="progress-bar" role="progressbar" id="writing_width"
style="width: 60%" style="width: 60%"
aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="60" aria-valuemin="0"
aria-valuemax="100"></div>
</div> </div>
<!--listening--> <!--listening-->
...@@ -1149,16 +626,17 @@ ...@@ -1149,16 +626,17 @@
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar bg-info" role="progressbar" <div class="progress-bar bg-info" role="progressbar"
id="listening_width" style="width: 80%" id="listening_width" style="width: 80%"
aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="80" aria-valuemin="0"
aria-valuemax="100"></div>
</div> </div>
<!--evaluate button --> </div>
<button type="button" class="btn btn-danger float-right"
id="evaluate_button">Evaluate
</button>
</div> </div>
</div> </div>
<!--graph tab --> <!--graph tab -->
...@@ -1215,66 +693,6 @@ ...@@ -1215,66 +693,6 @@
</div> </div>
<!--2nd column -->
{# <div class="col-lg-6">#}
{# <!--card content -->#}
{# <div class="card shadow mb-4">#}
{# <!--card header -->#}
{# <div class="card-header py-3">#}
{# <h5 class="m-0 font-weight-bold text-primary">Frame Detections</h5>#}
{# </div>#}
{##}
{# <!--card body -->#}
{# <div class="text-center p-4" id="detection_frames">#}
{##}
{# <!--no content message-->#}
{# <div class="text-center p-2" id="no_detection_message_content">#}
{# <span class="font-italic">No frame is selected</span>#}
{# </div>#}
{##}
{# <div class="text-left m-3" id="detection_number_area" hidden>#}
{# <p>No of detections: <span id="no_of_detections"></span></p>#}
{# </div>#}
{# <!--the detection loader -->#}
{# <div class="text-center p-2" id="detection_loader" hidden>#}
{# <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"#}
{# alt="Loader">#}
{# </div>#}
{# </div>#}
{# </div>#}
<!--detection person card -->
{# <div class="card shadow mb-4">#}
{# <!--card header -->#}
{# <div class="card-header py-3">#}
{# <h5 class="m-0 font-weight-bold text-primary">Detected Students (by activity#}
{# type)</h5>#}
{# </div>#}
{##}
{# <!--card body -->#}
{# <div class="text-center p-4" id="detection_students">#}
{# <!--activity type line -->#}
{# <div class="text-center p-2" id="activity_type" hidden>#}
{# <p>Activity Type: <span class="font-weight-bold" id="activity_type_text"></span>#}
{# </p>#}
{# </div>#}
{##}
{# <!--no content message-->#}
{# <div class="text-center p-2" id="no_detection_student_content">#}
{# <span class="font-italic">No activity type is selected</span>#}
{# </div>#}
{##}
{# <!--the detection student loader -->#}
{# <div class="text-center p-2" id="detection_student_loader" hidden>#}
{# <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"#}
{# alt="Loader">#}
{# </div>#}
{##}
{# </div>#}
{# </div>#}
{# </div>#}
<!--2nd column --> <!--2nd column -->
<div class="col-lg-6"> <div class="col-lg-6">
<!--card --> <!--card -->
...@@ -1292,7 +710,7 @@ ...@@ -1292,7 +710,7 @@
<!--button --> <!--button -->
<div class="text-right m-4"> <div class="text-right m-4">
<button type="button" class="btn btn-outline-success" id="integrate_gaze"> <button type="button" class="btn btn-outline-success" id="integrate_activity">
Process Process
</button> </button>
</div> </div>
...@@ -1310,32 +728,32 @@ ...@@ -1310,32 +728,32 @@
<!--1st column --> <!--1st column -->
<div class="col-lg-6"> <div class="col-lg-6">
{# <!--card -->#} {# <!--card -->#}
{# <div class="card shadow mb-4">#} {# <div class="card shadow mb-4">#}
{# <!--card header -->#} {# <!--card header -->#}
{# <div class="card-header">#} {# <div class="card-header">#}
{# <h5 class="m-0 font-weight-bold text-primary">Evaluated Students</h5>#} {# <h5 class="m-0 font-weight-bold text-primary">Evaluated Students</h5>#}
{# </div>#} {# </div>#}
{##} {##}
{# <!--card body -->#} {# <!--card body -->#}
{# <div class="card-body" id="evaluation_students">#} {# <div class="card-body" id="evaluation_students">#}
{##} {##}
{# <!--no content message-->#} {# <!--no content message-->#}
{# <div class="text-center p-2" id="no_evaluated_student_content">#} {# <div class="text-center p-2" id="no_evaluated_student_content">#}
{# <span class="font-italic">Press 'Evaluate' button to evaluate students</span>#} {# <span class="font-italic">Press 'Evaluate' button to evaluate students</span>#}
{# </div>#} {# </div>#}
{##} {##}
{# <!--the detection student loader -->#} {# <!--the detection student loader -->#}
{# <div class="text-center p-2" id="evaluate_student_loader" hidden>#} {# <div class="text-center p-2" id="evaluate_student_loader" hidden>#}
{# <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"#} {# <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"#}
{# alt="Loader">#} {# alt="Loader">#}
{# </div>#} {# </div>#}
{# <!--end of student detection loader -->#} {# <!--end of student detection loader -->#}
{##} {##}
{##} {##}
{# </div>#} {# </div>#}
{##} {##}
{# </div>#} {# </div>#}
</div> </div>
...@@ -1508,7 +926,8 @@ ...@@ -1508,7 +926,8 @@
</div> </div>
</div> </div>
<div class="modal-footer"> <div class="modal-footer">
<button type="button" class="btn btn-primary" data-dismiss="modal" id="generate_report_btn">Yes</button> <button type="button" class="btn btn-primary" data-dismiss="modal" id="generate_report_btn">Yes
</button>
<button type="button" class="btn btn-danger" data-dismiss="modal">No</button> <button type="button" class="btn btn-danger" data-dismiss="modal">No</button>
</div> </div>
</div> </div>
...@@ -1558,7 +977,7 @@ ...@@ -1558,7 +977,7 @@
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar bg-danger" role="progressbar" <div class="progress-bar bg-danger" role="progressbar"
id="talking_instant_value" id="talking_instant_value"
{# style="width: 0%"#} {# style="width: 0%"#}
aria-valuenow="20" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="20" aria-valuemin="0" aria-valuemax="100"></div>
</div> </div>
...@@ -1569,7 +988,7 @@ ...@@ -1569,7 +988,7 @@
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar bg-warning" role="progressbar" <div class="progress-bar bg-warning" role="progressbar"
id="phone_checking_instant_value" id="phone_checking_instant_value"
{# style="width: 0%"#} {# style="width: 0%"#}
aria-valuenow="40" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="40" aria-valuemin="0" aria-valuemax="100"></div>
</div> </div>
...@@ -1580,7 +999,7 @@ ...@@ -1580,7 +999,7 @@
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar" role="progressbar" <div class="progress-bar" role="progressbar"
id="note_taking_instant_value" id="note_taking_instant_value"
{# style="width: 0%"#} {# style="width: 0%"#}
aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div>
</div> </div>
...@@ -1591,7 +1010,7 @@ ...@@ -1591,7 +1010,7 @@
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar bg-info" role="progressbar" <div class="progress-bar bg-info" role="progressbar"
id="listening_instant_value" id="listening_instant_value"
{# style="width: 80%"#} {# style="width: 80%"#}
aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div>
</div> </div>
...@@ -1614,11 +1033,11 @@ ...@@ -1614,11 +1033,11 @@
</div> </div>
<!-- video --> <!-- video -->
{# <video width="500" height="300" id="lecturer_video" controls>#} {# <video width="500" height="300" id="lecturer_video" controls>#}
{# <source src="#"#} {# <source src="#"#}
{# type="video/mp4">#} {# type="video/mp4">#}
{# Your browser does not support the video tag.#} {# Your browser does not support the video tag.#}
{# </video>#} {# </video>#}
</div> </div>
<!--end of lecture video section --> <!--end of lecture video section -->
......
{% load static %}
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<meta name="description" content="">
<meta name="author" content="">
<title>SLPES</title>
<!-- Custom fonts for this template-->
<link href="{% static 'FirstApp/vendor/fontawesome-free/css/all.min.css' %}" rel="stylesheet" type="text/css">
<link href="https://fonts.googleapis.com/css?family=Nunito:200,200i,300,300i,400,400i,600,600i,700,700i,800,800i,900,900i"
rel="stylesheet">
<!-- Custom styles for this template-->
<link href="{% static 'FirstApp/css/sb-admin-2.min.css' %}" rel="stylesheet">
</head>
<body class="bg-gradient-primary">
<div class="container">
<!-- Outer Row -->
<div class="row justify-content-center">
<div class="col-xl-10 col-lg-12 col-md-9">
<div class="card o-hidden border-0 shadow-lg my-5">
<div class="card-body p-0">
<!-- Nested Row within Card Body -->
<div class="row">
<div class="col-lg-6 d-none d-lg-block">
<img src="{% static 'FirstApp/images/admin.jpg' %}" width="400" height="600"
alt="No image">
</div>
<div class="col-lg-6">
<div class="p-5">
<div class="text-center">
<h1 class="h4 text-gray-900 mb-4">Welcome Back!</h1>
</div>
<!--form -->
<form action="/process-admin-login" method="POST" name="loginForm" class="user">
{% csrf_token %}
<div class="form-group">
<input type="email" name="email" class="form-control form-control-user"
id="exampleInputEmail" aria-describedby="emailHelp"
placeholder="Enter Email Address...">
</div>
<div class="form-group">
<input type="password" name="password" class="form-control form-control-user"
id="exampleInputPassword" placeholder="Password">
<div class="alert alert-danger m-4">{{ message }}</div>
</div>
<div class="form-group">
<div class="custom-control custom-checkbox small">
<input type="checkbox" class="custom-control-input" id="customCheck">
<label class="custom-control-label" for="customCheck">Remember Me</label>
</div>
</div>
<button type="submit" class="btn btn-primary btn-user btn-block">Login</button>
<hr>
</form>
<hr>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
<!-- Bootstrap core JavaScript-->
<script src="{% static 'FirstApp/vendor/jquery/jquery.min.js' %}"></script>
<script src="{% static 'FirstApp/vendor/bootstrap/js/bootstrap.bundle.min.js' %}"></script>
<!-- Core plugin JavaScript-->
<script src="{% static 'FirstApp/vendor/jquery-easing/jquery.easing.min.js' %}"></script>
<!-- Custom scripts for all pages-->
<script src="{% static 'FirstApp/js/sb-admin-2.min.js' %}"></script>
</body>
</html>
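# A hedged sketch of the /process-admin-login view the form above posts to; the template path
# and the post-login redirect target are assumptions, and it simply compares the plain-text
# password stored in AdminCredentialDetails (as that model defines it).
from django.shortcuts import redirect, render
from FirstApp.MongoModels import Admin, AdminCredentialDetails

def process_admin_login(request):
    email = request.POST.get('email')
    password = request.POST.get('password')
    admin = Admin.objects.filter(email=email).first()
    valid = admin is not None and AdminCredentialDetails.objects.filter(
        username=admin, password=password).exists()
    if not valid:
        # re-render the login page, filling the {{ message }} placeholder with an error
        return render(request, 'FirstApp/admin_login.html', {'message': 'Invalid email or password'})
    return redirect('/admin-home')  # hypothetical landing page after a successful login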
...@@ -170,13 +170,14 @@ ...@@ -170,13 +170,14 @@
fetch('http://127.0.0.1:8000/get-lecture-emotion/?lecture_video_id=' + global_lecture_video_id + '&lecture_video_name=' + global_video_name) fetch('http://127.0.0.1:8000/get-lecture-emotion/?lecture_video_id=' + global_lecture_video_id + '&lecture_video_name=' + global_video_name)
.then((res) => res.json()) .then((res) => res.json())
.then((out) => { .then((out) => {
let frames = createFrames(out); {#let frames = createFrames(out);#}
return frames {#return frames#}
displayActivity(out)
}) })
.then((obj) => { .then((obj) => {
$('#video_frames').prepend(obj); {#$('#video_frames').prepend(obj);#}
$('#frame_loader').attr('hidden', true); $('#frame_loader').attr('hidden', true);
$('#slidecontainer').attr('hidden', false); {#$('#slidecontainer').attr('hidden', false);#}
}) })
.catch((error) => alert('this is the error: ' + error)); .catch((error) => alert('this is the error: ' + error));
}); });
...@@ -216,6 +217,7 @@ ...@@ -216,6 +217,7 @@
//to handle the 'btn-success' (process) button //to handle the 'btn-success' (process) button
$(document).on('click', '.btn-success', function (e) { $(document).on('click', '.btn-success', function (e) {
//sending the POST request to process the lecture activities //sending the POST request to process the lecture activities
fetch('http://127.0.0.1:8000/process-lecture-emotion/?lecture_video_name=' + global_video_name + '&lecture_video_id=' + global_lecture_video_id) fetch('http://127.0.0.1:8000/process-lecture-emotion/?lecture_video_name=' + global_video_name + '&lecture_video_id=' + global_lecture_video_id)
.then((res) => res.json()) .then((res) => res.json())
...@@ -232,510 +234,8 @@ ...@@ -232,510 +234,8 @@
} }
//this section is responsible for displaying the frames as video
//creating the frame content
function createFrames(res) {
let main_frame_content = "<div class='row' id='main_frames'>";
main_frame_content += "<ul class='list-group list-group-horizontal'>";
let count = 0;
//loop through the frames
res.extracted.map((image) => {
let img_src = "";
let len = image.detections.length;
if (count === 0) {
main_frame_content += "<li class='list-group-item text-center' id='image_0'>";
img_src = "<img src='{% static '' %}FirstApp/activity/" + global_video_name + "/" + res.extracted[0].frame + "/" + res.extracted[0].detections[0] + "' width='400' height='400'>";
} else {
main_frame_content += "<li class='list-group-item other-frames' id='image_" + count + "' hidden>";
img_src = "<img src='{% static '' %}FirstApp/activity/" + global_video_name + "/" + image.frame + "/" + image.detections[len - 1] + "' class='img-link' width='400' height='400'>";
}
main_frame_content += img_src;
main_frame_content += "</li>";
count++;
});
main_frame_content += "</ul>";
main_frame_content += "</div>";
//setting the min, max values of the slider
$('#myActivityRange').attr({'min': 0, 'max': count});
//display the progress bars
displayActivity(res);
return main_frame_content;
}
//declaring the variable for setInterval function
let timeVar = null;
//handling the play button
$('#play_pause_icon_activity').click(function () {
//defining the two possible classes
let play_class = "fas fa-play";
let pause_class = "fas fa-pause";
//retrieving the current icon class
let current_class = $(this).attr('class');
//assigning the correct class based on the icon clicked
let new_class = (current_class === play_class) ? pause_class : play_class;
//setting the new class
$(this).attr('class', new_class);
//handling the slider
let slider = document.getElementById("myActivityRange");
let output = document.getElementById("demo");
//when the button is playing
if (current_class === play_class) {
timeVar = setInterval(() => {
let value = slider.value;
let new_slider_value = Number(value) + 1;
slider.value = new_slider_value;
output.innerHTML = new_slider_value.toString();
let selectedImage = '#image_' + Number(value);
//displaying the relevant image
$('#image_0').html($(selectedImage).html());
}, 50);
}
//when the button is paused
else if (current_class === pause_class) {
clearInterval(timeVar);
}
});
//handling the slider
let slider = document.getElementById("myActivityRange");
let output = document.getElementById("demo");
output.innerHTML = slider.value;
slider.oninput = function () {
output.innerHTML = this.value;
let selectedImage = '#image_' + Number(this.value);
//hide
{#$('#image_0').attr('hidden', true);#}
$('#image_0').html($(selectedImage).html());
//setting the selected image
{#$(selectedImage).attr('hidden', false);#}
};
$(document).on('click', '.img-link', function (e) {
//removing previously displayed detections
$('.detections').remove();
//removing the no-content message
$('#no_detection_message_content').hide();
//appearing the loader
$('#detection_loader').attr('hidden', false);
let img_src_arr = e.target.src.split('/');
let len = img_src_arr.length;
let src = img_src_arr[len - 1];
let frame_name_arr = src.split('.');
let frame_name = frame_name_arr[0];
//fetching the detection for the selected frame
fetch('http://127.0.0.1:8000/get-lecture-activity-frame-detection/?video_name=' + global_video_name + "&frame_name=" + frame_name)
.then((res) => res.json())
.then((out) => displayDetections(out.detections, frame_name))
.catch((error) => alert('this is an error'));
});
//the function to display detections
function displayDetections(detections, frame_name) {
let img_string = '';
let no_of_detections = detections.length;
//disabling the loader
$('#detection_loader').attr('hidden', true);
//appearing the no of detections number area
$('#detection_number_area').attr('hidden', false);
$('#no_of_detections').text(no_of_detections);
detections.map((detection) => {
img_string += "<img src='{% static '' %}FirstApp/activity/" + global_video_name + "/" + frame_name + "/" + detection + "' class='detections m-2' width='100' height='100' >"
});
$('#detection_frames').prepend(img_string);
}
//listening for click events in labels
$('.labels').click(function () {
let label = Number($(this).attr('data-number'));
let label_name = $(this).attr('data-label');
//removing the previous student detection lists
$('.student_detection_lists').remove();
//appearing the loader
$('#detection_student_loader').attr('hidden', false);
//appearing the loader
$('#activity_type').attr('hidden', false);
$('#activity_type_text').text(label_name);
//disappearing the no content message
$('#no_detection_student_content').attr('hidden', true);
//fetching from the api
fetch('http://127.0.0.1:8000/get-lecture-activity-detection-for-label/?video_name=' + global_video_name + '&label=' + label)
.then((res) => res.json())
.then((out) => createDetectedStudentFrames(out))
.catch((error) => alert('this is the error: ' + error))
});
//creating the detected students frames
function createDetectedStudentFrames(detections) {
let htmlString = "";
//iterating through the student
detections.people.map((student) => {
let title = student.split('.')[0];
let images = "";
htmlString += "<div class='row p-3 student-detection-rows'>";
let student_count = 0;
//iterating through the frames
detections.response.map((frame) => {
let frame_detections = frame.detections;
if (frame_detections.includes(student)) {
if (student_count === 0) {
images += "<li class='list-group-item frame-0' id='image_0_" + title + "'>";
} else {
images += "<li class='list-group-item other-student-frames' id='image_" + student_count + "_" + title + "' hidden>";
}
images += "<img src='{% static '' %}FirstApp/Activity/" + global_video_name + "/" + frame.frame + "/" + student + "' width='200' height='200'>";
images += "</li>";
//increment the student count
student_count++;
}
});
htmlString += "<h6 class='font-italic'>" + title + "</h6>";
htmlString += "<ul class='list-group list-group-horizontal student_detection_lists' style='overflow-x: scroll'>";
htmlString += images;
htmlString += "</ul>";
htmlString += "<div class='slidecontainer'>";
htmlString += "<div class='row m-3'></div>";
htmlString += "<div class='row'>";
htmlString += "<span><i class='fas fa-play play-pause-icon-student-frames' id='icon_" + title + "'></i></span>";
htmlString += "</div>";
htmlString += "<input type='range' min='1' max='100' value='0' class='slider' id='slider_" + title + "'>";
htmlString += "<p>No of frames: <span id='demo_" + title + "'></span></p>";
htmlString += "</div>";
htmlString += "</div>";
});
//disappearing the loader
$('#detection_student_loader').attr('hidden', true);
//append to the relevant html card content
$('#detection_students').append(htmlString);
}
let studentTimeVar = null;
//playing the frames for each student detection (by label)
$(document).on('click', '.play-pause-icon-student-frames', function (e) {
//defining the two possible classes
let play_class = "fas fa-play play-pause-icon-student-frames";
let pause_class = "fas fa-pause play-pause-icon-student-frames";
//retrieving the current icon class
let current_class = $(this).attr('class');
//assigning the correct class based on the icon clicked
let new_class = (current_class === play_class) ? pause_class : play_class;
//setting the new class
$(this).attr('class', new_class);
//extracting the title of the clicked icon
let title_part = $(this).attr('id');
let title = title_part.split("_")[1];
//handling the slider
let slider = document.getElementById("slider_" + title);
let output = document.getElementById("demo_" + title);
//when the button is playing
if (current_class === play_class) {
studentTimeVar = setInterval(() => {
let value = slider.value;
let new_slider_value = Number(value) + 1;
slider.value = new_slider_value;
output.innerHTML = new_slider_value.toString();
let selectedImage = '#image_' + Number(value) + '_' + title;
//displaying the relevant image
$('#image_0_' + title).html($(selectedImage).html());
}, 100);
}
//when the button is paused
else if (current_class === pause_class) {
clearInterval(studentTimeVar);
}
});
//this is to handle the 'evaluate' button
$('#evaluate_button').click(function () {
//hide the message
$('#no_evaluated_student_content').attr('hidden', true);
//show the loader
$('#evaluate_student_loader').attr('hidden', false);
//using the fetch api
fetch('http://127.0.0.1:8000/get-lecture-emotion-student-evaluation/?video_name=' + global_video_name)
.then((res) => res.json())
.then((out) => evaluate_student(out))
.catch((error) => alert('this is the error: ' + error))
});
//to create html for evaluate function
function evaluate_student(response) {
let htmlString = "";
//iterating through the student
response.people.map((student) => {
let title = student.split('.')[0];
let images = "";
htmlString += "<div class='row p-3 student-evaluation-rows'>";
let student_count = 0;
//iterating through the frames
response.response.map((frame) => {
let frame_detections = frame.detections;
let frame_detection_length = frame_detections.length;
if (frame_detections.includes(student)) {
if (student_count === 0) {
images += "<li class='list-group-item frame-0' id='image_0_evaluation" + title + "'>";
} else {
images += "<li class='list-group-item other-student-frames' id='image_evaluation" + student_count + "_" + title + "' hidden>";
}
images += "<img src='{% static '' %}FirstApp/Activity/" + global_video_name + "/" + frame.frame + "/" + student + "' width='200' height='200'>";
images += "</li>";
if (student_count === (frame_detection_length)) {
images += "<li class='list-group-item'>";
images += "<button type='button' class='btn btn-dark individual-evaluation' id='evaluate_student_" + title + "'>evaluate</button>";
images += "</li>";
}
//increment the student count
student_count++;
}
});
htmlString += "<ul class='list-group'>";
htmlString += "<li class='list-group-item'>";
htmlString += "<div class='row m-3'>";
htmlString += "<h4 class='font-weight-bold'>Student ID: <span>" + title + "</span></h4>";
htmlString += "</div>";
htmlString += "</li>";
{#htmlString += "<div class='row m-3'></div>";#}
htmlString += "<li class='list-group-item'>";
htmlString += "<div class='row'>";
htmlString += "<ul class='list-group list-group-horizontal student_detection_lists' style='overflow-x: scroll'>";
htmlString += images;
htmlString += "</ul>";
htmlString += "</div>";
htmlString += "</li>";
htmlString += "<li class='list-group-item'>";
htmlString += "<div class='slidecontainer'>";
htmlString += "<div class='row m-3'></div>";
htmlString += "<div class='row'>";
htmlString += "<span><i class='fas fa-play play-pause-icon-student-evaluations' id='icon_" + title + "'></i></span>";
htmlString += "</div>";
htmlString += "<input type='range' min='1' max='100' value='0' class='slider' id='slider_evaluation" + title + "'>";
htmlString += "<p>No of frames: <span id='demo_evaluation" + title + "'></span></p>";
htmlString += "</div>";
htmlString += "</div>";
htmlString += "</li>";
htmlString += "</ul>";
});
//disappearing the loader
$('#evaluate_student_loader').attr('hidden', true);
//append to the relevant html card content
$('#evaluation_students').append(htmlString);
}
let studentEvaluationVar = null;
//playing the frames for each student evaluation
$(document).on('click', '.play-pause-icon-student-evaluations', function (e) {
//defining the two possible classes
let play_class = "fas fa-play play-pause-icon-student-evaluations";
let pause_class = "fas fa-pause play-pause-icon-student-evaluations";
//retrieving the current icon class
let current_class = $(this).attr('class');
//assigning the correct class based on the icon clicked
let new_class = (current_class === play_class) ? pause_class : play_class;
//setting the new class
$(this).attr('class', new_class);
//extracting the title of the clicked icon
let title_part = $(this).attr('id');
let title = title_part.split("_")[1];
//handling the slider
let slider = document.getElementById("slider_evaluation" + title);
let output = document.getElementById("demo_evaluation" + title);
//when the button is playing
if (current_class === play_class) {
studentEvaluationVar = setInterval(() => {
let value = slider.value;
let new_slider_value = Number(value) + 1;
slider.value = new_slider_value;
output.innerHTML = new_slider_value.toString();
let selectedImage = '#image_evaluation' + Number(value) + '_' + title;
//displaying the relevant image
$('#image_0_evaluation' + title).html($(selectedImage).html());
}, 100);
}
//when the button is paused
else if (current_class === pause_class) {
clearInterval(studentEvaluationVar);
}
});
//to evaluate the individual student
$(document).on('click', '.individual-evaluation', function (e) {
let individual_id = $(this).attr('id');
let student_name = individual_id.split('_')[2];
student_name += ".png";
let html = $(this).html();
//after clicking, change the html
$(this).html("<span class='font-italic'>loading...</span>");
//fetching from the API
fetch('http://127.0.0.1:8000/get-lecture-emotion-individual-student-evaluation/?video_name=' + global_video_name + '&student_name=' + student_name)
.then((res) => res.json())
.then((out) => displayIndividualStudentEmotion(out.response, e, student_name))
.catch((error) => alert('something went wrong'));
//after 5 seconds, replace with the original html
/*
setTimeout(() => {
$(this).html(html);
//open the student individual modal
$('#student_individual_modal').modal();
}, 5000);
*/
});
//this function will display the individual student emotions
function displayIndividualStudentEmotion(res, e, title) {
//set the percentage values
$('#happy_individual_perct').text(res.happy_perct + '%');
$('#sad_individual_perct').text(res.sad_perct + '%');
$('#anger_individual_perct').text(res.angry_perct + '%');
$('#surprise_individual_perct').text(res.surprise_perct + '%');
$('#neutral_individual_perct').text(res.neutral_perct + '%');
//set the width
$('#happy_individual_width').width(res.happy_perct + '%');
$('#sad_individual_width').width(res.sad_perct + '%');
$('#anger_individual_width').width(res.angry_perct + '%');
$('#surprise_individual_width').width(res.surprise_perct + '%');
$('#neutral_individual_width').width(res.neutral_perct + '%');
//open the student individual modal
$('#student_individual_modal').modal();
//set the button to default
e.target.innerHTML = "<span>evaluate</span>";
}
//to handle the 'integrate' modal //to handle the 'integrate' modal
$('#integrate_activity').click(function () { $('#integrate_emotion').click(function () {
//define the student video src //define the student video src
let video_src = "{% static '' %}FirstApp/videos/" + global_video_name; let video_src = "{% static '' %}FirstApp/videos/" + global_video_name;
...@@ -753,7 +253,6 @@ ...@@ -753,7 +253,6 @@
.catch((err) => alert('error: ' + err)); .catch((err) => alert('error: ' + err));
}); });
...@@ -779,39 +278,39 @@ ...@@ -779,39 +278,39 @@
//append to the html string //append to the html string
//Happy //Happy
htmlString += "<div class='progress_area' id='progress_" +frame_name+ "' hidden>"; htmlString += "<div class='progress_area' id='progress_" + frame_name + "' hidden>";
htmlString += "<h4 class='small font-weight-bold'>Happy</h4>"; htmlString += "<h4 class='small font-weight-bold'>Happy</h4>";
htmlString += "<span class='float-right' id='happy_instant_" +frame_name+ "'>" +happy_perct+ "%</span>"; htmlString += "<span class='float-right' id='happy_instant_" + frame_name + "'>" + happy_perct + "%</span>";
htmlString += "<div class='progress mb-4'>"; htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-warning' role='progressbar' id='phone_checking_instant_value_" +frame_name+ "' style='width: " +happy_perct+ "%' aria-valuenow='40' aria-valuemin='0' aria-valuemax='100'></div>"; htmlString += "<div class='progress-bar bg-warning' role='progressbar' id='phone_checking_instant_value_" + frame_name + "' style='width: " + happy_perct + "%' aria-valuenow='40' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>"; htmlString += "</div>";
//Sad //Sad
htmlString += "<h4 class='small font-weight-bold'>Sad</h4>"; htmlString += "<h4 class='small font-weight-bold'>Sad</h4>";
htmlString += "<span class='float-right' id='note_taking_instant_" +frame_name+ "'>" +sad_perct+ "%</span>"; htmlString += "<span class='float-right' id='note_taking_instant_" + frame_name + "'>" + sad_perct + "%</span>";
htmlString += "<div class='progress mb-4'>"; htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar' role='progressbar' id='note_taking_instant_value_" +frame_name+ "' style='width: " +sad_perct+ "%' aria-valuenow='0' aria-valuemin='0' aria-valuemax='100'></div>"; htmlString += "<div class='progress-bar' role='progressbar' id='note_taking_instant_value_" + frame_name + "' style='width: " + sad_perct + "%' aria-valuenow='0' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>"; htmlString += "</div>";
//Angry //Angry
htmlString += "<h4 class='small font-weight-bold'>Angry</h4>"; htmlString += "<h4 class='small font-weight-bold'>Angry</h4>";
htmlString += "<span class='float-right' id='listening_instant_" +frame_name+ "'>" +angry_perct+ "%</span>"; htmlString += "<span class='float-right' id='listening_instant_" + frame_name + "'>" + angry_perct + "%</span>";
htmlString += "<div class='progress mb-4'>"; htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-info' role='progressbar' id='listening_instant_value_" +frame_name+ "' style='width: " +angry_perct+ "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>"; htmlString += "<div class='progress-bar bg-info' role='progressbar' id='listening_instant_value_" + frame_name + "' style='width: " + angry_perct + "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>"; htmlString += "</div>";
//Neutral //Neutral
htmlString += "<h4 class='small font-weight-bold'>Neutral</h4>"; htmlString += "<h4 class='small font-weight-bold'>Neutral</h4>";
htmlString += "<span class='float-right' id='note_taking_instant_" +frame_name+ "'>" +neutral_perct+ "%</span>"; htmlString += "<span class='float-right' id='note_taking_instant_" + frame_name + "'>" + neutral_perct + "%</span>";
htmlString += "<div class='progress mb-4'>"; htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar' role='progressbar' id='note_taking_instant_value_" +frame_name+ "' style='width: " +neutral_perct+ "%' aria-valuenow='0' aria-valuemin='0' aria-valuemax='100'></div>"; htmlString += "<div class='progress-bar' role='progressbar' id='note_taking_instant_value_" + frame_name + "' style='width: " + neutral_perct + "%' aria-valuenow='0' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>"; htmlString += "</div>";
//Surprise //Surprise
htmlString += "<h4 class='small font-weight-bold'>Surprise</h4>"; htmlString += "<h4 class='small font-weight-bold'>Surprise</h4>";
htmlString += "<span class='float-right' id='listening_instant_" +frame_name+ "'>" +surprise_perct+ "%</span>"; htmlString += "<span class='float-right' id='listening_instant_" + frame_name + "'>" + surprise_perct + "%</span>";
htmlString += "<div class='progress mb-4'>"; htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-info' role='progressbar' id='listening_instant_value_" +frame_name+ "' style='width: " +surprise_perct+ "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>"; htmlString += "<div class='progress-bar bg-info' role='progressbar' id='listening_instant_value_" + frame_name + "' style='width: " + surprise_perct + "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>"; htmlString += "</div>";
//ending the progress area //ending the progress area
...@@ -1063,31 +562,15 @@ ...@@ -1063,31 +562,15 @@
<!--temporary text --> <!--temporary text -->
<span class="font-italic" id="temporary_text">Frame will be displayed here</span> <span class="font-italic" id="temporary_text">Frame will be displayed here</span>
<!--loading buffer area--> <!--loading buffer area-->
<div class="text-center" id="frame_loader" hidden> <div class="text-center" id="frame_loader" hidden>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}" <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"
alt="Loader"> alt="Loader">
</div> </div>
<!--frames -->.
<div class="text-center p-4" id="video_frames">
<!-- slide container -->
<div id="slidecontainer" hidden>
<div class="row m-3"></div>
<!-- play/pause icon -->
<div class="row">
<span><i class="fas fa-play"
id="play_pause_icon_activity"></i></span>
</div>
<input type="range" min="1" max="100" value="0" class="slider"
id="myActivityRange">
<p>No of frames: <span id="demo"></span></p>
</div>
</div>
</div>
<!--this area will display the progress bars --> <!--this area will display the progress bars -->
<div class="progress_area" hidden> <div class="progress_area mt-4" hidden>
<!--Happy --> <!--Happy -->
<a href="#" class="btn btn-link labels" data-number="1" <a href="#" class="btn btn-link labels" data-number="1"
data-label="Happy"> data-label="Happy">
...@@ -1098,7 +581,8 @@ ...@@ -1098,7 +581,8 @@
<div class="progress-bar bg-danger" role="progressbar" <div class="progress-bar bg-danger" role="progressbar"
id="happy_width" id="happy_width"
style="width: 20%" style="width: 20%"
aria-valuenow="20" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="20" aria-valuemin="0"
aria-valuemax="100"></div>
</div> </div>
<!--sad --> <!--sad -->
...@@ -1111,7 +595,8 @@ ...@@ -1111,7 +595,8 @@
<div class="progress-bar bg-warning" role="progressbar" <div class="progress-bar bg-warning" role="progressbar"
id="sad_width" id="sad_width"
style="width: 40%" style="width: 40%"
aria-valuenow="40" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="40" aria-valuemin="0"
aria-valuemax="100"></div>
</div> </div>
<!--anger --> <!--anger -->
...@@ -1123,7 +608,8 @@ ...@@ -1123,7 +608,8 @@
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar" role="progressbar" id="anger_width" <div class="progress-bar" role="progressbar" id="anger_width"
style="width: 60%" style="width: 60%"
aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="60" aria-valuemin="0"
aria-valuemax="100"></div>
</div> </div>
<!--surprise--> <!--surprise-->
...@@ -1135,7 +621,8 @@ ...@@ -1135,7 +621,8 @@
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar bg-info" role="progressbar" <div class="progress-bar bg-info" role="progressbar"
id="surprise_width" style="width: 80%" id="surprise_width" style="width: 80%"
aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="80" aria-valuemin="0"
aria-valuemax="100"></div>
</div> </div>
<!--neutral--> <!--neutral-->
...@@ -1147,16 +634,18 @@ ...@@ -1147,16 +634,18 @@
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar bg-info" role="progressbar" <div class="progress-bar bg-info" role="progressbar"
id="neutral_width" style="width: 80%" id="neutral_width" style="width: 80%"
aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="80" aria-valuemin="0"
aria-valuemax="100"></div>
</div>
</div> </div>
{# <!--evaluate button -->#}
{# <button type="button" class="btn btn-danger float-right"#}
{# id="evaluate_button">Evaluate#}
{# </button>#}
</div> </div>
</div> </div>
<!--graph tab --> <!--graph tab -->
...@@ -1214,63 +703,63 @@ ...@@ -1214,63 +703,63 @@
<!--2nd column --> <!--2nd column -->
{# <div class="col-lg-6">#} {# <div class="col-lg-6">#}
{# <!--card content -->#} {# <!--card content -->#}
{# <div class="card shadow mb-4">#} {# <div class="card shadow mb-4">#}
{# <!--card header -->#} {# <!--card header -->#}
{# <div class="card-header py-3">#} {# <div class="card-header py-3">#}
{# <h5 class="m-0 font-weight-bold text-primary">Frame Detections</h5>#} {# <h5 class="m-0 font-weight-bold text-primary">Frame Detections</h5>#}
{# </div>#} {# </div>#}
{##} {##}
{# <!--card body -->#} {# <!--card body -->#}
{# <div class="text-center p-4" id="detection_frames">#} {# <div class="text-center p-4" id="detection_frames">#}
{##} {##}
{# <!--no content message-->#} {# <!--no content message-->#}
{# <div class="text-center p-2" id="no_detection_message_content">#} {# <div class="text-center p-2" id="no_detection_message_content">#}
{# <span class="font-italic">No frame is selected</span>#} {# <span class="font-italic">No frame is selected</span>#}
{# </div>#} {# </div>#}
{##} {##}
{# <div class="text-left m-3" id="detection_number_area" hidden>#} {# <div class="text-left m-3" id="detection_number_area" hidden>#}
{# <p>No of detections: <span id="no_of_detections"></span></p>#} {# <p>No of detections: <span id="no_of_detections"></span></p>#}
{# </div>#} {# </div>#}
{# <!--the detection loader -->#} {# <!--the detection loader -->#}
{# <div class="text-center p-2" id="detection_loader" hidden>#} {# <div class="text-center p-2" id="detection_loader" hidden>#}
{# <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"#} {# <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"#}
{# alt="Loader">#} {# alt="Loader">#}
{# </div>#} {# </div>#}
{# </div>#} {# </div>#}
{# </div>#} {# </div>#}
{##} {##}
{# <!--detection person card -->#} {# <!--detection person card -->#}
{# <div class="card shadow mb-4">#} {# <div class="card shadow mb-4">#}
{# <!--card header -->#} {# <!--card header -->#}
{# <div class="card-header py-3">#} {# <div class="card-header py-3">#}
{# <h5 class="m-0 font-weight-bold text-primary">Detected Students (by emotion#} {# <h5 class="m-0 font-weight-bold text-primary">Detected Students (by emotion#}
{# type)</h5>#} {# type)</h5>#}
{# </div>#} {# </div>#}
{##} {##}
{# <!--card body -->#} {# <!--card body -->#}
{# <div class="text-center p-4" id="detection_students">#} {# <div class="text-center p-4" id="detection_students">#}
{# <!--activity type line -->#} {# <!--activity type line -->#}
{# <div class="text-center p-2" id="activity_type" hidden>#} {# <div class="text-center p-2" id="activity_type" hidden>#}
{# <p>Activity Type: <span class="font-weight-bold" id="activity_type_text"></span>#} {# <p>Activity Type: <span class="font-weight-bold" id="activity_type_text"></span>#}
{# </p>#} {# </p>#}
{# </div>#} {# </div>#}
{##} {##}
{# <!--no content message-->#} {# <!--no content message-->#}
{# <div class="text-center p-2" id="no_detection_student_content">#} {# <div class="text-center p-2" id="no_detection_student_content">#}
{# <span class="font-italic">No activity type is selected</span>#} {# <span class="font-italic">No activity type is selected</span>#}
{# </div>#} {# </div>#}
{##} {##}
{# <!--the detection student loader -->#} {# <!--the detection student loader -->#}
{# <div class="text-center p-2" id="detection_student_loader" hidden>#} {# <div class="text-center p-2" id="detection_student_loader" hidden>#}
{# <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"#} {# <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"#}
{# alt="Loader">#} {# alt="Loader">#}
{# </div>#} {# </div>#}
{##} {##}
{# </div>#} {# </div>#}
{# </div>#} {# </div>#}
{# </div>#} {# </div>#}
</div> </div>
...@@ -1280,36 +769,36 @@ ...@@ -1280,36 +769,36 @@
<div class="row p-2"> <div class="row p-2">
<!--1st column --> <!--1st column -->
{# <div class="col-lg-6">#} {# <div class="col-lg-6">#}
{# <!--card -->#} {# <!--card -->#}
{# <div class="card shadow mb-4">#} {# <div class="card shadow mb-4">#}
{# <!--card header -->#} {# <!--card header -->#}
{# <div class="card-header">#} {# <div class="card-header">#}
{# <h5 class="m-0 font-weight-bold text-primary">Evaluated Students</h5>#} {# <h5 class="m-0 font-weight-bold text-primary">Evaluated Students</h5>#}
{# </div>#} {# </div>#}
{##} {##}
{# <!--card body -->#} {# <!--card body -->#}
{# <div class="card-body" id="evaluation_students">#} {# <div class="card-body" id="evaluation_students">#}
{##} {##}
{# <!--no content message-->#} {# <!--no content message-->#}
{# <div class="text-center p-2" id="no_evaluated_student_content">#} {# <div class="text-center p-2" id="no_evaluated_student_content">#}
{# <span class="font-italic">Press 'Evaluate' button to evaluate students</span>#} {# <span class="font-italic">Press 'Evaluate' button to evaluate students</span>#}
{# </div>#} {# </div>#}
{##} {##}
{# <!--the detection student loader -->#} {# <!--the detection student loader -->#}
{# <div class="text-center p-2" id="evaluate_student_loader" hidden>#} {# <div class="text-center p-2" id="evaluate_student_loader" hidden>#}
{# <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"#} {# <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"#}
{# alt="Loader">#} {# alt="Loader">#}
{# </div>#} {# </div>#}
{# <!--end of student detection loader -->#} {# <!--end of student detection loader -->#}
{##} {##}
{##} {##}
{# </div>#} {# </div>#}
{##} {##}
{# </div>#} {# </div>#}
{##} {##}
{##} {##}
{# </div>#} {# </div>#}
<!--end of 1st column --> <!--end of 1st column -->
...@@ -1330,7 +819,7 @@ ...@@ -1330,7 +819,7 @@
<!--button --> <!--button -->
<div class="text-right m-4"> <div class="text-right m-4">
<button type="button" class="btn btn-outline-success" id="integrate_activity"> <button type="button" class="btn btn-outline-success" id="integrate_emotion">
Process Process
</button> </button>
</div> </div>
...@@ -1529,7 +1018,7 @@ ...@@ -1529,7 +1018,7 @@
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar bg-danger" role="progressbar" <div class="progress-bar bg-danger" role="progressbar"
id="happy_instant_value" id="happy_instant_value"
{# style="width: 0%"#} {# style="width: 0%"#}
aria-valuenow="20" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="20" aria-valuemin="0" aria-valuemax="100"></div>
</div> </div>
...@@ -1540,7 +1029,7 @@ ...@@ -1540,7 +1029,7 @@
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar bg-warning" role="progressbar" <div class="progress-bar bg-warning" role="progressbar"
id="sad_instant_value" id="sad_instant_value"
{# style="width: 0%"#} {# style="width: 0%"#}
aria-valuenow="40" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="40" aria-valuemin="0" aria-valuemax="100"></div>
</div> </div>
...@@ -1551,7 +1040,7 @@ ...@@ -1551,7 +1040,7 @@
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar" role="progressbar" <div class="progress-bar" role="progressbar"
id="angry_instant_value" id="angry_instant_value"
{# style="width: 0%"#} {# style="width: 0%"#}
aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div>
</div> </div>
...@@ -1562,7 +1051,7 @@ ...@@ -1562,7 +1051,7 @@
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar bg-info" role="progressbar" <div class="progress-bar bg-info" role="progressbar"
id="neutral_instant_value" id="neutral_instant_value"
{# style="width: 80%"#} {# style="width: 80%"#}
aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div>
</div> </div>
...@@ -1574,7 +1063,7 @@ ...@@ -1574,7 +1063,7 @@
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar bg-info" role="progressbar" <div class="progress-bar bg-info" role="progressbar"
id="surprise_instant_value" id="surprise_instant_value"
{# style="width: 80%"#} {# style="width: 80%"#}
aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div>
</div> </div>
......
...@@ -169,13 +169,14 @@ ...@@ -169,13 +169,14 @@
fetch('http://127.0.0.1:8000/get-lecture-gaze-estimation/?lecture_video_id=' + global_lecture_video_id + '&lecture_video_name=' + global_video_name) fetch('http://127.0.0.1:8000/get-lecture-gaze-estimation/?lecture_video_id=' + global_lecture_video_id + '&lecture_video_name=' + global_video_name)
.then((res) => res.json()) .then((res) => res.json())
.then((out) => { .then((out) => {
let frames = createFrames(out); {#let frames = createFrames(out);#}
return frames {#return frames#}
displayGazeEstimation(out)
}) })
.then((obj) => { .then((obj) => {
$('#video_frames').prepend(obj); {#$('#video_frames').prepend(obj);#}
$('#frame_loader').attr('hidden', true); $('#frame_loader').attr('hidden', true);
$('#slidecontainer').attr('hidden', false); {#$('#slidecontainer').attr('hidden', false);#}
}) })
.catch((error) => alert('this is the error: ' + error)); .catch((error) => alert('this is the error: ' + error));
}); });
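// Sketch only: displayGazeEstimation() is called by the new handler above, but its
// body is not part of this commit. The property names on `res` below are assumptions,
// modelled on the emotion handler and on the element IDs already present in this
// template (e.g. #looking_front_perct / #looking_front_width).
function displayGazeEstimation(res) {
    // percentage labels (assumed response field names)
    $('#looking_up_right_perct').text(res.looking_up_and_right_perct + '%');
    $('#looking_up_left_perct').text(res.looking_up_and_left_perct + '%');
    $('#looking_down_right_perct').text(res.looking_down_and_right_perct + '%');
    $('#looking_down_left_perct').text(res.looking_down_and_left_perct + '%');
    $('#looking_front_perct').text(res.looking_front_perct + '%');
    // progress-bar widths
    $('#looking_up_right_width').width(res.looking_up_and_right_perct + '%');
    $('#looking_up_left_width').width(res.looking_up_and_left_perct + '%');
    $('#looking_down_right_width').width(res.looking_down_and_right_perct + '%');
    $('#looking_down_left_width').width(res.looking_down_and_left_perct + '%');
    $('#looking_front_width').width(res.looking_front_perct + '%');
    // reveal the progress area once the values are set
    $('.progress_area').attr('hidden', false);
}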
...@@ -231,47 +232,8 @@ ...@@ -231,47 +232,8 @@
} }
//this section is responsible for displaying the frames as video
//creating the frame content
function createFrames(res) {
let main_frame_content = "<div class='row' id='main_frames'>";
main_frame_content += "<ul class='list-group list-group-horizontal'>";
let count = 0;
//loop through the frames
res.extracted.map((image) => {
let img_src = "";
if (count === 0) {
main_frame_content += "<li class='list-group-item text-center' id='image_0'>";
img_src = "<img src='{% static '' %}FirstApp/gaze/" + global_video_name + "/" + res.extracted[0] + "' width='400' height='400'>";
} else {
main_frame_content += "<li class='list-group-item other-frames' id='image_" + count + "' hidden>";
img_src = "<img src='{% static '' %}FirstApp/gaze/" + global_video_name + "/" + image + "' class='img-link' width='400' height='400'>";
}
main_frame_content += img_src;
main_frame_content += "</li>";
count++;
});
main_frame_content += "</ul>";
main_frame_content += "</div>";
//setting the min, max values of the slider
$('#myActivityRange').attr({'min': 0, 'max': count});
//display the progress bars
displayGazeEstimation(res);
return main_frame_content;
}
//to handle the 'integrate' modal //to handle the 'integrate' modal
$('#integrate_activity').click(function () { $('#integrate_gaze').click(function () {
//define the student video src //define the student video src
let video_src = "{% static '' %}FirstApp/videos/" + global_video_name; let video_src = "{% static '' %}FirstApp/videos/" + global_video_name;
...@@ -312,43 +274,42 @@ ...@@ -312,43 +274,42 @@
//append to the html string //append to the html string
//looking up and right //looking up and right
htmlString += "<div class='progress_area' id='progress_" +frame_name+ "' hidden>"; htmlString += "<div class='progress_area' id='progress_" + frame_name + "' hidden>";
htmlString += "<h4 class='small font-weight-bold'>Looking up and right</h4>"; htmlString += "<h4 class='small font-weight-bold'>Looking up and right</h4>";
htmlString += "<span class='float-right' id='look_up_right_instant_" +frame_name+ "'>" +look_up_right+ "%</span>"; htmlString += "<span class='float-right' id='look_up_right_instant_" + frame_name + "'>" + look_up_right + "%</span>";
htmlString += "<div class='progress mb-4'>"; htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-warning' role='progressbar' id='look_up_right_instant_value_" +frame_name+ "' style='width: " +look_up_right+ "%' aria-valuenow='40' aria-valuemin='0' aria-valuemax='100'></div>"; htmlString += "<div class='progress-bar bg-warning' role='progressbar' id='look_up_right_instant_value_" + frame_name + "' style='width: " + look_up_right + "%' aria-valuenow='40' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>"; htmlString += "</div>";
//looking up and left //looking up and left
htmlString += "<h4 class='small font-weight-bold'>Looking up and left</h4>"; htmlString += "<h4 class='small font-weight-bold'>Looking up and left</h4>";
htmlString += "<span class='float-right' id='look_up_left_instant_" +frame_name+ "'>" +look_up_left+ "%</span>"; htmlString += "<span class='float-right' id='look_up_left_instant_" + frame_name + "'>" + look_up_left + "%</span>";
htmlString += "<div class='progress mb-4'>"; htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar' role='progressbar' id='look_up_left_instant_value_" +frame_name+ "' style='width: " +look_up_left+ "%' aria-valuenow='0' aria-valuemin='0' aria-valuemax='100'></div>"; htmlString += "<div class='progress-bar' role='progressbar' id='look_up_left_instant_value_" + frame_name + "' style='width: " + look_up_left + "%' aria-valuenow='0' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>"; htmlString += "</div>";
//looking down and right //looking down and right
htmlString += "<h4 class='small font-weight-bold'>Looking down and right</h4>"; htmlString += "<h4 class='small font-weight-bold'>Looking down and right</h4>";
htmlString += "<span class='float-right' id='look_down_right_instant_" +frame_name+ "'>" +look_down_right+ "%</span>"; htmlString += "<span class='float-right' id='look_down_right_instant_" + frame_name + "'>" + look_down_right + "%</span>";
htmlString += "<div class='progress mb-4'>"; htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-info' role='progressbar' id='look_down_right_instant_value_" +frame_name+ "' style='width: " +look_down_right+ "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>"; htmlString += "<div class='progress-bar bg-info' role='progressbar' id='look_down_right_instant_value_" + frame_name + "' style='width: " + look_down_right + "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>"; htmlString += "</div>";
//looking down and left //looking down and left
htmlString += "<h4 class='small font-weight-bold'>Looking down and left</h4>"; htmlString += "<h4 class='small font-weight-bold'>Looking down and left</h4>";
htmlString += "<span class='float-right' id='look_down_left_instant_" +frame_name+ "'>" +look_down_left+ "%</span>"; htmlString += "<span class='float-right' id='look_down_left_instant_" + frame_name + "'>" + look_down_left + "%</span>";
htmlString += "<div class='progress mb-4'>"; htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-info' role='progressbar' id='look_down_left_instant_value_" +frame_name+ "' style='width: " +look_down_left+ "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>"; htmlString += "<div class='progress-bar bg-info' role='progressbar' id='look_down_left_instant_value_" + frame_name + "' style='width: " + look_down_left + "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>"; htmlString += "</div>";
//looking front //looking front
htmlString += "<h4 class='small font-weight-bold'>Looking front</h4>"; htmlString += "<h4 class='small font-weight-bold'>Looking front</h4>";
htmlString += "<span class='float-right' id='look_front_instant_" +frame_name+ "'>" +look_front+ "%</span>"; htmlString += "<span class='float-right' id='look_front_instant_" + frame_name + "'>" + look_front + "%</span>";
htmlString += "<div class='progress mb-4'>"; htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-info' role='progressbar' id='look_front_instant_value_" +frame_name+ "' style='width: " +look_front+ "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>"; htmlString += "<div class='progress-bar bg-info' role='progressbar' id='look_front_instant_value_" + frame_name + "' style='width: " + look_front + "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>"; htmlString += "</div>";
//ending the progress area //ending the progress area
htmlString += "</div>"; htmlString += "</div>";
...@@ -434,254 +395,6 @@ ...@@ -434,254 +395,6 @@
}); });
//declaring the variable for setInterval function
let timeVar = null;
//handling the play button
$('#play_pause_icon_activity').click(function () {
//defining the two possible classes
let play_class = "fas fa-play";
let pause_class = "fas fa-pause";
//retrieving the current icon class
let current_class = $(this).attr('class');
//assigning the correct class based on the icon clicked
let new_class = (current_class === play_class) ? pause_class : play_class;
//setting the new class
$(this).attr('class', new_class);
//handling the slider
let slider = document.getElementById("myActivityRange");
let output = document.getElementById("demo");
//when the button is playing
if (current_class === play_class) {
timeVar = setInterval(() => {
let value = slider.value;
let new_slider_value = Number(value) + 1;
slider.value = new_slider_value;
output.innerHTML = new_slider_value.toString();
let selectedImage = '#image_' + Number(value);
//displaying the relevant image
$('#image_0').html($(selectedImage).html());
}, 100);
}
//when the button is paused
else if (current_class === pause_class) {
clearInterval(timeVar);
}
});
//handling the slider
let slider = document.getElementById("myActivityRange");
let output = document.getElementById("demo");
output.innerHTML = slider.value;
slider.oninput = function () {
output.innerHTML = this.value;
let selectedImage = '#image_' + Number(this.value);
//hide
{#$('#image_0').attr('hidden', true);#}
$('#image_0').html($(selectedImage).html());
//setting the selected image
{#$(selectedImage).attr('hidden', false);#}
};
$(document).on('click', '.img-link', function (e) {
//removing previously displayed detections
$('.detections').remove();
//removing the no-content message
$('#no_detection_message_content').hide();
//appearing the loader
$('#detection_loader').attr('hidden', false);
let img_src_arr = e.target.src.split('/');
let len = img_src_arr.length;
let src = img_src_arr[len - 1];
let frame_name_arr = src.split('.');
let frame_name = frame_name_arr[0];
//fetching the detection for the selected frame
fetch('http://127.0.0.1:8000/get-lecture-activity-frame-detection/?video_name=' + global_video_name + "&frame_name=" + frame_name)
.then((res) => res.json())
.then((out) => displayDetections(out.detections, frame_name))
.catch((error) => alert('this is an error'));
});
//the function to display detections
function displayDetections(detections, frame_name) {
let img_string = '';
let no_of_detections = detections.length;
//disabling the loader
$('#detection_loader').attr('hidden', true);
//appearing the no of detections number area
$('#detection_number_area').attr('hidden', false);
$('#no_of_detections').text(no_of_detections);
detections.map((detection) => {
img_string += "<img src='{% static '' %}FirstApp/activity/" + global_video_name + "/" + frame_name + "/" + detection + "' class='detections m-2' width='100' height='100' >"
});
$('#detection_frames').prepend(img_string);
}
//listening for click events in labels
$('.labels').click(function () {
let label = Number($(this).attr('data-number'));
//removing the previous student detection lists
$('.student_detection_lists').remove();
//appearing the loader
$('#detection_student_loader').attr('hidden', false);
//disappearing the no content message
$('#no_detection_student_content').attr('hidden', true);
//fetching from the api
fetch('http://127.0.0.1:8000/get-lecture-activity-detection-for-label/?video_name=' + global_video_name + '&label=' + label)
.then((res) => res.json())
.then((out) => createDetectedStudentFrames(out))
.catch((error) => alert('this is the error: ' + error))
});
//creating the detected students frames
function createDetectedStudentFrames(detections) {
let htmlString = "";
//iterating through the student
detections.people.map((student) => {
let title = student.split('.')[0];
let images = "";
htmlString += "<div class='row p-3 student-detection-rows'>";
let student_count = 0;
//iterating through the frames
detections.response.map((frame) => {
let frame_detections = frame.detections;
if (frame_detections.includes(student)) {
if (student_count === 0) {
images += "<li class='list-group-item frame-0' id='image_0_" + title + "'>";
} else {
images += "<li class='list-group-item other-student-frames' id='image_" + student_count + "_" + title + "' hidden>";
}
images += "<img src='{% static '' %}FirstApp/Activity/" + global_video_name + "/" + frame.frame + "/" + student + "' width='200' height='200'>";
images += "</li>";
//increment the student count
student_count++;
}
});
htmlString += "<h6 class='font-italic'>" + title + "</h6>";
htmlString += "<ul class='list-group list-group-horizontal student_detection_lists' style='overflow-x: scroll'>";
htmlString += images;
htmlString += "</ul>";
htmlString += "<div class='slidecontainer'>";
htmlString += "<div class='row m-3'></div>";
htmlString += "<div class='row'>";
htmlString += "<span><i class='fas fa-play play-pause-icon-student-frames' id='icon_" + title + "'></i></span>";
htmlString += "</div>";
htmlString += "<input type='range' min='1' max='100' value='0' class='slider' id='slider_" + title + "'>";
htmlString += "<p>No of frames: <span id='demo_" + title + "'></span></p>";
htmlString += "</div>";
htmlString += "</div>";
});
//disappearing the loader
$('#detection_student_loader').attr('hidden', true);
//append to the relevant html card content
$('#detection_students').append(htmlString);
}
let studentTimeVar = null;
//playing the frames for each student detection
$(document).on('click', '.play-pause-icon-student-frames', function (e) {
//defining the two possible classes
let play_class = "fas fa-play play-pause-icon-student-frames";
let pause_class = "fas fa-pause play-pause-icon-student-frames";
//retrieving the current icon class
let current_class = $(this).attr('class');
//assigning the correct class based on the icon clicked
let new_class = (current_class === play_class) ? pause_class : play_class;
//setting the new class
$(this).attr('class', new_class);
//extracting the title pf the clicked icon
let title_part = $(this).attr('id');
let title = title_part.split("_")[1];
//handling the slider
let slider = document.getElementById("slider_" + title);
let output = document.getElementById("demo_" + title);
//when the button is playing
if (current_class === play_class) {
studentTimeVar = setInterval(() => {
let value = slider.value;
let new_slider_value = Number(value) + 1;
slider.value = new_slider_value;
output.innerHTML = new_slider_value.toString();
let selectedImage = '#image_' + Number(value) + '_' + title;
//displaying the relevant image
$('#image_0_' + title).html($(selectedImage).html());
}, 100);
}
//when the button is paused
else if (current_class === pause_class) {
clearInterval(studentTimeVar);
}
})
}); });
</script> </script>
...@@ -854,22 +567,6 @@ ...@@ -854,22 +567,6 @@
alt="Loader"> alt="Loader">
</div> </div>
<!--frames -->.
<div class="text-center p-4" id="video_frames">
<!-- slide container -->
<div id="slidecontainer" hidden>
<div class="row m-3"></div>
<!-- play/pause icon -->
<div class="row">
<span><i class="fas fa-play"
id="play_pause_icon_activity"></i></span>
</div>
<input type="range" min="1" max="100" value="0" class="slider"
id="myActivityRange">
<p>No of frames: <span id="demo"></span></p>
</div>
</div>
</div>
<!--this area will display the progress bars --> <!--this area will display the progress bars -->
<div class="progress_area" hidden> <div class="progress_area" hidden>
...@@ -882,7 +579,8 @@ ...@@ -882,7 +579,8 @@
<div class="progress-bar bg-danger" role="progressbar" <div class="progress-bar bg-danger" role="progressbar"
id="looking_up_right_width" id="looking_up_right_width"
style="width: 20%" style="width: 20%"
aria-valuenow="20" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="20" aria-valuemin="0"
aria-valuemax="100"></div>
</div> </div>
<!--looking up and left --> <!--looking up and left -->
...@@ -894,7 +592,8 @@ ...@@ -894,7 +592,8 @@
<div class="progress-bar bg-warning" role="progressbar" <div class="progress-bar bg-warning" role="progressbar"
id="looking_up_left_width" id="looking_up_left_width"
style="width: 40%" style="width: 40%"
aria-valuenow="40" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="40" aria-valuemin="0"
aria-valuemax="100"></div>
</div> </div>
<!--looking down and right --> <!--looking down and right -->
...@@ -906,7 +605,8 @@ ...@@ -906,7 +605,8 @@
<div class="progress-bar" role="progressbar" <div class="progress-bar" role="progressbar"
id="looking_down_right_width" id="looking_down_right_width"
style="width: 60%" style="width: 60%"
aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="60" aria-valuemin="0"
aria-valuemax="100"></div>
</div> </div>
<!--Looking down and left--> <!--Looking down and left-->
...@@ -917,7 +617,8 @@ ...@@ -917,7 +617,8 @@
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar bg-info" role="progressbar" <div class="progress-bar bg-info" role="progressbar"
id="looking_down_left_width" style="width: 80%" id="looking_down_left_width" style="width: 80%"
aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="80" aria-valuemin="0"
aria-valuemax="100"></div>
</div> </div>
<!--Looking front--> <!--Looking front-->
...@@ -926,9 +627,10 @@ ...@@ -926,9 +627,10 @@
</a> </a>
<span class="float-right" id="looking_front_perct">60%</span> <span class="float-right" id="looking_front_perct">60%</span>
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar bg-info" role="progressbar" <div class="progress-bar bg-gradient-dark" role="progressbar"
id="looking_front_width" style="width: 80%" id="looking_front_width" style="width: 80%"
aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="80" aria-valuemin="0"
aria-valuemax="100"></div>
</div> </div>
</div> </div>
...@@ -936,6 +638,9 @@ ...@@ -936,6 +638,9 @@
</div> </div>
</div>
<!--graph tab --> <!--graph tab -->
<div class="tab-pane fade" id="graph" role="tabpanel" <div class="tab-pane fade" id="graph" role="tabpanel"
aria-labelledby="profile-tab"> aria-labelledby="profile-tab">
...@@ -1007,10 +712,11 @@ ...@@ -1007,10 +712,11 @@
<!--button --> <!--button -->
<div class="text-right m-4"> <div class="text-right m-4">
<button type="button" class="btn btn-outline-success" id="integrate_activity"> <button type="button" class="btn btn-outline-success" id="integrate_gaze">
Process Process
</button> </button>
</div> </div>
</div> </div>
</div> </div>
</div> </div>
...@@ -1164,7 +870,8 @@ ...@@ -1164,7 +870,8 @@
</a> </a>
<span class="float-right" id="looking_down_right_instant_perct">50%</span> <span class="float-right" id="looking_down_right_instant_perct">50%</span>
<div class="progress mb-4"> <div class="progress mb-4">
<div class="progress-bar" role="progressbar" id="looking_down_right_instant_width" <div class="progress-bar" role="progressbar"
id="looking_down_right_instant_width"
style="width: 60%" style="width: 60%"
aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div> aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div>
</div> </div>
...@@ -1210,11 +917,11 @@ ...@@ -1210,11 +917,11 @@
</div> </div>
{# <video width="500" height="300" id="lecturer_video" controls>#} {# <video width="500" height="300" id="lecturer_video" controls>#}
{# <source src="#"#} {# <source src="#"#}
{# type="video/mp4">#} {# type="video/mp4">#}
{# Your browser does not support the video tag.#} {# Your browser does not support the video tag.#}
{# </video>#} {# </video>#}
</div> </div>
<!--end of lecture video section --> <!--end of lecture video section -->
......
...@@ -66,6 +66,8 @@ ...@@ -66,6 +66,8 @@
Interface Interface
</div> </div>
{% if request.session.user_type == "Lecturer" %}
<!-- Nav Item - Pages Collapse Menu --> <!-- Nav Item - Pages Collapse Menu -->
<li class="nav-item"> <li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseTwo" aria-expanded="true" aria-controls="collapseTwo"> <a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseTwo" aria-expanded="true" aria-controls="collapseTwo">
...@@ -83,6 +85,7 @@ ...@@ -83,6 +85,7 @@
</div> </div>
</li> </li>
<!-- Nav Item - Pages Collapse Menu --> <!-- Nav Item - Pages Collapse Menu -->
<li class="nav-item"> <li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseThree" aria-expanded="true" aria-controls="collapseThree"> <a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseThree" aria-expanded="true" aria-controls="collapseThree">
...@@ -97,6 +100,8 @@ ...@@ -97,6 +100,8 @@
</div> </div>
</li> </li>
<li class="nav-item"> <li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseFour" aria-expanded="true" aria-controls="collapseThree"> <a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseFour" aria-expanded="true" aria-controls="collapseThree">
<i class="fas fa-fw fa-cog"></i> <i class="fas fa-fw fa-cog"></i>
...@@ -127,6 +132,8 @@ ...@@ -127,6 +132,8 @@
</div> </div>
</li> </li>
{% endif %}
<!-- Divider --> <!-- Divider -->
<hr class="sidebar-divider"> <hr class="sidebar-divider">
...@@ -178,6 +185,8 @@ ...@@ -178,6 +185,8 @@
</div> </div>
</ul> </ul>
<!-- End of Sidebar --> <!-- End of Sidebar -->
<div id="content-wrapper" class="d-flex flex-column"> <div id="content-wrapper" class="d-flex flex-column">
......
{% load static %}
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<meta name="description" content="">
<meta name="author" content="">
<title>SLPES</title>
<!-- Custom fonts for this template-->
<link href="{% static 'FirstApp/vendor/fontawesome-free/css/all.min.css' %}" rel="stylesheet" type="text/css">
<link href="https://fonts.googleapis.com/css?family=Nunito:200,200i,300,300i,400,400i,600,600i,700,700i,800,800i,900,900i"
rel="stylesheet">
<!-- Custom styles for this template-->
<link href="{% static 'FirstApp/css/sb-admin-2.min.css' %}" rel="stylesheet">
</head>
<body class="bg-gradient-primary">
<div class="container">
<!-- Outer Row -->
<div class="row justify-content-center">
<div class="col-xl-10 col-lg-12 col-md-9">
<div class="card o-hidden border-0 shadow-lg my-5">
<div class="card-body p-0">
<!-- Nested Row within Card Body -->
<div class="row">
<div class="col-lg-6 d-none d-lg-block">
<img src="{% static 'FirstApp/images/user_redirect.png' %}" width="400" height="500"
alt="No image">
</div>
<div class="col-lg-6">
<div class="p-5">
<div class="text-center">
<h1 class="h4 text-gray-900 mb-4">Select the user type</h1>
</div>
<!--form -->
<form action="/process-user-redirect" method="POST" name="loginForm" class="user">
{% csrf_token %}
<div class="form-check mx-3">
<input class="form-check-input" type="radio" name="user_type"
id="admin" value="admin" checked>
<label class="form-check-label" for="admin">
Admin
</label>
</div>
<div style="padding-top: 20px">
<div class="form-check mx-3">
<input class="form-check-input" type="radio" name="user_type"
id="lecturer" value="lecturer">
<label class="form-check-label" for="lecturer">
Lecturer
</label>
</div>
<div style="padding-top: 20px">
<button type="submit" class="btn btn-primary btn-user btn-block">Proceed</button>
<hr>
</form>
<hr>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
<!-- Bootstrap core JavaScript-->
<script src="{% static 'FirstApp/vendor/jquery/jquery.min.js' %}"></script>
<script src="{% static 'FirstApp/vendor/bootstrap/js/bootstrap.bundle.min.js' %}"></script>
<!-- Core plugin JavaScript-->
<script src="{% static 'FirstApp/vendor/jquery-easing/jquery.easing.min.js' %}"></script>
<!-- Custom scripts for all pages-->
<script src="{% static 'FirstApp/js/sb-admin-2.min.js' %}"></script>
</body>
</html>
...@@ -14,6 +14,7 @@ urlpatterns = [ ...@@ -14,6 +14,7 @@ urlpatterns = [
path('logout', views.logoutView), path('logout', views.logoutView),
path('register-user', views.register), path('register-user', views.register),
path('404', views.view404), path('404', views.view404),
path('401', views.view401),
path('500', views.view500), path('500', views.view500),
path('blank', views.blank), path('blank', views.blank),
path('gaze', views.gaze), path('gaze', views.gaze),
...@@ -32,10 +33,20 @@ urlpatterns = [ ...@@ -32,10 +33,20 @@ urlpatterns = [
# video results # video results
path('video_result', views.video_result), path('video_result', views.video_result),
# this is used for login # this is used to process login
path('process-login', views.loggedInView), path('process-login', views.loggedInView),
# this is used for login # this is used to process admin login
path('process-admin-login', views.processAdminLogin),
# this is used for user-redirect processing
path('process-user-redirect', views.processUserRedirect),
# this is used for admin login page
path('admin-login', views.adminLogin),
# this is used for activity
path('activity', views.activity), path('activity', views.activity),
# tables view # tables view
...@@ -44,6 +55,10 @@ urlpatterns = [ ...@@ -44,6 +55,10 @@ urlpatterns = [
# test view (delete later) # test view (delete later)
path('test', views.test), path('test', views.test),
# user direct view
path('user-direct', views.userDirect),
url(r'^register', views.RegisterViewSet), url(r'^register', views.RegisterViewSet),
# re_path('video/?video_name<str:video_name>', views.video), # re_path('video/?video_name<str:video_name>', views.video),
url(r'^teachers/', views.teachersList.as_view()), url(r'^teachers/', views.teachersList.as_view()),
...@@ -140,6 +155,7 @@ urlpatterns = [ ...@@ -140,6 +155,7 @@ urlpatterns = [
# lecture emotion detection for frames API (to retrieve detections for each frame in lecture video) # lecture emotion detection for frames API (to retrieve detections for each frame in lecture video)
url(r'^get-lecture-emotion-for-frame/$', api.GetLectureEmotionRecognitionsForFrames.as_view()), url(r'^get-lecture-emotion-for-frame/$', api.GetLectureEmotionRecognitionsForFrames.as_view()),
###### POSE Section ##### ###### POSE Section #####
# lecture video API (for Pose estimation) # lecture video API (for Pose estimation)
url(r'^get-lecture-video-for-pose/$', api.GetLectureVideoForPose.as_view()), url(r'^get-lecture-video-for-pose/$', api.GetLectureVideoForPose.as_view()),
......
...@@ -109,13 +109,18 @@ class LectureViewSet(APIView): ...@@ -109,13 +109,18 @@ class LectureViewSet(APIView):
####### VIEWS ###### ####### VIEWS ######
@login_required(login_url='/login') @login_required(login_url='/user-direct')
def hello(request): def hello(request):
try:
username = request.user.username username = request.user.username
# retrieve the lecturer # retrieve the lecturer
lecturer = request.session['lecturer'] lecturer = request.session['lecturer']
user_type = request.session['user_type']
print('user_type: ', user_type)
# retrieve the lecturer's timetable slots # retrieve the lecturer's timetable slots
lecturer_timetable = FacultyTimetable.objects.filter() lecturer_timetable = FacultyTimetable.objects.filter()
...@@ -194,15 +199,27 @@ def hello(request): ...@@ -194,15 +199,27 @@ def hello(request):
context = {'object': obj, 'Videos': videos, 'durations': durations, 'template_name': 'FirstApp/template.html', 'lecturer_details': lecturer_details, "lecturer": lecturer} context = {'object': obj, 'Videos': videos, 'durations': durations, 'template_name': 'FirstApp/template.html', 'lecturer_details': lecturer_details, "lecturer": lecturer}
return render(request, 'FirstApp/Home.html', context) return render(request, 'FirstApp/Home.html', context)
# in case of a KeyError exception
except KeyError as exc:
return redirect('/401')
except Exception as exc:
return redirect('/500')
# this method will handle 404 error page
def view404(request): def view404(request):
return render(request, 'FirstApp/404.html') return render(request, 'FirstApp/404.html')
# this method will handle the 401 error page
def view401(request):
return render(request, 'FirstApp/401.html')
# querying the database # querying the database
def blank(request): def blank(request):
emotions = LectureEmotionReport.objects.all().order_by('lecture_id') emotions = LectureEmotionReport.objects.all().order_by('lecture_id')
return render(request, 'FirstApp/blank.html', {'details': emotions}) return render(request, 'FirstApp/blank.html', {'details': emotions})
@login_required(login_url='/login') @login_required(login_url='/user-direct')
def gaze(request): def gaze(request):
try: try:
...@@ -221,6 +238,11 @@ def gaze(request): ...@@ -221,6 +238,11 @@ def gaze(request):
subject_list.append(subject_serialized.data) subject_list.append(subject_serialized.data)
# handling the keyError
except KeyError as exc:
return redirect('/401')
# handling the general exceptions
except Exception as exc: except Exception as exc:
return redirect('/500') return redirect('/500')
...@@ -240,7 +262,7 @@ def processGaze(request): ...@@ -240,7 +262,7 @@ def processGaze(request):
# the corresponding view for pose estimation # the corresponding view for pose estimation
@login_required(login_url='/login') @login_required(login_url='/user-direct')
def pose(request): def pose(request):
try: try:
...@@ -295,7 +317,7 @@ def webcam(request): ...@@ -295,7 +317,7 @@ def webcam(request):
return redirect('/') return redirect('/')
# to process video for emotion detection # to process video for emotion detection
@login_required(login_url='/login') @login_required(login_url='/user-direct')
def video(request): def video(request):
title = 'Student and Lecturer Performance Enhancement System' title = 'Student and Lecturer Performance Enhancement System'
video_name = request.GET.get('video_name') video_name = request.GET.get('video_name')
...@@ -310,7 +332,7 @@ def video(request): ...@@ -310,7 +332,7 @@ def video(request):
# extractor view # extractor view
@login_required(login_url='/login') @login_required(login_url='/user-direct')
def extractor(request): def extractor(request):
folder = os.path.join(BASE_DIR, os.path.join('static\\FirstApp\\videos')) folder = os.path.join(BASE_DIR, os.path.join('static\\FirstApp\\videos'))
videoPaths = [os.path.join(folder, file) for file in os.listdir(folder)] videoPaths = [os.path.join(folder, file) for file in os.listdir(folder)]
...@@ -358,7 +380,7 @@ def child(request): ...@@ -358,7 +380,7 @@ def child(request):
return render(request, 'FirstApp/child.html', {'template_name': 'FirstApp/base.html'}) return render(request, 'FirstApp/child.html', {'template_name': 'FirstApp/base.html'})
# displaying video results # displaying video results
@login_required(login_url='/login') @login_required(login_url='/user-direct')
def video_result(request): def video_result(request):
try: try:
...@@ -434,7 +456,11 @@ def video_result(request): ...@@ -434,7 +456,11 @@ def video_result(request):
# append to the list # append to the list
due_lecture_list.append(obj) due_lecture_list.append(obj)
# handling the keyError
except KeyError as exc:
return redirect('/401')
# handling the general exceptions
except Exception as exc: except Exception as exc:
print('what is wrong?: ', exc) print('what is wrong?: ', exc)
return redirect('/500') return redirect('/500')
...@@ -444,7 +470,7 @@ def video_result(request): ...@@ -444,7 +470,7 @@ def video_result(request):
# view for emotion page # view for emotion page
@login_required(login_url='/login') @login_required(login_url='/user-direct')
def emotion_view(request): def emotion_view(request):
try: try:
...@@ -463,6 +489,11 @@ def emotion_view(request): ...@@ -463,6 +489,11 @@ def emotion_view(request):
subject_list.append(subject_serialized.data) subject_list.append(subject_serialized.data)
# handling the keyError
except KeyError as exc:
return redirect('/401')
# handling the general exceptions
except Exception as exc: except Exception as exc:
return redirect('/500') return redirect('/500')
...@@ -490,6 +521,7 @@ def loggedInView(request): ...@@ -490,6 +521,7 @@ def loggedInView(request):
login(request, user) login(request, user)
# setting up the session # setting up the session
request.session['lecturer'] = lecturer.id request.session['lecturer'] = lecturer.id
request.session['user_type'] = "Lecturer"
return redirect('/') return redirect('/')
...@@ -506,7 +538,7 @@ def logoutView(request): ...@@ -506,7 +538,7 @@ def logoutView(request):
logout(request) logout(request)
return redirect('/login') return redirect('/user-direct')
# 500 error page # 500 error page
...@@ -519,7 +551,7 @@ def tables(request): ...@@ -519,7 +551,7 @@ def tables(request):
return render(request, "FirstApp/tables.html") return render(request, "FirstApp/tables.html")
@login_required(login_url='/login') @login_required(login_url='/user-direct')
def activity(request): def activity(request):
try: try:
...@@ -538,6 +570,11 @@ def activity(request): ...@@ -538,6 +570,11 @@ def activity(request):
subject_list.append(subject_serialized.data) subject_list.append(subject_serialized.data)
# handling the keyError
except KeyError as exc:
return redirect('/401')
# handling the general exception
except Exception as exc: except Exception as exc:
return redirect('/500') return redirect('/500')
...@@ -546,3 +583,60 @@ def activity(request): ...@@ -546,3 +583,60 @@ def activity(request):
def test(request): def test(request):
return render(request, "FirstApp/pdf_template.html") return render(request, "FirstApp/pdf_template.html")
# this method will render the user redirect page
def userDirect(request):
return render(request, "FirstApp/user_direct.html")
# this method will handle user redirection process
def processUserRedirect(request):
if request.POST:
user_type = request.POST.get('user_type')
if user_type == 'admin':
return redirect('/admin-login')
elif user_type == 'lecturer':
return redirect('/login')
return redirect('/500')
# admin login page
def adminLogin(request):
return render(request, "FirstApp/admin_login.html")
# this method will process admin login
def processAdminLogin(request):
username = "not logged in"
message = "Invalid Username or Password"
adminLoginForm = AdminLoginForm(request.POST)
print('message: ', message)
try:
# if the details are valid, let the user log in
if adminLoginForm.is_valid():
email = adminLoginForm.cleaned_data.get('email')
user = User.objects.get(email=email)
admin = Admin.objects.get(email=email)
login(request, user)
# setting up the session
request.session['admin'] = admin.id
request.session['user_type'] = "Admin"
return redirect('/lecturer')
else:
message = "Please provide correct credntials"
except Exception as exc:
print('exception: ', exc)
return render(request, 'FirstApp/admin_login.html', {'message': message})
\ No newline at end of file
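Note: processAdminLogin above depends on an AdminLoginForm that is not included in this commit. A minimal sketch of what such a form might look like follows; only the email field is implied by the view (via cleaned_data.get('email')), so the password field and its length are assumptions.

# forms.py (sketch only -- the real AdminLoginForm is not shown in this diff)
from django import forms


class AdminLoginForm(forms.Form):
    # processAdminLogin reads cleaned_data.get('email'), so an email field is implied
    email = forms.EmailField()
    # a password field is assumed here; the view above does not verify it yet
    password = forms.CharField(widget=forms.PasswordInput, max_length=15)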