Commit 539497be authored by I.K Seneviratne

Committing the addition of 401 page and full implementation of saving activity, emotion and gaze estimations through a single process.
parent 745c0fb3
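
The commit message describes saving the activity, emotion and gaze estimations through a single process. As a rough orientation for reading the diff below, here is a minimal sketch of how the save helpers touched in this commit could be chained together; the wrapper function and the aliases ed (emotion) and vl (video landmarks) are assumptions for illustration only, while ar, hge and the individual save_* helpers do appear in the changes themselves.

    # hypothetical orchestration sketch - not part of this commit
    # ar, ed, hge, vl stand for the activity, emotion, gaze and landmark logic modules
    def save_all_estimations(video_name):
        # time and frame landmarks are computed once and shared by all three analyses
        vl.save_time_landmarks(video_name)
        frame_landmarks, frame_group_dict = vl.save_frame_landmarks(video_name)

        # per-frame recognitions saved for activity, emotion and gaze
        ar.save_frame_recognition(video_name)       # activity
        ed.save_frame_recognitions(video_name)      # emotion
        hge.save_frame_detections(video_name)       # gaze

        # frame groupings saved for each category
        ar.save_frame_groupings(video_name, frame_landmarks, frame_group_dict)
        ed.save_frame_groupings(video_name, frame_landmarks, frame_group_dict)
        hge.save_frame_groupings(video_name, frame_landmarks, frame_group_dict)
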
@@ -300,7 +300,7 @@ class LectureEmotionFrameRecognitions(models.Model):
# POSE section
- # lecture pose estimation
+ # lecture gaze estimation
class LectureGazeEstimation(models.Model):
lecture_gaze_id = models.CharField(max_length=10)
lecture_video_id = models.ForeignKey(LectureVideo, on_delete=models.CASCADE)
...
@@ -300,7 +300,6 @@ class LectureActivityProcess(APIView):
LectureActivity(
lecture_activity_id=new_lecture_activity_id,
lecture_video_id_id=lec_video_id,
- talking_perct=percentages['talking_perct'],
phone_perct=percentages['phone_perct'],
listening_perct=percentages['listening_perct'],
writing_perct=percentages['writing_perct']
@@ -473,16 +472,18 @@ class LectureEmotionProcess(APIView):
pass
def save_emotion_report(self, lec_video_id, percentages):
- lec_video = LectureVideo.objects.get(lecture_video_id=lec_video_id)
+ lec_video = LectureVideo.objects.filter(lecture_video_id=lec_video_id)
lec_video_serializer = LectureVideoSerializer(lec_video, many=True)
lec_video_data = lec_video_serializer.data[0]
last_lec_emotion = LectureEmotionReport.objects.order_by('lecture_emotion_id').last()
new_lecture_emotion_id = ig.generate_new_id(last_lec_emotion.lecture_emotion_id)
+ lecture_video_id = lec_video_data['id']
# creating a new lecture emotion report
LectureEmotionReport(
lecture_emotion_id=new_lecture_emotion_id,
- lecture_video_id=lec_video,
+ lecture_video_id_id=lecture_video_id,
happy_perct=percentages.happy_perct,
sad_perct=percentages.sad_perct,
angry_perct=percentages.angry_perct,
@@ -685,17 +686,23 @@ class ProcessLectureGazeEstimation(APIView):
pass
def estimate_gaze(self, lec_video_id, percentages):
- lec_video = LectureVideo.objects.get(lecture_video_id=lec_video_id)
+ lec_video = LectureVideo.objects.filter(lecture_video_id=lec_video_id)
last_lec_gaze = LectureGazeEstimation.objects.order_by('lecture_gaze_id').last()
lec_video_serializer = LectureVideoSerializer(lec_video, many=True)
lec_video_data = lec_video_serializer.data[0]
new_lecture_gaze_id = "LG000001" if (last_lec_gaze is None) else ig.generate_new_id(
last_lec_gaze.lecture_gaze_id)
+ new_lecture_gaze_primary_id = 1 if (last_lec_gaze is None) else int(last_lec_gaze.id) + 1
+ # get the video id
+ lecture_video_id = lec_video_data['id']
# creating a new lecture gaze estimation
LectureGazeEstimation(
+ id=new_lecture_gaze_primary_id,
lecture_gaze_id=new_lecture_gaze_id,
- lecture_video_id=lec_video,
+ lecture_video_id_id=lecture_video_id,
looking_up_and_right_perct=percentages['head_up_right_perct'],
looking_up_and_left_perct=percentages['head_up_left_perct'],
looking_down_and_right_perct=percentages['head_down_right_perct'],
@@ -723,7 +730,7 @@ class GetLectureGazeEstimationViewSet(APIView):
lecture_video_id = request.query_params.get('lecture_video_id')
lecture_video_name = request.query_params.get('lecture_video_name')
# retrieve the extracted frames
- extracted = hge.getExtractedFrames(lecture_video_name)
+ # extracted = hge.getExtractedFrames(lecture_video_name)
lecture_gaze_estimations = LectureGazeEstimation.objects.filter(
lecture_video_id__lecture_video_id=lecture_video_id)
@@ -731,7 +738,7 @@ class GetLectureGazeEstimationViewSet(APIView):
return Response({
"response": serializer.data,
- "extracted": extracted
+ # "extracted": extracted
})
...
@@ -10,6 +10,7 @@ from .MongoModels import *
from . models import VideoMeta
from . logic import custom_sorter as cs
from .logic import id_generator as ig
+ from .logic import activity_recognition as ar
# emotion recognition method
@@ -47,7 +48,6 @@ def detect_emotion(video):
face_classifier = cv2.CascadeClassifier(os.path.join(BASE_DIR, 'FirstApp\classifiers\haarcascade_frontalface_default.xml'))
classifier_path = os.path.join(BASE_DIR, 'FirstApp\classifiers\Emotion_little_vgg.h5')
classifier = load_model(classifier_path)
- path = ''
meta_data = VideoMeta()
class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
@@ -65,6 +65,9 @@ def detect_emotion(video):
count_neutral = 0
count_surprise = 0
+ # for testing purposes
+ print('starting the emotion recognition process')
while (count_frames < frame_count):
# Grab a single frame of video
ret, frame = cap.read()
@@ -72,23 +75,7 @@ def detect_emotion(video):
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_classifier.detectMultiScale(gray,1.3,5)
- for (x, y, w, h) in faces:
- cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
- roi_gray = gray[y:y+h, x:x+w]
- roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
- # rect,face,image = face_detector(frame)
- if np.sum([roi_gray])!=0:
- roi = roi_gray.astype('float')/255.0
- roi = img_to_array(roi)
- roi = np.expand_dims(roi, axis=0)
- # make a prediction on the ROI, then lookup the class
- preds = classifier.predict(roi)[0]
- label = class_labels[preds.argmax()]
+ label = emotion_recognition(classifier, face_classifier, frame)
# counting the number of frames for each label, to calculate the percentage for each emotion later on...
@@ -113,11 +100,9 @@ def detect_emotion(video):
elif (label == 'Surprise'):
count_surprise += 1
- label_position = (x, y)
- # cv2.putText(frame, label, label_position, cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 3)
- # cv2.imwrite("".format(label, count), frame)
- else:
- cv2.putText(frame, 'No Face Found', (20, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 3)
+ # for testing purposes
+ print('emotion frame count: ', count_frames)
count_frames += 1
@@ -132,6 +117,9 @@ def detect_emotion(video):
cap.release()
cv2.destroyAllWindows()
+ # for testing purposes
+ print('ending the emotion recognition process')
return meta_data
@@ -263,11 +251,24 @@ def get_individual_student_evaluation(video_name, student_name):
# this method will
def get_frame_emotion_recognition(video_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+ VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
face_classifier = cv2.CascadeClassifier(
os.path.join(BASE_DIR, 'FirstApp\classifiers\haarcascade_frontalface_default.xml'))
classifier_path = os.path.join(BASE_DIR, 'FirstApp\classifiers\Emotion_little_vgg.h5')
classifier = load_model(classifier_path)
- EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
+ # files required for person detection
+ config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
+ model_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.caffemodel")
+ # load our serialized person detection model from disk
+ print("[INFO] loading model...")
+ net = cv2.dnn.readNetFromCaffe(config_file, model_file)
+ cap = cv2.VideoCapture(VIDEO_DIR)
+ no_of_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# initializing the count variables
frame_count = 0
@@ -276,16 +277,21 @@ def get_frame_emotion_recognition(video_name):
# frame activity recognitions
frame_emotion_recognitions = []
# # class labels
class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
- for frame in os.listdir(EXTRACTED_DIR):
- # derive the frame folder path
- FRAME_FOLDER = os.path.join(EXTRACTED_DIR, frame)
+ # for testing purposes
+ print('starting the emotion frame recognition process')
+ while (frame_count < no_of_frames):
+ ret, image = cap.read()
+ frame_name = "frame-{}".format(frame_count)
frame_details = {}
- frame_details['frame_name'] = frame
+ frame_details['frame_name'] = frame_name
# initialize the count variables for a frame
happy_count = 0
@@ -294,18 +300,19 @@ def get_frame_emotion_recognition(video_name):
neutral_count = 0
surprise_count = 0
+ # get the detections
+ detections = ar.person_detection(image, net)
# to count the extracted detections for a frame
detection_count = 0
- for detections in os.listdir(FRAME_FOLDER):
- # only take the images with the student name
- if "frame" not in detections:
- # get the label for this image
- IMAGE_PATH = os.path.join(FRAME_FOLDER, detections)
- image = cv2.imread(IMAGE_PATH)
- label = emotion_recognition(classifier, face_classifier, image)
+ # if there are detections
+ if (len(detections) > 0):
+ # loop through the detections
+ for detection in detections:
+ label = emotion_recognition(classifier, face_classifier, detection)
# checking for the label
if label == class_labels[0]:
@@ -341,9 +348,23 @@ def get_frame_emotion_recognition(video_name):
# push to all the frame details
frame_emotion_recognitions.append(frame_details)
+ else:
+ break
+ # for testing purposes
+ print('emotion frame recognition count: ', frame_count)
+ # increment the frame count
+ frame_count += 1
# sort the recognitions based on the frame number
sorted_activity_frame_recognitions = cs.custom_object_sorter(frame_emotion_recognitions)
+ # for testing purposes
+ print('ending the emotion frame recognition process')
# return the detected frame percentages
return sorted_activity_frame_recognitions
@@ -409,15 +430,28 @@ def get_student_emotion_summary_for_period(emotions):
# this method will retrieve activity frame groupings for a lecture
def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
- BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
- EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
+ # load the models
+ BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+ VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
face_classifier = cv2.CascadeClassifier(
os.path.join(BASE_DIR, 'FirstApp\classifiers\haarcascade_frontalface_default.xml'))
classifier_path = os.path.join(BASE_DIR, 'FirstApp\classifiers\Emotion_little_vgg.h5')
classifier = load_model(classifier_path)
+ # files required for person detection
+ config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
+ model_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.caffemodel")
+ # load our serialized person detection model from disk
+ print("[INFO] loading model...")
+ net = cv2.dnn.readNetFromCaffe(config_file, model_file)
+ cap = cv2.VideoCapture(VIDEO_DIR)
+ no_of_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# initializing the count variables
@@ -441,9 +475,11 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# looping through the frames
- for frame in os.listdir(EXTRACTED_DIR):
- # getting the frame folder
- FRAME_FOLDER = os.path.join(EXTRACTED_DIR, frame)
+ while (frame_count < no_of_frames):
+ # get the current frame
+ ret, image = cap.read()
# initializing the variables
happy_count = 0
@@ -453,17 +489,17 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
neutral_count = 0
detection_count = 0
+ detections = ar.person_detection(image, net)
+ # if there are detections
+ if (len(detections) > 0):
# looping through the detections in each frame
- for detections in os.listdir(FRAME_FOLDER):
- # checking whether the image contains only one person
- if "frame" not in detections:
- # get the label for this image
- IMAGE_PATH = os.path.join(FRAME_FOLDER, detections)
- image = cv2.imread(IMAGE_PATH)
+ for detection in detections:
# run the model and get the emotion label
- label = emotion_recognition(classifier, face_classifier, image)
+ label = emotion_recognition(classifier, face_classifier, detection)
# increment the count based on the label
if label == class_labels[0]:
@@ -503,7 +539,11 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
frame_group_dict[frame_name]['neutral_count'] += neutral_count
frame_group_dict[frame_name]['detection_count'] += detection_count
+ else:
+ break
+ # for testing purposes
+ print('emotion frame groupings count: ', frame_count)
# increment the frame count
frame_count += 1
@@ -558,6 +598,10 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# this section will handle some database operations
def save_frame_recognitions(video_name):
+ # for testing purposes
+ print('starting the saving emotion frame recognition process')
# retrieve the lecture emotion id
lec_emotion = LectureEmotionReport.objects.filter(lecture_video_id__video_name=video_name)
lec_emotion_ser = LectureEmotionSerializer(lec_emotion, many=True)
@@ -595,6 +639,9 @@ def save_frame_recognitions(video_name):
lec_emotion_frame_recognitions.save()
+ # for testing purposes
+ print('ending the saving emotion frame recognition process')
# now return the frame recognitions
return frame_detections
@@ -602,6 +649,9 @@ def save_frame_recognitions(video_name):
# this method will save the emotion frame groupings to the database
def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
+ # for testing purposes
+ print('starting the saving emotion frame groupings process')
frame_group_percentages, emotion_labels = emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict)
# save the frame group details into db
@@ -631,5 +681,8 @@ def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
new_lec_emotion_frame_groupings.lecture_emotion_id_id = lec_emotion_id
new_lec_emotion_frame_groupings.frame_group_details = frame_group_details
+ # for testing purposes
+ print('ending the saving emotion frame groupings process')
# save
new_lec_emotion_frame_groupings.save()
@@ -50,38 +50,21 @@ def activity_recognition(video_path):
frame_count = 0
total_detections = 0
phone_checking_count = 0
- talking_count = 0
note_taking_count = 0
listening_count = 0
- # video activity directory
- VIDEO_ACTIVITY_DIR = os.path.join(ACTIVITY_DIR, video_path)
- # creating the directory for the video
- # if (os.path.isdir(VIDEO_ACTIVITY_DIR)):
- #     shutil.rmtree(VIDEO_ACTIVITY_DIR)
- #
- # # create the video directory
- # os.mkdir(VIDEO_ACTIVITY_DIR)
+ # for testing purposes
+ print('starting the activity recognition process')
while (frame_count < no_of_frames):
ret, image = video.read()
- FRAME_DIR = os.path.join(VIDEO_ACTIVITY_DIR, "frame-{}".format(frame_count))
- # frame_name = "frame-{}.png".format(frame_count)
- #
- # FRAME_IMG = os.path.join(FRAME_DIR, frame_name)
- #
- # if (os.path.isdir(FRAME_DIR)):
- #     shutil.rmtree(FRAME_DIR)
- # create the new frame directory
- # os.mkdir(FRAME_DIR)
image = cv2.resize(image, size)
detections = person_detection(image, net)
- image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
- # cv2.imwrite(FRAME_IMG, image)
+ # this is for testing purposes
+ print('frame count: ', frame_count)
# if there are any person detections
if (len(detections) > 0):
@@ -90,6 +73,7 @@ def activity_recognition(video_path):
detection_count = 0
+ # looping through the person detections of the frame
for detection in detections:
detection = cv2.resize(detection, size)
@@ -113,43 +97,33 @@ def activity_recognition(video_path):
elif (label == class_labels[2]):
note_taking_count += 1
- # saving the detection for the particular frame
- # detection_name = "detection-{}.png".format(detection_count)
- # detection_image_path = os.path.join(FRAME_DIR, detection_name)
- #
- # # converting detected image into grey-scale
- # detection = cv2.cvtColor(detection, cv2.COLOR_BGR2GRAY)
- #
- # cv2.imwrite(detection_image_path, detection)
detection_count += 1
frame_count += 1
- # after extracting the frames, save the changes to static content
- # p = os.popen("python manage.py collectstatic", "w")
- # p.write("yes")
# calculating the percentages for each label
phone_perct = float(phone_checking_count / total_detections) * 100 if total_detections > 0 else 0
- talking_perct = float(talking_count / total_detections) * 100 if total_detections > 0 else 0
+ # talking_perct = float(talking_count / total_detections) * 100 if total_detections > 0 else 0
note_perct = float(note_taking_count / total_detections) * 100 if total_detections > 0 else 0
listening_perct = float(listening_count / total_detections) * 100 if total_detections > 0 else 0
# assigning the percentages to the dictionary
percentages["phone_perct"] = phone_perct
- percentages["talking_perct"] = talking_perct
+ # percentages["talking_perct"] = talking_perct
percentages["writing_perct"] = note_perct
percentages["listening_perct"] = listening_perct
+ # for testing purposes
+ print('activity recognition process is over')
return percentages
def person_detection(image, net):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
- config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
- model_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.caffemodel")
threshold = 0.2
detected_person = []
@@ -391,14 +365,27 @@ def get_student_activity_evaluation(video_name):
# recognize the activity for each frame
def get_frame_activity_recognition(video_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
- EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
+ VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
- # CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_03.h5")
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
+ # CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_03.h5")
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_04.h5")
- ACTIVITY_DIR = os.path.join(BASE_DIR, "static\\FirstApp\\activity")
+ # files required for person detection
+ config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
+ model_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.caffemodel")
+ # load our serialized person detection model from disk
+ print("[INFO] loading model...")
+ net = cv2.dnn.readNetFromCaffe(config_file, model_file)
np.set_printoptions(suppress=True)
- # load the model
+ # class_labels = ['Phone checking', 'Talking with friends', 'note taking']
+ # class labels
+ class_labels = ['Phone checking', 'Listening', 'Note taking']
model = tensorflow.keras.models.load_model(CLASSIFIER_DIR)
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
@@ -407,45 +394,54 @@ def get_frame_activity_recognition(video_name):
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
size = (224, 224)
- # class labels
- class_labels = ['Phone checking', 'Listening', 'Note taking']
+ # iteration
+ video = cv2.VideoCapture(VIDEO_DIR)
+ no_of_frames = video.get(cv2.CAP_PROP_FRAME_COUNT)
frame_count = 0
+ # total_detections = 10
# frame activity recognitions
frame_activity_recognitions = []
+ # for testing purposes
+ print('starting the frame activity recognition process')
# looping through the frames
- for frame in os.listdir(EXTRACTED_DIR):
+ while (frame_count < no_of_frames):
# define the count variables for each frame
phone_checking_count = 0
listening_count = 0
note_taking_count = 0
+ ret, image = video.read()
# derive the frame folder path
- FRAME_FOLDER = os.path.join(EXTRACTED_DIR, frame)
+ # FRAME_FOLDER = os.path.join(EXTRACTED_DIR, frame)
+ frame_name = "frame-{}".format(frame_count)
frame_details = {}
- frame_details['frame_name'] = frame
+ frame_details['frame_name'] = frame_name
# to count the extracted detections for a frame
detection_count = 0
detected_percentages = []
- # loop through each detection in the frame
- for detection in os.listdir(FRAME_FOLDER):
- DETECTION_PATH = os.path.join(FRAME_FOLDER, detection)
- # check whether the image is not the frame itself
- if "frame" not in detection:
- image = cv2.imread(DETECTION_PATH)
- image = cv2.resize(image, size)
- image_array = np.asarray(image)
+ detections = person_detection(image, net)
+ # if there are detections
+ if (len(detections) > 0):
+ # loop through each detection in the frame
+ for detection in detections:
+ detection = cv2.resize(detection, size)
+ image_array = np.asarray(detection)
normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
# Load the image into the array
@@ -467,6 +463,7 @@ def get_frame_activity_recognition(video_name):
# increment the detection count
detection_count += 1
# calculating the percentages for the frame
phone_checking_perct = float(phone_checking_count / detection_count) * 100 if detection_count > 0 else 0
listening_perct = float(listening_count / detection_count) * 100 if detection_count > 0 else 0
@@ -480,13 +477,26 @@
# push to all the frame details
frame_activity_recognitions.append(frame_details)
+ else:
+ break
+ print('current frame: ', frame_count)
+ # increment frame count
+ frame_count += 1
# sort the recognitions based on the frame number
sorted_activity_frame_recognitions = custom_object_sorter(frame_activity_recognitions)
+ # for testing purposes
+ print('ending the frame activity recognition process')
# return the detected frame percentages
return sorted_activity_frame_recognitions
# this method will retrieve individual student evaluation
def get_individual_student_evaluation(video_name, student_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
@@ -753,6 +763,10 @@ def activity_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# this section will handle saving activity entities to the database
def save_frame_recognition(video_name):
+ # for testing purposes
+ print('starting the saving activity frame recognition process')
# retrieve the lecture activity id
lec_activity = LectureActivity.objects.filter(lecture_video_id__video_name=video_name)
lec_activity_ser = LectureActivitySerializer(lec_activity, many=True)
@@ -787,6 +801,9 @@ def save_frame_recognition(video_name):
lec_activity_frame_recognitions.save()
+ # for testing purposes
+ print('ending the saving activity frame recognition process')
# now return the frame detections
return frame_detections
@@ -794,6 +811,8 @@ def save_frame_recognition(video_name):
# this method will save the activity frame groupings to the database
def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
+ # for testing purposes
+ print('starting the saving activity frame groupings process')
frame_group_percentages, activity_labels = activity_frame_groupings(video_name, frame_landmarks,
frame_group_dict)
@@ -825,5 +844,8 @@ def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
new_lec_activity_frame_groupings.lecture_activity_id_id = lec_activity_id
new_lec_activity_frame_groupings.frame_group_details = frame_group_details
+ # for testing purposes
+ print('ending the saving activity frame groupings process')
# save
new_lec_activity_frame_groupings.save()
@@ -144,18 +144,10 @@ def process_gaze_estimation(video_path):
VIDEO_PATH = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_path))
GAZE_DIR = os.path.join(BASE_DIR, "static\\FirstApp\\gaze")
- # create a folder with the same name as the video
- VIDEO_DIR = os.path.join(GAZE_DIR, video_path)
# define a dictionary to return the percentage values
percentages = {}
- # checking whether the video directory exist
- if os.path.isdir(VIDEO_DIR):
-     shutil.rmtree(VIDEO_DIR)
- # create the new directory
- os.mkdir(VIDEO_DIR)
# load the face detection model
face_model = get_face_detector()
@@ -202,6 +194,9 @@ def process_gaze_estimation(video_path):
[0, 0, 1]], dtype="double"
)
+ # for testing purposes
+ print('starting the gaze estimation process')
# iterate the video frames
while True:
ret, img = cap.read()
@@ -285,35 +280,39 @@ def process_gaze_estimation(video_path):
# checking for vertical and horizontal directions
if isLookingDown & isLookingRight:
- cv2.putText(img, 'looking down and right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
+ # cv2.putText(img, 'looking down and right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_down_right_count += 1
elif isLookingDown & isLookingLeft:
- cv2.putText(img, 'looking down and left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
+ # cv2.putText(img, 'looking down and left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_down_left_count += 1
elif isLookingUp & isLookingRight:
- cv2.putText(img, 'looking up and right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
+ # cv2.putText(img, 'looking up and right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_up_right_count += 1
elif isLookingUp & isLookingLeft:
- cv2.putText(img, 'looking up and left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
+ # cv2.putText(img, 'looking up and left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_up_left_count += 1
elif isLookingFront:
- cv2.putText(img, 'Head front', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
+ # cv2.putText(img, 'Head front', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_front_count += 1
# indicate the student name
- cv2.putText(img, student_name, (facebox[2], facebox[3]), font, 2, (255, 255, 128), 3)
+ # cv2.putText(img, student_name, (facebox[2], facebox[3]), font, 2, (255, 255, 128), 3)
# increment the face count
face_count += 1
# naming the new image
- image_name = "frame-{}.png".format(frame_count)
+ # image_name = "frame-{}.png".format(frame_count)
+ #
- # new image path
+ # # new image path
- image_path = os.path.join(VIDEO_DIR, image_name)
+ # image_path = os.path.join(VIDEO_DIR, image_name)
# save the new image
- cv2.imwrite(image_path, img)
+ # cv2.imwrite(image_path, img)
+ # for testing purposes
+ print('gaze estimation count: ', frame_count)
# increment the frame count
frame_count += 1
@@ -323,8 +322,8 @@ def process_gaze_estimation(video_path):
# after extracting the frames, save the changes to static content
- p = os.popen("python manage.py collectstatic", "w")
- p.write("yes")
+ # p = os.popen("python manage.py collectstatic", "w")
+ # p.write("yes")
# calculate percentages
head_up_right_perct = (Decimal(head_up_right_count) / Decimal(face_count)) * 100
@@ -346,6 +345,9 @@ def process_gaze_estimation(video_path):
cv2.destroyAllWindows()
cap.release()
+ # for testing purposes
+ print('ending the gaze estimation process')
# return the dictionary
return percentages
@@ -370,7 +372,7 @@ def getExtractedFrames(lecture_video_name):
# this method will retrieve lecture gaze estimation for each frame
- def get_lecture_gaze_esrimation_for_frames(video_name):
+ def get_lecture_gaze_estimation_for_frames(video_name):
# get the base directory
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
@@ -422,6 +424,10 @@ def get_lecture_gaze_esrimation_for_frames(video_name):
[0, 0, 1]], dtype="double"
)
+ # for testing purposes
+ print('starting the gaze estimation for frames process')
# iterate the video frames
while True:
ret, img = cap.read()
@@ -551,6 +557,9 @@ def get_lecture_gaze_esrimation_for_frames(video_name):
# append the calculated percentages to the frame_detections
frame_detections.append(percentages)
+ # for testing purposes
+ print('gaze estimation frame recognition count: ', frame_count)
frame_count += 1
else:
@@ -558,16 +567,17 @@ def get_lecture_gaze_esrimation_for_frames(video_name):
+ # for testing purposes
+ print('ending the gaze estimation for frames process')
+ # return the details
return frame_detections, frame_rate
# this method will get the student gaze estimation summary for period
def get_student_gaze_estimation_summary_for_period(gaze_estimation_data):
- # declare variables to add percentage values
- phone_checking_perct_combined = 0.0
- listening_perct_combined = 0.0
- note_taking_perct_combined = 0.0
+ # declare variables to add percentage values
looking_up_right_perct_combined = 0.0
looking_up_left_perct_combined = 0.0
looking_down_right_perct_combined = 0.0
@@ -601,16 +611,16 @@ def get_student_gaze_estimation_summary_for_period(gaze_estimation_data):
# calculate the average percentages
looking_up_right_average_perct = round((looking_up_right_perct_combined / no_of_gaze_estimations), 1)
- looking_up_left_perct = round((looking_up_left_perct_combined / no_of_gaze_estimations), 1)
+ looking_up_left_average_perct = round((looking_up_left_perct_combined / no_of_gaze_estimations), 1)
looking_down_right_average_perct = round((looking_down_right_perct_combined / no_of_gaze_estimations), 1)
looking_down_left_average_perct = round((looking_down_left_perct_combined / no_of_gaze_estimations), 1)
looking_front_average_perct = round((looking_front_perct_combined / no_of_gaze_estimations), 1)
percentages = {}
percentages["looking_up_and_right_perct"] = looking_up_right_average_perct
- percentages["looking_up_and_left_perct"] = looking_up_left_perct_combined
+ percentages["looking_up_and_left_perct"] = looking_up_left_average_perct
- percentages["looking_down_and_right_perct"] = looking_down_right_perct_combined
+ percentages["looking_down_and_right_perct"] = looking_down_right_average_perct
- percentages["looking_down_and_left_perct"] = looking_down_left_perct_combined
+ percentages["looking_down_and_left_perct"] = looking_down_left_average_perct
percentages["looking_front_perct"] = looking_front_average_perct
return percentages, individual_lec_gaze_estimations, gaze_estimation_labels
@@ -677,6 +687,8 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic
# assign the difference
frame_group_diff[key] = diff if diff > 0 else 1
+ # for testing purposes
+ print('starting gaze frame grouping process')
# looping through the frames
while True:
@@ -802,6 +814,9 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic
frame_group_dict[frame_name]['detection_count'] += detection_count
+ # for testing purposes
+ print('gaze frame groupings count: ', frame_count)
# increment the frame count
frame_count += 1
@@ -848,12 +863,20 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic
# define the labels
labels = ['upright_perct', 'upleft_perct', 'downright_perct', 'downleft_perct', 'front_perct']
+ # for testing purposes
+ print('ending gaze frame grouping process')
# return the dictionary
return frame_group_dict, labels
# this section will handle some database operations
def save_frame_detections(video_name):
+ # for testing purposes
+ print('starting the saving gaze frame recognition process')
# retrieve the lecture emotion id
lec_gaze = LectureGazeEstimation.objects.filter(lecture_video_id__video_name=video_name)
lec_gaze_ser = LectureGazeEstimationSerializer(lec_gaze, many=True)
@@ -868,7 +891,7 @@ def save_frame_detections(video_name):
ig.generate_new_id(last_lec_gaze_frame_recognitions.lecture_gaze_frame_recognition_id)
# calculate the frame detections
- frame_detections, frame_rate = get_lecture_gaze_esrimation_for_frames(video_name)
+ frame_detections, frame_rate = get_lecture_gaze_estimation_for_frames(video_name)
# to be added to the field 'frame_recognition_details' in the Lecture Gaze Frame Recordings
frame_recognition_details = []
@@ -892,6 +915,9 @@ def save_frame_detections(video_name):
lec_gaze_frame_recognitions.save()
+ # for testing purposes
+ print('ending the saving gaze frame recognition process')
# now return the frame recognitions
return frame_detections
@@ -899,6 +925,10 @@ def save_frame_detections(video_name):
# this method will save gaze frame groupings to the database
def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
+ # for testing purposes
+ print('starting the saving gaze frame groupings process')
frame_group_percentages, gaze_labels = gaze_estimation_frame_groupings(video_name, frame_landmarks,
frame_group_dict)
@@ -928,6 +958,9 @@ def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
new_lec_gaze_frame_groupings.lecture_gaze_id_id = lec_gaze_id
new_lec_gaze_frame_groupings.frame_group_details = frame_group_details
+ # for testing purposes
+ print('ending the saving gaze frame groupings process')
# save
new_lec_gaze_frame_groupings.save()
import os
import cv2
import shutil
- import datetime
+ # import datetime
+ from datetime import timedelta
from FirstApp.MongoModels import *
from FirstApp.serializers import *
@@ -94,7 +95,7 @@ def getTimeLandmarks(video_name):
THRESHOLD_GAP = 5
# calculating the real duration
- real_duration = datetime.timedelta(seconds=(duration+THRESHOLD_GAP))
+ real_duration = timedelta(seconds=(duration))
# defines the number of seconds included for a frame group
THRESHOLD_TIME = 10
@@ -112,7 +113,7 @@ def getTimeLandmarks(video_name):
# loop through the threshold gap limit to define the time landmarks
for i in range(THRESHOLD_GAP):
initial_landmark += unit_gap
- time_landmark = str(datetime.timedelta(seconds=initial_landmark))
+ time_landmark = str(timedelta(seconds=initial_landmark))
time_landmark_value = initial_landmark
time_landmarks.append(time_landmark)
time_landmarks_values.append(time_landmark_value)
@@ -204,6 +205,9 @@ def getFrameLandmarks(video_name, category):
# this section will handle some database operations
def save_time_landmarks(video_name):
+ # for testing purposes
+ print('starting the saving time landmarks process')
last_lec_video_time_landmarks = LectureVideoTimeLandmarks.objects.order_by('lecture_video_time_landmarks_id').last()
new_lecture_video_time_landmarks_id = "LVTL00001" if (last_lec_video_time_landmarks is None) else \
ig.generate_new_id(last_lec_video_time_landmarks.lecture_video_time_landmarks_id)
@@ -233,12 +237,18 @@ def save_time_landmarks(video_name):
new_lec_video_time_landmarks.lecture_video_id_id = lec_video_id
new_lec_video_time_landmarks.time_landmarks = db_time_landmarks
+ # for testing purposes
+ print('ending the saving time landmarks process')
new_lec_video_time_landmarks.save()
# this method will save frame landmarks to the database
def save_frame_landmarks(video_name):
+ # for testing purposes
+ print('starting the saving frame landmarks process')
# retrieve the previous lecture video frame landmarks details
last_lec_video_frame_landmarks = LectureVideoFrameLandmarks.objects.order_by(
'lecture_video_frame_landmarks_id').last()
@@ -271,6 +281,9 @@ def save_frame_landmarks(video_name):
new_lec_video_frame_landmarks.save()
+ # for testing purposes
+ print('ending the saving frame landmarks process')
# now return the frame landmarks and the frame group dictionary
return frame_landmarks, frame_group_dict
...
{% extends 'FirstApp/template.html' %}
<!DOCTYPE html>
<html lang="en">
<body id="page-top">
<!-- Page Wrapper -->
<div id="wrapper">
<!-- Content Wrapper -->
<div id="content-wrapper" class="d-flex flex-column">
<!-- Main Content -->
<div id="content">
<!-- Begin Page Content -->
{% block 'container-fluid' %}
<div class="container-fluid">
{% load static %}
<!-- 404 Error Text -->
<div class="text-center">
<div class="error mx-auto" data-text="404">401</div>
<p class="lead text-gray-800 mb-5">Unauthorized access</p>
<p class="text-gray-500 mb-0">It looks like you do not have access to this url</p>
<p class="text-gray-500 mb-0">Please login with the correct user type</p>
<a href="/logout">&larr; Back to Login Page</a>
</div>
</div>
{% endblock %}
<!--end of container-fluid -->
</div>
<!-- End of Main Content -->
<!-- Footer -->
<footer class="sticky-footer bg-white">
<div class="container my-auto">
<div class="copyright text-center my-auto">
<span>Copyright &copy; Your Website 2019</span>
</div>
</div>
</footer>
<!-- End of Footer -->
</div>
<!-- End of Content Wrapper -->
</div>
<!-- End of Page Wrapper -->
<!-- Scroll to Top Button-->
<a class="scroll-to-top rounded" href="#page-top">
<i class="fas fa-angle-up"></i>
</a>
<!-- Logout Modal-->
<div class="modal fade" id="logoutModal" tabindex="-1" role="dialog" aria-labelledby="exampleModalLabel" aria-hidden="true">
<div class="modal-dialog" role="document">
<div class="modal-content">
<div class="modal-header">
<h5 class="modal-title" id="exampleModalLabel">Ready to Leave?</h5>
<button class="close" type="button" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body">Select "Logout" below if you are ready to end your current session.</div>
<div class="modal-footer">
<button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
<a class="btn btn-primary" href="login.html">Logout</a>
</div>
</div>
</div>
</div>
<!-- Bootstrap core JavaScript-->
<script src="vendor/jquery/jquery.min.js"></script>
<script src="vendor/bootstrap/js/bootstrap.bundle.min.js"></script>
<!-- Core plugin JavaScript-->
<script src="vendor/jquery-easing/jquery.easing.min.js"></script>
<!-- Custom scripts for all pages-->
<script src="js/sb-admin-2.min.js"></script>
</body>
</html>
@@ -216,6 +216,7 @@
//to handle the 'btn-success' (process) button
$(document).on('click', '.btn-success', function (e) {
//sending the POST request to process the lecture activities
fetch('http://127.0.0.1:8000/process-lecture-emotion/?lecture_video_name=' + global_video_name + '&lecture_video_id=' + global_lecture_video_id)
.then((res) => res.json())
...
...@@ -66,6 +66,8 @@ ...@@ -66,6 +66,8 @@
Interface Interface
</div> </div>
{% if request.session.user_type == "Lecturer" %}
<!-- Nav Item - Pages Collapse Menu -->
<li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseTwo" aria-expanded="true" aria-controls="collapseTwo">
...@@ -83,6 +85,7 @@
</div>
</li>
<!-- Nav Item - Pages Collapse Menu -->
<li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseThree" aria-expanded="true" aria-controls="collapseThree">
...@@ -97,6 +100,8 @@
</div>
</li>
<li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseFour" aria-expanded="true" aria-controls="collapseThree">
<i class="fas fa-fw fa-cog"></i>
...@@ -127,6 +132,8 @@
</div>
</li>
{% endif %}
<!-- Divider -->
<hr class="sidebar-divider">
...@@ -178,6 +185,8 @@
</div>
</ul>
<!-- End of Sidebar -->
<div id="content-wrapper" class="d-flex flex-column">
...
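The new {% if request.session.user_type == "Lecturer" %} ... {% endif %} guard hides the lecturer-only menu items from other user types. It relies on the request object being available inside templates, which Django only provides when the request context processor is enabled. The settings.py fragment below is a sketch of the assumed configuration; the project's actual settings file is not part of this diff.

# settings.py (sketch of the assumed configuration)
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                # exposes request (and therefore request.session) to templates,
                # which the sidebar guard above depends on
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]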
...@@ -14,6 +14,7 @@ urlpatterns = [
path('logout', views.logoutView),
path('register-user', views.register),
path('404', views.view404),
path('401', views.view401),
path('500', views.view500),
path('blank', views.blank),
path('gaze', views.gaze),
...
...@@ -109,13 +109,18 @@ class LectureViewSet(APIView):
####### VIEWS ######
@login_required(login_url='/login') @login_required(login_url='/user-direct')
def hello(request):
try:
username = request.user.username
# retrieve the lecturer
lecturer = request.session['lecturer']
user_type = request.session['user_type']
print('user_type: ', user_type)
# retrieve the lecturer's timetable slots
lecturer_timetable = FacultyTimetable.objects.filter()
...@@ -194,15 +199,27 @@ def hello(request):
context = {'object': obj, 'Videos': videos, 'durations': durations, 'template_name': 'FirstApp/template.html', 'lecturer_details': lecturer_details, "lecturer": lecturer}
return render(request, 'FirstApp/Home.html', context)
# in case of keyerror exception
except KeyError as exc:
return redirect('/401')
except Exception as exc:
return redirect('/500')
# this method will handle the 404 error page
def view404(request):
return render(request, 'FirstApp/404.html')
# this method will handle the 401 error page
def view401(request):
return render(request, 'FirstApp/401.html')
# querying the database
def blank(request):
emotions = LectureEmotionReport.objects.all().order_by('lecture_id')
return render(request, 'FirstApp/blank.html', {'details': emotions})
@login_required(login_url='/login') @login_required(login_url='/user-direct')
def gaze(request):
try:
...@@ -221,6 +238,11 @@ def gaze(request):
subject_list.append(subject_serialized.data)
# handling the keyError
except KeyError as exc:
return redirect('/401')
# handling the general exceptions
except Exception as exc:
return redirect('/500')
...@@ -240,7 +262,7 @@ def processGaze(request):
# the corresponding view for pose estimation
@login_required(login_url='/login') @login_required(login_url='/user-direct')
def pose(request):
try:
...@@ -295,7 +317,7 @@ def webcam(request):
return redirect('/')
# to process video for emotion detection
@login_required(login_url='/login') @login_required(login_url='/user-direct')
def video(request):
title = 'Student and Lecturer Performance Enhancement System'
video_name = request.GET.get('video_name')
...@@ -310,7 +332,7 @@ def video(request):
# extractor view
@login_required(login_url='/login') @login_required(login_url='/user-direct')
def extractor(request):
folder = os.path.join(BASE_DIR, os.path.join('static\\FirstApp\\videos'))
videoPaths = [os.path.join(folder, file) for file in os.listdir(folder)]
...@@ -358,7 +380,7 @@ def child(request):
return render(request, 'FirstApp/child.html', {'template_name': 'FirstApp/base.html'})
# displaying video results
@login_required(login_url='/login') @login_required(login_url='/user-direct')
def video_result(request):
try:
...@@ -434,7 +456,11 @@ def video_result(request):
# append to the list
due_lecture_list.append(obj)
# handling the keyError
except KeyError as exc:
return redirect('/401')
# handling the general exceptions
except Exception as exc:
print('what is wrong?: ', exc)
return redirect('/500')
...@@ -444,7 +470,7 @@ def video_result(request):
# view for emotion page
@login_required(login_url='/login') @login_required(login_url='/user-direct')
def emotion_view(request):
try:
...@@ -463,6 +489,11 @@ def emotion_view(request):
subject_list.append(subject_serialized.data)
# handling the keyError
except KeyError as exc:
return redirect('/401')
# handling the general exceptions
except Exception as exc:
return redirect('/500')
...@@ -490,6 +521,7 @@ def loggedInView(request):
login(request, user)
# setting up the session
request.session['lecturer'] = lecturer.id
request.session['user_type'] = "Lecturer"
return redirect('/')
...@@ -519,7 +551,7 @@ def tables(request):
return render(request, "FirstApp/tables.html")
@login_required(login_url='/login') @login_required(login_url='/user-direct')
def activity(request):
try:
...@@ -538,6 +570,11 @@ def activity(request):
subject_list.append(subject_serialized.data)
# handling the keyError
except KeyError as exc:
return redirect('/401')
# handling the general exception
except Exception as exc:
return redirect('/500')
...@@ -593,6 +630,7 @@ def processAdminLogin(request):
login(request, user)
# setting up the session
request.session['admin'] = admin.id
request.session['user_type'] = "Admin"
return redirect('/summary/lecture')
...
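Taken together, the views.py changes repeat one pattern: each lecturer-facing view reads request.session['lecturer'] (and now request.session['user_type'], set at login for lecturers and admins), redirects to /401 on a KeyError when the expected session keys are missing, and to /500 on any other exception. The decorator below is a hedged sketch of how that pattern could be factored out; lecturer_session_required is a hypothetical helper, not something introduced by this commit.

from functools import wraps

from django.shortcuts import redirect


def lecturer_session_required(view_func):
    # hypothetical helper, not part of this commit: centralises the
    # KeyError -> /401 and Exception -> /500 handling repeated above
    @wraps(view_func)
    def wrapper(request, *args, **kwargs):
        try:
            # raises KeyError when the lecturer session was never set up,
            # i.e. the user logged in with the wrong user type
            request.session['lecturer']
            return view_func(request, *args, **kwargs)
        except KeyError:
            return redirect('/401')
        except Exception:
            return redirect('/500')
    return wrapper

With such a helper, gaze, activity, emotion_view and the other lecturer views would keep only their happy-path logic instead of repeating the same try/except blocks.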