Commit 539497be authored by I.K Seneviratne

Committing the addition of the 401 page and the full implementation of saving activity, emotion and gaze estimations through a single process.
parent 745c0fb3
......@@ -300,7 +300,7 @@ class LectureEmotionFrameRecognitions(models.Model):
# POSE section
# lecture pose estimation
# lecture gaze estimation
class LectureGazeEstimation(models.Model):
lecture_gaze_id = models.CharField(max_length=10)
lecture_video_id = models.ForeignKey(LectureVideo, on_delete=models.CASCADE)
......
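A note for readers of the hunks below: because the ForeignKey field is itself named lecture_video_id, Django exposes the underlying raw key as lecture_video_id_id (the field name plus an extra "_id"). That is why the save paths in this commit switch from passing a model instance to passing the primary key. A minimal sketch, with an assumed key value of 42:

# the FK field is named 'lecture_video_id', so Django generates the raw
# attribute 'lecture_video_id_id' by appending another '_id'
gaze = LectureGazeEstimation(
    lecture_gaze_id="LG000001",
    lecture_video_id_id=42,  # assumed primary key of an existing LectureVideo row
)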
......@@ -300,7 +300,6 @@ class LectureActivityProcess(APIView):
LectureActivity(
lecture_activity_id=new_lecture_activity_id,
lecture_video_id_id=lec_video_id,
phone_perct=percentages['phone_perct'],
listening_perct=percentages['listening_perct'],
writing_perct=percentages['writing_perct']
......@@ -473,16 +472,18 @@ class LectureEmotionProcess(APIView):
pass
def save_emotion_report(self, lec_video_id, percentages):
lec_video = LectureVideo.objects.filter(lecture_video_id=lec_video_id)
lec_video_serializer = LectureVideoSerializer(lec_video, many=True)
lec_video_data = lec_video_serializer.data[0]
last_lec_emotion = LectureEmotionReport.objects.order_by('lecture_emotion_id').last()
new_lecture_emotion_id = ig.generate_new_id(last_lec_emotion.lecture_emotion_id)
lecture_video_id = lec_video_data['id']
# creating a new lecture emotion report
LectureEmotionReport(
lecture_emotion_id=new_lecture_emotion_id,
lecture_video_id_id=lecture_video_id,
happy_perct=percentages.happy_perct,
sad_perct=percentages.sad_perct,
angry_perct=percentages.angry_perct,
......@@ -685,17 +686,23 @@ class ProcessLectureGazeEstimation(APIView):
pass
def estimate_gaze(self, lec_video_id, percentages):
lec_video = LectureVideo.objects.filter(lecture_video_id=lec_video_id)
last_lec_gaze = LectureGazeEstimation.objects.order_by('lecture_gaze_id').last()
lec_video_serializer = LectureVideoSerializer(lec_video, many=True)
lec_video_data = lec_video_serializer.data[0]
new_lecture_gaze_id = "LG000001" if (last_lec_gaze is None) else ig.generate_new_id(
last_lec_gaze.lecture_gaze_id)
new_lecture_gaze_primary_id = 1 if (last_lec_gaze is None) else int(last_lec_gaze.id) + 1
# get the video id
lecture_video_id = lec_video_data['id']
# creating a new lecture gaze estimation
LectureGazeEstimation(
id=new_lecture_gaze_primary_id,
lecture_gaze_id=new_lecture_gaze_id,
lecture_video_id_id=lecture_video_id,
looking_up_and_right_perct=percentages['head_up_right_perct'],
looking_up_and_left_perct=percentages['head_up_left_perct'],
looking_down_and_right_perct=percentages['head_down_right_perct'],
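The ids such as "LG000001" come from the id_generator helper imported as ig; its implementation is not part of this diff. A minimal sketch of what ig.generate_new_id presumably does, assuming a fixed alphabetic prefix and a zero-padded counter:

# hypothetical sketch of FirstApp/logic/id_generator.py's generate_new_id
def generate_new_id(last_id):
    # split an id like "LG000001" into its alphabetic prefix and numeric suffix
    prefix = ''.join(ch for ch in last_id if ch.isalpha())
    number = int(''.join(ch for ch in last_id if ch.isdigit()))
    # increment the counter and zero-pad it back to its original width
    width = len(last_id) - len(prefix)
    return "{}{:0{}d}".format(prefix, number + 1, width)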
......@@ -723,7 +730,7 @@ class GetLectureGazeEstimationViewSet(APIView):
lecture_video_id = request.query_params.get('lecture_video_id')
lecture_video_name = request.query_params.get('lecture_video_name')
# retrieve the extracted frames
# extracted = hge.getExtractedFrames(lecture_video_name)
lecture_gaze_estimations = LectureGazeEstimation.objects.filter(
lecture_video_id__lecture_video_id=lecture_video_id)
......@@ -731,7 +738,7 @@ class GetLectureGazeEstimationViewSet(APIView):
return Response({
"response": serializer.data,
"extracted": extracted
# "extracted": extracted
})
......
......@@ -10,6 +10,7 @@ from .MongoModels import *
from . models import VideoMeta
from . logic import custom_sorter as cs
from .logic import id_generator as ig
from .logic import activity_recognition as ar
# emotion recognition method
......@@ -47,7 +48,6 @@ def detect_emotion(video):
face_classifier = cv2.CascadeClassifier(os.path.join(BASE_DIR, 'FirstApp\classifiers\haarcascade_frontalface_default.xml'))
classifier_path = os.path.join(BASE_DIR, 'FirstApp\classifiers\Emotion_little_vgg.h5')
classifier = load_model(classifier_path)
meta_data = VideoMeta()
class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
......@@ -65,6 +65,9 @@ def detect_emotion(video):
count_neutral = 0
count_surprise = 0
# for testing purposes
print('starting the emotion recognition process')
while (count_frames < frame_count):
# Grab a single frame of video
ret, frame = cap.read()
......@@ -72,52 +75,34 @@ def detect_emotion(video):
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_classifier.detectMultiScale(gray,1.3,5)
label = emotion_recognition(classifier, face_classifier, frame)
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
roi_gray = gray[y:y+h, x:x+w]
roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
# rect,face,image = face_detector(frame)
if np.sum([roi_gray])!=0:
roi = roi_gray.astype('float')/255.0
roi = img_to_array(roi)
roi = np.expand_dims(roi, axis=0)
# make a prediction on the ROI, then lookup the class
preds = classifier.predict(roi)[0]
label = class_labels[preds.argmax()]
# counting the number of frames for each label, to calculate the percentage for each emotion later on...
if (label == 'Angry'):
count_angry += 1
# path = os.path.join(BASE_DIR, 'static\\images\\Anger')
# cv2.imwrite(os.path.join(path, 'Anger-{0}.jpg'.format(count)), frame)
elif (label == 'Happy'):
count_happy += 1
# path = os.path.join(BASE_DIR, 'static\\images\\Happy')
# cv2.imwrite(os.path.join(path, 'Happy-{0}.jpg'.format(count)), frame)
elif (label == 'Neutral'):
count_neutral += 1
# path = os.path.join(BASE_DIR, 'static\\images\\Neutral')
# cv2.imwrite(os.path.join(path, 'Neutral-{0}.jpg'.format(count)), frame)
elif (label == 'Sad'):
count_sad += 1
elif (label == 'Surprise'):
count_surprise += 1
label_position = (x, y)
# cv2.putText(frame, label, label_position, cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 3)
# cv2.imwrite("".format(label, count), frame)
else:
cv2.putText(frame, 'No Face Found', (20, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 3)
# for testing purposes
print('emotion frame count: ', count_frames)
count_frames += 1
......@@ -132,6 +117,9 @@ def detect_emotion(video):
cap.release()
cv2.destroyAllWindows()
# for testing purposes
print('ending the emotion recognition process')
return meta_data
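emotion_recognition(classifier, face_classifier, image) is called throughout this file but its body is not part of the diff. A minimal sketch of what it presumably does, reconstructed from the inline Haar-cascade and VGG code above; this is an assumption, not the repo's exact helper:

def emotion_recognition(classifier, face_classifier, image):
    class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
    label = ''
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # detect faces and classify each region of interest
    for (x, y, w, h) in face_classifier.detectMultiScale(gray, 1.3, 5):
        roi_gray = cv2.resize(gray[y:y + h, x:x + w], (48, 48), interpolation=cv2.INTER_AREA)
        if np.sum([roi_gray]) != 0:
            roi = img_to_array(roi_gray.astype('float') / 255.0)
            roi = np.expand_dims(roi, axis=0)
            # the highest-probability class becomes the label
            label = class_labels[classifier.predict(roi)[0].argmax()]
    return label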
......@@ -263,11 +251,24 @@ def get_individual_student_evaluation(video_name, student_name):
# this method will retrieve the emotion recognitions for each frame
def get_frame_emotion_recognition(video_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
face_classifier = cv2.CascadeClassifier(
os.path.join(BASE_DIR, 'FirstApp\classifiers\haarcascade_frontalface_default.xml'))
classifier_path = os.path.join(BASE_DIR, 'FirstApp\classifiers\Emotion_little_vgg.h5')
classifier = load_model(classifier_path)
# files required for person detection
config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
model_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.caffemodel")
# load our serialized person detection model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(config_file, model_file)
cap = cv2.VideoCapture(VIDEO_DIR)
no_of_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# initializing the count variables
frame_count = 0
......@@ -276,16 +277,21 @@ def get_frame_emotion_recognition(video_name):
# frame activity recognitions
frame_emotion_recognitions = []
# class labels
class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
# for testing purposes
print('starting the emotion frame recognition process')
while (frame_count < no_of_frames):
ret, image = cap.read()
frame_name = "frame-{}".format(frame_count)
frame_details = {}
frame_details['frame_name'] = frame_name
# initialize the count variables for a frame
happy_count = 0
......@@ -294,18 +300,19 @@ def get_frame_emotion_recognition(video_name):
neutral_count = 0
surprise_count = 0
# get the detections
detections = ar.person_detection(image, net)
# to count the extracted detections for a frame
detection_count = 0
# if there are detections
if (len(detections) > 0):
# loop through the detections
for detection in detections:
# get the label for this detection
label = emotion_recognition(classifier, face_classifier, detection)
# checking for the label
if label == class_labels[0]:
......@@ -324,26 +331,40 @@ def get_frame_emotion_recognition(video_name):
# calculating the percentages for the frame
happy_perct = float(happy_count / detection_count) * 100 if detection_count > 0 else 0
sad_perct = float(sad_count / detection_count) * 100 if detection_count > 0 else 0
angry_perct = float(angry_count / detection_count) * 100 if detection_count > 0 else 0
neutral_perct = float(neutral_count / detection_count) * 100 if detection_count > 0 else 0
surprise_perct = float(surprise_count / detection_count) * 100 if detection_count > 0 else 0
# this dictionary will be returned
frame_details['happy_perct'] = happy_perct
frame_details['sad_perct'] = sad_perct
frame_details['angry_perct'] = angry_perct
frame_details['neutral_perct'] = neutral_perct
frame_details['surprise_perct'] = surprise_perct
# push to all the frame details
frame_emotion_recognitions.append(frame_details)
else:
break
# for testing purposes
print('emotion frame recognition count: ', frame_count)
# increment the frame count
frame_count += 1
# sort the recognitions based on the frame number
sorted_activity_frame_recognitions = cs.custom_object_sorter(frame_emotion_recognitions)
# for testing purposes
print('ending the emotion frame recognition process')
# return the detected frame percentages
return sorted_activity_frame_recognitions
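custom_object_sorter from the custom_sorter module is also outside this diff; given the frame_name values built above ("frame-0", "frame-1", ...), it presumably orders the dictionaries by their numeric suffix. A sketch under that assumption:

# hypothetical sketch of custom_sorter.custom_object_sorter
def custom_object_sorter(frame_recognitions):
    # "frame-12" -> 12, so frames sort numerically rather than lexically
    return sorted(frame_recognitions, key=lambda d: int(d['frame_name'].split('-')[-1]))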
......@@ -409,15 +430,28 @@ def get_student_emotion_summary_for_period(emotions):
# this method will retrieve emotion frame groupings for a lecture
def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
# load the models
face_classifier = cv2.CascadeClassifier(
os.path.join(BASE_DIR, 'FirstApp\classifiers\haarcascade_frontalface_default.xml'))
classifier_path = os.path.join(BASE_DIR, 'FirstApp\classifiers\Emotion_little_vgg.h5')
classifier = load_model(classifier_path)
# files required for person detection
config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
model_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.caffemodel")
# load our serialized person detection model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(config_file, model_file)
cap = cv2.VideoCapture(VIDEO_DIR)
no_of_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# initializing the count variables
......@@ -441,9 +475,11 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# looping through the frames
while (frame_count < no_of_frames):
# get the current frame
ret, image = cap.read()
# initializing the variables
happy_count = 0
......@@ -453,17 +489,17 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
neutral_count = 0
detection_count = 0
detections = ar.person_detection(image, net)
# if there are detections
if (len(detections) > 0):
# looping through the detections in each frame
for detection in detections:
# run the model and get the emotion label
label = emotion_recognition(classifier, face_classifier, detection)
# increment the count based on the label
if label == class_labels[0]:
......@@ -503,7 +539,11 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
frame_group_dict[frame_name]['neutral_count'] += neutral_count
frame_group_dict[frame_name]['detection_count'] += detection_count
else:
break
# for testing purposes
print('emotion frame groupings count: ', frame_count)
# increment the frame count
frame_count += 1
......@@ -558,6 +598,10 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# this section will handle some database operations
def save_frame_recognitions(video_name):
# for testing purposes
print('starting the saving emotion frame recognition process')
# retrieve the lecture emotion id
lec_emotion = LectureEmotionReport.objects.filter(lecture_video_id__video_name=video_name)
lec_emotion_ser = LectureEmotionSerializer(lec_emotion, many=True)
......@@ -595,6 +639,9 @@ def save_frame_recognitions(video_name):
lec_emotion_frame_recognitions.save()
# for testing purposes
print('ending the saving emotion frame recognition process')
# now return the frame recognitions
return frame_detections
......@@ -602,6 +649,9 @@ def save_frame_recognitions(video_name):
# this method will save the emotion frame groupings to the database
def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# for testing purposes
print('starting the saving emotion frame groupings process')
frame_group_percentages, emotion_labels = emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict)
# save the frame group details into db
......@@ -631,5 +681,8 @@ def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
new_lec_emotion_frame_groupings.lecture_emotion_id_id = lec_emotion_id
new_lec_emotion_frame_groupings.frame_group_details = frame_group_details
# for testing purposes
print('ending the saving emotion frame groupings process')
# save
new_lec_emotion_frame_groupings.save()
......@@ -50,38 +50,21 @@ def activity_recognition(video_path):
frame_count = 0
total_detections = 0
phone_checking_count = 0
talking_count = 0
note_taking_count = 0
listening_count = 0
# video activity directory
VIDEO_ACTIVITY_DIR = os.path.join(ACTIVITY_DIR, video_path)
# creating the directory for the video
# if (os.path.isdir(VIDEO_ACTIVITY_DIR)):
# shutil.rmtree(VIDEO_ACTIVITY_DIR)
#
# # create the video directory
# os.mkdir(VIDEO_ACTIVITY_DIR)
# for testing purposes
print('starting the activity recognition process')
while (frame_count < no_of_frames):
ret, image = video.read()
# frame_name = "frame-{}.png".format(frame_count)
#
# FRAME_IMG = os.path.join(FRAME_DIR, frame_name)
#
# if (os.path.isdir(FRAME_DIR)):
# shutil.rmtree(FRAME_DIR)
# create the new frame directory
# os.mkdir(FRAME_DIR)
image = cv2.resize(image, size)
detections = person_detection(image, net)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# cv2.imwrite(FRAME_IMG, image)
# this is for testing purposes
print('frame count: ', frame_count)
# if there are any person detections
if (len(detections) > 0):
......@@ -90,6 +73,7 @@ def activity_recognition(video_path):
detection_count = 0
# looping through the person detections of the frame
for detection in detections:
detection = cv2.resize(detection, size)
......@@ -113,43 +97,33 @@ def activity_recognition(video_path):
elif (label == class_labels[2]):
note_taking_count += 1
# saving the detection for the particular frame
# detection_name = "detection-{}.png".format(detection_count)
# detection_image_path = os.path.join(FRAME_DIR, detection_name)
#
# # converting detected image into grey-scale
# detection = cv2.cvtColor(detection, cv2.COLOR_BGR2GRAY)
#
# cv2.imwrite(detection_image_path, detection)
detection_count += 1
frame_count += 1
# after extracting the frames, save the changes to static content
# p = os.popen("python manage.py collectstatic", "w")
# p.write("yes")
# calculating the percentages for each label
phone_perct = float(phone_checking_count / total_detections) * 100 if total_detections > 0 else 0
# talking_perct = float(talking_count / total_detections) * 100 if total_detections > 0 else 0
note_perct = float(note_taking_count / total_detections) * 100 if total_detections > 0 else 0
listening_perct = float(listening_count / total_detections) * 100 if total_detections > 0 else 0
# assigning the percentages to the dictionary
percentages["phone_perct"] = phone_perct
percentages["talking_perct"] = talking_perct
# percentages["talking_perct"] = talking_perct
percentages["writing_perct"] = note_perct
percentages["listening_perct"] = listening_perct
# for testing purposes
print('activity recognition process is over')
return percentages
def person_detection(image, net):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
model_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.caffemodel")
threshold = 0.2
detected_person = []
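# the rest of person_detection is cut off by the diff; the following is a
# hypothetical continuation of the standard MobileNet-SSD pass, assuming the
# usual VOC label ordering in which class index 15 is 'person'
(h, w) = image.shape[:2]
blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 0.007843, (300, 300), 127.5)
net.setInput(blob)
detections = net.forward()
for i in range(detections.shape[2]):
    confidence = detections[0, 0, i, 2]
    if confidence > threshold and int(detections[0, 0, i, 1]) == 15:
        # scale the box back to image coordinates and crop out the person
        box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
        (startX, startY, endX, endY) = box.astype("int")
        detected_person.append(image[startY:endY, startX:endX])
return detected_person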
......@@ -391,14 +365,27 @@ def get_student_activity_evaluation(video_name):
# recognize the activity for each frame
def get_frame_activity_recognition(video_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_03.h5")
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_04.h5")
ACTIVITY_DIR = os.path.join(BASE_DIR, "static\\FirstApp\\activity")
# files required for person detection
config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
model_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.caffemodel")
# load our serialized person detection model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(config_file, model_file)
np.set_printoptions(suppress=True)
# load the model
# class_labels = ['Phone checking', 'Talking with friends', 'note taking']
# class labels
class_labels = ['Phone checking', 'Listening', 'Note taking']
model = tensorflow.keras.models.load_model(CLASSIFIER_DIR)
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
......@@ -407,45 +394,54 @@ def get_frame_activity_recognition(video_name):
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
size = (224, 224)
# iteration
video = cv2.VideoCapture(VIDEO_DIR)
no_of_frames = video.get(cv2.CAP_PROP_FRAME_COUNT)
frame_count = 0
# total_detections = 10
# frame activity recognitions
frame_activity_recognitions = []
# for testing purposes
print('starting the frame activity recognition process')
# looping through the frames
while (frame_count < no_of_frames):
# define the count variables for each frame
phone_checking_count = 0
listening_count = 0
note_taking_count = 0
ret, image = video.read()
# derive the frame folder path
# FRAME_FOLDER = os.path.join(EXTRACTED_DIR, frame)
frame_name = "frame-{}".format(frame_count)
frame_details = {}
frame_details['frame_name'] = frame_name
# to count the extracted detections for a frame
detection_count = 0
detected_percentages = []
detections = person_detection(image, net)
# if there are detections
if (len(detections) > 0):
# loop through each detection in the frame
for detection in detections:
detection = cv2.resize(detection, size)
image_array = np.asarray(detection)
normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
# Load the image into the array
......@@ -467,26 +463,40 @@ def get_frame_activity_recognition(video_name):
# increment the detection count
detection_count += 1
# calculating the percentages for the frame
phone_checking_perct = float(phone_checking_count / detection_count) * 100 if detection_count > 0 else 0
listening_perct = float(listening_count / detection_count) * 100 if detection_count > 0 else 0
note_taking_perct = float(note_taking_count / detection_count) * 100 if detection_count > 0 else 0
# adding the percentage values to the frame details
frame_details['phone_perct'] = phone_checking_perct
frame_details['listening_perct'] = listening_perct
frame_details['note_perct'] = note_taking_perct
# push to all the frame details
frame_activity_recognitions.append(frame_details)
else:
break
print('current frame: ', frame_count)
# increment frame count
frame_count += 1
# sort the recognitions based on the frame number
sorted_activity_frame_recognitions = custom_object_sorter(frame_activity_recognitions)
# for testing purposes
print('ending the frame activity recognition process')
# return the detected frame percentages
return sorted_activity_frame_recognitions
# this method will retrieve individual student evaluation
def get_individual_student_evaluation(video_name, student_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
......@@ -753,6 +763,10 @@ def activity_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# this section will handle saving activity entities to the database
def save_frame_recognition(video_name):
# for testing purposes
print('starting the saving activity frame recognition process')
# retrieve the lecture activity id
lec_activity = LectureActivity.objects.filter(lecture_video_id__video_name=video_name)
lec_activity_ser = LectureActivitySerializer(lec_activity, many=True)
......@@ -787,6 +801,9 @@ def save_frame_recognition(video_name):
lec_activity_frame_recognitions.save()
# for testing purposes
print('ending the saving activity frame recognition process')
# now return the frame detections
return frame_detections
......@@ -794,6 +811,8 @@ def save_frame_recognition(video_name):
# this method will save the activity frame groupings to the database
def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# for testing purposes
print('starting the saving activity frame groupings process')
frame_group_percentages, activity_labels = activity_frame_groupings(video_name, frame_landmarks,
frame_group_dict)
......@@ -825,5 +844,8 @@ def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
new_lec_activity_frame_groupings.lecture_activity_id_id = lec_activity_id
new_lec_activity_frame_groupings.frame_group_details = frame_group_details
# for testing purposes
print('ending the saving activity frame groupings process')
# save
new_lec_activity_frame_groupings.save()
......@@ -144,18 +144,10 @@ def process_gaze_estimation(video_path):
VIDEO_PATH = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_path))
GAZE_DIR = os.path.join(BASE_DIR, "static\\FirstApp\\gaze")
# define a dictionary to return the percentage values
percentages = {}
# load the face detection model
face_model = get_face_detector()
......@@ -202,6 +194,9 @@ def process_gaze_estimation(video_path):
[0, 0, 1]], dtype="double"
)
# for testing purposes
print('starting the gaze estimation process')
# iterate the video frames
while True:
ret, img = cap.read()
......@@ -285,35 +280,39 @@ def process_gaze_estimation(video_path):
# checking for vertical and horizontal directions
if isLookingDown & isLookingRight:
# cv2.putText(img, 'looking down and right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_down_right_count += 1
elif isLookingDown & isLookingLeft:
# cv2.putText(img, 'looking down and left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_down_left_count += 1
elif isLookingUp & isLookingRight:
# cv2.putText(img, 'looking up and right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_up_right_count += 1
elif isLookingUp & isLookingLeft:
# cv2.putText(img, 'looking up and left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_up_left_count += 1
elif isLookingFront:
# cv2.putText(img, 'Head front', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_front_count += 1
# indicate the student name
# cv2.putText(img, student_name, (facebox[2], facebox[3]), font, 2, (255, 255, 128), 3)
# increment the face count
face_count += 1
# naming the new image
# image_name = "frame-{}.png".format(frame_count)
#
# # new image path
# image_path = os.path.join(VIDEO_DIR, image_name)
# save the new image
# cv2.imwrite(image_path, img)
# for testing purposes
print('gaze estimation count: ', frame_count)
# increment the frame count
frame_count += 1
......@@ -323,8 +322,8 @@ def process_gaze_estimation(video_path):
# after extracting the frames, save the changes to static content
# p = os.popen("python manage.py collectstatic", "w")
# p.write("yes")
# calculate percentages
head_up_right_perct = (Decimal(head_up_right_count) / Decimal(face_count)) * 100
......@@ -346,6 +345,9 @@ def process_gaze_estimation(video_path):
cv2.destroyAllWindows()
cap.release()
# for testing purposes
print('ending the gaze estimation process')
# return the dictionary
return percentages
......@@ -370,7 +372,7 @@ def getExtractedFrames(lecture_video_name):
# this method will retrieve lecture gaze estimation for each frame
def get_lecture_gaze_estimation_for_frames(video_name):
# get the base directory
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
......@@ -422,6 +424,10 @@ def get_lecture_gaze_esrimation_for_frames(video_name):
[0, 0, 1]], dtype="double"
)
# for testing purposes
print('starting the gaze estimation for frames process')
# iterate the video frames
while True:
ret, img = cap.read()
......@@ -551,6 +557,9 @@ def get_lecture_gaze_esrimation_for_frames(video_name):
# append the calculated percentages to the frame_detections
frame_detections.append(percentages)
# for testing purposes
print('gaze estimation frame recognition count: ', frame_count)
frame_count += 1
else:
......@@ -558,16 +567,17 @@ def get_lecture_gaze_esrimation_for_frames(video_name):
# for testing purposes
print('ending the gaze estimation for frames process')
# return the details
return frame_detections, frame_rate
# this method will get the student gaze estimation summary for period
def get_student_gaze_estimation_summary_for_period(gaze_estimation_data):
# declare variables to add percentage values
looking_up_right_perct_combined = 0.0
looking_up_left_perct_combined = 0.0
looking_down_right_perct_combined = 0.0
......@@ -601,16 +611,16 @@ def get_student_gaze_estimation_summary_for_period(gaze_estimation_data):
# calculate the average percentages
looking_up_right_average_perct = round((looking_up_right_perct_combined / no_of_gaze_estimations), 1)
looking_up_left_average_perct = round((looking_up_left_perct_combined / no_of_gaze_estimations), 1)
looking_down_right_average_perct = round((looking_down_right_perct_combined / no_of_gaze_estimations), 1)
looking_down_left_average_perct = round((looking_down_left_perct_combined / no_of_gaze_estimations), 1)
looking_front_average_perct = round((looking_front_perct_combined / no_of_gaze_estimations), 1)
percentages = {}
percentages["looking_up_and_right_perct"] = looking_up_right_average_perct
percentages["looking_up_and_left_perct"] = looking_up_left_perct_combined
percentages["looking_down_and_right_perct"] = looking_down_right_perct_combined
percentages["looking_down_and_left_perct"] = looking_down_left_perct_combined
percentages["looking_up_and_left_perct"] = looking_up_left_average_perct
percentages["looking_down_and_right_perct"] = looking_down_right_average_perct
percentages["looking_down_and_left_perct"] = looking_down_left_average_perct
percentages["looking_front_perct"] = looking_front_average_perct
return percentages, individual_lec_gaze_estimations, gaze_estimation_labels
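A toy check of the averaging above, with two assumed gaze estimation records (only two keys shown for brevity):

# two assumed records; each direction is averaged across records
gaze_estimation_data = [
    {'looking_up_and_right_perct': 10.0, 'looking_front_perct': 25.0},
    {'looking_up_and_right_perct': 20.0, 'looking_front_perct': 25.0},
]
no_of_gaze_estimations = len(gaze_estimation_data)
combined = sum(d['looking_up_and_right_perct'] for d in gaze_estimation_data)
print(round(combined / no_of_gaze_estimations, 1))  # 15.0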
......@@ -677,6 +687,8 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic
# assign the difference
frame_group_diff[key] = diff if diff > 0 else 1
# for testing purposes
print('starting gaze frame grouping process')
# looping through the frames
while True:
......@@ -802,6 +814,9 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic
frame_group_dict[frame_name]['detection_count'] += detection_count
# for testing purposes
print('gaze frame groupings count: ', frame_count)
# increment the frame count
frame_count += 1
......@@ -848,12 +863,20 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic
# define the labels
labels = ['upright_perct', 'upleft_perct', 'downright_perct', 'downleft_perct', 'front_perct']
# for testing purposes
print('ending gaze frame grouping process')
# return the dictionary
return frame_group_dict, labels
# this section will handle some database operations
def save_frame_detections(video_name):
# for testing purposes
print('starting the saving gaze frame recognition process')
# retrieve the lecture gaze id
lec_gaze = LectureGazeEstimation.objects.filter(lecture_video_id__video_name=video_name)
lec_gaze_ser = LectureGazeEstimationSerializer(lec_gaze, many=True)
......@@ -868,7 +891,7 @@ def save_frame_detections(video_name):
ig.generate_new_id(last_lec_gaze_frame_recognitions.lecture_gaze_frame_recognition_id)
# calculate the frame detections
frame_detections, frame_rate = get_lecture_gaze_estimation_for_frames(video_name)
# to be added to the field 'frame_recognition_details' in the Lecture Gaze Frame Recordings
frame_recognition_details = []
......@@ -892,6 +915,9 @@ def save_frame_detections(video_name):
lec_gaze_frame_recognitions.save()
# for testing purposes
print('ending the saving gaze frame recognition process')
# now return the frame recognitions
return frame_detections
......@@ -899,6 +925,10 @@ def save_frame_detections(video_name):
# this method will save gaze frame groupings to the database
def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# for testing purposes
print('starting the saving gaze frame groupings process')
frame_group_percentages, gaze_labels = gaze_estimation_frame_groupings(video_name, frame_landmarks,
frame_group_dict)
......@@ -928,6 +958,9 @@ def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
new_lec_gaze_frame_groupings.lecture_gaze_id_id = lec_gaze_id
new_lec_gaze_frame_groupings.frame_group_details = frame_group_details
# for testing purposes
print('ending the saving gaze frame groupings process')
# save
new_lec_gaze_frame_groupings.save()
import os
import cv2
import shutil
# import datetime
from datetime import timedelta
from FirstApp.MongoModels import *
from FirstApp.serializers import *
......@@ -94,7 +95,7 @@ def getTimeLandmarks(video_name):
THRESHOLD_GAP = 5
# calculating the real duration
real_duration = timedelta(seconds=(duration))
# defines the number of seconds included for a frame group
THRESHOLD_TIME = 10
......@@ -112,7 +113,7 @@ def getTimeLandmarks(video_name):
# loop through the threshold gap limit to define the time landmarks
for i in range(THRESHOLD_GAP):
initial_landmark += unit_gap
time_landmark = str(timedelta(seconds=initial_landmark))
time_landmark_value = initial_landmark
time_landmarks.append(time_landmark)
time_landmarks_values.append(time_landmark_value)
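A worked example of the landmark arithmetic above, assuming a 50-second video: unit_gap is presumably duration / THRESHOLD_GAP, so the loop emits one landmark every 10 seconds:

from datetime import timedelta

duration = 50  # assumed video length in seconds
THRESHOLD_GAP = 5
unit_gap = int(duration / THRESHOLD_GAP)

initial_landmark = 0
time_landmarks = []
for i in range(THRESHOLD_GAP):
    initial_landmark += unit_gap
    time_landmarks.append(str(timedelta(seconds=initial_landmark)))
print(time_landmarks)  # ['0:00:10', '0:00:20', '0:00:30', '0:00:40', '0:00:50']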
......@@ -204,6 +205,9 @@ def getFrameLandmarks(video_name, category):
# this section will handle some database operations
def save_time_landmarks(video_name):
# for testing purposes
print('starting the saving time landmarks process')
last_lec_video_time_landmarks = LectureVideoTimeLandmarks.objects.order_by('lecture_video_time_landmarks_id').last()
new_lecture_video_time_landmarks_id = "LVTL00001" if (last_lec_video_time_landmarks is None) else \
ig.generate_new_id(last_lec_video_time_landmarks.lecture_video_time_landmarks_id)
......@@ -233,12 +237,18 @@ def save_time_landmarks(video_name):
new_lec_video_time_landmarks.lecture_video_id_id = lec_video_id
new_lec_video_time_landmarks.time_landmarks = db_time_landmarks
# for testing purposes
print('ending the saving time landmarks process')
new_lec_video_time_landmarks.save()
# this method will save frame landmarks to the database
def save_frame_landmarks(video_name):
# for testing purposes
print('starting the saving frame landmarks process')
# retrieve the previous lecture video frame landmarks details
last_lec_video_frame_landmarks = LectureVideoFrameLandmarks.objects.order_by(
'lecture_video_frame_landmarks_id').last()
......@@ -271,6 +281,9 @@ def save_frame_landmarks(video_name):
new_lec_video_frame_landmarks.save()
# for testing purposes
print('ending the saving frame landmarks process')
# now return the frame landmarks and the frame group dictionary
return frame_landmarks, frame_group_dict
......
{% extends 'FirstApp/template.html' %}
<!DOCTYPE html>
<html lang="en">
<body id="page-top">
<!-- Page Wrapper -->
<div id="wrapper">
<!-- Content Wrapper -->
<div id="content-wrapper" class="d-flex flex-column">
<!-- Main Content -->
<div id="content">
<!-- Begin Page Content -->
{% block 'container-fluid' %}
<div class="container-fluid">
{% load static %}
<!-- 404 Error Text -->
<div class="text-center">
<div class="error mx-auto" data-text="404">401</div>
<p class="lead text-gray-800 mb-5">Unauthorized access</p>
<p class="text-gray-500 mb-0">It looks like you do not have access to this url</p>
<p class="text-gray-500 mb-0">Please login with the correct user type</p>
<a href="/logout">&larr; Back to Login Page</a>
</div>
</div>
{% endblock %}
<!--end of container-fluid -->
</div>
<!-- End of Main Content -->
<!-- Footer -->
<footer class="sticky-footer bg-white">
<div class="container my-auto">
<div class="copyright text-center my-auto">
<span>Copyright &copy; Your Website 2019</span>
</div>
</div>
</footer>
<!-- End of Footer -->
</div>
<!-- End of Content Wrapper -->
</div>
<!-- End of Page Wrapper -->
<!-- Scroll to Top Button-->
<a class="scroll-to-top rounded" href="#page-top">
<i class="fas fa-angle-up"></i>
</a>
<!-- Logout Modal-->
<div class="modal fade" id="logoutModal" tabindex="-1" role="dialog" aria-labelledby="exampleModalLabel" aria-hidden="true">
<div class="modal-dialog" role="document">
<div class="modal-content">
<div class="modal-header">
<h5 class="modal-title" id="exampleModalLabel">Ready to Leave?</h5>
<button class="close" type="button" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body">Select "Logout" below if you are ready to end your current session.</div>
<div class="modal-footer">
<button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
<a class="btn btn-primary" href="login.html">Logout</a>
</div>
</div>
</div>
</div>
<!-- Bootstrap core JavaScript-->
<script src="vendor/jquery/jquery.min.js"></script>
<script src="vendor/bootstrap/js/bootstrap.bundle.min.js"></script>
<!-- Core plugin JavaScript-->
<script src="vendor/jquery-easing/jquery.easing.min.js"></script>
<!-- Custom scripts for all pages-->
<script src="js/sb-admin-2.min.js"></script>
</body>
</html>
......@@ -216,6 +216,7 @@
//to handle the 'btn-success' (process) button
$(document).on('click', '.btn-success', function (e) {
//sending the POST request to process the lecture emotions
fetch('http://127.0.0.1:8000/process-lecture-emotion/?lecture_video_name=' + global_video_name + '&lecture_video_id=' + global_lecture_video_id)
.then((res) => res.json())
......
......@@ -66,6 +66,8 @@
Interface
</div>
{% if request.session.user_type == "Lecturer" %}
<!-- Nav Item - Pages Collapse Menu -->
<li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseTwo" aria-expanded="true" aria-controls="collapseTwo">
......@@ -83,6 +85,7 @@
</div>
</li>
<!-- Nav Item - Pages Collapse Menu -->
<li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseThree" aria-expanded="true" aria-controls="collapseThree">
......@@ -97,6 +100,8 @@
</div>
</li>
<li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseFour" aria-expanded="true" aria-controls="collapseThree">
<i class="fas fa-fw fa-cog"></i>
......@@ -127,6 +132,8 @@
</div>
</li>
{% endif %}
<!-- Divider -->
<hr class="sidebar-divider">
......@@ -178,6 +185,8 @@
</div>
</ul>
<!-- End of Sidebar -->
<div id="content-wrapper" class="d-flex flex-column">
......
......@@ -14,6 +14,7 @@ urlpatterns = [
path('logout', views.logoutView),
path('register-user', views.register),
path('404', views.view404),
path('401', views.view401),
path('500', views.view500),
path('blank', views.blank),
path('gaze', views.gaze),
......
......@@ -109,100 +109,117 @@ class LectureViewSet(APIView):
####### VIEWS ######
@login_required(login_url='/user-direct')
def hello(request):
try:
username = request.user.username
# retrieve the lecturer
lecturer = request.session['lecturer']
user_type = request.session['user_type']
print('user_type: ', user_type)
# retrieve the lecturer's timetable slots
lecturer_timetable = FacultyTimetable.objects.filter()
# serialize the timetable
lecturer_timetable_serialized = FacultyTimetableSerializer(lecturer_timetable, many=True)
lecturer_details = []
# loop through the serialized timetable
for timetable in lecturer_timetable_serialized.data:
# retrieve daily timetable
daily_timetable = timetable['timetable']
# loop through the daily timetable
for day_timetable in daily_timetable:
date = ''
lecture_index = 0
# loop through each timeslots
for slots in day_timetable:
if slots == "date":
date = day_timetable[slots]
elif slots == "time_slots":
slot = day_timetable[slots]
# loop through each slot
for lecture in slot:
# check whether the lecturer is the current lecturer
if lecturer == lecture['lecturer']['id']:
lecturer_lecture_details = {}
lecturer_lecture_details['date'] = date
lecturer_lecture_details['start_time'] = lecture['start_time']
lecturer_lecture_details['end_time'] = lecture['end_time']
lecturer_lecture_details['subject_name'] = lecture['subject']['name']
lecturer_lecture_details['index'] = lecture_index
lecturer_lecture_details['lecturer'] = lecture['lecturer']['id']
# append to the lecturer_details
lecturer_details.append(lecturer_lecture_details)
# increment the index
lecture_index += 1
# sorting the dates in lecturer_details list
# for details in lecturer_details:
lecturer_details.sort(key=lambda date: datetime.strptime(str(date['date']), "%Y-%m-%d"), reverse=True)
obj = {'Message': 'Student and Lecturer Performance Enhancement System', 'username': username}
folder = os.path.join(BASE_DIR, os.path.join('static\\FirstApp\\videos'))
videoPaths = [os.path.join(folder, file) for file in os.listdir(folder)]
videos = []
durations = []
#
# for videoPath in videoPaths:
# video = Video()
# cap = cv2.VideoCapture(videoPath)
# fps = cap.get(cv2.CAP_PROP_FPS) # OpenCV2 version 2 used "CV_CAP_PROP_FPS"
# frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# duration = int(frame_count / fps)
# durations.append(duration)
# videoName = os.path.basename(videoPath)
# # videoName = videos.append(os.path.basename(videoPath))
# durationObj = datetime.timedelta(seconds=duration)
# video.path = videoPath
# video.name = videoName
# video.duration = str(durationObj)
# videos.append(video)
context = {'object': obj, 'Videos': videos, 'durations': durations, 'template_name': 'FirstApp/template.html', 'lecturer_details': lecturer_details, "lecturer": lecturer}
return render(request, 'FirstApp/Home.html', context)
# in case of keyerror exception
except KeyError as exc:
return redirect('/401')
# handling the general exceptions
except Exception as exc:
return redirect('/500')
# this method will handle 404 error page
def view404(request):
return render(request, 'FirstApp/404.html')
# this method will handle the 401 error page
def view401(request):
return render(request, 'FirstApp/401.html')
# querying the database
def blank(request):
emotions = LectureEmotionReport.objects.all().order_by('lecture_id')
return render(request, 'FirstApp/blank.html', {'details': emotions})
@login_required(login_url='/user-direct')
def gaze(request):
try:
......@@ -221,6 +238,11 @@ def gaze(request):
subject_list.append(subject_serialized.data)
# handling the keyError
except KeyError as exc:
return redirect('/401')
# handling the general exceptions
except Exception as exc:
return redirect('/500')
......@@ -240,7 +262,7 @@ def processGaze(request):
# the corresponding view for pose estimation
@login_required(login_url='/user-direct')
def pose(request):
try:
......@@ -295,7 +317,7 @@ def webcam(request):
return redirect('/')
# to process video for emotion detection
@login_required(login_url='/user-direct')
def video(request):
title = 'Student and Lecturer Performance Enhancement System'
video_name = request.GET.get('video_name')
......@@ -310,7 +332,7 @@ def video(request):
# extractor view
@login_required(login_url='/user-direct')
def extractor(request):
folder = os.path.join(BASE_DIR, os.path.join('static\\FirstApp\\videos'))
videoPaths = [os.path.join(folder, file) for file in os.listdir(folder)]
......@@ -358,7 +380,7 @@ def child(request):
return render(request, 'FirstApp/child.html', {'template_name': 'FirstApp/base.html'})
# displaying video results
@login_required(login_url='/user-direct')
def video_result(request):
try:
......@@ -434,7 +456,11 @@ def video_result(request):
# append to the list
due_lecture_list.append(obj)
# handling the keyError
except KeyError as exc:
return redirect('/401')
# handling the general exceptions
except Exception as exc:
print('what is wrong?: ', exc)
return redirect('/500')
......@@ -444,7 +470,7 @@ def video_result(request):
# view for emotion page
@login_required(login_url='/user-direct')
def emotion_view(request):
try:
......@@ -463,6 +489,11 @@ def emotion_view(request):
subject_list.append(subject_serialized.data)
# handling the keyError
except KeyError as exc:
return redirect('/401')
# handling the general exceptions
except Exception as exc:
return redirect('/500')
......@@ -490,6 +521,7 @@ def loggedInView(request):
login(request, user)
# setting up the session
request.session['lecturer'] = lecturer.id
request.session['user_type'] = "Lecturer"
return redirect('/')
......@@ -519,7 +551,7 @@ def tables(request):
return render(request, "FirstApp/tables.html")
@login_required(login_url='/user-direct')
def activity(request):
try:
......@@ -538,6 +570,11 @@ def activity(request):
subject_list.append(subject_serialized.data)
# handling the keyError
except KeyError as exc:
return redirect('/401')
# handling the general exception
except Exception as exc:
return redirect('/500')
......@@ -593,6 +630,7 @@ def processAdminLogin(request):
login(request, user)
# setting up the session
request.session['admin'] = admin.id
request.session['user_type'] = "Admin"
return redirect('/summary/lecture')
......