Commit 1b776b39 authored by I.K Seneviratne's avatar I.K Seneviratne

Merge branch 'monitoring_student_behavior_IT17138000' into 'QA_RELEASE'

Monitoring student behavior it17138000

See merge request !7
parents 865e853d 23d79aeb
......@@ -40,6 +40,15 @@ class Lecturer(models.Model):
return self.lecturer_id
# admin model
class Admin(models.Model):
admin_id = models.CharField(max_length=10)
name = models.CharField(max_length=20)
email = models.EmailField()
def __str__(self):
return self.admin_id
# Lecturer_subject model
class LecturerSubject(models.Model):
lec_subject_id = models.CharField(max_length=10)
......@@ -56,6 +65,12 @@ class LecturerCredentials(models.Model):
password = models.CharField(max_length=15)
# admin credential details
class AdminCredentialDetails(models.Model):
username = models.ForeignKey(Admin, on_delete=models.CASCADE)
password = models.CharField(max_length=15)
# timetable based on daily basis
class DailyTimeTable(models.Model):
slot_id = models.AutoField(auto_created=True, primary_key=True)
......@@ -285,7 +300,7 @@ class LectureEmotionFrameRecognitions(models.Model):
# POSE section
# lecture pose estimation
# lecture gaze estimation
class LectureGazeEstimation(models.Model):
lecture_gaze_id = models.CharField(max_length=10)
lecture_video_id = models.ForeignKey(LectureVideo, on_delete=models.CASCADE)
......
......@@ -13,3 +13,5 @@ admin.site.register(FacultyTimetable)
admin.site.register(LectureVideo)
admin.site.register(LectureActivity)
admin.site.register(LectureGazeEstimation)
admin.site.register(Admin)
admin.site.register(AdminCredentialDetails)
\ No newline at end of file
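For orientation, a hypothetical shell session (not part of the commit) showing how the two new models relate. The import path assumes the models live in FirstApp/MongoModels.py, which the other modules in this diff import from, and all field values are made up. Because the AdminCredentialDetails foreign key is named 'username', its database column is 'username_id', which is why the form code later queries username_id=admin.id.

# hypothetical usage sketch; the values below are illustrative only
from FirstApp.MongoModels import Admin, AdminCredentialDetails

admin = Admin.objects.create(admin_id="AD0001", name="Jane Doe", email="jane@example.com")
AdminCredentialDetails.objects.create(username=admin, password="<stored password>")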
......@@ -300,7 +300,6 @@ class LectureActivityProcess(APIView):
LectureActivity(
lecture_activity_id=new_lecture_activity_id,
lecture_video_id_id=lec_video_id,
talking_perct=percentages['talking_perct'],
phone_perct=percentages['phone_perct'],
listening_perct=percentages['listening_perct'],
writing_perct=percentages['writing_perct']
......@@ -473,16 +472,18 @@ class LectureEmotionProcess(APIView):
pass
def save_emotion_report(self, lec_video_id, percentages):
lec_video = LectureVideo.objects.get(lecture_video_id=lec_video_id)
lec_video = LectureVideo.objects.filter(lecture_video_id=lec_video_id)
lec_video_serializer = LectureVideoSerializer(lec_video, many=True)
lec_video_data = lec_video_serializer.data[0]
last_lec_emotion = LectureEmotionReport.objects.order_by('lecture_emotion_id').last()
new_lecture_emotion_id = ig.generate_new_id(last_lec_emotion.lecture_emotion_id)
lecture_video_id = lec_video_data['id']
# creating a new lecture emotion report
LectureEmotionReport(
lecture_emotion_id=new_lecture_emotion_id,
lecture_video_id=lec_video,
lecture_video_id_id=lecture_video_id,
happy_perct=percentages.happy_perct,
sad_perct=percentages.sad_perct,
angry_perct=percentages.angry_perct,
......@@ -685,17 +686,23 @@ class ProcessLectureGazeEstimation(APIView):
pass
def estimate_gaze(self, lec_video_id, percentages):
lec_video = LectureVideo.objects.get(lecture_video_id=lec_video_id)
lec_video = LectureVideo.objects.filter(lecture_video_id=lec_video_id)
last_lec_gaze = LectureGazeEstimation.objects.order_by('lecture_gaze_id').last()
lec_video_serializer = LectureVideoSerializer(lec_video, many=True)
lec_video_data = lec_video_serializer.data[0]
new_lecture_gaze_id = "LG000001" if (last_lec_gaze is None) else ig.generate_new_id(
last_lec_gaze.lecture_gaze_id)
new_lecture_gaze_primary_id = 1 if (last_lec_gaze is None) else int(last_lec_gaze.id) + 1
# get the video id
lecture_video_id = lec_video_data['id']
# creating a new lecture gaze estimation
LectureGazeEstimation(
id=new_lecture_gaze_primary_id,
lecture_gaze_id=new_lecture_gaze_id,
lecture_video_id=lec_video,
lecture_video_id_id=lecture_video_id,
looking_up_and_right_perct=percentages['head_up_right_perct'],
looking_up_and_left_perct=percentages['head_up_left_perct'],
looking_down_and_right_perct=percentages['head_down_right_perct'],
......@@ -723,7 +730,7 @@ class GetLectureGazeEstimationViewSet(APIView):
lecture_video_id = request.query_params.get('lecture_video_id')
lecture_video_name = request.query_params.get('lecture_video_name')
# retrieve the extracted frames
extracted = hge.getExtractedFrames(lecture_video_name)
# extracted = hge.getExtractedFrames(lecture_video_name)
lecture_gaze_estimations = LectureGazeEstimation.objects.filter(
lecture_video_id__lecture_video_id=lecture_video_id)
......@@ -731,7 +738,7 @@ class GetLectureGazeEstimationViewSet(APIView):
return Response({
"response": serializer.data,
"extracted": extracted
# "extracted": extracted
})
......
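The hunks above lean on ig.generate_new_id to bump string ids such as "LG000001". The real helper lives in FirstApp/logic/id_generator.py and is not part of this diff; a minimal sketch consistent with how it is called (alphabetic prefix, zero-padded numeric suffix) might look like:

import re

# sketch of the assumed behaviour: "LG000001" -> "LG000002"
def generate_new_id(last_id):
    prefix = re.match(r'\D+', last_id).group()    # e.g. "LG"
    width = len(last_id) - len(prefix)            # preserve the zero padding
    return prefix + str(int(last_id[len(prefix):]) + 1).zfill(width)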
......@@ -10,6 +10,7 @@ from .MongoModels import *
from . models import VideoMeta
from . logic import custom_sorter as cs
from .logic import id_generator as ig
from .logic import activity_recognition as ar
# emotion recognition method
......@@ -47,7 +48,6 @@ def detect_emotion(video):
face_classifier = cv2.CascadeClassifier(os.path.join(BASE_DIR, 'FirstApp\\classifiers\\haarcascade_frontalface_default.xml'))
classifier_path = os.path.join(BASE_DIR, 'FirstApp\\classifiers\\Emotion_little_vgg.h5')
classifier = load_model(classifier_path)
path = ''
meta_data = VideoMeta()
class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
......@@ -65,6 +65,9 @@ def detect_emotion(video):
count_neutral = 0
count_surprise = 0
# for testing purposes
print('starting the emotion recognition process')
while (count_frames < frame_count):
# Grab a single frame of video
ret, frame = cap.read()
......@@ -72,23 +75,7 @@ def detect_emotion(video):
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_classifier.detectMultiScale(gray,1.3,5)
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
roi_gray = gray[y:y+h, x:x+w]
roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
# rect,face,image = face_detector(frame)
if np.sum([roi_gray])!=0:
roi = roi_gray.astype('float')/255.0
roi = img_to_array(roi)
roi = np.expand_dims(roi, axis=0)
# make a prediction on the ROI, then lookup the class
preds = classifier.predict(roi)[0]
label = class_labels[preds.argmax()]
label = emotion_recognition(classifier, face_classifier, frame)
# counting the number of frames for each label, to calculate the percentage for each emotion later on...
......@@ -113,11 +100,9 @@ def detect_emotion(video):
elif (label == 'Surprise'):
count_surprise += 1
label_position = (x, y)
# cv2.putText(frame, label, label_position, cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 3)
# cv2.imwrite("".format(label, count), frame)
else:
cv2.putText(frame, 'No Face Found', (20, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 3)
# for testing purposes
print('emotion frame count: ', count_frames)
count_frames += 1
......@@ -132,6 +117,9 @@ def detect_emotion(video):
cap.release()
cv2.destroyAllWindows()
# for testing purposes
print('ending the emotion recognition process')
return meta_data
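The face-detection and prediction block deleted above is now delegated to an emotion_recognition(classifier, face_classifier, frame) helper. Its body is not shown in this diff; reconstructed from the removed lines, a sketch would be:

import cv2
import numpy as np
from keras.preprocessing.image import img_to_array  # import path assumed

class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']

def emotion_recognition(classifier, face_classifier, frame):
    # grey-scale + Haar-cascade face detection, as in the removed block
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    label = ''
    for (x, y, w, h) in face_classifier.detectMultiScale(gray, 1.3, 5):
        roi_gray = cv2.resize(gray[y:y + h, x:x + w], (48, 48), interpolation=cv2.INTER_AREA)
        if np.sum([roi_gray]) != 0:
            roi = np.expand_dims(img_to_array(roi_gray.astype('float') / 255.0), axis=0)
            preds = classifier.predict(roi)[0]    # make a prediction, then look up the class
            label = class_labels[preds.argmax()]
    return label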
......@@ -263,11 +251,24 @@ def get_individual_student_evaluation(video_name, student_name):
# this method will retrieve the emotion recognition for each frame of the lecture video
def get_frame_emotion_recognition(video_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
face_classifier = cv2.CascadeClassifier(
os.path.join(BASE_DIR, 'FirstApp\\classifiers\\haarcascade_frontalface_default.xml'))
classifier_path = os.path.join(BASE_DIR, 'FirstApp\\classifiers\\Emotion_little_vgg.h5')
classifier = load_model(classifier_path)
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
# files required for person detection
config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
model_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.caffemodel")
# load our serialized person detection model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(config_file, model_file)
cap = cv2.VideoCapture(VIDEO_DIR)
no_of_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# initializing the count variables
frame_count = 0
......@@ -276,16 +277,21 @@ def get_frame_emotion_recognition(video_name):
# frame activity recognitions
frame_emotion_recognitions = []
# class labels
class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
for frame in os.listdir(EXTRACTED_DIR):
# derive the frame folder path
FRAME_FOLDER = os.path.join(EXTRACTED_DIR, frame)
# for testing purposes
print('starting the emotion frame recognition process')
while (frame_count < no_of_frames):
ret, image = cap.read()
frame_name = "frame-{}".format(frame_count)
frame_details = {}
frame_details['frame_name'] = frame
frame_details['frame_name'] = frame_name
# initialize the count variables for a frame
happy_count = 0
......@@ -294,18 +300,19 @@ def get_frame_emotion_recognition(video_name):
neutral_count = 0
surprise_count = 0
# get the detections
detections = ar.person_detection(image, net)
# to count the extracted detections for a frame
detection_count = 0
for detections in os.listdir(FRAME_FOLDER):
# if there are detections
if (len(detections) > 0):
# loop through the detections
for detection in detections:
# only take the images with the student name
if "frame" not in detections:
# get the label for this image
IMAGE_PATH = os.path.join(FRAME_FOLDER, detections)
image = cv2.imread(IMAGE_PATH)
label = emotion_recognition(classifier, face_classifier, image)
label = emotion_recognition(classifier, face_classifier, detection)
# checking for the label
if label == class_labels[0]:
......@@ -341,9 +348,23 @@ def get_frame_emotion_recognition(video_name):
# push to all the frame details
frame_emotion_recognitions.append(frame_details)
else:
break
# for testing purposes
print('emotion frame recognition count: ', frame_count)
# increment the frame count
frame_count += 1
# sort the recognitions based on the frame number
sorted_activity_frame_recognitions = cs.custom_object_sorter(frame_emotion_recognitions)
# for testing purposes
print('ending the emotion frame recognition process')
# return the detected frame percentages
return sorted_activity_frame_recognitions
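cs.custom_object_sorter (from FirstApp/logic/custom_sorter.py, not shown in this diff) is assumed to order the per-frame dicts by the integer in their "frame-<n>" names, since a plain string sort would put "frame-10" before "frame-2". A sketch of that behaviour:

# assumed behaviour of custom_object_sorter: numeric sort on "frame-<n>"
def sort_by_frame_number(frame_recognitions):
    return sorted(frame_recognitions, key=lambda d: int(d['frame_name'].split('-')[1]))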
......@@ -409,15 +430,28 @@ def get_student_emotion_summary_for_period(emotions):
# this method will retrieve activity frame groupings for a lecture
def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
# load the models
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
face_classifier = cv2.CascadeClassifier(
os.path.join(BASE_DIR, 'FirstApp\\classifiers\\haarcascade_frontalface_default.xml'))
classifier_path = os.path.join(BASE_DIR, 'FirstApp\\classifiers\\Emotion_little_vgg.h5')
classifier = load_model(classifier_path)
# files required for person detection
config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
model_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.caffemodel")
# load our serialized person detection model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(config_file, model_file)
cap = cv2.VideoCapture(VIDEO_DIR)
no_of_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# initializing the count variables
......@@ -441,9 +475,11 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# looping through the frames
for frame in os.listdir(EXTRACTED_DIR):
# getting the frame folder
FRAME_FOLDER = os.path.join(EXTRACTED_DIR, frame)
while (frame_count < no_of_frames):
# get the current frame
ret, image = cap.read()
# initializing the variables
happy_count = 0
......@@ -453,17 +489,17 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
neutral_count = 0
detection_count = 0
detections = ar.person_detection(image, net)
# if there are detections
if (len(detections) > 0):
# looping through the detections in each frame
for detections in os.listdir(FRAME_FOLDER):
for detection in detections:
# checking whether the image contains only one person
if "frame" not in detections:
# get the label for this image
IMAGE_PATH = os.path.join(FRAME_FOLDER, detections)
image = cv2.imread(IMAGE_PATH)
# run the model and get the emotion label
label = emotion_recognition(classifier, face_classifier, image)
label = emotion_recognition(classifier, face_classifier, detection)
# increment the count based on the label
if label == class_labels[0]:
......@@ -503,7 +539,11 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
frame_group_dict[frame_name]['neutral_count'] += neutral_count
frame_group_dict[frame_name]['detection_count'] += detection_count
else:
break
# for testing purposes
print('emotion frame groupings count: ', frame_count)
# increment the frame count
frame_count += 1
......@@ -558,6 +598,10 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# this section will handle some database operations
def save_frame_recognitions(video_name):
# for testing purposes
print('starting the saving emotion frame recognition process')
# retrieve the lecture emotion id
lec_emotion = LectureEmotionReport.objects.filter(lecture_video_id__video_name=video_name)
lec_emotion_ser = LectureEmotionSerializer(lec_emotion, many=True)
......@@ -595,6 +639,9 @@ def save_frame_recognitions(video_name):
lec_emotion_frame_recognitions.save()
# for testing purposes
print('ending the saving emotion frame recognition process')
# now return the frame recognitions
return frame_detections
......@@ -602,6 +649,9 @@ def save_frame_recognitions(video_name):
# this method will save the emotion frame groupings to the database
def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# for testing purposes
print('starting the saving emotion frame groupings process')
frame_group_percentages, emotion_labels = emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict)
# save the frame group details into db
......@@ -631,5 +681,8 @@ def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
new_lec_emotion_frame_groupings.lecture_emotion_id_id = lec_emotion_id
new_lec_emotion_frame_groupings.frame_group_details = frame_group_details
# for testing purposes
print('ending the saving emotion frame groupings process')
# save
new_lec_emotion_frame_groupings.save()
......@@ -58,3 +58,50 @@ class LecturerCredentialsForm(forms.ModelForm):
widgets = {
'password': forms.PasswordInput()
}
# admin login form
class AdminLoginForm(forms.Form):
# username = forms.CharField(max_length=100)
email = forms.EmailField()
password = forms.CharField(widget=forms.PasswordInput())
def clean(self):
# cleaned_username = self.cleaned_data.get('username')
cleaned_email = self.cleaned_data.get('email')
cleaned_password = self.cleaned_data.get('password')
admin = Admin.objects.filter(email=cleaned_email).first()
# if an admin is already in the system
if (admin):
# retrieve the User object
user = User.objects.get(email=cleaned_email)
is_user = user.check_password(cleaned_password)
# if the password is correct
if (is_user):
# lec_credentials = LecturerCredentials.objects.filter(username_id=lecturer.id)
admin_credentials = AdminCredentialDetails.objects.filter(username_id=admin.id).first()
print('credentials: ', admin_credentials)
# if admin credentials are already created
if (admin_credentials):
admin_credentials.password = user.password
admin_credentials.save(force_update=True)
else:
AdminCredentialDetails(
username_id=admin.id,
password=user.password
).save()
else:
raise forms.ValidationError("Username or password is incorrect")
else:
print('the admin does not exist')
raise forms.ValidationError("The admin does not exist")
return super(AdminLoginForm, self).clean()
......@@ -50,38 +50,21 @@ def activity_recognition(video_path):
frame_count = 0
total_detections = 0
phone_checking_count = 0
talking_count = 0
note_taking_count = 0
listening_count = 0
# video activity directory
VIDEO_ACTIVITY_DIR = os.path.join(ACTIVITY_DIR, video_path)
# creating the directory for the video
# if (os.path.isdir(VIDEO_ACTIVITY_DIR)):
# shutil.rmtree(VIDEO_ACTIVITY_DIR)
#
# # create the video directory
# os.mkdir(VIDEO_ACTIVITY_DIR)
# for testing purposes
print('starting the activity recognition process')
while (frame_count < no_of_frames):
ret, image = video.read()
FRAME_DIR = os.path.join(VIDEO_ACTIVITY_DIR, "frame-{}".format(frame_count))
# frame_name = "frame-{}.png".format(frame_count)
#
# FRAME_IMG = os.path.join(FRAME_DIR, frame_name)
#
# if (os.path.isdir(FRAME_DIR)):
# shutil.rmtree(FRAME_DIR)
# create the new frame directory
# os.mkdir(FRAME_DIR)
image = cv2.resize(image, size)
detections = person_detection(image, net)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# cv2.imwrite(FRAME_IMG, image)
# this is for testing purposes
print('frame count: ', frame_count)
# if there are any person detections
if (len(detections) > 0):
......@@ -90,6 +73,7 @@ def activity_recognition(video_path):
detection_count = 0
# looping through the person detections of the frame
for detection in detections:
detection = cv2.resize(detection, size)
......@@ -113,43 +97,33 @@ def activity_recognition(video_path):
elif (label == class_labels[2]):
note_taking_count += 1
# saving the detection for the particular frame
# detection_name = "detection-{}.png".format(detection_count)
# detection_image_path = os.path.join(FRAME_DIR, detection_name)
#
# # converting detected image into grey-scale
# detection = cv2.cvtColor(detection, cv2.COLOR_BGR2GRAY)
#
# cv2.imwrite(detection_image_path, detection)
detection_count += 1
frame_count += 1
# after extracting the frames, save the changes to static content
# p = os.popen("python manage.py collectstatic", "w")
# p.write("yes")
# calculating the percentages for each label
phone_perct = float(phone_checking_count / total_detections) * 100 if total_detections > 0 else 0
talking_perct = float(talking_count / total_detections) * 100 if total_detections > 0 else 0
# talking_perct = float(talking_count / total_detections) * 100 if total_detections > 0 else 0
note_perct = float(note_taking_count / total_detections) * 100 if total_detections > 0 else 0
listening_perct = float(listening_count / total_detections) * 100 if total_detections > 0 else 0
# assigning the percentages to the dictionary
percentages["phone_perct"] = phone_perct
percentages["talking_perct"] = talking_perct
# percentages["talking_perct"] = talking_perct
percentages["writing_perct"] = note_perct
percentages["listening_perct"] = listening_perct
# for testing purposes
print('activity recognition process is over')
return percentages
def person_detection(image, net):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
model_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.caffemodel")
threshold = 0.2
detected_person = []
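The rest of person_detection is cut off by the diff. For context, here is a sketch of the standard MobileNet-SSD recipe that the visible setup (a 0.2 threshold and a detected_person list) points to; class index 15 is "person" in the VOC label map this model ships with, and np is assumed to be numpy imported at module level, as elsewhere in this file:

# editorial sketch of the truncated body, not the committed code
(h, w) = image.shape[:2]
blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 0.007843, (300, 300), 127.5)
net.setInput(blob)
detections = net.forward()
for i in range(detections.shape[2]):
    confidence = detections[0, 0, i, 2]
    # keep confident detections of class 15 ("person")
    if confidence > threshold and int(detections[0, 0, i, 1]) == 15:
        box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
        (startX, startY, endX, endY) = box.astype("int")
        detected_person.append(image[startY:endY, startX:endX])
return detected_person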
......@@ -391,14 +365,27 @@ def get_student_activity_evaluation(video_name):
# recognize the activity for each frame
def get_frame_activity_recognition(video_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_03.h5")
VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_03.h5")
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_04.h5")
ACTIVITY_DIR = os.path.join(BASE_DIR, "static\\FirstApp\\activity")
# files required for person detection
config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
model_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.caffemodel")
# load our serialized person detection model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(config_file, model_file)
np.set_printoptions(suppress=True)
# load the model
# class_labels = ['Phone checking', 'Talking with friends', 'note taking']
# class labels
class_labels = ['Phone checking', 'Listening', 'Note taking']
model = tensorflow.keras.models.load_model(CLASSIFIER_DIR)
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
......@@ -407,45 +394,54 @@ def get_frame_activity_recognition(video_name):
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
size = (224, 224)
# class labels
class_labels = ['Phone checking', 'Listening', 'Note taking']
# iteration
video = cv2.VideoCapture(VIDEO_DIR)
no_of_frames = video.get(cv2.CAP_PROP_FRAME_COUNT)
frame_count = 0
# total_detections = 10
# frame activity recognitions
frame_activity_recognitions = []
# for testing purposes
print('starting the frame activity recognition process')
# looping through the frames
for frame in os.listdir(EXTRACTED_DIR):
while (frame_count < no_of_frames):
# define the count variables for each frame
phone_checking_count = 0
listening_count = 0
note_taking_count = 0
ret, image = video.read()
# derive the frame folder path
FRAME_FOLDER = os.path.join(EXTRACTED_DIR, frame)
# FRAME_FOLDER = os.path.join(EXTRACTED_DIR, frame)
frame_name = "frame-{}".format(frame_count)
frame_details = {}
frame_details['frame_name'] = frame
frame_details['frame_name'] = frame_name
# to count the extracted detections for a frame
detection_count = 0
detected_percentages = []
# loop through each detection in the frame
for detection in os.listdir(FRAME_FOLDER):
detections = person_detection(image, net)
DETECTION_PATH = os.path.join(FRAME_FOLDER, detection)
# check whether the image is not the frame itself
if "frame" not in detection:
image = cv2.imread(DETECTION_PATH)
# if there are detections
if (len(detections) > 0):
image = cv2.resize(image, size)
# loop through each detection in the frame
for detection in detections:
image_array = np.asarray(image)
detection = cv2.resize(detection, size)
image_array = np.asarray(detection)
normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
# Load the image into the array
......@@ -467,6 +463,7 @@ def get_frame_activity_recognition(video_name):
# increment the detection count
detection_count += 1
# calculating the percentages for the frame
phone_checking_perct = float(phone_checking_count / detection_count) * 100 if detection_count > 0 else 0
listening_perct = float(listening_count / detection_count) * 100 if detection_count > 0 else 0
......@@ -480,13 +477,26 @@ def get_frame_activity_recognition(video_name):
# push to all the frame details
frame_activity_recognitions.append(frame_details)
else:
break
print('current frame: ', frame_count)
# increment frame count
frame_count += 1
# sort the recognitions based on the frame number
sorted_activity_frame_recognitions = custom_object_sorter(frame_activity_recognitions)
# for testing purposes
print('ending the frame activity recognition process')
# return the detected frame percentages
return sorted_activity_frame_recognitions
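Condensed from the loop above, each person crop is prepared for the Keras activity classifier as follows; the lines that consume model.predict are cut from this hunk, so the argmax step is an assumption carried over from the emotion path:

# data, size, model and class_labels are as defined in the function above
detection = cv2.resize(detection, size)                                          # 224x224 crop
normalized_image_array = (np.asarray(detection).astype(np.float32) / 127.0) - 1  # scale to roughly [-1, 1]
data[0] = normalized_image_array                                                 # fill the (1, 224, 224, 3) tensor
prediction = model.predict(data)
label = class_labels[prediction.argmax()]                                        # assumed label lookup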
# this method will retrieve individual student evaluation
def get_individual_student_evaluation(video_name, student_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
......@@ -753,6 +763,10 @@ def activity_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# this section will handle saving activity entities to the database
def save_frame_recognition(video_name):
# for testing purposes
print('starting the saving activity frame recognition process')
# retrieve the lecture activity id
lec_activity = LectureActivity.objects.filter(lecture_video_id__video_name=video_name)
lec_activity_ser = LectureActivitySerializer(lec_activity, many=True)
......@@ -787,6 +801,9 @@ def save_frame_recognition(video_name):
lec_activity_frame_recognitions.save()
# for testing purposes
print('ending the saving activity frame recognition process')
# now return the frame detections
return frame_detections
......@@ -794,6 +811,8 @@ def save_frame_recognition(video_name):
# this method will save the activity frame groupings to the database
def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# for testing purposes
print('starting the saving activity frame groupings process')
frame_group_percentages, activity_labels = activity_frame_groupings(video_name, frame_landmarks,
frame_group_dict)
......@@ -825,5 +844,8 @@ def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
new_lec_activity_frame_groupings.lecture_activity_id_id = lec_activity_id
new_lec_activity_frame_groupings.frame_group_details = frame_group_details
# for testing purposes
print('ending the saving activity frame groupings process')
# save
new_lec_activity_frame_groupings.save()
......@@ -144,18 +144,10 @@ def process_gaze_estimation(video_path):
VIDEO_PATH = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_path))
GAZE_DIR = os.path.join(BASE_DIR, "static\\FirstApp\\gaze")
# create a folder with the same name as the video
VIDEO_DIR = os.path.join(GAZE_DIR, video_path)
# define a dictionary to return the percentage values
percentages = {}
# checking whether the video directory exist
if os.path.isdir(VIDEO_DIR):
shutil.rmtree(VIDEO_DIR)
# create the new directory
os.mkdir(VIDEO_DIR)
# load the face detection model
face_model = get_face_detector()
......@@ -202,6 +194,9 @@ def process_gaze_estimation(video_path):
[0, 0, 1]], dtype="double"
)
# for testing purposes
print('starting the gaze estimation process')
# iterate the video frames
while True:
ret, img = cap.read()
......@@ -285,35 +280,39 @@ def process_gaze_estimation(video_path):
# checking for vertical and horizontal directions
if isLookingDown & isLookingRight:
cv2.putText(img, 'looking down and right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
# cv2.putText(img, 'looking down and right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_down_right_count += 1
elif isLookingDown & isLookingLeft:
cv2.putText(img, 'looking down and left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
# cv2.putText(img, 'looking down and left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_down_left_count += 1
elif isLookingUp & isLookingRight:
cv2.putText(img, 'looking up and right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
# cv2.putText(img, 'looking up and right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_up_right_count += 1
elif isLookingUp & isLookingLeft:
cv2.putText(img, 'looking up and left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
# cv2.putText(img, 'looking up and left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_up_left_count += 1
elif isLookingFront:
cv2.putText(img, 'Head front', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
# cv2.putText(img, 'Head front', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_front_count += 1
# indicate the student name
cv2.putText(img, student_name, (facebox[2], facebox[3]), font, 2, (255, 255, 128), 3)
# cv2.putText(img, student_name, (facebox[2], facebox[3]), font, 2, (255, 255, 128), 3)
# increment the face count
face_count += 1
# naming the new image
image_name = "frame-{}.png".format(frame_count)
# new image path
image_path = os.path.join(VIDEO_DIR, image_name)
# image_name = "frame-{}.png".format(frame_count)
#
# # new image path
# image_path = os.path.join(VIDEO_DIR, image_name)
# save the new image
cv2.imwrite(image_path, img)
# cv2.imwrite(image_path, img)
# for testing purposes
print('gaze estimation count: ', frame_count)
# increment the frame count
frame_count += 1
......@@ -323,8 +322,8 @@ def process_gaze_estimation(video_path):
# after extracting the frames, save the changes to static content
p = os.popen("python manage.py collectstatic", "w")
p.write("yes")
# p = os.popen("python manage.py collectstatic", "w")
# p.write("yes")
# calculate percentages
head_up_right_perct = (Decimal(head_up_right_count) / Decimal(face_count)) * 100
......@@ -346,6 +345,9 @@ def process_gaze_estimation(video_path):
cv2.destroyAllWindows()
cap.release()
# for testing purposes
print('ending the gaze estimation process')
# return the dictionary
return percentages
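One caveat in the percentage block of this function: Decimal(head_up_right_count) / Decimal(face_count) raises decimal.DivisionByZero when no face is detected in the whole video. A defensive variant, mirroring the "if total_detections > 0 else 0" guard already used in activity_recognition, would be:

# editorial suggestion, not in the commit: guard against face_count == 0
head_up_right_perct = (Decimal(head_up_right_count) / Decimal(face_count)) * 100 if face_count > 0 else 0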
......@@ -370,7 +372,7 @@ def getExtractedFrames(lecture_video_name):
# this method will retrieve lecture gaze estimation for each frame
def get_lecture_gaze_esrimation_for_frames(video_name):
def get_lecture_gaze_estimation_for_frames(video_name):
# get the base directory
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
......@@ -422,6 +424,10 @@ def get_lecture_gaze_esrimation_for_frames(video_name):
[0, 0, 1]], dtype="double"
)
# for testing purposes
print('starting the gaze estimation for frames process')
# iterate the video frames
while True:
ret, img = cap.read()
......@@ -551,6 +557,9 @@ def get_lecture_gaze_esrimation_for_frames(video_name):
# append the calculated percentages to the frame_detections
frame_detections.append(percentages)
# for testing purposes
print('gaze estimation frame recognition count: ', frame_count)
frame_count += 1
else:
......@@ -558,16 +567,17 @@ def get_lecture_gaze_esrimation_for_frames(video_name):
# for testing purposes
print('ending the gaze estimation for frames process')
# return the details
return frame_detections, frame_rate
# this method will get the student gaze estimation summary for period
def get_student_gaze_estimation_summary_for_period(gaze_estimation_data):
# declare variables to add percentage values
phone_checking_perct_combined = 0.0
listening_perct_combined = 0.0
note_taking_perct_combined = 0.0
# declare variables to add percentage values
looking_up_right_perct_combined = 0.0
looking_up_left_perct_combined = 0.0
looking_down_right_perct_combined = 0.0
......@@ -601,16 +611,16 @@ def get_student_gaze_estimation_summary_for_period(gaze_estimation_data):
# calculate the average percentages
looking_up_right_average_perct = round((looking_up_right_perct_combined / no_of_gaze_estimations), 1)
looking_up_left_perct = round((looking_up_left_perct_combined / no_of_gaze_estimations), 1)
looking_up_left_average_perct = round((looking_up_left_perct_combined / no_of_gaze_estimations), 1)
looking_down_right_average_perct = round((looking_down_right_perct_combined / no_of_gaze_estimations), 1)
looking_down_left_average_perct = round((looking_down_left_perct_combined / no_of_gaze_estimations), 1)
looking_front_average_perct = round((looking_front_perct_combined / no_of_gaze_estimations), 1)
percentages = {}
percentages["looking_up_and_right_perct"] = looking_up_right_average_perct
percentages["looking_up_and_left_perct"] = looking_up_left_perct_combined
percentages["looking_down_and_right_perct"] = looking_down_right_perct_combined
percentages["looking_down_and_left_perct"] = looking_down_left_perct_combined
percentages["looking_up_and_left_perct"] = looking_up_left_average_perct
percentages["looking_down_and_right_perct"] = looking_down_right_average_perct
percentages["looking_down_and_left_perct"] = looking_down_left_average_perct
percentages["looking_front_perct"] = looking_front_average_perct
return percentages, individual_lec_gaze_estimations, gaze_estimation_labels
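The hunk above fixes the summary to report averages rather than the combined sums. As a worked example of the corrected behaviour:

# two lectures at 30% and 50% "up and left" should average to 40.0,
# not the combined 80.0 that the old assignments returned
looking_up_left_perct_combined = 30.0 + 50.0        # 80.0
no_of_gaze_estimations = 2
looking_up_left_average_perct = round(looking_up_left_perct_combined / no_of_gaze_estimations, 1)  # 40.0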
......@@ -677,6 +687,8 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic
# assign the difference
frame_group_diff[key] = diff if diff > 0 else 1
# for testing purposes
print('starting gaze frame grouping process')
# looping through the frames
while True:
......@@ -802,6 +814,9 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic
frame_group_dict[frame_name]['detection_count'] += detection_count
# for testing purposes
print('gaze frame groupings count: ', frame_count)
# increment the frame count
frame_count += 1
......@@ -848,12 +863,20 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic
# define the labels
labels = ['upright_perct', 'upleft_perct', 'downright_perct', 'downleft_perct', 'front_perct']
# for testing purposes
print('ending gaze frame grouping process')
# return the dictionary
return frame_group_dict, labels
# this section will handle some database operations
def save_frame_detections(video_name):
# for testing purposes
print('starting the saving gaze frame recognition process')
# retrieve the lecture gaze estimation id
lec_gaze = LectureGazeEstimation.objects.filter(lecture_video_id__video_name=video_name)
lec_gaze_ser = LectureGazeEstimationSerializer(lec_gaze, many=True)
......@@ -868,7 +891,7 @@ def save_frame_detections(video_name):
ig.generate_new_id(last_lec_gaze_frame_recognitions.lecture_gaze_frame_recognition_id)
# calculate the frame detections
frame_detections, frame_rate = get_lecture_gaze_esrimation_for_frames(video_name)
frame_detections, frame_rate = get_lecture_gaze_estimation_for_frames(video_name)
# to be added to the field 'frame_recognition_details' in the Lecture Gaze Frame Recordings
frame_recognition_details = []
......@@ -892,6 +915,9 @@ def save_frame_detections(video_name):
lec_gaze_frame_recognitions.save()
# for testing purposes
print('ending the saving gaze frame recognition process')
# now return the frame recognitions
return frame_detections
......@@ -899,6 +925,10 @@ def save_frame_detections(video_name):
# this method will save gaze frame groupings to the database
def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# for testing purposes
print('starting the saving gaze frame groupings process')
frame_group_percentages, gaze_labels = gaze_estimation_frame_groupings(video_name, frame_landmarks,
frame_group_dict)
......@@ -928,6 +958,9 @@ def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
new_lec_gaze_frame_groupings.lecture_gaze_id_id = lec_gaze_id
new_lec_gaze_frame_groupings.frame_group_details = frame_group_details
# for testing purposes
print('ending the saving gaze frame groupings process')
# save
new_lec_gaze_frame_groupings.save()
import os
import cv2
import shutil
import datetime
# import datetime
from datetime import timedelta
from FirstApp.MongoModels import *
from FirstApp.serializers import *
......@@ -94,7 +95,7 @@ def getTimeLandmarks(video_name):
THRESHOLD_GAP = 5
# calculating the real duration
real_duration = datetime.timedelta(seconds=(duration+THRESHOLD_GAP))
real_duration = timedelta(seconds=(duration))
# defines the number of seconds included for a frame group
THRESHOLD_TIME = 10
......@@ -112,7 +113,7 @@ def getTimeLandmarks(video_name):
# loop through the threshold gap limit to define the time landmarks
for i in range(THRESHOLD_GAP):
initial_landmark += unit_gap
time_landmark = str(datetime.timedelta(seconds=initial_landmark))
time_landmark = str(timedelta(seconds=initial_landmark))
time_landmark_value = initial_landmark
time_landmarks.append(time_landmark)
time_landmarks_values.append(time_landmark_value)
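A worked example for the loop above, assuming unit_gap = duration / THRESHOLD_GAP (that assignment is elided from this hunk): a 300-second video with THRESHOLD_GAP = 5 yields landmarks at one-minute intervals.

from datetime import timedelta

duration, THRESHOLD_GAP = 300, 5
unit_gap = duration / THRESHOLD_GAP      # 60.0 seconds per landmark (assumed)
initial_landmark = 0
for i in range(THRESHOLD_GAP):
    initial_landmark += unit_gap
    print(str(timedelta(seconds=initial_landmark)))   # 0:01:00, 0:02:00, ..., 0:05:00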
......@@ -204,6 +205,9 @@ def getFrameLandmarks(video_name, category):
# this section will handle some database operations
def save_time_landmarks(video_name):
# for testing purposes
print('starting the saving time landmarks process')
last_lec_video_time_landmarks = LectureVideoTimeLandmarks.objects.order_by('lecture_video_time_landmarks_id').last()
new_lecture_video_time_landmarks_id = "LVTL00001" if (last_lec_video_time_landmarks is None) else \
ig.generate_new_id(last_lec_video_time_landmarks.lecture_video_time_landmarks_id)
......@@ -233,12 +237,18 @@ def save_time_landmarks(video_name):
new_lec_video_time_landmarks.lecture_video_id_id = lec_video_id
new_lec_video_time_landmarks.time_landmarks = db_time_landmarks
# for testing purposes
print('ending the saving time landmarks process')
new_lec_video_time_landmarks.save()
# this method will save frame landmarks to the database
def save_frame_landmarks(video_name):
# for testing purposes
print('starting the saving frame landmarks process')
# retrieve the previous lecture video frame landmarks details
last_lec_video_frame_landmarks = LectureVideoFrameLandmarks.objects.order_by(
'lecture_video_frame_landmarks_id').last()
......@@ -271,6 +281,9 @@ def save_frame_landmarks(video_name):
new_lec_video_frame_landmarks.save()
# for testing purposes
print('ending the saving frame landmarks process')
# now return the frame landmarks and the frame group dictionary
return frame_landmarks, frame_group_dict
......
# Generated by Django 2.2.11 on 2020-10-20 16:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('FirstApp', '0014_lecturegazeframerecognitions'),
]
operations = [
migrations.CreateModel(
name='Admin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('admin_id', models.CharField(max_length=10)),
('name', models.CharField(max_length=20)),
('email', models.EmailField(max_length=254)),
],
),
migrations.CreateModel(
name='AdminCredentialDetails',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=15)),
('username', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='FirstApp.Admin')),
],
),
migrations.DeleteModel(
name='LecturePoseEstimation',
),
]
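This auto-generated migration is applied with the standard Django workflow (python manage.py migrate FirstApp) once the model changes above are in place.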
{% extends 'FirstApp/template.html' %}
<!DOCTYPE html>
<html lang="en">
<body id="page-top">
<!-- Page Wrapper -->
<div id="wrapper">
<!-- Content Wrapper -->
<div id="content-wrapper" class="d-flex flex-column">
<!-- Main Content -->
<div id="content">
<!-- Begin Page Content -->
{% block 'container-fluid' %}
<div class="container-fluid">
{% load static %}
<!-- 404 Error Text -->
<div class="text-center">
<div class="error mx-auto" data-text="404">401</div>
<p class="lead text-gray-800 mb-5">Unauthorized access</p>
<p class="text-gray-500 mb-0">It looks like you do not have access to this url</p>
<p class="text-gray-500 mb-0">Please login with the correct user type</p>
<a href="/logout">&larr; Back to Login Page</a>
</div>
</div>
{% endblock %}
<!--end of container-fluid -->
</div>
<!-- End of Main Content -->
<!-- Footer -->
<footer class="sticky-footer bg-white">
<div class="container my-auto">
<div class="copyright text-center my-auto">
<span>Copyright &copy; Your Website 2019</span>
</div>
</div>
</footer>
<!-- End of Footer -->
</div>
<!-- End of Content Wrapper -->
</div>
<!-- End of Page Wrapper -->
<!-- Scroll to Top Button-->
<a class="scroll-to-top rounded" href="#page-top">
<i class="fas fa-angle-up"></i>
</a>
<!-- Logout Modal-->
<div class="modal fade" id="logoutModal" tabindex="-1" role="dialog" aria-labelledby="exampleModalLabel" aria-hidden="true">
<div class="modal-dialog" role="document">
<div class="modal-content">
<div class="modal-header">
<h5 class="modal-title" id="exampleModalLabel">Ready to Leave?</h5>
<button class="close" type="button" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body">Select "Logout" below if you are ready to end your current session.</div>
<div class="modal-footer">
<button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
<a class="btn btn-primary" href="login.html">Logout</a>
</div>
</div>
</div>
</div>
<!-- Bootstrap core JavaScript-->
<script src="vendor/jquery/jquery.min.js"></script>
<script src="vendor/bootstrap/js/bootstrap.bundle.min.js"></script>
<!-- Core plugin JavaScript-->
<script src="vendor/jquery-easing/jquery.easing.min.js"></script>
<!-- Custom scripts for all pages-->
<script src="js/sb-admin-2.min.js"></script>
</body>
</html>
......@@ -742,7 +742,7 @@
//to handle the 'integrate' modal
$('#integrate_gaze').click(function () {
$('#integrate_activity').click(function () {
//define the student video src
let video_src = "{% static '' %}FirstApp/videos/" + global_video_name;
......@@ -1215,66 +1215,6 @@
</div>
<!--2nd column -->
{# <div class="col-lg-6">#}
{# <!--card content -->#}
{# <div class="card shadow mb-4">#}
{# <!--card header -->#}
{# <div class="card-header py-3">#}
{# <h5 class="m-0 font-weight-bold text-primary">Frame Detections</h5>#}
{# </div>#}
{##}
{# <!--card body -->#}
{# <div class="text-center p-4" id="detection_frames">#}
{##}
{# <!--no content message-->#}
{# <div class="text-center p-2" id="no_detection_message_content">#}
{# <span class="font-italic">No frame is selected</span>#}
{# </div>#}
{##}
{# <div class="text-left m-3" id="detection_number_area" hidden>#}
{# <p>No of detections: <span id="no_of_detections"></span></p>#}
{# </div>#}
{# <!--the detection loader -->#}
{# <div class="text-center p-2" id="detection_loader" hidden>#}
{# <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"#}
{# alt="Loader">#}
{# </div>#}
{# </div>#}
{# </div>#}
<!--detection person card -->
{# <div class="card shadow mb-4">#}
{# <!--card header -->#}
{# <div class="card-header py-3">#}
{# <h5 class="m-0 font-weight-bold text-primary">Detected Students (by activity#}
{# type)</h5>#}
{# </div>#}
{##}
{# <!--card body -->#}
{# <div class="text-center p-4" id="detection_students">#}
{# <!--activity type line -->#}
{# <div class="text-center p-2" id="activity_type" hidden>#}
{# <p>Activity Type: <span class="font-weight-bold" id="activity_type_text"></span>#}
{# </p>#}
{# </div>#}
{##}
{# <!--no content message-->#}
{# <div class="text-center p-2" id="no_detection_student_content">#}
{# <span class="font-italic">No activity type is selected</span>#}
{# </div>#}
{##}
{# <!--the detection student loader -->#}
{# <div class="text-center p-2" id="detection_student_loader" hidden>#}
{# <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"#}
{# alt="Loader">#}
{# </div>#}
{##}
{# </div>#}
{# </div>#}
{# </div>#}
<!--2nd column -->
<div class="col-lg-6">
<!--card -->
......@@ -1292,7 +1232,7 @@
<!--button -->
<div class="text-right m-4">
<button type="button" class="btn btn-outline-success" id="integrate_gaze">
<button type="button" class="btn btn-outline-success" id="integrate_activity">
Process
</button>
</div>
......
{% load static %}
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<meta name="description" content="">
<meta name="author" content="">
<title>SLPES</title>
<!-- Custom fonts for this template-->
<link href="{% static 'FirstApp/vendor/fontawesome-free/css/all.min.css' %}" rel="stylesheet" type="text/css">
<link href="https://fonts.googleapis.com/css?family=Nunito:200,200i,300,300i,400,400i,600,600i,700,700i,800,800i,900,900i"
rel="stylesheet">
<!-- Custom styles for this template-->
<link href="{% static 'FirstApp/css/sb-admin-2.min.css' %}" rel="stylesheet">
</head>
<body class="bg-gradient-primary">
<div class="container">
<!-- Outer Row -->
<div class="row justify-content-center">
<div class="col-xl-10 col-lg-12 col-md-9">
<div class="card o-hidden border-0 shadow-lg my-5">
<div class="card-body p-0">
<!-- Nested Row within Card Body -->
<div class="row">
<div class="col-lg-6 d-none d-lg-block">
<img src="{% static 'FirstApp/images/admin.jpg' %}" width="400" height="600"
alt="No image">
</div>
<div class="col-lg-6">
<div class="p-5">
<div class="text-center">
<h1 class="h4 text-gray-900 mb-4">Welcome Back!</h1>
</div>
<!--form -->
<form action="/process-admin-login" method="POST" name="loginForm" class="user">
{% csrf_token %}
<div class="form-group">
<input type="email" name="email" class="form-control form-control-user"
id="exampleInputEmail" aria-describedby="emailHelp"
placeholder="Enter Email Address...">
</div>
<div class="form-group">
<input type="password" name="password" class="form-control form-control-user"
id="exampleInputPassword" placeholder="Password">
<div class="alert alert-danger m-4">{{ message }}</div>
</div>
<div class="form-group">
<div class="custom-control custom-checkbox small">
<input type="checkbox" class="custom-control-input" id="customCheck">
<label class="custom-control-label" for="customCheck">Remember Me</label>
</div>
</div>
<button type="submit" class="btn btn-primary btn-user btn-block">Login</button>
<hr>
</form>
<hr>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
<!-- Bootstrap core JavaScript-->
<script src="{% static 'FirstApp/vendor/jquery/jquery.min.js' %}"></script>
<script src="{% static 'FirstApp/vendor/bootstrap/js/bootstrap.bundle.min.js' %}"></script>
<!-- Core plugin JavaScript-->
<script src="{% static 'FirstApp/vendor/jquery-easing/jquery.easing.min.js' %}"></script>
<!-- Custom scripts for all pages-->
<script src="{% static 'FirstApp/js/sb-admin-2.min.js' %}"></script>
</body>
</html>
......@@ -216,6 +216,7 @@
//to handle the 'btn-success' (process) button
$(document).on('click', '.btn-success', function (e) {
//sending the POST request to process the lecture activities
fetch('http://127.0.0.1:8000/process-lecture-emotion/?lecture_video_name=' + global_video_name + '&lecture_video_id=' + global_lecture_video_id)
.then((res) => res.json())
......
......@@ -66,6 +66,8 @@
Interface
</div>
{% if request.session.user_type == "Lecturer" %}
<!-- Nav Item - Pages Collapse Menu -->
<li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseTwo" aria-expanded="true" aria-controls="collapseTwo">
......@@ -83,6 +85,7 @@
</div>
</li>
<!-- Nav Item - Pages Collapse Menu -->
<li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseThree" aria-expanded="true" aria-controls="collapseThree">
......@@ -97,6 +100,8 @@
</div>
</li>
<li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseFour" aria-expanded="true" aria-controls="collapseThree">
<i class="fas fa-fw fa-cog"></i>
......@@ -127,6 +132,8 @@
</div>
</li>
{% endif %}
<!-- Divider -->
<hr class="sidebar-divider">
......@@ -178,6 +185,8 @@
</div>
</ul>
<!-- End of Sidebar -->
<div id="content-wrapper" class="d-flex flex-column">
......
{% load static %}
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<meta name="description" content="">
<meta name="author" content="">
<title>SLPES</title>
<!-- Custom fonts for this template-->
<link href="{% static 'FirstApp/vendor/fontawesome-free/css/all.min.css' %}" rel="stylesheet" type="text/css">
<link href="https://fonts.googleapis.com/css?family=Nunito:200,200i,300,300i,400,400i,600,600i,700,700i,800,800i,900,900i"
rel="stylesheet">
<!-- Custom styles for this template-->
<link href="{% static 'FirstApp/css/sb-admin-2.min.css' %}" rel="stylesheet">
</head>
<body class="bg-gradient-primary">
<div class="container">
<!-- Outer Row -->
<div class="row justify-content-center">
<div class="col-xl-10 col-lg-12 col-md-9">
<div class="card o-hidden border-0 shadow-lg my-5">
<div class="card-body p-0">
<!-- Nested Row within Card Body -->
<div class="row">
<div class="col-lg-6 d-none d-lg-block">
<img src="{% static 'FirstApp/images/user_redirect.png' %}" width="400" height="500"
alt="No image">
</div>
<div class="col-lg-6">
<div class="p-5">
<div class="text-center">
<h1 class="h4 text-gray-900 mb-4">Select the user type</h1>
</div>
<!--form -->
<form action="/process-user-redirect" method="POST" name="loginForm" class="user">
{% csrf_token %}
<div class="form-check mx-3">
<input class="form-check-input" type="radio" name="user_type"
id="admin" value="admin" checked>
<label class="form-check-label" for="admin">
Admin
</label>
</div>
<div style="padding-top: 20px">
<div class="form-check mx-3">
<input class="form-check-input" type="radio" name="user_type"
id="lecturer" value="lecturer">
<label class="form-check-label" for="lecturer">
Lecturer
</label>
</div>
<div style="padding-top: 20px">
<button type="submit" class="btn btn-primary btn-user btn-block">Proceed</button>
<hr>
</form>
<hr>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
<!-- Bootstrap core JavaScript-->
<script src="{% static 'FirstApp/vendor/jquery/jquery.min.js' %}"></script>
<script src="{% static 'FirstApp/vendor/bootstrap/js/bootstrap.bundle.min.js' %}"></script>
<!-- Core plugin JavaScript-->
<script src="{% static 'FirstApp/vendor/jquery-easing/jquery.easing.min.js' %}"></script>
<!-- Custom scripts for all pages-->
<script src="{% static 'FirstApp/js/sb-admin-2.min.js' %}"></script>
</body>
</html>
......@@ -14,6 +14,7 @@ urlpatterns = [
path('logout', views.logoutView),
path('register-user', views.register),
path('404', views.view404),
path('401', views.view401),
path('500', views.view500),
path('blank', views.blank),
path('gaze', views.gaze),
......@@ -32,10 +33,20 @@ urlpatterns = [
# video results
path('video_result', views.video_result),
# this is used for login
# this is used to process login
path('process-login', views.loggedInView),
# this is used for login
# this is used to process admin login
path('process-admin-login', views.processAdminLogin),
# this is used for user-redirect processing
path('process-user-redirect', views.processUserRedirect),
# this is used for admin login page
path('admin-login', views.adminLogin),
# this is used for activity
path('activity', views.activity),
# tables view
......@@ -44,6 +55,10 @@ urlpatterns = [
# test view (delete later)
path('test', views.test),
# user direct view
path('user-direct', views.userDirect),
url(r'^register', views.RegisterViewSet),
# re_path('video/?video_name<str:video_name>', views.video),
url(r'^teachers/', views.teachersList.as_view()),
......
......@@ -109,13 +109,18 @@ class LectureViewSet(APIView):
####### VIEWS ######
@login_required(login_url='/login')
@login_required(login_url='/user-direct')
def hello(request):
try:
username = request.user.username
# retrieve the lecturer
lecturer = request.session['lecturer']
user_type = request.session['user_type']
print('user_type: ', user_type)
# retrieve the lecturer's timetable slots
lecturer_timetable = FacultyTimetable.objects.filter()
......@@ -194,15 +199,27 @@ def hello(request):
context = {'object': obj, 'Videos': videos, 'durations': durations, 'template_name': 'FirstApp/template.html', 'lecturer_details': lecturer_details, "lecturer": lecturer}
return render(request, 'FirstApp/Home.html', context)
# in case of keyerror exception
except KeyError as exc:
return redirect('/401')
except Exception as exc:
return redirect('/500')
# this method will handle 404 error page
def view404(request):
return render(request, 'FirstApp/404.html')
# this method will handle the 401 error page
def view401(request):
return render(request, 'FirstApp/401.html')
# querying the database
def blank(request):
emotions = LectureEmotionReport.objects.all().order_by('lecture_id')
return render(request, 'FirstApp/blank.html', {'details': emotions})
@login_required(login_url='/login')
@login_required(login_url='/user-direct')
def gaze(request):
try:
......@@ -221,6 +238,11 @@ def gaze(request):
subject_list.append(subject_serialized.data)
# handling the keyError
except KeyError as exc:
return redirect('/401')
# handling the general exceptions
except Exception as exc:
return redirect('/500')
......@@ -240,7 +262,7 @@ def processGaze(request):
# the corresponding view for pose estimation
@login_required(login_url='/login')
@login_required(login_url='/user-direct')
def pose(request):
try:
......@@ -295,7 +317,7 @@ def webcam(request):
return redirect('/')
# to process video for emotion detection
@login_required(login_url='/login')
@login_required(login_url='/user-direct')
def video(request):
title = 'Student and Lecturer Performance Enhancement System'
video_name = request.GET.get('video_name')
......@@ -310,7 +332,7 @@ def video(request):
# extractor view
@login_required(login_url='/login')
@login_required(login_url='/user-direct')
def extractor(request):
folder = os.path.join(BASE_DIR, os.path.join('static\\FirstApp\\videos'))
videoPaths = [os.path.join(folder, file) for file in os.listdir(folder)]
......@@ -358,7 +380,7 @@ def child(request):
return render(request, 'FirstApp/child.html', {'template_name': 'FirstApp/base.html'})
# displaying video results
@login_required(login_url='/login')
@login_required(login_url='/user-direct')
def video_result(request):
try:
......@@ -434,7 +456,11 @@ def video_result(request):
# append to the list
due_lecture_list.append(obj)
# handling the keyError
except KeyError as exc:
return redirect('/401')
# handling the general exceptions
except Exception as exc:
print('what is wrong?: ', exc)
return redirect('/500')
......@@ -444,7 +470,7 @@ def video_result(request):
# view for emotion page
@login_required(login_url='/login')
@login_required(login_url='/user-direct')
def emotion_view(request):
try:
......@@ -463,6 +489,11 @@ def emotion_view(request):
subject_list.append(subject_serialized.data)
# handling the keyError
except KeyError as exc:
return redirect('/401')
# handling the general exceptions
except Exception as exc:
return redirect('/500')
......@@ -490,6 +521,7 @@ def loggedInView(request):
login(request, user)
# setting up the session
request.session['lecturer'] = lecturer.id
request.session['user_type'] = "Lecturer"
return redirect('/')
......@@ -506,7 +538,7 @@ def logoutView(request):
logout(request)
return redirect('/login')
return redirect('/user-direct')
# 500 error page
......@@ -519,7 +551,7 @@ def tables(request):
return render(request, "FirstApp/tables.html")
@login_required(login_url='/login')
@login_required(login_url='/user-direct')
def activity(request):
try:
......@@ -538,6 +570,11 @@ def activity(request):
subject_list.append(subject_serialized.data)
# handling the keyError
except KeyError as exc:
return redirect('/401')
# handling the general exception
except Exception as exc:
return redirect('/500')
......@@ -546,3 +583,60 @@ def activity(request):
def test(request):
return render(request, "FirstApp/pdf_template.html")
# this method will handle user directing function
def userDirect(request):
return render(request, "FirstApp/user_direct.html")
# this method will handle user redirection process
def processUserRedirect(request):
if request.POST:
user_type = request.POST.get('user_type')
if user_type == 'admin':
return redirect('/admin-login')
elif user_type == 'lecturer':
return redirect('/login')
return redirect('/500')
# admin login page
def adminLogin(request):
return render(request, "FirstApp/admin_login.html")
# this method will process admin login
def processAdminLogin(request):
username = "not logged in"
message = "Invalid Username or Password"
adminLoginForm = AdminLoginForm(request.POST)
print('message: ', message)
try:
# if the details are valid, let the user log in
if adminLoginForm.is_valid():
email = adminLoginForm.cleaned_data.get('email')
user = User.objects.get(email=email)
admin = Admin.objects.get(email=email)
login(request, user)
# setting up the session
request.session['admin'] = admin.id
request.session['user_type'] = "Admin"
return redirect('/summary/lecture')
else:
message = "Please provide correct credntials"
except Exception as exc:
print('exception: ', exc)
return render(request, 'FirstApp/admin_login.html', {'message': message})
\ No newline at end of file