Commit 3a6e7948 authored by I.K Seneviratne

Committing the full implementation of the lecture Gaze estimation frame groupings in the lecturer Home page.
parent 1653846c
@@ -1027,7 +1027,7 @@ class GetLectureEmotionSummary(APIView):
         # new_lecture_emotion_frame_grouping_id = "LEFG00001" if (last_lec_emotion_frame_grouping is None) else \
         #     ig.generate_new_id(last_lec_emotion_frame_grouping.lecture_emotion_frame_groupings_id)
         #
-        # # retrieve the lecture activity id
+        # # retrieve the lecture emotion id
         # lec_emotion = LectureEmotionReport.objects.filter(lecture_video_id__video_name=video_name)
         # lec_emotion_ser = LectureEmotionSerializer(lec_emotion, many=True)
         # lec_emotion_id = lec_emotion_ser.data[0]['id']
@@ -1067,7 +1067,7 @@ class GetLectureGazeSummary(APIView):
         video_name = request.query_params.get('video_name')

         # checking the existence of lecture activity frame grouping records in the db
-        isExist = LectureGazeFrameGroupings.objects.filter(lecture_activity_id__lecture_video_id__video_name=video_name).exists()
+        isExist = LectureGazeFrameGroupings.objects.filter(lecture_gaze_id__lecture_video_id__video_name=video_name).exists()

         if (isExist):
             # frame_landmarks, frame_group_dict = ve.getFrameLandmarks(video_name)
@@ -1084,11 +1084,12 @@ class GetLectureGazeSummary(APIView):
             for landmark in retrieved_frame_landmarks:
                 frame_landmarks.append(landmark['landmark'])

+            # retrieve the frame groupings
             lec_gaze_frame_groupings = LectureGazeFrameGroupings.objects.filter(lecture_gaze_id__lecture_video_id__video_name=video_name)
             lec_gaze_frame_groupings_ser = LectureGazeFrameGroupingsSerializer(lec_gaze_frame_groupings, many=True)
             lec_gaze_frame_groupings_data = lec_gaze_frame_groupings_ser.data[0]

+            # take the frame group details out of it
             frame_group_details = lec_gaze_frame_groupings_data["frame_group_details"]
@@ -1097,87 +1098,69 @@ class GetLectureGazeSummary(APIView):
                 frame_group_percentages[group['frame_group']] = group['frame_group_percentages']

-            class_labels = ['phone_perct', 'listen_perct', 'note_perct']
+            class_labels = ['upright_perct', 'upleft_perct', 'downright_perct', 'downleft_perct', 'front_perct']

             return Response({
                 "frame_landmarks": frame_landmarks,
                 "frame_group_percentages": frame_group_percentages,
-                "activity_labels": class_labels
+                "gaze_labels": class_labels
             })

-        # else:
-        #
-        #     # retrieve the previous lecture video frame landmarks details
-        #     last_lec_video_frame_landmarks = LectureVideoFrameLandmarks.objects.order_by(
-        #         'lecture_video_frame_landmarks_id').last()
-        #     new_lecture_video_frame_landmarks_id = "LVFL00001" if (last_lec_video_frame_landmarks is None) else \
-        #         ig.generate_new_id(last_lec_video_frame_landmarks.lecture_video_frame_landmarks_id)
-        #
-        #
-        #     frame_landmarks, frame_group_dict = ve.getFrameLandmarks(video_name)
-        #     frame_group_percentages, activity_labels = ar.activity_frame_groupings(video_name, frame_landmarks, frame_group_dict)
-        #
-        #     # creating a new list to display in the frontend
-        #     # retrieve lecture video details
-        #     lec_video = LectureVideo.objects.filter(video_name=video_name)
-        #     lec_video_ser = LectureVideoSerializer(lec_video, many=True)
-        #     lec_video_id = lec_video_ser.data[0]['id']
-        #
-        #     # save the frame landmarks details into db (temp method)
-        #     db_frame_landmarks = []
-        #
-        #     for landmark in frame_landmarks:
-        #         landmark_obj = Landmarks()
-        #         landmark_obj.landmark = landmark
-        #
-        #         db_frame_landmarks.append(landmark_obj)
-        #
-        #     new_lec_video_frame_landmarks = LectureVideoFrameLandmarks()
-        #     new_lec_video_frame_landmarks.lecture_video_frame_landmarks_id = new_lecture_video_frame_landmarks_id
-        #     new_lec_video_frame_landmarks.lecture_video_id_id = lec_video_id
-        #     new_lec_video_frame_landmarks.frame_landmarks = db_frame_landmarks
-        #
-        #     new_lec_video_frame_landmarks.save()
-        #
-        #
-        #     # save the frame group details into db (temp method)
-        #     last_lec_activity_frame_grouping = LectureActivityFrameGroupings.objects.order_by('lecture_activity_frame_groupings_id').last()
-        #     new_lecture_activity_frame_grouping_id = "LAFG00001" if (last_lec_activity_frame_grouping is None) else \
-        #         ig.generate_new_id(last_lec_activity_frame_grouping.lecture_activity_frame_groupings_id)
-        #
-        #     # retrieve the lecture activity id
-        #     lec_activity = LectureActivity.objects.filter(lecture_video_id__video_name=video_name)
-        #     lec_activity_ser = LectureActivitySerializer(lec_activity, many=True)
-        #     lec_activity_id = lec_activity_ser.data[0]['id']
-        #
-        #     # create the frame group details
-        #     frame_group_details = []
-        #
-        #     for key in frame_group_percentages.keys():
-        #         # create an object of type 'LectureActivityFrameGroupDetails'
-        #         lec_activity_frame_group_details = LectureActivityFrameGroupDetails()
-        #         lec_activity_frame_group_details.frame_group = key
-        #         lec_activity_frame_group_details.frame_group_percentages = frame_group_percentages[key]
-        #
-        #         frame_group_details.append(lec_activity_frame_group_details)
-        #
-        #
-        #     new_lec_activity_frame_groupings = LectureActivityFrameGroupings()
-        #     new_lec_activity_frame_groupings.lecture_activity_frame_groupings_id = new_lecture_activity_frame_grouping_id
-        #     new_lec_activity_frame_groupings.lecture_activity_id_id = lec_activity_id
-        #     new_lec_activity_frame_groupings.frame_group_details = frame_group_details
-        #
-        #     # save
-        #     new_lec_activity_frame_groupings.save()
-        #
-        #
-        #     return Response({
-        #         "frame_landmarks": frame_landmarks,
-        #         "frame_group_percentages": frame_group_percentages,
-        #         "activity_labels": activity_labels
-        #     })
\ No newline at end of file
+        else:
+
+            frame_landmarks = []
+
+            # retrieve frame landmarks from db
+            lec_video_frame_landmarks = LectureVideoFrameLandmarks.objects.filter(
+                lecture_video_id__video_name=video_name)
+            lec_video_frame_landmarks_ser = LectureVideoFrameLandmarksSerializer(lec_video_frame_landmarks, many=True)
+            lec_video_frame_landmarks_data = lec_video_frame_landmarks_ser.data[0]
+
+            retrieved_frame_landmarks = lec_video_frame_landmarks_data["frame_landmarks"]
+
+            for landmark in retrieved_frame_landmarks:
+                frame_landmarks.append(int(landmark['landmark']))
+
+            l, frame_group_dict = ve.getFrameLandmarks(video_name, "Gaze")
+            print('frame group dict: ', frame_group_dict)
+
+            frame_group_percentages, gaze_labels = hge.gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dict)
+
+            # save the frame group details into db (temp method)
+            last_lec_gaze_frame_grouping = LectureGazeFrameGroupings.objects.order_by('lecture_gaze_frame_groupings_id').last()
+            new_lecture_gaze_frame_grouping_id = "LGFG00001" if (last_lec_gaze_frame_grouping is None) else \
+                ig.generate_new_id(last_lec_gaze_frame_grouping.lecture_gaze_frame_groupings_id)
+
+            # retrieve the lecture activity id
+            lec_gaze = LectureGazeEstimation.objects.filter(lecture_video_id__video_name=video_name)
+            lec_gaze_ser = LectureGazeEstimationSerializer(lec_gaze, many=True)
+            lec_gaze_id = lec_gaze_ser.data[0]['id']
+
+            # create the frame group details
+            frame_group_details = []
+
+            for key in frame_group_percentages.keys():
+                # create an object of type 'LectureActivityFrameGroupDetails'
+                lec_gaze_frame_group_details = LectureGazeFrameGroupDetails()
+                lec_gaze_frame_group_details.frame_group = key
+                lec_gaze_frame_group_details.frame_group_percentages = frame_group_percentages[key]
+
+                frame_group_details.append(lec_gaze_frame_group_details)
+
+            new_lec_gaze_frame_groupings = LectureGazeFrameGroupings()
+            new_lec_gaze_frame_groupings.lecture_gaze_frame_groupings_id = new_lecture_gaze_frame_grouping_id
+            new_lec_gaze_frame_groupings.lecture_gaze_id_id = lec_gaze_id
+            new_lec_gaze_frame_groupings.frame_group_details = frame_group_details
+
+            # save
+            new_lec_gaze_frame_groupings.save()
+
+            return Response({
+                "frame_landmarks": frame_landmarks,
+                "frame_group_percentages": frame_group_percentages,
+                "gaze_labels": gaze_labels
+            })
\ No newline at end of file
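The new else branch derives the next frame-grouping primary key from the last saved record: "LGFG00001" when no record exists, otherwise ig.generate_new_id() applied to the previous ID. That helper is not part of this diff; the sketch below is only an assumption about what it plausibly does, namely incrementing the zero-padded numeric suffix that the "LGFG00001" default suggests.

# Minimal sketch of a sequential-ID helper matching the "LGFG00001" pattern above.
# Assumption: the real ig.generate_new_id() is not shown in this diff.
import re


def generate_new_id(last_id: str) -> str:
    """Increment the numeric suffix of an ID such as 'LGFG00001' -> 'LGFG00002'."""
    match = re.match(r"^([A-Z]+)(\d+)$", last_id)
    if match is None:
        raise ValueError("unexpected id format: {}".format(last_id))
    prefix, number = match.group(1), match.group(2)
    next_number = int(number) + 1
    # keep the original zero-padding width
    return "{}{:0{width}d}".format(prefix, next_number, width=len(number))


if __name__ == "__main__":
    last = "LGFG00007"
    new_id = "LGFG00001" if last is None else generate_new_id(last)
    print(new_id)  # LGFG00008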
@@ -618,6 +618,7 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic
     EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\gaze\\{}".format(video_name))
     VIDEO_PATH = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
+    print('video path: ', VIDEO_PATH)

     # load the face detection model
     face_model = get_face_detector()
@@ -626,9 +627,7 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic
     cap = cv2.VideoCapture(VIDEO_PATH)
     ret, img = cap.read()
     size = img.shape
-    font = cv2.FONT_HERSHEY_SIMPLEX
-    font = cv2.FONT_HERSHEY_SIMPLEX

     # 3D model points.
     model_points = np.array([
         (0.0, 0.0, 0.0),             # Nose tip
@@ -639,8 +638,6 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic
         (150.0, -150.0, -125.0)      # Right mouth corner
     ])

-    # define a variable to count the frames
-    frame_count = 0

     # set a threshold angle
     # THRESHOLD = 15
@@ -662,8 +659,6 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic
     # initializing the count variables
     frame_count = 0

-    # class labels
-    class_labels = ['Phone checking', 'Listening', 'Note taking']

     # get the frame differences for each frame group
     frame_group_diff = {}
@@ -699,7 +694,6 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic
         # find the number of faces
         faces = find_faces(img, face_model)

-        student_count = 0

         # iterate through each detected face
         for face in faces:
@@ -711,8 +705,6 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic
             isLookingLeft = False
             isLookingFront = False

-            # deriving the student name to display in the image
-            student_name = 'student-{}'.format(student_count)

             # retrieving the facial landmarks and face bounding box coordinates
             marks, facebox = detect_marks(img, landmark_model, face)
@@ -779,61 +771,78 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic
             elif isLookingFront:
                 head_front_count += 1

-            # increment the face count
-            face_count += 1

             # increment the detection count
             detection_count += 1

         # finding the time landmark that the current frame is in
         for i in frame_landmarks:
             index = frame_landmarks.index(i)
             j = index + 1

             # checking whether the next index is within the range
             if j < len(frame_landmarks):
                 next_value = frame_landmarks[j]

                 # checking the correct time landmark range
                 if (frame_count >= i) & (frame_count < next_value):
                     frame_name = "{}-{}".format(i, next_value)

+                    print('frame group dict: ', frame_group_dict[frame_name])
+
                     frame_group_dict[frame_name]['upright_count'] += head_up_right_count
                     frame_group_dict[frame_name]['upleft_count'] += head_up_left_count
                     frame_group_dict[frame_name]['downright_count'] += head_down_right_count
                     frame_group_dict[frame_name]['downleft_count'] += head_down_left_count
                     frame_group_dict[frame_name]['front_count'] += head_front_count
                     frame_group_dict[frame_name]['detection_count'] += detection_count

                 # increment the frame count
                 frame_count += 1

+            else:
+                break

     # calculate the percentage values
     for key in frame_group_dict.keys():
         frame_group_details = frame_group_dict[key]

-        frame_group_phone_count = frame_group_details['phone_count']
-        frame_group_listen_count = frame_group_details['listen_count']
-        frame_group_note_count = frame_group_details['note_count']
-        group_detection_count = frame_group_details['detection_count']
+        frame_group_upright_count = frame_group_details['upright_count']
+        frame_group_upleft_count = frame_group_details['upleft_count']
+        frame_group_downright_count = frame_group_details['downright_count']
+        frame_group_downleft_count = frame_group_details['downleft_count']
+        frame_group_front_count = frame_group_details['front_count']
+
+        print('detection count: ', frame_group_details['detection_count'])
+        group_detection_count = 1 if frame_group_details['detection_count'] == 0 else frame_group_details['detection_count']

-        frame_group_phone_perct = float(frame_group_phone_count / group_detection_count) * 100
-        frame_group_listen_perct = float(frame_group_listen_count / group_detection_count) * 100
-        frame_group_note_perct = float(frame_group_note_count / group_detection_count) * 100
+        frame_group_upright_perct = float(frame_group_upright_count / group_detection_count) * 100
+        frame_group_upleft_perct = float(frame_group_upleft_count / group_detection_count) * 100
+        frame_group_downright_perct = float(frame_group_downright_count / group_detection_count) * 100
+        frame_group_downleft_perct = float(frame_group_downleft_count / group_detection_count) * 100
+        frame_group_front_perct = float(frame_group_front_count / group_detection_count) * 100

         # assign the values to the same dictionary
-        frame_group_dict[key]['phone_perct'] = round(frame_group_phone_perct, 1)
-        frame_group_dict[key]['listen_perct'] = round(frame_group_listen_perct, 1)
-        frame_group_dict[key]['note_perct'] = round(frame_group_note_perct, 1)
+        frame_group_dict[key]['upright_perct'] = round(frame_group_upright_perct, 1)
+        frame_group_dict[key]['upleft_perct'] = round(frame_group_upleft_perct, 1)
+        frame_group_dict[key]['downright_perct'] = round(frame_group_downright_perct, 1)
+        frame_group_dict[key]['downleft_perct'] = round(frame_group_downleft_perct, 1)
+        frame_group_dict[key]['front_perct'] = round(frame_group_front_perct, 1)

         # removing irrelevant items from the dictionary
-        frame_group_dict[key].pop('phone_count')
-        frame_group_dict[key].pop('listen_count')
-        frame_group_dict[key].pop('note_count')
-        frame_group_dict[key].pop('detection_count')
+        frame_group_dict[key].pop('upright_count')
+        frame_group_dict[key].pop('upleft_count')
+        frame_group_dict[key].pop('downright_count')
+        frame_group_dict[key].pop('downleft_count')
+        frame_group_dict[key].pop('front_count')
+        frame_group_dict[key].pop('detection_count')

+    # print('frame group dict: ', frame_group_dict)
+
+    # define the labels
+    labels = ['upright_perct', 'upleft_perct', 'downright_perct', 'downleft_perct', 'front_perct']

     # return the dictionary
-    return frame_group_dict
+    return frame_group_dict, labels
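The rewritten tail of gaze_estimation_frame_groupings() turns the per-group head-pose counts into rounded percentages, guards against a zero detection count, drops the raw counts, and returns the label list alongside the dictionary. A standalone sketch of that step, assuming each group carries the five count fields plus detection_count exactly as named in the diff:

# Standalone sketch of the per-group percentage step shown in the diff above.
# Assumption: each group dict carries the five head-pose counts plus 'detection_count'.
frame_group_dict = {
    "0-30": {"upright_count": 4, "upleft_count": 2, "downright_count": 1,
             "downleft_count": 0, "front_count": 3, "detection_count": 10},
    "30-60": {"upright_count": 0, "upleft_count": 0, "downright_count": 0,
              "downleft_count": 0, "front_count": 0, "detection_count": 0},
}

count_keys = ["upright_count", "upleft_count", "downright_count", "downleft_count", "front_count"]

for key, details in frame_group_dict.items():
    # avoid division by zero when nothing was detected in this group
    detection_count = details["detection_count"] if details["detection_count"] != 0 else 1

    for count_key in count_keys:
        perct_key = count_key.replace("_count", "_perct")
        details[perct_key] = round(float(details[count_key] / detection_count) * 100, 1)

    # drop the raw counts so only the percentages remain
    for count_key in count_keys + ["detection_count"]:
        details.pop(count_key)

labels = ["upright_perct", "upleft_perct", "downright_perct", "downleft_perct", "front_perct"]
print(frame_group_dict, labels)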
@@ -356,11 +356,11 @@ class LectureGazeFrameGroupingsSerializer(serializers.ModelSerializer):
             group_details["frame_group_percentages"] = {}

             group_details["frame_group"] = frame_group.frame_group
-            group_details["frame_group_percentages"]["looking_up_and_right_perct"] = frame_group.frame_group_percentages.looking_up_and_right_perct
-            group_details["frame_group_percentages"]["looking_up_and_left_perct"] = frame_group.frame_group_percentages.looking_up_and_left_perct
-            group_details["frame_group_percentages"]["looking_down_and_right_perct"] = frame_group.frame_group_percentages.looking_down_and_right_perct
-            group_details["frame_group_percentages"]["looking_down_and_left_perct"] = frame_group.frame_group_percentages.looking_down_and_left_perct
-            group_details["frame_group_percentages"]["looking_front_perct"] = frame_group.frame_group_percentages.looking_front_perct
+            group_details["frame_group_percentages"]["upright_perct"] = frame_group.frame_group_percentages.upright_perct
+            group_details["frame_group_percentages"]["upleft_perct"] = frame_group.frame_group_percentages.upleft_perct
+            group_details["frame_group_percentages"]["downright_perct"] = frame_group.frame_group_percentages.downright_perct
+            group_details["frame_group_percentages"]["downleft_perct"] = frame_group.frame_group_percentages.downleft_perct
+            group_details["frame_group_percentages"]["front_perct"] = frame_group.frame_group_percentages.front_perct

             return_data.append(group_details)
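With the serializer now emitting the shortened field names, the summary endpoint, the grouping helper, and the stored records all agree on the same keys. A hedged illustration of the response shape the front end receives follows; the numbers are made up, only the key names come from this diff.

# Illustrative response shape for the gaze summary endpoint after this change;
# the values below are invented, only the key names are taken from the diff.
example_response = {
    "frame_landmarks": [0, 30, 60, 90],
    "frame_group_percentages": {
        "0-30": {
            "upright_perct": 40.0,
            "upleft_perct": 20.0,
            "downright_perct": 10.0,
            "downleft_perct": 0.0,
            "front_perct": 30.0,
        },
    },
    "gaze_labels": ["upright_perct", "upleft_perct", "downright_perct", "downleft_perct", "front_perct"],
}

# the front end can then read one percentage series per label, e.g.:
for label in example_response["gaze_labels"]:
    series = [group[label] for group in example_response["frame_group_percentages"].values()]
    print(label, series)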
@@ -282,7 +282,7 @@
         });

         //this function will handle the gaze 'summary' button
-        $('#gaze_summary_btn').click(function () {
+        $('#gaze_summary_btn').click(function (e) {
             //change the innerHTML of the clicked button
             e.target.innerHTML = "<span class='font-italic'>Processing</span>";