Commit 3a6e7948 authored by I.K Seneviratne

Committing the full implementation of the lecture Gaze estimation frame...

Committing the full implementation of the lecture Gaze estimation frame groupings in the lecturer Home page.
parent 1653846c
@@ -1027,7 +1027,7 @@ class GetLectureEmotionSummary(APIView):
# new_lecture_emotion_frame_grouping_id = "LEFG00001" if (last_lec_emotion_frame_grouping is None) else \
# ig.generate_new_id(last_lec_emotion_frame_grouping.lecture_emotion_frame_groupings_id)
#
# # retrieve the lecture activity id
# # retrieve the lecture emotion id
# lec_emotion = LectureEmotionReport.objects.filter(lecture_video_id__video_name=video_name)
# lec_emotion_ser = LectureEmotionSerializer(lec_emotion, many=True)
# lec_emotion_id = lec_emotion_ser.data[0]['id']
@@ -1067,7 +1067,7 @@ class GetLectureGazeSummary(APIView):
video_name = request.query_params.get('video_name')
# checking the existence of lecture gaze frame grouping records in the db
isExist = LectureGazeFrameGroupings.objects.filter(lecture_activity_id__lecture_video_id__video_name=video_name).exists()
isExist = LectureGazeFrameGroupings.objects.filter(lecture_gaze_id__lecture_video_id__video_name=video_name).exists()
if (isExist):
# frame_landmarks, frame_group_dict = ve.getFrameLandmarks(video_name)
@@ -1084,11 +1084,12 @@ class GetLectureGazeSummary(APIView):
for landmark in retrieved_frame_landmarks:
frame_landmarks.append(landmark['landmark'])
# retrieve the frame groupings
lec_gaze_frame_groupings = LectureGazeFrameGroupings.objects.filter(lecture_gaze_id__lecture_video_id__video_name=video_name)
lec_gaze_frame_groupings_ser = LectureGazeFrameGroupingsSerializer(lec_gaze_frame_groupings, many=True)
lec_gaze_frame_groupings_data = lec_gaze_frame_groupings_ser.data[0]
# take the frame group details out of it
frame_group_details = lec_gaze_frame_groupings_data["frame_group_details"]
@@ -1097,87 +1098,69 @@ class GetLectureGazeSummary(APIView):
frame_group_percentages[group['frame_group']] = group['frame_group_percentages']
class_labels = ['phone_perct', 'listen_perct', 'note_perct']
class_labels = ['upright_perct', 'upleft_perct', 'downright_perct', 'downleft_perct', 'front_perct']
return Response({
"frame_landmarks": frame_landmarks,
"frame_group_percentages": frame_group_percentages,
"activity_labels": class_labels
"gaze_labels": class_labels
})
# else:
#
# # retrieve the previous lecture video frame landmarks details
# last_lec_video_frame_landmarks = LectureVideoFrameLandmarks.objects.order_by(
# 'lecture_video_frame_landmarks_id').last()
# new_lecture_video_frame_landmarks_id = "LVFL00001" if (last_lec_video_frame_landmarks is None) else \
# ig.generate_new_id(last_lec_video_frame_landmarks.lecture_video_frame_landmarks_id)
#
#
# frame_landmarks, frame_group_dict = ve.getFrameLandmarks(video_name)
# frame_group_percentages, activity_labels = ar.activity_frame_groupings(video_name, frame_landmarks, frame_group_dict)
#
#
# # retrieve lecture video details
# lec_video = LectureVideo.objects.filter(video_name=video_name)
# lec_video_ser = LectureVideoSerializer(lec_video, many=True)
# lec_video_id = lec_video_ser.data[0]['id']
#
#
# # save the frame landmarks details into db (temp method)
# db_frame_landmarks = []
#
# for landmark in frame_landmarks:
# landmark_obj = Landmarks()
# landmark_obj.landmark = landmark
#
# db_frame_landmarks.append(landmark_obj)
#
#
# new_lec_video_frame_landmarks = LectureVideoFrameLandmarks()
# new_lec_video_frame_landmarks.lecture_video_frame_landmarks_id = new_lecture_video_frame_landmarks_id
# new_lec_video_frame_landmarks.lecture_video_id_id = lec_video_id
# new_lec_video_frame_landmarks.frame_landmarks = db_frame_landmarks
#
# new_lec_video_frame_landmarks.save()
#
#
#
# # save the frame group details into db (temp method)
#
# last_lec_activity_frame_grouping = LectureActivityFrameGroupings.objects.order_by('lecture_activity_frame_groupings_id').last()
# new_lecture_activity_frame_grouping_id = "LAFG00001" if (last_lec_activity_frame_grouping is None) else \
# ig.generate_new_id(last_lec_activity_frame_grouping.lecture_activity_frame_groupings_id)
#
# # retrieve the lecture activity id
# lec_activity = LectureActivity.objects.filter(lecture_video_id__video_name=video_name)
# lec_activity_ser = LectureActivitySerializer(lec_activity, many=True)
# lec_activity_id = lec_activity_ser.data[0]['id']
#
# # create the frame group details
# frame_group_details = []
#
# for key in frame_group_percentages.keys():
# # create an object of type 'LectureActivityFrameGroupDetails'
# lec_activity_frame_group_details = LectureActivityFrameGroupDetails()
# lec_activity_frame_group_details.frame_group = key
# lec_activity_frame_group_details.frame_group_percentages = frame_group_percentages[key]
#
# frame_group_details.append(lec_activity_frame_group_details)
#
#
# new_lec_activity_frame_groupings = LectureActivityFrameGroupings()
# new_lec_activity_frame_groupings.lecture_activity_frame_groupings_id = new_lecture_activity_frame_grouping_id
# new_lec_activity_frame_groupings.lecture_activity_id_id = lec_activity_id
# new_lec_activity_frame_groupings.frame_group_details = frame_group_details
#
# # save
# new_lec_activity_frame_groupings.save()
#
#
# return Response({
# "frame_landmarks": frame_landmarks,
# "frame_group_percentages": frame_group_percentages,
# "activity_labels": activity_labels
# })
\ No newline at end of file
else:
frame_landmarks = []
# retrieve frame landmarks from db
lec_video_frame_landmarks = LectureVideoFrameLandmarks.objects.filter(
lecture_video_id__video_name=video_name)
lec_video_frame_landmarks_ser = LectureVideoFrameLandmarksSerializer(lec_video_frame_landmarks, many=True)
lec_video_frame_landmarks_data = lec_video_frame_landmarks_ser.data[0]
retrieved_frame_landmarks = lec_video_frame_landmarks_data["frame_landmarks"]
# creating a new list to display in the frontend
for landmark in retrieved_frame_landmarks:
frame_landmarks.append(int(landmark['landmark']))
l, frame_group_dict = ve.getFrameLandmarks(video_name, "Gaze")
print('frame group dict: ', frame_group_dict)
frame_group_percentages, gaze_labels = hge.gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dict)
# save the frame group details into db (temp method)
last_lec_gaze_frame_grouping = LectureGazeFrameGroupings.objects.order_by('lecture_gaze_frame_groupings_id').last()
new_lecture_gaze_frame_grouping_id = "LGFG00001" if (last_lec_gaze_frame_grouping is None) else \
ig.generate_new_id(last_lec_gaze_frame_grouping.lecture_gaze_frame_groupings_id)
# retrieve the lecture gaze id
lec_gaze = LectureGazeEstimation.objects.filter(lecture_video_id__video_name=video_name)
lec_gaze_ser = LectureGazeEstimationSerializer(lec_gaze, many=True)
lec_gaze_id = lec_gaze_ser.data[0]['id']
# create the frame group details
frame_group_details = []
for key in frame_group_percentages.keys():
# create an object of type 'LectureGazeFrameGroupDetails'
lec_gaze_frame_group_details = LectureGazeFrameGroupDetails()
lec_gaze_frame_group_details.frame_group = key
lec_gaze_frame_group_details.frame_group_percentages = frame_group_percentages[key]
frame_group_details.append(lec_gaze_frame_group_details)
new_lec_gaze_frame_groupings = LectureGazeFrameGroupings()
new_lec_gaze_frame_groupings.lecture_gaze_frame_groupings_id = new_lecture_gaze_frame_grouping_id
new_lec_gaze_frame_groupings.lecture_gaze_id_id = lec_gaze_id
new_lec_gaze_frame_groupings.frame_group_details = frame_group_details
# save
new_lec_gaze_frame_groupings.save()
return Response({
"frame_landmarks": frame_landmarks,
"frame_group_percentages": frame_group_percentages,
"gaze_labels": gaze_labels
})
\ No newline at end of file
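For orientation, the else branch above ultimately returns JSON of roughly the following shape. This is a hand-written sketch for illustration only; the frame boundaries and percentage figures are hypothetical and are not produced by this commit.

{
    "frame_landmarks": [75, 150, 225, 300],
    "frame_group_percentages": {
        "0-75":   {"upright_perct": 30.0, "upleft_perct": 7.5, "downright_perct": 5.0,
                   "downleft_perct": 2.5, "front_perct": 55.0},
        "75-150": {"upright_perct": 12.5, "upleft_perct": 10.0, "downright_perct": 7.5,
                   "downleft_perct": 20.0, "front_perct": 50.0}
    },
    "gaze_labels": ["upright_perct", "upleft_perct", "downright_perct",
                    "downleft_perct", "front_perct"]
}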
@@ -618,6 +618,7 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dict):
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\gaze\\{}".format(video_name))
VIDEO_PATH = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
print('video path: ', VIDEO_PATH)
# load the face detection model
face_model = get_face_detector()
@@ -626,9 +627,7 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dict):
cap = cv2.VideoCapture(VIDEO_PATH)
ret, img = cap.read()
size = img.shape
font = cv2.FONT_HERSHEY_SIMPLEX
font = cv2.FONT_HERSHEY_SIMPLEX
# 3D model points.
model_points = np.array([
(0.0, 0.0, 0.0), # Nose tip
@@ -639,8 +638,6 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dict):
(150.0, -150.0, -125.0) # Right mouth corner
])
# define a variable to count the frames
frame_count = 0
# set a threshold angle
# THRESHOLD = 15
@@ -662,8 +659,6 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# initializing the count variables
frame_count = 0
# class labels
class_labels = ['Phone checking', 'Listening', 'Note taking']
# get the frame differences for each frame group
frame_group_diff = {}
@@ -699,7 +694,6 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# find the number of faces
faces = find_faces(img, face_model)
student_count = 0
# iterate through each detected face
for face in faces:
@@ -711,8 +705,6 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dict):
isLookingLeft = False
isLookingFront = False
# deriving the student name to display in the image
student_name = 'student-{}'.format(student_count)
# retrieving the facial landmarks and face bounding box coordinates
marks, facebox = detect_marks(img, landmark_model, face)
@@ -779,8 +771,6 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dict):
elif isLookingFront:
head_front_count += 1
# increment the face count
face_count += 1
# increment the detection count
detection_count += 1
@@ -798,6 +788,8 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dict):
if (frame_count >= i) & (frame_count < next_value):
frame_name = "{}-{}".format(i, next_value)
print('frame group dict: ', frame_group_dict[frame_name])
frame_group_dict[frame_name]['upright_count'] += head_up_right_count
frame_group_dict[frame_name]['upleft_count'] += head_up_left_count
frame_group_dict[frame_name]['downright_count'] += head_down_right_count
@@ -805,35 +797,52 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dict):
frame_group_dict[frame_name]['front_count'] += head_front_count
frame_group_dict[frame_name]['detection_count'] += detection_count
# increment the frame count
frame_count += 1
else:
break
# calculate the percentage values
for key in frame_group_dict.keys():
frame_group_details = frame_group_dict[key]
frame_group_phone_count = frame_group_details['phone_count']
frame_group_listen_count = frame_group_details['listen_count']
frame_group_note_count = frame_group_details['note_count']
group_detection_count = frame_group_details['detection_count']
frame_group_upright_count = frame_group_details['upright_count']
frame_group_upleft_count = frame_group_details['upleft_count']
frame_group_downright_count = frame_group_details['downright_count']
frame_group_downleft_count = frame_group_details['downleft_count']
frame_group_front_count = frame_group_details['front_count']
print('detection count: ', frame_group_details['detection_count'])
group_detection_count = 1 if frame_group_details['detection_count'] == 0 else frame_group_details['detection_count']
frame_group_phone_perct = float(frame_group_phone_count / group_detection_count) * 100
frame_group_listen_perct = float(frame_group_listen_count / group_detection_count) * 100
frame_group_note_perct = float(frame_group_note_count / group_detection_count) * 100
frame_group_upright_perct = float(frame_group_upright_count / group_detection_count) * 100
frame_group_upleft_perct = float(frame_group_upleft_count / group_detection_count) * 100
frame_group_downright_perct = float(frame_group_downright_count / group_detection_count) * 100
frame_group_downleft_perct = float(frame_group_downleft_count / group_detection_count) * 100
frame_group_front_perct = float(frame_group_front_count / group_detection_count) * 100
# assign the values to the same dictionary
frame_group_dict[key]['phone_perct'] = round(frame_group_phone_perct, 1)
frame_group_dict[key]['listen_perct'] = round(frame_group_listen_perct, 1)
frame_group_dict[key]['note_perct'] = round(frame_group_note_perct, 1)
frame_group_dict[key]['upright_perct'] = round(frame_group_upright_perct, 1)
frame_group_dict[key]['upleft_perct'] = round(frame_group_upleft_perct, 1)
frame_group_dict[key]['downright_perct'] = round(frame_group_downright_perct, 1)
frame_group_dict[key]['downleft_perct'] = round(frame_group_downleft_perct, 1)
frame_group_dict[key]['front_perct'] = round(frame_group_front_perct, 1)
# removing irrelevant items from the dictionary
frame_group_dict[key].pop('phone_count')
frame_group_dict[key].pop('listen_count')
frame_group_dict[key].pop('note_count')
frame_group_dict[key].pop('upright_count')
frame_group_dict[key].pop('upleft_count')
frame_group_dict[key].pop('downright_count')
frame_group_dict[key].pop('downleft_count')
frame_group_dict[key].pop('front_count')
frame_group_dict[key].pop('detection_count')
# print('frame group dict: ', frame_group_dict)
# define the labels
labels = ['upright_perct', 'upleft_perct', 'downright_perct', 'downleft_perct', 'front_perct']
# return the dictionary
return frame_group_dict
return frame_group_dict, labels
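As a quick illustration of the percentage step above, here is a minimal stand-alone sketch mirroring the loop in this hunk; the sample counts are made up and not taken from any real lecture video.

# one frame group's raw counters, as accumulated in frame_group_dict (sample values)
group = {'upright_count': 12, 'upleft_count': 3, 'downright_count': 2,
         'downleft_count': 1, 'front_count': 22, 'detection_count': 40}

# guard against division by zero when no detections landed in this group
detections = 1 if group['detection_count'] == 0 else group['detection_count']

# convert each counter into a percentage of the detections, rounded to one decimal
percentages = {
    perct: round(float(group[count] / detections) * 100, 1)
    for count, perct in [('upright_count', 'upright_perct'),
                         ('upleft_count', 'upleft_perct'),
                         ('downright_count', 'downright_perct'),
                         ('downleft_count', 'downleft_perct'),
                         ('front_count', 'front_perct')]
}

# percentages == {'upright_perct': 30.0, 'upleft_perct': 7.5, 'downright_perct': 5.0,
#                 'downleft_perct': 2.5, 'front_perct': 55.0}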
@@ -356,11 +356,11 @@ class LectureGazeFrameGroupingsSerializer(serializers.ModelSerializer):
group_details["frame_group_percentages"] = {}
group_details["frame_group"] = frame_group.frame_group
group_details["frame_group_percentages"]["looking_up_and_right_perct"] = frame_group.frame_group_percentages.looking_up_and_right_perct
group_details["frame_group_percentages"]["looking_up_and_left_perct"] = frame_group.frame_group_percentages.looking_up_and_left_perct
group_details["frame_group_percentages"]["looking_down_and_right_perct"] = frame_group.frame_group_percentages.looking_down_and_right_perct
group_details["frame_group_percentages"]["looking_down_and_left_perct"] = frame_group.frame_group_percentages.looking_down_and_left_perct
group_details["frame_group_percentages"]["looking_front_perct"] = frame_group.frame_group_percentages.looking_front_perct
group_details["frame_group_percentages"]["upright_perct"] = frame_group.frame_group_percentages.upright_perct
group_details["frame_group_percentages"]["upleft_perct"] = frame_group.frame_group_percentages.upleft_perct
group_details["frame_group_percentages"]["downright_perct"] = frame_group.frame_group_percentages.downright_perct
group_details["frame_group_percentages"]["downleft_perct"] = frame_group.frame_group_percentages.downleft_perct
group_details["frame_group_percentages"]["front_perct"] = frame_group.frame_group_percentages.front_perct
return_data.append(group_details)
@@ -282,7 +282,7 @@
});
//this function will handle the gaze 'summary' button
$('#gaze_summary_btn').click(function () {
$('#gaze_summary_btn').click(function (e) {
//change the innerHTML of the clicked button
e.target.innerHTML = "<span class='font-italic'>Processing</span>";