Commit 0f479a6f authored by I.K Seneviratne

Committing the modification that saves the processed (annotated) video files in the activity, emotion and gaze estimation components.
parent 9fa0f329
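The diff below applies the same pattern in all three components: open the source video, draw the recognition output on each frame, write the frames to an annotated copy under the static directory, and then re-run collectstatic so the new file is served. A minimal sketch of that pattern follows (not the project's code; the function name and paths are illustrative):

import os
import cv2

def process_and_save(video_path, annotated_path):
    # sketch of the save-annotated-video pattern used in the activity,
    # emotion and gaze estimation hunks below
    cap = cv2.VideoCapture(video_path)
    size = (int(cap.get(3)), int(cap.get(4)))  # frame width, height
    vid_cod = cv2.VideoWriter_fourcc(*'XVID')
    output = cv2.VideoWriter(annotated_path, vid_cod, 30.0, size)

    while True:
        ret, frame = cap.read()
        if not ret:
            break
        # ... run the classifier and draw labels/rectangles on `frame` ...
        output.write(frame)

    cap.release()
    output.release()

    # refresh static content so the annotated video is served (mirrors the commit);
    # running "python manage.py collectstatic --noinput" would avoid piping "yes" by hand
    p = os.popen("python manage.py collectstatic", "w")
    p.write("yes")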
@@ -14,4 +14,10 @@ admin.site.register(LectureVideo)
admin.site.register(LectureActivity)
admin.site.register(LectureGazeEstimation)
admin.site.register(Admin)
admin.site.register(AdminCredentialDetails)
\ No newline at end of file
admin.site.register(AdminCredentialDetails)
admin.site.register(LectureActivityFrameRecognitions)
admin.site.register(LectureActivityFrameGroupings)
admin.site.register(LectureEmotionFrameRecognitions)
admin.site.register(LectureEmotionFrameGroupings)
admin.site.register(LectureGazeFrameRecognitions)
admin.site.register(LectureGazeFrameGroupings)
\ No newline at end of file
@@ -375,17 +375,19 @@ class LectureEmotionProcess(APIView):
def get(self, request):
video_name = request.query_params.get('lecture_video_name')
video_id = request.query_params.get('lecture_video_id')
# video_id = request.query_params.get('lecture_video_id')
int_video_id = int(request.query_params.get('lecture_video_id'))
percentages = ed.detect_emotion(video_name)
percentages.calcPercentages()
self.save_emotion_report(video_id, percentages)
self.save_emotion_report(int_video_id, percentages)
return Response({"response": True})
def post(self, request):
pass
def save_emotion_report(self, lec_video_id, percentages):
lec_video = LectureVideo.objects.filter(lecture_video_id=lec_video_id)
# lec_video = LectureVideo.objects.filter(lecture_video_id=lec_video_id)
lec_video = LectureVideo.objects.filter(id=lec_video_id)
lec_video_serializer = LectureVideoSerializer(lec_video, many=True)
lec_video_data = lec_video_serializer.data[0]
last_lec_emotion = LectureEmotionReport.objects.order_by('lecture_emotion_id').last()
@@ -500,7 +502,8 @@ class ProcessLectureGazeEstimation(APIView):
pass
def estimate_gaze(self, lec_video_id, percentages):
lec_video = LectureVideo.objects.filter(lecture_video_id=lec_video_id)
# lec_video = LectureVideo.objects.filter(lecture_video_id=lec_video_id)
lec_video = LectureVideo.objects.filter(id=lec_video_id)
last_lec_gaze = LectureGazeEstimation.objects.order_by('lecture_gaze_id').last()
lec_video_serializer = LectureVideoSerializer(lec_video, many=True)
lec_video_data = lec_video_serializer.data[0]
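The two hunks above change how the lecture video record is looked up: the query now filters on the primary key (id) instead of the business key (lecture_video_id), because the templates further down now send video.id as the identifier. A minimal sketch of the difference, assuming the LectureVideo model has both fields as shown in the diff:

# old: the frontend sent the business key stored in `lecture_video_id`
lec_video = LectureVideo.objects.filter(lecture_video_id=lec_video_id)

# new: the frontend sends the integer primary key (video.id in the templates),
# so the value is cast with int() and the query filters on the auto `id` field
int_video_id = int(request.query_params.get('lecture_video_id'))
lec_video = LectureVideo.objects.filter(id=int_video_id)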
@@ -921,65 +924,65 @@ class GetLectureEmotionSummary(APIView):
"emotion_labels": class_labels
})
# else:
#
# frame_landmarks = []
#
# # retrieve frame landmarks from db
# lec_video_frame_landmarks = LectureVideoFrameLandmarks.objects.filter(
# lecture_video_id__video_name=video_name)
# lec_video_frame_landmarks_ser = LectureVideoFrameLandmarksSerializer(lec_video_frame_landmarks, many=True)
# lec_video_frame_landmarks_data = lec_video_frame_landmarks_ser.data[0]
#
# retrieved_frame_landmarks = lec_video_frame_landmarks_data["frame_landmarks"]
#
# # creating a new list to display in the frontend
# for landmark in retrieved_frame_landmarks:
# frame_landmarks.append(int(landmark['landmark']))
#
#
# l, frame_group_dict = ve.getFrameLandmarks(video_name, "Emotion")
# frame_group_percentages, emotion_labels = ed.emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict)
#
#
#
# # save the frame group details into db (temp method)
#
# last_lec_emotion_frame_grouping = LectureEmotionFrameGroupings.objects.order_by('lecture_emotion_frame_groupings_id').last()
# new_lecture_emotion_frame_grouping_id = "LEFG00001" if (last_lec_emotion_frame_grouping is None) else \
# ig.generate_new_id(last_lec_emotion_frame_grouping.lecture_emotion_frame_groupings_id)
#
# # retrieve the lecture emotion id
# lec_emotion = LectureEmotionReport.objects.filter(lecture_video_id__video_name=video_name)
# lec_emotion_ser = LectureEmotionSerializer(lec_emotion, many=True)
# lec_emotion_id = lec_emotion_ser.data[0]['id']
#
# # create the frame group details
# frame_group_details = []
#
# for key in frame_group_percentages.keys():
# # create an object of type 'LectureActivityFrameGroupDetails'
# lec_emotion_frame_group_details = LectureEmotionFrameGroupDetails()
# lec_emotion_frame_group_details.frame_group = key
# lec_emotion_frame_group_details.frame_group_percentages = frame_group_percentages[key]
#
# frame_group_details.append(lec_emotion_frame_group_details)
#
#
# new_lec_emotion_frame_groupings = LectureEmotionFrameGroupings()
# new_lec_emotion_frame_groupings.lecture_emotion_frame_groupings_id = new_lecture_emotion_frame_grouping_id
# new_lec_emotion_frame_groupings.lecture_emotion_id_id = lec_emotion_id
# new_lec_emotion_frame_groupings.frame_group_details = frame_group_details
#
# # save
# new_lec_emotion_frame_groupings.save()
#
#
# return Response({
# "frame_landmarks": frame_landmarks,
# "frame_group_percentages": frame_group_percentages,
# "emotion_labels": emotion_labels
# })
else:
frame_landmarks = []
# retrieve frame landmarks from db
lec_video_frame_landmarks = LectureVideoFrameLandmarks.objects.filter(
lecture_video_id__video_name=video_name)
lec_video_frame_landmarks_ser = LectureVideoFrameLandmarksSerializer(lec_video_frame_landmarks, many=True)
lec_video_frame_landmarks_data = lec_video_frame_landmarks_ser.data[0]
retrieved_frame_landmarks = lec_video_frame_landmarks_data["frame_landmarks"]
# creating a new list to display in the frontend
for landmark in retrieved_frame_landmarks:
frame_landmarks.append(int(landmark['landmark']))
l, frame_group_dict = ve.getFrameLandmarks(video_name, "Emotion")
frame_group_percentages, emotion_labels = ed.emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict)
# save the frame group details into db (temp method)
last_lec_emotion_frame_grouping = LectureEmotionFrameGroupings.objects.order_by('lecture_emotion_frame_groupings_id').last()
new_lecture_emotion_frame_grouping_id = "LEFG00001" if (last_lec_emotion_frame_grouping is None) else \
ig.generate_new_id(last_lec_emotion_frame_grouping.lecture_emotion_frame_groupings_id)
# retrieve the lecture emotion id
lec_emotion = LectureEmotionReport.objects.filter(lecture_video_id__video_name=video_name)
lec_emotion_ser = LectureEmotionSerializer(lec_emotion, many=True)
lec_emotion_id = lec_emotion_ser.data[0]['id']
# create the frame group details
frame_group_details = []
for key in frame_group_percentages.keys():
# create an object of type 'LectureActivityFrameGroupDetails'
lec_emotion_frame_group_details = LectureEmotionFrameGroupDetails()
lec_emotion_frame_group_details.frame_group = key
lec_emotion_frame_group_details.frame_group_percentages = frame_group_percentages[key]
frame_group_details.append(lec_emotion_frame_group_details)
new_lec_emotion_frame_groupings = LectureEmotionFrameGroupings()
new_lec_emotion_frame_groupings.lecture_emotion_frame_groupings_id = new_lecture_emotion_frame_grouping_id
new_lec_emotion_frame_groupings.lecture_emotion_id_id = lec_emotion_id
new_lec_emotion_frame_groupings.frame_group_details = frame_group_details
# save
new_lec_emotion_frame_groupings.save()
return Response({
"frame_landmarks": frame_landmarks,
"frame_group_percentages": frame_group_percentages,
"emotion_labels": emotion_labels
})
# this API will retrieve lecture gaze summary
@@ -52,6 +52,8 @@ def emotion_recognition(classifier, face_classifier, image):
roi_gray = gray[y:y + h, x:x + w]
roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
# rect,face,image = face_detector(frame)
# draw a rectangle
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
if np.sum([roi_gray]) != 0:
roi = roi_gray.astype('float') / 255.0
@@ -63,6 +65,9 @@ def emotion_recognition(classifier, face_classifier, image):
preds = classifier.predict(roi)[0]
label = class_labels[preds.argmax()]
# put the emotion label
cv2.putText(image, label, (x, y), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 255, 0), 3)
return label
@@ -79,6 +84,7 @@ def detect_emotion(video):
face_classifier = cv2.CascadeClassifier(os.path.join(BASE_DIR, 'FirstApp\classifiers\haarcascade_frontalface_default.xml'))
classifier_path = os.path.join(BASE_DIR, 'FirstApp\classifiers\Emotion_little_vgg.h5')
classifier = load_model(classifier_path)
EMOTION_DIR = os.path.join(BASE_DIR, "static\\FirstApp\\emotion")
meta_data = VideoMeta()
class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
@@ -99,6 +105,20 @@ def detect_emotion(video):
# for testing purposes
print('starting the emotion recognition process')
# get width and height of the video frames
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
# get the video frame size
size = (frame_width, frame_height)
# this is the annotated video path
ANNOTATED_VIDEO_PATH = os.path.join(EMOTION_DIR, video)
# initializing the video writer
vid_cod = cv2.VideoWriter_fourcc(*'XVID')
output = cv2.VideoWriter(ANNOTATED_VIDEO_PATH, vid_cod, 30.0, size)
while (count_frames < frame_count):
# Grab a single frame of video
ret, frame = cap.read()
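An aside on the writer setup added in the hunk above (and repeated in the activity and gaze hunks below): cap.get(3) and cap.get(4) are the frame width and height properties, and the output rate is hard-coded to 30.0 fps. A sketch using the named OpenCV constants, with the frame rate read from the source where available (the helper name and fallback are illustrative, not part of the commit):

import cv2

def make_annotated_writer(cap, annotated_path):
    # named equivalents of the magic indices 3 and 4 used in the diff
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # read the real frame rate instead of assuming 30.0; fall back when the
    # container does not report one
    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
    vid_cod = cv2.VideoWriter_fourcc(*'XVID')
    return cv2.VideoWriter(annotated_path, vid_cod, fps, (width, height))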
@@ -135,6 +155,9 @@ def detect_emotion(video):
# for testing purposes
print('emotion frame count: ', count_frames)
# write the video frame to the video writer
output.write(frame)
count_frames += 1
# setting up the counted values
@@ -146,8 +169,13 @@ def detect_emotion(video):
meta_data.surprise_count = count_surprise
cap.release()
output.release()
cv2.destroyAllWindows()
# after saving the video, save the changes to static content
p = os.popen("python manage.py collectstatic", "w")
p.write("yes")
# for testing purposes
print('ending the emotion recognition process')
@@ -198,6 +226,8 @@ def get_frame_emotion_recognition(video_name):
# for testing purposes
print('starting the emotion frame recognition process')
# looping through the frames
while (frame_count < no_of_frames):
@@ -40,7 +40,7 @@ def activity_recognition(video_path):
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_03.h5")
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_04.h5")
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_06.h5")
# ACTIVITY_DIR = os.path.join(BASE_DIR, "static\\FirstApp\\activity")
ACTIVITY_DIR = os.path.join(BASE_DIR, "static\\FirstApp\\activity")
# files required for person detection
config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
@@ -83,10 +83,13 @@ def activity_recognition(video_path):
# for testing purposes
print('starting the activity recognition process')
# this is the annotated video path
ANNOTATED_VIDEO_PATH = os.path.join(ACTIVITY_DIR, video_path)
# initializing the video writer
vid_cod = cv2.VideoWriter_fourcc(*'XVID')
output = cv2.VideoWriter("videos/cam_video.mp4", vid_cod, 30.0, size)
output = cv2.VideoWriter(ANNOTATED_VIDEO_PATH, vid_cod, 30.0, size)
# looping through the frames
while (frame_count < no_of_frames):
@@ -140,12 +143,24 @@ def activity_recognition(video_path):
# counting the detections under each label
if (label == class_labels[0]):
label = "Phone checking"
phone_checking_count += 1
elif (label == class_labels[1]):
listening_count += 1
elif (label == class_labels[2]):
label = "Writing"
note_taking_count += 1
# vertical_pos = startY + int(endY / 2)
vertical_pos = int(endY / 2)
# put the identified label above the detected person
# cv2.putText(image, label, (startX, vertical_pos), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 0), 2)
cv2.putText(image, label, (startX, vertical_pos), cv2.FONT_HERSHEY_COMPLEX, 4, (0, 255, 0), 10)
# increment the no.of persons
no_of_persons += 1
# increment the detection count
detection_count += 1
@@ -166,6 +181,10 @@ def activity_recognition(video_path):
percentages["writing_perct"] = note_perct
percentages["listening_perct"] = listening_perct
# after saving the video, save the changes to static content
p = os.popen("python manage.py collectstatic", "w")
p.write("yes")
# for testing purposes
print('activity recognition process is over')
@@ -171,6 +171,7 @@ def process_gaze_estimation(video_path):
ret, img = cap.read()
size = img.shape
font = cv2.FONT_HERSHEY_SIMPLEX
# 3D model points.
model_points = np.array([
(0.0, 0.0, 0.0), # Nose tip
@@ -211,6 +212,18 @@ def process_gaze_estimation(video_path):
# for testing purposes
print('starting the gaze estimation process')
# get the frame sizes
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
frame_size = (frame_width, frame_height)
# this is the annotated video path
ANNOTATED_VIDEO_PATH = os.path.join(GAZE_DIR, video_path)
# initializing the video writer
vid_cod = cv2.VideoWriter_fourcc(*'XVID')
output = cv2.VideoWriter(ANNOTATED_VIDEO_PATH, vid_cod, 30.0, frame_size)
# iterate the video frames
while True:
ret, img = cap.read()
@@ -285,14 +298,19 @@ def process_gaze_estimation(video_path):
# checking for vertical and horizontal directions
if isLookingDown & isLookingRight:
cv2.putText(img, 'looking down and right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_down_right_count += 1
elif isLookingDown & isLookingLeft:
cv2.putText(img, 'looking down and left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_down_left_count += 1
elif isLookingUp & isLookingRight:
cv2.putText(img, 'looking up and right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_up_right_count += 1
elif isLookingUp & isLookingLeft:
cv2.putText(img, 'looking up and left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_up_left_count += 1
elif isLookingFront:
cv2.putText(img, 'looking front', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_front_count += 1
@@ -304,6 +322,9 @@ def process_gaze_estimation(video_path):
# for testing purposes
print('gaze estimation count: ', frame_count)
# write to the video writer
output.write(img)
# increment the frame count
frame_count += 1
@@ -330,6 +351,12 @@ def process_gaze_estimation(video_path):
cv2.destroyAllWindows()
cap.release()
output.release()
# after saving the video, save the changes to static content
p = os.popen("python manage.py collectstatic", "w")
p.write("yes")
# for testing purposes
print('ending the gaze estimation process')
@@ -538,6 +565,7 @@ def get_lecture_gaze_estimation_for_frames(video_name):
# for testing purposes
print('ending the gaze estimation for frames process')
@@ -143,7 +143,8 @@
$('#video_name').text(video.video_name);
$('#video_duration').text(video.video_length);
$('#video_date').text(video.date);
global_lecture_video_id = video.lecture_video_id;
{#global_lecture_video_id = video.lecture_video_id;#}
global_lecture_video_id = video.id;
global_video_name = video.video_name;
@@ -142,7 +142,8 @@
$('#video_name').text(video.video_name);
$('#video_duration').text(video.video_length);
$('#video_date').text(video.date);
global_lecture_video_id = video.lecture_video_id;
{#global_lecture_video_id = video.lecture_video_id;#}
global_lecture_video_id = video.id;
global_video_name = video.video_name;
@@ -189,6 +189,8 @@ def video_result(request):
data = serializer.data
print('data length: ', len(data))
# iterate through the existing lecture videos for the lecturer
for video in data:
video_id = video['id']
@@ -197,6 +199,8 @@ def video_result(request):
# check whether the video id exist in the Activity Recognition table
lec_activity = LectureActivity.objects.filter(lecture_video_id_id=video_id).exists()
print('lecture activity existence: ', lec_activity)
if lec_activity == False:
to_do_lecture_list.append({
"lecturer": lecturer,
@@ -227,11 +231,15 @@ def video_result(request):
# loop through the to-do lecture list
for item in to_do_lecture_list:
isDate = item['date'] == str(day_timetable['date'])
print('item date: ', item['date'])
print('timetable date: ', str(day_timetable['date']))
# isLecturer = item['lecturer'] ==
# check for the particular lecture on the day
if isDate:
slots = day_timetable['time_slots']
# loop through the slots
for slot in slots:
# check for the lecturer and subject
@@ -260,6 +268,8 @@ def video_result(request):
print('what is wrong?: ', exc)
return redirect('/500')
print('due lectures: ', due_lecture_list)
return render(request, "FirstApp/video_results.html",
{"lecturer": lecturer, "due_lectures": due_lecture_list})