Commit 765ece4e authored by I.K Seneviratne

Committing the partial implementation of the workflow for lecture student behavior.

parent 033e0193
@@ -279,7 +279,7 @@ class LectureActivityProcess(APIView):
def get(self, request):
video_name = request.query_params.get('lecture_video_name')
video_id = request.query_params.get('lecture_video_id')
video_id = int(request.query_params.get('lecture_video_id'))
percentages = ar.activity_recognition(video_name)
self.activity(video_id, percentages)
return Response({"response": True})
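The GET handler above drives the whole activity workflow from two query parameters. A minimal client sketch of how it might be invoked follows; the route path and parameter values are assumptions, since only the parameter names appear in the view code:

import requests

# Hypothetical call to LectureActivityProcess; the URL path is a placeholder,
# as the route definition is not part of this diff.
response = requests.get(
    "http://localhost:8000/api/process-lecture-activity/",  # assumed route
    params={
        "lecture_video_name": "Lecture01.mp4",  # sample video name
        "lecture_video_id": 1,                  # the view casts this to int
    },
)
print(response.json())  # {"response": True} once processing finishes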
@@ -288,9 +288,10 @@ class LectureActivityProcess(APIView):
pass
def activity(self, lec_video_id, percentages):
lec_video = LectureVideo.objects.get(lecture_video_id=lec_video_id)
lec_video = LectureVideo.objects.filter(lecture_video_id=lec_video_id)
last_lec_activity = LectureActivity.objects.order_by('lecture_activity_id').last()
lec_video_serializer = LectureVideoSerializer(lec_video, many=True)
lec_video_data = lec_video_serializer.data[0]
new_lecture_activity_id = ig.generate_new_id(last_lec_activity.lecture_activity_id)
# creating a new lecture activity
@@ -303,6 +304,21 @@ class LectureActivityProcess(APIView):
writing_perct=percentages['writing_perct']
).save()
# get the video name
video_name = lec_video_data['video_name']
# then save the frame recognitions to the database
_ = ar.save_frame_recognition(video_name)
# save the time landmarks and frame landmarks
ve.save_time_landmarks(video_name)
frame_landmarks, frame_group_dict = ve.save_frame_landmarks(video_name)
# then save the activity frame groupings
ar.save_frame_groupings(video_name, frame_landmarks, frame_group_dict)
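Every save path in this commit mints its record id with ig.generate_new_id, whose implementation is not part of this diff. Judging from ids such as "LAFR00001" and "LG000001", it presumably increments a zero-padded counter behind an alphabetic prefix; a minimal sketch under that assumption:

import re

def generate_new_id(last_id):
    # Assumed behaviour: split an id like "LAFR00001" into its alphabetic
    # prefix and zero-padded counter, then increment the counter while
    # preserving the padding width ("LAFR00001" -> "LAFR00002").
    match = re.match(r"([A-Za-z]+)(\d+)$", last_id)
    prefix, counter = match.group(1), match.group(2)
    return "{}{:0{width}d}".format(prefix, int(counter) + 1, width=len(counter))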
class GetLectureActivityDetections(APIView):
@@ -378,41 +394,8 @@ class GetLectureActivityRecognitionsForFrames(APIView):
else:
# retrieve the lecture activity id
lec_activity = LectureActivity.objects.filter(lecture_video_id__video_name=video_name)
lec_activity_ser = LectureActivitySerializer(lec_activity, many=True)
lec_activity_data = lec_activity_ser.data[0]
lec_activity_id = lec_activity_data['id']
# create a new lecture activity frame detections id
last_lec_activity_frame_recognitions = LectureActivityFrameRecognitions.objects.order_by('lecture_activity_frame_recognition_id').last()
new_lecture_activity_frame_recognitions_id = "LAFR00001" if (last_lec_activity_frame_recognitions is None) else \
ig.generate_new_id(last_lec_activity_frame_recognitions.lecture_activity_frame_recognition_id)
# calculate the frame detections
frame_detections = ar.get_frame_activity_recognition(video_name)
frame_recognition_details = []
# save the new lecture activity frame recognitions
for detection in frame_detections:
lec_activity_frame_recognition_details = LectureActivityFrameRecognitionDetails()
lec_activity_frame_recognition_details.frame_name = detection['frame_name']
lec_activity_frame_recognition_details.phone_perct = detection['phone_perct']
lec_activity_frame_recognition_details.listen_perct = detection['listening_perct']
lec_activity_frame_recognition_details.note_perct = detection['note_perct']
frame_recognition_details.append(lec_activity_frame_recognition_details)
lec_activity_frame_recognitions = LectureActivityFrameRecognitions()
lec_activity_frame_recognitions.lecture_activity_frame_recognition_id = new_lecture_activity_frame_recognitions_id
lec_activity_frame_recognitions.lecture_activity_id_id = lec_activity_id
lec_activity_frame_recognitions.frame_recognition_details = frame_recognition_details
lec_activity_frame_recognitions.save()
# perform the action of saving frame recognitions to database
frame_detections = ar.save_frame_recognition(video_name)
return Response({
"response": frame_detections
@@ -490,6 +473,7 @@ class LectureEmotionProcess(APIView):
def save_emotion_report(self, lec_video_id, percentages):
lec_video = LectureVideo.objects.filter(lecture_video_id=lec_video_id)
lec_video_serializer = LectureVideoSerializer(lec_video, many=True)
lec_video_data = lec_video_serializer.data[0]
last_lec_emotion = LectureEmotionReport.objects.order_by('lecture_emotion_id').last()
new_lecture_emotion_id = ig.generate_new_id(last_lec_emotion.lecture_emotion_id)
@@ -504,6 +488,20 @@ class LectureEmotionProcess(APIView):
surprise_perct=percentages.surprise_perct
).save()
# get the video name
video_name = lec_video_data['video_name']
# then save the frame recognition details to the database
_ = ed.save_frame_recognitions(video_name)
# retrieve the frame landmarks and frame group dictionary
frame_landmarks, frame_group_dict = ve.getFrameLandmarks(video_name, "Emotion")
# then save emotion frame groupings
ed.save_frame_groupings(video_name, frame_landmarks, frame_group_dict)
# to get a lecture emotion report
class GetLectureEmotionReportViewSet(APIView):
@@ -576,45 +574,8 @@ class GetLectureEmotionRecognitionsForFrames(APIView):
})
else:
# retrieve the lecture emotion id
lec_emotion = LectureEmotionReport.objects.filter(lecture_video_id__video_name=video_name)
lec_emotion_ser = LectureEmotionSerializer(lec_emotion, many=True)
lec_emotion_data = lec_emotion_ser.data[0]
lec_emotion_id = lec_emotion_data['id']
# create a new lecture activity frame detections id
last_lec_emotion_frame_recognitions = LectureEmotionFrameRecognitions.objects.order_by(
'lecture_emotion_frame_recognition_id').last()
new_lecture_emotion_frame_recognitions_id = "LEFR00001" if (
last_lec_emotion_frame_recognitions is None) else \
ig.generate_new_id(last_lec_emotion_frame_recognitions.lecture_emotion_frame_recognition_id)
# calculate the frame detections
frame_detections = ed.get_frame_emotion_recognition(video_name)
frame_recognition_details = []
# save the new lecture activity frame recognitions
for detection in frame_detections:
lec_emotion_frame_recognition_details = LectureEmotionFrameRecognitionDetails()
lec_emotion_frame_recognition_details.frame_name = detection['frame_name']
lec_emotion_frame_recognition_details.happy_perct = detection['happy_perct']
lec_emotion_frame_recognition_details.sad_perct = detection['sad_perct']
lec_emotion_frame_recognition_details.angry_perct = detection['angry_perct']
lec_emotion_frame_recognition_details.surprise_perct = detection['surprise_perct']
lec_emotion_frame_recognition_details.neutral_perct = detection['neutral_perct']
frame_recognition_details.append(lec_emotion_frame_recognition_details)
lec_emotion_frame_recognitions = LectureEmotionFrameRecognitions()
lec_emotion_frame_recognitions.lecture_emotion_frame_recognition_id = new_lecture_emotion_frame_recognitions_id
lec_emotion_frame_recognitions.lecture_emotion_id_id = lec_emotion_id
lec_emotion_frame_recognitions.frame_recognition_details = frame_recognition_details
lec_emotion_frame_recognitions.save()
# save the frame recognitions into the database
frame_detections = ed.save_frame_recognitions(video_name)
return Response({
"response": frame_detections
@@ -725,6 +686,7 @@ class ProcessLectureGazeEstimation(APIView):
lec_video = LectureVideo.objects.filter(lecture_video_id=lec_video_id)
last_lec_gaze = LectureGazeEstimation.objects.order_by('lecture_gaze_id').last()
lec_video_serializer = LectureVideoSerializer(lec_video, many=True)
lec_video_data = lec_video_serializer.data[0]
new_lecture_gaze_id = "LG000001" if (last_lec_gaze is None) else ig.generate_new_id(
last_lec_gaze.lecture_gaze_id)
@@ -739,6 +701,18 @@ class ProcessLectureGazeEstimation(APIView):
looking_front_perct=percentages['head_front_perct']
).save()
# get the video name
video_name = lec_video_data['video_name']
# then save the frame recognitions to the database
_ = hge.save_frame_detections(video_name)
# get the frame landmarks and frame group dictionary
frame_landmarks, frame_group_dict = ve.getFrameLandmarks(video_name, "Gaze")
# then save the gaze frame groupings to the database
hge.save_frame_groupings(video_name, frame_landmarks, frame_group_dict)
# the API to retrieve lecture gaze estimation
class GetLectureGazeEstimationViewSet(APIView):
@@ -765,7 +739,7 @@ class GetLectureGazeEstimationForFrames(APIView):
def get(self, request):
video_name = request.query_params.get('video_name')
# finding the existence of Lecture activity frame recognition record
# finding the existence of Lecture gaze frame recognition record
isExist = LectureGazeFrameRecognitions.objects.filter(
lecture_gaze_id__lecture_video_id__video_name=video_name).exists()
@@ -784,42 +758,8 @@ class GetLectureGazeEstimationForFrames(APIView):
else:
# retrieve the lecture emotion id
lec_gaze = LectureGazeEstimation.objects.filter(lecture_video_id__video_name=video_name)
lec_gaze_ser = LectureGazeEstimationSerializer(lec_gaze, many=True)
lec_gaze_data = lec_gaze_ser.data[0]
lec_gaze_id = lec_gaze_data['id']
# create a new lecture activity frame detections id
last_lec_gaze_frame_recognitions = LectureGazeFrameRecognitions.objects.order_by(
'lecture_gaze_frame_recognition_id').last()
new_lecture_gaze_frame_recognitions_id = "LGFR00001" if (
last_lec_gaze_frame_recognitions is None) else \
ig.generate_new_id(last_lec_gaze_frame_recognitions.lecture_gaze_frame_recognition_id)
# calculate the frame detections
frame_detections, frame_rate = hge.get_lecture_gaze_esrimation_for_frames(video_name)
frame_recognition_details = []
# save the new lecture activity frame recognitions
for detection in frame_detections:
lec_gaze_frame_recognition_details = LectureGazeFrameRecognitionDetails()
lec_gaze_frame_recognition_details.frame_name = detection['frame_name']
lec_gaze_frame_recognition_details.upright_perct = detection['upright_perct']
lec_gaze_frame_recognition_details.upleft_perct = detection['upleft_perct']
lec_gaze_frame_recognition_details.downright_perct = detection['downright_perct']
lec_gaze_frame_recognition_details.downleft_perct = detection['downleft_perct']
lec_gaze_frame_recognition_details.front_perct = detection['front_perct']
frame_recognition_details.append(lec_gaze_frame_recognition_details)
lec_gaze_frame_recognitions = LectureGazeFrameRecognitions()
lec_gaze_frame_recognitions.lecture_gaze_frame_recognition_id = new_lecture_gaze_frame_recognitions_id
lec_gaze_frame_recognitions.lecture_gaze_id_id = lec_gaze_id
lec_gaze_frame_recognitions.frame_recognition_details = frame_recognition_details
lec_gaze_frame_recognitions.save()
# save recognition details into the database
frame_detections = hge.save_frame_detections(video_name)
return Response({
"response": frame_detections
@@ -5,11 +5,17 @@ from keras.preprocessing import image
import cv2
import os
import numpy as np
from .MongoModels import *
from . models import VideoMeta
from . logic import custom_sorter as cs
from .logic import id_generator as ig
# emotion recognition method
from .serializers import LectureEmotionSerializer
def emotion_recognition(classifier, face_classifier, image):
label = ""
class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
@@ -548,3 +554,82 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# return the dictionary
return frame_group_dict, emotion_labels
# this section will handle some database operations
def save_frame_recognitions(video_name):
# retrieve the lecture emotion id
lec_emotion = LectureEmotionReport.objects.filter(lecture_video_id__video_name=video_name)
lec_emotion_ser = LectureEmotionSerializer(lec_emotion, many=True)
lec_emotion_data = lec_emotion_ser.data[0]
lec_emotion_id = lec_emotion_data['id']
# create a new lecture emotion frame recognitions id
last_lec_emotion_frame_recognitions = LectureEmotionFrameRecognitions.objects.order_by(
'lecture_emotion_frame_recognition_id').last()
new_lecture_emotion_frame_recognitions_id = "LEFR00001" if (
last_lec_emotion_frame_recognitions is None) else \
ig.generate_new_id(last_lec_emotion_frame_recognitions.lecture_emotion_frame_recognition_id)
# calculate the frame detections
frame_detections = get_frame_emotion_recognition(video_name)
frame_recognition_details = []
# save the new lecture emotion frame recognitions
for detection in frame_detections:
lec_emotion_frame_recognition_details = LectureEmotionFrameRecognitionDetails()
lec_emotion_frame_recognition_details.frame_name = detection['frame_name']
lec_emotion_frame_recognition_details.happy_perct = detection['happy_perct']
lec_emotion_frame_recognition_details.sad_perct = detection['sad_perct']
lec_emotion_frame_recognition_details.angry_perct = detection['angry_perct']
lec_emotion_frame_recognition_details.surprise_perct = detection['surprise_perct']
lec_emotion_frame_recognition_details.neutral_perct = detection['neutral_perct']
frame_recognition_details.append(lec_emotion_frame_recognition_details)
lec_emotion_frame_recognitions = LectureEmotionFrameRecognitions()
lec_emotion_frame_recognitions.lecture_emotion_frame_recognition_id = new_lecture_emotion_frame_recognitions_id
lec_emotion_frame_recognitions.lecture_emotion_id_id = lec_emotion_id
lec_emotion_frame_recognitions.frame_recognition_details = frame_recognition_details
lec_emotion_frame_recognitions.save()
# now return the frame recognitions
return frame_detections
# this method will save the emotion frame groupings to the database
def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
frame_group_percentages, emotion_labels = emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict)
# save the frame group details into db
last_lec_emotion_frame_grouping = LectureEmotionFrameGroupings.objects.order_by('lecture_emotion_frame_groupings_id').last()
new_lecture_emotion_frame_grouping_id = "LEFG00001" if (last_lec_emotion_frame_grouping is None) else \
ig.generate_new_id(last_lec_emotion_frame_grouping.lecture_emotion_frame_groupings_id)
# retrieve the lecture emotion id
lec_emotion = LectureEmotionReport.objects.filter(lecture_video_id__video_name=video_name)
lec_emotion_ser = LectureEmotionSerializer(lec_emotion, many=True)
lec_emotion_id = lec_emotion_ser.data[0]['id']
# create the frame group details
frame_group_details = []
for key in frame_group_percentages.keys():
# create an object of type 'LectureEmotionFrameGroupDetails'
lec_emotion_frame_group_details = LectureEmotionFrameGroupDetails()
lec_emotion_frame_group_details.frame_group = key
lec_emotion_frame_group_details.frame_group_percentages = frame_group_percentages[key]
frame_group_details.append(lec_emotion_frame_group_details)
new_lec_emotion_frame_groupings = LectureEmotionFrameGroupings()
new_lec_emotion_frame_groupings.lecture_emotion_frame_groupings_id = new_lecture_emotion_frame_grouping_id
new_lec_emotion_frame_groupings.lecture_emotion_id_id = lec_emotion_id
new_lec_emotion_frame_groupings.frame_group_details = frame_group_details
# save
new_lec_emotion_frame_groupings.save()
@@ -5,7 +5,10 @@ import numpy as np
import cv2
import os
import shutil
from . custom_sorter import *
from .custom_sorter import *
from ..MongoModels import *
from ..serializers import *
from . import id_generator as ig
def activity_recognition(video_path):
@@ -55,30 +58,30 @@ def activity_recognition(video_path):
VIDEO_ACTIVITY_DIR = os.path.join(ACTIVITY_DIR, video_path)
# creating the directory for the video
if (os.path.isdir(VIDEO_ACTIVITY_DIR)):
shutil.rmtree(VIDEO_ACTIVITY_DIR)
# create the video directory
os.mkdir(VIDEO_ACTIVITY_DIR)
# if (os.path.isdir(VIDEO_ACTIVITY_DIR)):
# shutil.rmtree(VIDEO_ACTIVITY_DIR)
#
# # create the video directory
# os.mkdir(VIDEO_ACTIVITY_DIR)
while (frame_count < no_of_frames):
ret, image = video.read()
FRAME_DIR = os.path.join(VIDEO_ACTIVITY_DIR, "frame-{}".format(frame_count))
frame_name = "frame-{}.png".format(frame_count)
FRAME_IMG = os.path.join(FRAME_DIR, frame_name)
if (os.path.isdir(FRAME_DIR)):
shutil.rmtree(FRAME_DIR)
# frame_name = "frame-{}.png".format(frame_count)
#
# FRAME_IMG = os.path.join(FRAME_DIR, frame_name)
#
# if (os.path.isdir(FRAME_DIR)):
# shutil.rmtree(FRAME_DIR)
# create the new frame directory
os.mkdir(FRAME_DIR)
# os.mkdir(FRAME_DIR)
image = cv2.resize(image, size)
detections = person_detection(image, net)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imwrite(FRAME_IMG, image)
# cv2.imwrite(FRAME_IMG, image)
# if there are any person detections
if (len(detections) > 0):
@@ -111,22 +114,21 @@ def activity_recognition(video_path):
note_taking_count += 1
# saving the detection for the particular frame
detection_name = "detection-{}.png".format(detection_count)
detection_image_path = os.path.join(FRAME_DIR, detection_name)
# converting detected image into grey-scale
detection = cv2.cvtColor(detection, cv2.COLOR_BGR2GRAY)
cv2.imwrite(detection_image_path, detection)
# detection_name = "detection-{}.png".format(detection_count)
# detection_image_path = os.path.join(FRAME_DIR, detection_name)
#
# # converting detected image into grey-scale
# detection = cv2.cvtColor(detection, cv2.COLOR_BGR2GRAY)
#
# cv2.imwrite(detection_image_path, detection)
detection_count += 1
frame_count += 1
# after extracting the frames, save the changes to static content
p = os.popen("python manage.py collectstatic", "w")
p.write("yes")
# p = os.popen("python manage.py collectstatic", "w")
# p.write("yes")
# calculating the percentages for each label
phone_perct = float(phone_checking_count / total_detections) * 100 if total_detections > 0 else 0
@@ -140,7 +142,6 @@ def activity_recognition(video_path):
percentages["writing_perct"] = note_perct
percentages["listening_perct"] = listening_perct
return percentages
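For example, 10 phone-checking detections out of 50 total detections give float(10 / 50) * 100 = 20.0 for phone_perct; when total_detections is 0, all three percentages fall back to 0.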
@@ -162,8 +163,6 @@ def person_detection(image, net):
person_count = 0
# load the input image and construct an input blob for the image
# by resizing to a fixed 300x300 pixels and then normalizing it
# (note: normalization is done via the authors of the MobileNet SSD
@@ -212,10 +211,8 @@ def person_detection(image, net):
# retrieving the extracted frames and detections for a given video
def getExtractedFrames(folder_name):
image_list = []
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(folder_name))
@@ -240,9 +237,9 @@ def getExtractedFrames(folder_name):
else:
return "No extracted frames were found"
# get detections for a given frame name
def get_detections(video_name, frame_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
FRAME_DIR = os.path.join(EXTRACTED_DIR, frame_name)
@@ -257,7 +254,6 @@ def get_detections(video_name, frame_name):
# get detections for a given class name
def get_detections_for_label(video_name, label_index):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
@@ -328,7 +324,6 @@ def get_detections_for_label(video_name, label_index):
# to get the student evaluations
def get_student_activity_evaluation(video_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
@@ -401,7 +396,6 @@ def get_frame_activity_recognition(video_name):
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_04.h5")
np.set_printoptions(suppress=True)
# load the model
@@ -473,7 +467,6 @@ def get_frame_activity_recognition(video_name):
# increment the detection count
detection_count += 1
# calculating the percentages for the frame
phone_checking_perct = float(phone_checking_count / detection_count) * 100 if detection_count > 0 else 0
listening_perct = float(listening_count / detection_count) * 100 if detection_count > 0 else 0
@@ -575,7 +568,6 @@ def get_individual_student_evaluation(video_name, student_name):
# this method will retrieve student activity summary for given time period
def get_student_activity_summary_for_period(activities):
# declare variables to add percentage values
phone_checking_perct_combined = 0.0
listening_perct_combined = 0.0
@@ -590,7 +582,6 @@ def get_student_activity_summary_for_period(activities):
# iterate through the activities
for activity in activities:
individual_activity = {}
individual_activity["phone_perct"] = float(activity['phone_perct'])
individual_activity["listening_perct"] = float(activity['listening_perct'])
@@ -603,7 +594,6 @@ def get_student_activity_summary_for_period(activities):
# append to the list
individual_lec_activities.append(individual_activity)
# calculate the average percentages
phone_checking_average_perct = round((phone_checking_perct_combined / no_of_activities), 1)
listening_average_perct = round((listening_perct_combined / no_of_activities), 1)
@@ -656,7 +646,6 @@ def activity_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# assign the difference
frame_group_diff[key] = diff if diff > 0 else 1
# looping through the frames
for frame in os.listdir(EXTRACTED_DIR):
# getting the frame folder
@@ -682,7 +671,6 @@ def activity_frame_groupings(video_name, frame_landmarks, frame_group_dict):
image_array = np.asarray(image)
normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
# Load the image into the array
data[0] = normalized_image_array
@@ -700,11 +688,9 @@ def activity_frame_groupings(video_name, frame_landmarks, frame_group_dict):
elif label == class_labels[2]:
note_count += 1
# increment the detection count
detection_count += 1
# finding the time landmark that the current frame is in
for i in frame_landmarks:
index = frame_landmarks.index(i)
@@ -718,14 +704,11 @@ def activity_frame_groupings(video_name, frame_landmarks, frame_group_dict):
if (frame_count >= i) & (frame_count < next_value):
frame_name = "{}-{}".format(i, next_value)
frame_group_dict[frame_name]['phone_count'] += phone_count
frame_group_dict[frame_name]['listen_count'] += listen_count
frame_group_dict[frame_name]['note_count'] += note_count
frame_group_dict[frame_name]['detection_count'] += detection_count
# increment the frame count
frame_count += 1
@@ -764,6 +747,83 @@ def activity_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# print('frame group dict: ', frame_group_dict)
activity_labels = ['phone_perct', 'listen_perct', 'note_perct']
# return the dictionary
return frame_group_dict, activity_labels
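The step that converts the accumulated group counts into the percentages named in activity_labels is elided by the hunk above. Assuming it mirrors the per-video formula used earlier in this file, a plausible reconstruction:

def compute_group_percentages(frame_group_dict):
    # Hypothetical reconstruction of the elided step: turn each group's raw
    # counts into percentages, guarding against groups with no detections.
    for group, counts in frame_group_dict.items():
        detection_count = counts['detection_count']
        for perct_key, count_key in [('phone_perct', 'phone_count'),
                                     ('listen_perct', 'listen_count'),
                                     ('note_perct', 'note_count')]:
            counts[perct_key] = round(float(counts[count_key]) / detection_count * 100, 1) \
                if detection_count > 0 else 0
    return frame_group_dict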
# this section will handle saving activity entities to the database
def save_frame_recognition(video_name):
# retrieve the lecture activity id
lec_activity = LectureActivity.objects.filter(lecture_video_id__video_name=video_name)
lec_activity_ser = LectureActivitySerializer(lec_activity, many=True)
lec_activity_data = lec_activity_ser.data[0]
lec_activity_id = lec_activity_data['id']
# create a new lecture activity frame detections id
last_lec_activity_frame_recognitions = LectureActivityFrameRecognitions.objects.order_by(
'lecture_activity_frame_recognition_id').last()
new_lecture_activity_frame_recognitions_id = "LAFR00001" if (last_lec_activity_frame_recognitions is None) else \
ig.generate_new_id(last_lec_activity_frame_recognitions.lecture_activity_frame_recognition_id)
# calculate the frame detections
frame_detections = get_frame_activity_recognition(video_name)
frame_recognition_details = []
# save the new lecture activity frame recognitions
for detection in frame_detections:
lec_activity_frame_recognition_details = LectureActivityFrameRecognitionDetails()
lec_activity_frame_recognition_details.frame_name = detection['frame_name']
lec_activity_frame_recognition_details.phone_perct = detection['phone_perct']
lec_activity_frame_recognition_details.listen_perct = detection['listening_perct']
lec_activity_frame_recognition_details.note_perct = detection['note_perct']
frame_recognition_details.append(lec_activity_frame_recognition_details)
lec_activity_frame_recognitions = LectureActivityFrameRecognitions()
lec_activity_frame_recognitions.lecture_activity_frame_recognition_id = new_lecture_activity_frame_recognitions_id
lec_activity_frame_recognitions.lecture_activity_id_id = lec_activity_id
lec_activity_frame_recognitions.frame_recognition_details = frame_recognition_details
lec_activity_frame_recognitions.save()
# now return the frame detections
return frame_detections
# this method will save the activity frame groupings to the database
def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
frame_group_percentages, activity_labels = activity_frame_groupings(video_name, frame_landmarks,
frame_group_dict)
# save the frame group details into db
last_lec_activity_frame_grouping = LectureActivityFrameGroupings.objects.order_by(
'lecture_activity_frame_groupings_id').last()
new_lecture_activity_frame_grouping_id = "LAFG00001" if (last_lec_activity_frame_grouping is None) else \
ig.generate_new_id(last_lec_activity_frame_grouping.lecture_activity_frame_groupings_id)
# retrieve the lecture activity id
lec_activity = LectureActivity.objects.filter(lecture_video_id__video_name=video_name)
lec_activity_ser = LectureActivitySerializer(lec_activity, many=True)
lec_activity_id = lec_activity_ser.data[0]['id']
# create the frame group details
frame_group_details = []
for key in frame_group_percentages.keys():
# create an object of type 'LectureActivityFrameGroupDetails'
lec_activity_frame_group_details = LectureActivityFrameGroupDetails()
lec_activity_frame_group_details.frame_group = key
lec_activity_frame_group_details.frame_group_percentages = frame_group_percentages[key]
frame_group_details.append(lec_activity_frame_group_details)
new_lec_activity_frame_groupings = LectureActivityFrameGroupings()
new_lec_activity_frame_groupings.lecture_activity_frame_groupings_id = new_lecture_activity_frame_grouping_id
new_lec_activity_frame_groupings.lecture_activity_id_id = lec_activity_id
new_lec_activity_frame_groupings.frame_group_details = frame_group_details
# save
new_lec_activity_frame_groupings.save()
@@ -16,6 +16,10 @@ import os
import shutil
import math
from ..MongoModels import *
from ..serializers import *
from . import id_generator as ig
def get_2d_points(img, rotation_vector, translation_vector, camera_matrix, val):
"""Return the 3D points present as 2D for making annotation box"""
@@ -846,3 +850,84 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic
# return the dictionary
return frame_group_dict, labels
# this section will handle some database operations
def save_frame_detections(video_name):
# retrieve the lecture gaze estimation id
lec_gaze = LectureGazeEstimation.objects.filter(lecture_video_id__video_name=video_name)
lec_gaze_ser = LectureGazeEstimationSerializer(lec_gaze, many=True)
lec_gaze_data = lec_gaze_ser.data[0]
lec_gaze_id = lec_gaze_data['id']
# create a new lecture gaze frame recognitions id
last_lec_gaze_frame_recognitions = LectureGazeFrameRecognitions.objects.order_by(
'lecture_gaze_frame_recognition_id').last()
new_lecture_gaze_frame_recognitions_id = "LGFR00001" if (
last_lec_gaze_frame_recognitions is None) else \
ig.generate_new_id(last_lec_gaze_frame_recognitions.lecture_gaze_frame_recognition_id)
# calculate the frame detections
frame_detections, frame_rate = get_lecture_gaze_esrimation_for_frames(video_name)
# to be added to the field 'frame_recognition_details' in the Lecture Gaze Frame Recognitions
frame_recognition_details = []
# save the new lecture gaze frame recognitions
for detection in frame_detections:
lec_gaze_frame_recognition_details = LectureGazeFrameRecognitionDetails()
lec_gaze_frame_recognition_details.frame_name = detection['frame_name']
lec_gaze_frame_recognition_details.upright_perct = detection['upright_perct']
lec_gaze_frame_recognition_details.upleft_perct = detection['upleft_perct']
lec_gaze_frame_recognition_details.downright_perct = detection['downright_perct']
lec_gaze_frame_recognition_details.downleft_perct = detection['downleft_perct']
lec_gaze_frame_recognition_details.front_perct = detection['front_perct']
frame_recognition_details.append(lec_gaze_frame_recognition_details)
lec_gaze_frame_recognitions = LectureGazeFrameRecognitions()
lec_gaze_frame_recognitions.lecture_gaze_frame_recognition_id = new_lecture_gaze_frame_recognitions_id
lec_gaze_frame_recognitions.lecture_gaze_id_id = lec_gaze_id
lec_gaze_frame_recognitions.frame_recognition_details = frame_recognition_details
lec_gaze_frame_recognitions.save()
# now return the frame recognitions
return frame_detections
# this method will save gaze frame groupings to the database
def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
frame_group_percentages, gaze_labels = gaze_estimation_frame_groupings(video_name, frame_landmarks,
frame_group_dict)
# save the frame group details into db
last_lec_gaze_frame_grouping = LectureGazeFrameGroupings.objects.order_by('lecture_gaze_frame_groupings_id').last()
new_lecture_gaze_frame_grouping_id = "LGFG00001" if (last_lec_gaze_frame_grouping is None) else \
ig.generate_new_id(last_lec_gaze_frame_grouping.lecture_gaze_frame_groupings_id)
# retrieve the lecture gaze estimation id
lec_gaze = LectureGazeEstimation.objects.filter(lecture_video_id__video_name=video_name)
lec_gaze_ser = LectureGazeEstimationSerializer(lec_gaze, many=True)
lec_gaze_id = lec_gaze_ser.data[0]['id']
# create the frame group details
frame_group_details = []
for key in frame_group_percentages.keys():
# create an object of type 'LectureGazeFrameGroupDetails'
lec_gaze_frame_group_details = LectureGazeFrameGroupDetails()
lec_gaze_frame_group_details.frame_group = key
lec_gaze_frame_group_details.frame_group_percentages = frame_group_percentages[key]
frame_group_details.append(lec_gaze_frame_group_details)
new_lec_gaze_frame_groupings = LectureGazeFrameGroupings()
new_lec_gaze_frame_groupings.lecture_gaze_frame_groupings_id = new_lecture_gaze_frame_grouping_id
new_lec_gaze_frame_groupings.lecture_gaze_id_id = lec_gaze_id
new_lec_gaze_frame_groupings.frame_group_details = frame_group_details
# save
new_lec_gaze_frame_groupings.save()
@@ -3,6 +3,11 @@ import cv2
import shutil
import datetime
from FirstApp.MongoModels import *
from FirstApp.serializers import *
from . import id_generator as ig
def VideoExtractor(request):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
@@ -193,3 +198,99 @@ def getFrameLandmarks(video_name, category):
'front_count': 0, 'detection_count': 0}
return frame_landmarks, frame_group_dict
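For orientation, the two return values have roughly the following shapes. The values are illustrative only; the gaze counter names are inferred from the percentages consumed elsewhere in this commit, and the trailing 'front_count'/'detection_count' pair is visible in the hunk above:

# Illustrative only: frame indices that bound each frame group.
frame_landmarks = [0, 30, 60, 90]

# One entry per consecutive landmark pair, keyed "start-end".
frame_group_dict = {
    "0-30": {'upright_count': 0, 'upleft_count': 0, 'downright_count': 0,
             'downleft_count': 0, 'front_count': 0, 'detection_count': 0},
    # "30-60": {...}, and so on for each group
}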
# this section will handle some database operations
def save_time_landmarks(video_name):
last_lec_video_time_landmarks = LectureVideoTimeLandmarks.objects.order_by('lecture_video_time_landmarks_id').last()
new_lecture_video_time_landmarks_id = "LVTL00001" if (last_lec_video_time_landmarks is None) else \
ig.generate_new_id(last_lec_video_time_landmarks.lecture_video_time_landmarks_id)
# retrieve lecture video details
lec_video = LectureVideo.objects.filter(video_name=video_name)
lec_video_ser = LectureVideoSerializer(lec_video, many=True)
lec_video_id = lec_video_ser.data[0]['id']
# save the landmark details in the db
time_landmarks = getTimeLandmarks(video_name)
db_time_landmarks = []
# loop through the time landmarks
for landmark in time_landmarks:
landmark_obj = Landmarks()
landmark_obj.landmark = landmark
db_time_landmarks.append(landmark_obj)
new_lec_video_time_landmarks = LectureVideoTimeLandmarks()
new_lec_video_time_landmarks.lecture_video_time_landmarks_id = new_lecture_video_time_landmarks_id
new_lec_video_time_landmarks.lecture_video_id_id = lec_video_id
new_lec_video_time_landmarks.time_landmarks = db_time_landmarks
new_lec_video_time_landmarks.save()
# this method will save frame landmarks to the database
def save_frame_landmarks(video_name):
# retrieve the previous lecture video frame landmarks details
last_lec_video_frame_landmarks = LectureVideoFrameLandmarks.objects.order_by(
'lecture_video_frame_landmarks_id').last()
new_lecture_video_frame_landmarks_id = "LVFL00001" if (last_lec_video_frame_landmarks is None) else \
ig.generate_new_id(last_lec_video_frame_landmarks.lecture_video_frame_landmarks_id)
frame_landmarks, frame_group_dict = getFrameLandmarks(video_name, "Activity")
# retrieve lecture video details
lec_video = LectureVideo.objects.filter(video_name=video_name)
lec_video_ser = LectureVideoSerializer(lec_video, many=True)
lec_video_id = lec_video_ser.data[0]['id']
# save the frame landmarks details into db
db_frame_landmarks = []
for landmark in frame_landmarks:
landmark_obj = Landmarks()
landmark_obj.landmark = landmark
db_frame_landmarks.append(landmark_obj)
new_lec_video_frame_landmarks = LectureVideoFrameLandmarks()
new_lec_video_frame_landmarks.lecture_video_frame_landmarks_id = new_lecture_video_frame_landmarks_id
new_lec_video_frame_landmarks.lecture_video_id_id = lec_video_id
new_lec_video_frame_landmarks.frame_landmarks = db_frame_landmarks
new_lec_video_frame_landmarks.save()
# now return the frame landmarks and the frame group dictionary
return frame_landmarks, frame_group_dict
# this method will retrieve the frame landmarks from the database
def get_frame_landmarks(video_name):
frame_landmarks = []
# retrieve frame landmarks from db
lec_video_frame_landmarks = LectureVideoFrameLandmarks.objects.filter(lecture_video_id__video_name=video_name)
lec_video_frame_landmarks_ser = LectureVideoFrameLandmarksSerializer(lec_video_frame_landmarks, many=True)
lec_video_frame_landmarks_data = lec_video_frame_landmarks_ser.data[0]
retrieved_frame_landmarks = lec_video_frame_landmarks_data["frame_landmarks"]
# creating a new list to display in the frontend
for landmark in retrieved_frame_landmarks:
frame_landmarks.append(landmark['landmark'])
# now return the frame landmarks
return frame_landmarks
\ No newline at end of file