Commit 033e0193 authored by I.K Seneviratne's avatar I.K Seneviratne

Committing the full implementation of saving lecture emotion and gaze...

Committing the full implementation of saving lecture emotion and gaze estimation frame recordings to the database.
parent 369f5fcb
......@@ -259,6 +259,31 @@ class LectureEmotionFrameGroupings(models.Model):
return self.lecture_emotion_frame_groupings_id
# this abstract class will contain lecture emotion frame recognition details
class LectureEmotionFrameRecognitionDetails(models.Model):
    # name of the analysed video frame (max 15 chars); presumably of the
    # form "frame-<n>" as produced by the gaze helper — TODO confirm for emotion
    frame_name = models.CharField(max_length=15)
    # per-frame emotion percentages, copied from the recognition output
    happy_perct = models.FloatField()
    sad_perct = models.FloatField()
    angry_perct = models.FloatField()
    surprise_perct = models.FloatField()
    neutral_perct = models.FloatField()

    class Meta:
        # abstract: no table of its own — instances are embedded inside
        # LectureEmotionFrameRecognitions.frame_recognition_details (ArrayField)
        abstract = True
# this class will contain lecture emotion frame recognitions
class LectureEmotionFrameRecognitions(models.Model):
    # business id, e.g. "LEFR00001" (generated sequentially by the API view)
    lecture_emotion_frame_recognition_id = models.CharField(max_length=15)
    # owning emotion report; deleting the report removes these recognitions
    lecture_emotion_id = models.ForeignKey(LectureEmotionReport, on_delete=models.CASCADE)
    # embedded list of per-frame detail documents (djongo ArrayField)
    frame_recognition_details = models.ArrayField(LectureEmotionFrameRecognitionDetails)

    def __str__(self):
        # display the business id when the object is printed
        return self.lecture_emotion_frame_recognition_id
# POSE section
# lecture pose estimation
class LectureGazeEstimation(models.Model):
......@@ -305,3 +330,27 @@ class LectureGazeFrameGroupings(models.Model):
def __str__(self):
    # display the grouping's business id when the object is printed
    return self.lecture_gaze_frame_groupings_id
# this abstract class will contain lecture gaze frame recognition details
class LectureGazeFrameRecognitionDetails(models.Model):
    # name of the analysed video frame, "frame-<n>" (max 15 chars)
    frame_name = models.CharField(max_length=15)
    # per-frame gaze-direction percentages, copied from the estimation output
    upright_perct = models.FloatField()
    upleft_perct = models.FloatField()
    downright_perct = models.FloatField()
    downleft_perct = models.FloatField()
    front_perct = models.FloatField()

    class Meta:
        # abstract: no table of its own — instances are embedded inside
        # LectureGazeFrameRecognitions.frame_recognition_details (ArrayField)
        abstract = True
# this class will contain lecture gaze frame recognitions
class LectureGazeFrameRecognitions(models.Model):
    # business id, e.g. "LGFR00001" (generated sequentially by the API view)
    lecture_gaze_frame_recognition_id = models.CharField(max_length=15)
    # owning gaze estimation; deleting it removes these recognitions
    lecture_gaze_id = models.ForeignKey(LectureGazeEstimation, on_delete=models.CASCADE)
    # embedded list of per-frame detail documents (djongo ArrayField)
    frame_recognition_details = models.ArrayField(LectureGazeFrameRecognitionDetails)

    def __str__(self):
        # display the business id when the object is printed
        return self.lecture_gaze_frame_recognition_id
\ No newline at end of file
......@@ -557,13 +557,71 @@ class GetLectureEmotionRecognitionsForFrames(APIView):
def get(self, request):
    """Return per-frame emotion recognition percentages for a lecture video.

    Serves the stored LectureEmotionFrameRecognitions record when one already
    exists for the requested video; otherwise computes the per-frame emotion
    percentages, persists them under a freshly generated id, and returns them.
    """
    video_name = request.query_params.get('video_name')

    # serve the cached recognitions if this video was already processed
    already_processed = LectureEmotionFrameRecognitions.objects.filter(
        lecture_emotion_id__lecture_video_id__video_name=video_name).exists()

    if already_processed:
        stored = LectureEmotionFrameRecognitions.objects.filter(
            lecture_emotion_id__lecture_video_id__video_name=video_name)
        stored_data = LectureEmotionFrameRecognitionsSerializer(stored, many=True).data[0]
        return Response({
            "response": stored_data['frame_recognition_details']
        })

    # retrieve the lecture emotion report id for this video
    emotion_reports = LectureEmotionReport.objects.filter(lecture_video_id__video_name=video_name)
    report_data = LectureEmotionSerializer(emotion_reports, many=True).data[0]
    lec_emotion_id = report_data['id']

    # generate the next recognition id ("LEFR00001" when none exist yet)
    latest = LectureEmotionFrameRecognitions.objects.order_by(
        'lecture_emotion_frame_recognition_id').last()
    new_recognition_id = "LEFR00001" if latest is None else ig.generate_new_id(
        latest.lecture_emotion_frame_recognition_id)

    # compute per-frame emotion percentages for the video
    frame_detections = ed.get_frame_emotion_recognition(video_name)

    # build the embedded detail objects for each analysed frame
    detail_objects = []
    for detection in frame_detections:
        detail = LectureEmotionFrameRecognitionDetails()
        detail.frame_name = detection['frame_name']
        detail.happy_perct = detection['happy_perct']
        detail.sad_perct = detection['sad_perct']
        detail.angry_perct = detection['angry_perct']
        detail.surprise_perct = detection['surprise_perct']
        detail.neutral_perct = detection['neutral_perct']
        detail_objects.append(detail)

    # persist the new recognitions record
    recognitions = LectureEmotionFrameRecognitions()
    recognitions.lecture_emotion_frame_recognition_id = new_recognition_id
    recognitions.lecture_emotion_id_id = lec_emotion_id
    recognitions.frame_recognition_details = detail_objects
    recognitions.save()

    return Response({
        "response": frame_detections
    })
##### POSE #####
class GetLectureVideoForPose(APIView):
......@@ -706,14 +764,69 @@ class GetLectureGazeEstimationForFrames(APIView):
def get(self, request):
    """Return per-frame gaze estimation percentages for a lecture video.

    When a LectureGazeFrameRecognitions record already exists for the video,
    its stored details are returned; otherwise the per-frame gaze percentages
    are computed, saved under a newly generated id, and returned together with
    the video's frame rate.
    """
    video_name = request.query_params.get('video_name')

    # finding the existence of a lecture gaze frame recognition record
    isExist = LectureGazeFrameRecognitions.objects.filter(
        lecture_gaze_id__lecture_video_id__video_name=video_name).exists()

    if isExist:
        lecture_gaze_frame_recognitions = LectureGazeFrameRecognitions.objects.filter(
            lecture_gaze_id__lecture_video_id__video_name=video_name)
        lecture_gaze_frame_recognitions_ser = LectureGazeFrameRecognitionsSerializer(
            lecture_gaze_frame_recognitions, many=True)
        lecture_gaze_frame_recognitions_data = lecture_gaze_frame_recognitions_ser.data[0]
        frame_detections = lecture_gaze_frame_recognitions_data['frame_recognition_details']

        # NOTE(review): the cached branch has no stored frame rate, so its
        # response shape differs from the freshly-computed branch — confirm
        # whether callers rely on "frame_rate" being present.
        return Response({
            "response": frame_detections
        })
    else:
        # retrieve the lecture gaze estimation id for this video
        lec_gaze = LectureGazeEstimation.objects.filter(lecture_video_id__video_name=video_name)
        lec_gaze_ser = LectureGazeEstimationSerializer(lec_gaze, many=True)
        lec_gaze_data = lec_gaze_ser.data[0]
        lec_gaze_id = lec_gaze_data['id']

        # create a new gaze frame recognitions id ("LGFR00001" when none exist yet)
        last_lec_gaze_frame_recognitions = LectureGazeFrameRecognitions.objects.order_by(
            'lecture_gaze_frame_recognition_id').last()
        new_lecture_gaze_frame_recognitions_id = "LGFR00001" if (
            last_lec_gaze_frame_recognitions is None) else \
            ig.generate_new_id(last_lec_gaze_frame_recognitions.lecture_gaze_frame_recognition_id)

        # calculate the frame detections and the source video's frame rate
        frame_detections, frame_rate = hge.get_lecture_gaze_esrimation_for_frames(video_name)

        # build the embedded detail objects for each analysed frame
        frame_recognition_details = []
        for detection in frame_detections:
            lec_gaze_frame_recognition_details = LectureGazeFrameRecognitionDetails()
            lec_gaze_frame_recognition_details.frame_name = detection['frame_name']
            lec_gaze_frame_recognition_details.upright_perct = detection['upright_perct']
            lec_gaze_frame_recognition_details.upleft_perct = detection['upleft_perct']
            lec_gaze_frame_recognition_details.downright_perct = detection['downright_perct']
            lec_gaze_frame_recognition_details.downleft_perct = detection['downleft_perct']
            lec_gaze_frame_recognition_details.front_perct = detection['front_perct']
            frame_recognition_details.append(lec_gaze_frame_recognition_details)

        # save the new lecture gaze frame recognitions
        lec_gaze_frame_recognitions = LectureGazeFrameRecognitions()
        lec_gaze_frame_recognitions.lecture_gaze_frame_recognition_id = new_lecture_gaze_frame_recognitions_id
        lec_gaze_frame_recognitions.lecture_gaze_id_id = lec_gaze_id
        lec_gaze_frame_recognitions.frame_recognition_details = frame_recognition_details
        lec_gaze_frame_recognitions.save()

        # BUG FIX: this dict previously contained a duplicated "response" key
        # and a missing comma (a SyntaxError as written); return the detections
        # together with the frame rate.
        return Response({
            "response": frame_detections,
            "frame_rate": frame_rate
        })
##### VIDEO RESULTS SECTION #####
# this API find the lectures which are yet to be processed
......
......@@ -538,11 +538,11 @@ def get_lecture_gaze_esrimation_for_frames(video_name):
# collect the percentages to a dictionary
percentages['frame_name'] = "frame-{}".format(frame_count)
percentages['head_up_right_perct'] = head_up_right_perct
percentages['head_up_left_perct'] = head_up_left_perct
percentages['head_down_right_perct'] = head_down_right_perct
percentages['head_down_left_perct'] = head_down_left_perct
percentages['head_front_perct'] = head_front_perct
percentages['upright_perct'] = head_up_right_perct
percentages['upleft_perct'] = head_up_left_perct
percentages['downright_perct'] = head_down_right_perct
percentages['downleft_perct'] = head_down_left_perct
percentages['front_perct'] = head_front_perct
# append the calculated percentages to the frame_detections
frame_detections.append(percentages)
......
# Generated by Django 2.2.11 on 2020-10-17 14:01
import FirstApp.MongoModels
from django.db import migrations, models
import django.db.models.deletion
import djongo.models.fields
class Migration(migrations.Migration):
    # applied after the lecture-activity frame-recognitions migration
    dependencies = [
        ('FirstApp', '0012_lectureactivityframerecognitions'),
    ]

    operations = [
        # create the LectureEmotionFrameRecognitions collection; the per-frame
        # details are embedded via a djongo ArrayField instead of a separate
        # relational table
        migrations.CreateModel(
            name='LectureEmotionFrameRecognitions',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('lecture_emotion_frame_recognition_id', models.CharField(max_length=15)),
                ('frame_recognition_details', djongo.models.fields.ArrayField(model_container=FirstApp.MongoModels.LectureEmotionFrameRecognitionDetails)),
                ('lecture_emotion_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='FirstApp.LectureEmotionReport')),
            ],
        ),
    ]
# Generated by Django 2.2.11 on 2020-10-17 17:06
import FirstApp.MongoModels
from django.db import migrations, models
import django.db.models.deletion
import djongo.models.fields
class Migration(migrations.Migration):
    # applied after the lecture-emotion frame-recognitions migration
    dependencies = [
        ('FirstApp', '0013_lectureemotionframerecognitions'),
    ]

    operations = [
        # create the LectureGazeFrameRecognitions collection; the per-frame
        # details are embedded via a djongo ArrayField instead of a separate
        # relational table
        migrations.CreateModel(
            name='LectureGazeFrameRecognitions',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('lecture_gaze_frame_recognition_id', models.CharField(max_length=15)),
                ('frame_recognition_details', djongo.models.fields.ArrayField(model_container=FirstApp.MongoModels.LectureGazeFrameRecognitionDetails)),
                ('lecture_gaze_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='FirstApp.LectureGazeEstimation')),
            ],
        ),
    ]
......@@ -356,6 +356,39 @@ class LectureEmotionFrameGroupingsSerializer(serializers.ModelSerializer):
# lecture emotion frame recognition serializer
class LectureEmotionFrameRecognitionsSerializer(serializers.ModelSerializer):
    """Serializes a LectureEmotionFrameRecognitions record, expanding the
    embedded per-frame detail objects into plain dictionaries."""

    lecture_emotion_id = LectureEmotionSerializer()
    frame_recognition_details = serializers.SerializerMethodField()

    # serialize the embedded 'frame_recognition_details' array
    def get_frame_recognition_details(self, obj):
        detail_fields = ('frame_name', 'happy_perct', 'sad_perct',
                         'angry_perct', 'surprise_perct', 'neutral_perct')
        # one dict per embedded frame object, keyed in the order above
        return [
            {name: getattr(detail, name) for name in detail_fields}
            for detail in obj.frame_recognition_details
        ]

    class Meta:
        model = LectureEmotionFrameRecognitions
        fields = '__all__'
# lecture video meta serializer
class VideoMetaSerializer(serializers.ModelSerializer):
......@@ -401,3 +434,36 @@ class LectureGazeFrameGroupingsSerializer(serializers.ModelSerializer):
class Meta:
model = LectureGazeFrameGroupings
fields = '__all__'
# lecture gaze frame recognition serializer
class LectureGazeFrameRecognitionsSerializer(serializers.ModelSerializer):
    """Serializes a LectureGazeFrameRecognitions record, expanding the
    embedded per-frame detail objects into plain dictionaries."""

    lecture_gaze_id = LectureGazeEstimationSerializer()
    frame_recognition_details = serializers.SerializerMethodField()

    # serialize the embedded 'frame_recognition_details' array
    def get_frame_recognition_details(self, obj):
        detail_fields = ('frame_name', 'upright_perct', 'upleft_perct',
                         'downright_perct', 'downleft_perct', 'front_perct')
        # one dict per embedded frame object, keyed in the order above
        return [
            {name: getattr(detail, name) for name in detail_fields}
            for detail in obj.frame_recognition_details
        ]

    class Meta:
        model = LectureGazeFrameRecognitions
        fields = '__all__'
\ No newline at end of file
......@@ -774,7 +774,8 @@
response.map((frame) => {
let frame_name = frame.frame_name;
let phone_perct = Math.round(frame.phone_perct, 0);
let listen_perct = Math.round(frame.listening_perct, 0);
let listen_perct = Math.round(frame.listen_perct, 0);
{#let listen_perct = Math.round(frame.listening_perct, 0);#}
let note_perct = Math.round(frame.note_perct, 0);
//append to the html string
......
......@@ -304,11 +304,11 @@
//creating the html string, iteratively
response.map((frame) => {
let frame_name = frame.frame_name;
let look_up_right = Math.round(frame.head_up_right_perct, 0);
let look_up_left = Math.round(frame.head_up_left_perct, 0);
let look_down_right = Math.round(frame.head_down_right_perct, 0);
let look_down_left = Math.round(frame.head_down_left_perct, 0);
let look_front = Math.round(frame.head_front_perct, 0);
let look_up_right = Math.round(frame.upright_perct, 0);
let look_up_left = Math.round(frame.upleft_perct, 0);
let look_down_right = Math.round(frame.downright_perct, 0);
let look_down_left = Math.round(frame.downleft_perct, 0);
let look_front = Math.round(frame.front_perct, 0);
//append to the html string
//looking up and right
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment