Commit e68790cc authored by I.K Seneviratne

Merge remote-tracking branch 'origin/QA_RELEASE' into monitoring_student_behavior_IT17138000

parents adc4413c 80462c9f
@@ -210,3 +210,44 @@ class LecturerAudioSummaryPeriodAPI(APIView):
        })
# this section is for student and lecturer behavior integration
class StudentLecturerIntegratedAPI(APIView):
    def get(self, request):
        video_name = request.query_params.get('video_name')

        # check whether lecturer activity frame recognitions already exist for this video
        lecture_activity_frame_recognitions = LecturerActivityFrameRecognitions.objects.filter(
            lecturer_meta_id__lecturer_video_id__lecture_video_name=video_name)

        if lecture_activity_frame_recognitions.exists():
            lecture_activity_frame_recognitions_ser = LecturerActivityFrameRecognitionsSerializer(
                lecture_activity_frame_recognitions, many=True)
            lecture_activity_frame_recognitions_data = lecture_activity_frame_recognitions_ser.data[0]

            frame_detections = lecture_activity_frame_recognitions_data['frame_recognition_details']
            fps = lecture_activity_frame_recognitions_data['fps']

            return Response({
                "frame_recognitions": frame_detections,
                "fps": fps
            })

        else:
            # no saved recognitions yet: run the recognition process and persist the results
            frame_recognitions, fps = classroom_activity.save_frame_recognition(video_name)

            return Response({
                "frame_recognitions": frame_recognitions,
                "fps": fps
            })
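Not part of this commit: a hedged client-side sketch of how this endpoint could be exercised. The '/lecturer/' prefix is inferred from the sidebar link later in this diff, and the host and video name are hypothetical.

import requests

# Hypothetical local development server; adjust the host and URL prefix
# to your deployment.
BASE_URL = "http://127.0.0.1:8000/lecturer"

response = requests.get(
    "{}/get-lecturer-video-frame-recognitions/".format(BASE_URL),
    params={"video_name": "sample_lecture.mp4"},  # hypothetical video name
)
response.raise_for_status()

payload = response.json()
print(payload["fps"])
print(payload["frame_recognitions"][:2])  # first two per-frame records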
@@ -5,6 +5,13 @@ import numpy as np
import cv2
import os

from FirstApp.logic.custom_sorter import custom_object_sorter
from FirstApp.logic.id_generator import generate_new_id
from MonitorLecturerApp.models import LecturerVideoMetaData, LecturerActivityFrameRecognitions, \
    LecturerActivityFrameRecognitionDetails
from MonitorLecturerApp.serializers import LecturerVideoMetaDataSerializer
def activity_recognition(video_name):
    BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    CLASSIFIER_DIR = os.path.join(BASE_DIR, "MonitorLecturerApp\\models")
@@ -108,3 +115,162 @@ def activity_recognition(video_name):
# this method will calculate the lecturer activity for each frame
def get_lecturer_activity_for_frames(video_name):
    BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\lecturer_videos\\{}".format(video_name))
    CLASSIFIER_DIR = os.path.join(BASE_DIR, "MonitorLecturerApp\\models")
    CLASSIFIER_PATH = os.path.join(CLASSIFIER_DIR, "keras_model_updated.h5")

    # load the serialized lecturer activity classification model from disk
    print("[INFO] loading model...")
    np.set_printoptions(suppress=True)

    class_labels = ['Seated Teaching', 'Teaching by Standing', 'Teaching by Walking']

    model = tf.keras.models.load_model(CLASSIFIER_PATH)
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])

    data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
    size = (224, 224)

    # open the video for iteration
    video = cv2.VideoCapture(VIDEO_DIR)
    no_of_frames = video.get(cv2.CAP_PROP_FRAME_COUNT)
    fps = video.get(cv2.CAP_PROP_FPS)
    print('fps: ', fps)

    frame_count = 0

    # per-frame activity recognitions
    frame_activity_recognitions = []

    # for testing purposes
    print('starting the frame activity recognition process')

    # loop through the frames
    while frame_count < no_of_frames:

        # define the count variables for each frame
        sitting_count = 0
        standing_count = 0
        walking_count = 0

        ret, image = video.read()

        # stop if the frame could not be read
        if not ret:
            break

        # derive the frame name
        frame_name = "frame-{}".format(frame_count)

        frame_details = {'frame_name': frame_name}

        detection = cv2.resize(image, size)

        image_array = np.asarray(detection)
        normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1

        # load the image into the array
        data[0] = normalized_image_array

        # run the inference
        prediction = model.predict(data)
        label = class_labels[prediction.argmax()]

        # increment the relevant count, based on the label
        if label == class_labels[0]:
            sitting_count += 1
        elif label == class_labels[1]:
            standing_count += 1
        elif label == class_labels[2]:
            walking_count += 1

        print('current frame: ', frame_count)

        # increment frame count
        frame_count += 1

        # calculate the percentages for the frame; since there is exactly one
        # prediction per frame, each value is either 0 or 100
        sitting_perct = float(sitting_count) * 100
        standing_perct = float(standing_count) * 100
        walking_perct = float(walking_count) * 100

        # add the percentage values to the frame details
        frame_details['sitting_perct'] = sitting_perct
        frame_details['standing_perct'] = standing_perct
        frame_details['walking_perct'] = walking_perct

        # collect the details for this frame
        frame_activity_recognitions.append(frame_details)

    # sort the recognitions based on the frame number
    sorted_activity_frame_recognitions = custom_object_sorter(frame_activity_recognitions)

    # for testing purposes
    print('ending the frame activity recognition process')

    # return the per-frame percentages and the video fps
    return sorted_activity_frame_recognitions, fps
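The sorter imported from FirstApp.logic.custom_sorter is not shown in this diff. A minimal stand-in for the behavior assumed here (numeric ordering on the 'frame-<n>' suffix), under a hypothetical name so it does not clash with the real helper, which may differ:

# Hedged sketch only: orders frame dicts by the numeric suffix of
# 'frame_name', so 'frame-2' sorts before 'frame-10'.
def custom_object_sorter_sketch(frame_details):
    return sorted(frame_details, key=lambda d: int(d['frame_name'].split('-')[-1]))

frames = [{'frame_name': 'frame-10'}, {'frame_name': 'frame-2'}]
print(custom_object_sorter_sketch(frames))  # 'frame-2' comes first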
# this method will save the activity frame recognitions to the database
def save_frame_recognition(video_name):

    # for testing purposes
    print('starting the saving activity frame recognition process')

    # retrieve the lecturer video metadata id
    lec_activity = LecturerVideoMetaData.objects.filter(lecturer_video_id__lecture_video_name=video_name)
    lec_activity_ser = LecturerVideoMetaDataSerializer(lec_activity, many=True)
    lec_activity_data = lec_activity_ser.data[0]
    lec_activity_id = lec_activity_data['id']

    # create a new lecturer activity frame recognitions id
    last_lec_activity_frame_recognitions = LecturerActivityFrameRecognitions.objects.order_by(
        'lecturer_activity_frame_recognition_id').last()
    new_lecture_activity_frame_recognitions_id = "LLAFR00001" if (last_lec_activity_frame_recognitions is None) else \
        generate_new_id(last_lec_activity_frame_recognitions.lecturer_activity_frame_recognition_id)

    # calculate the frame detections
    frame_detections, fps = get_lecturer_activity_for_frames(video_name)

    frame_recognition_details = []

    # build the embedded frame recognition detail objects
    for detection in frame_detections:
        lec_activity_frame_recognition_details = LecturerActivityFrameRecognitionDetails()
        lec_activity_frame_recognition_details.frame_name = detection['frame_name']
        lec_activity_frame_recognition_details.sitting_perct = detection['sitting_perct']
        lec_activity_frame_recognition_details.standing_perct = detection['standing_perct']
        lec_activity_frame_recognition_details.walking_perct = detection['walking_perct']

        frame_recognition_details.append(lec_activity_frame_recognition_details)

    # save the new lecturer activity frame recognitions
    lec_activity_frame_recognitions = LecturerActivityFrameRecognitions()
    lec_activity_frame_recognitions.lecturer_activity_frame_recognition_id = new_lecture_activity_frame_recognitions_id
    lec_activity_frame_recognitions.lecturer_meta_id_id = lec_activity_id
    lec_activity_frame_recognitions.frame_recognition_details = frame_recognition_details
    lec_activity_frame_recognitions.fps = float(fps)
    lec_activity_frame_recognitions.save()

    # for testing purposes
    print('ending the saving activity frame recognition process')

    # return the frame detections and fps
    return frame_detections, fps
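The ID helper imported from FirstApp.logic.id_generator is also not part of this diff. A hedged sketch of what it is assumed to do here, given the "LLAFR00001" seed value: split the ID into an alphabetic prefix and a zero-padded numeric suffix, then increment the suffix. The real helper may differ.

import re

# Hedged sketch only; hypothetical name to avoid clashing with the real helper.
def generate_new_id_sketch(last_id):
    match = re.match(r"([A-Za-z]+)(\d+)", last_id)
    prefix, number = match.group(1), match.group(2)
    return "{}{}".format(prefix, str(int(number) + 1).zfill(len(number)))

print(generate_new_id_sketch("LLAFR00001"))  # LLAFR00002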
# Generated by Django 2.2.11 on 2020-10-25 10:09

import MonitorLecturerApp.models
from django.db import migrations, models
import django.db.models.deletion
import djongo.models.fields


class Migration(migrations.Migration):

    dependencies = [
        ('MonitorLecturerApp', '0004_lecturervideometadata_lecturer_video_id'),
    ]

    operations = [
        migrations.CreateModel(
            name='LecturerActivityFrameRecognitions',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('lecturer_activity_frame_recognition_id', models.CharField(max_length=15)),
                ('frame_recognition_details', djongo.models.fields.ArrayField(model_container=MonitorLecturerApp.models.LecturerActivityFrameRecognitionDetails)),
                ('lecturer_meta_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='MonitorLecturerApp.LecturerVideoMetaData')),
            ],
        ),
    ]
# Generated by Django 2.2.11 on 2020-10-25 10:52

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('MonitorLecturerApp', '0005_lectureractivityframerecognitions'),
    ]

    operations = [
        migrations.AddField(
            model_name='lectureractivityframerecognitions',
            name='fps',
            field=models.FloatField(default=30.0),
        ),
    ]
@@ -87,3 +87,27 @@ class LecturerAudioText (models.Model):
    def __str__(self):
        return self.lecturer_audio_text_id
# this abstract class will contain the lecturer activity frame recognition details
class LecturerActivityFrameRecognitionDetails(models.Model):
    frame_name = models.CharField(max_length=15)
    sitting_perct = models.FloatField()
    standing_perct = models.FloatField()
    walking_perct = models.FloatField()

    class Meta:
        abstract = True


# this class will contain the lecturer activity frame recognitions
class LecturerActivityFrameRecognitions(models.Model):
    lecturer_activity_frame_recognition_id = models.CharField(max_length=15)
    lecturer_meta_id = models.ForeignKey(LecturerVideoMetaData, on_delete=models.CASCADE)
    frame_recognition_details = models.ArrayField(LecturerActivityFrameRecognitionDetails)
    fps = models.FloatField(default=30.0)

    def __str__(self):
        return self.lecturer_activity_frame_recognition_id
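Because frame_recognition_details is a djongo ArrayField with an abstract model container, the detail objects are stored as embedded documents inside the parent record rather than as separate rows. A hedged usage sketch (all values illustrative, not part of this commit):

# Hedged sketch: build one embedded detail and a parent record.
meta = LecturerVideoMetaData.objects.first()  # an existing metadata row

detail = LecturerActivityFrameRecognitionDetails(
    frame_name="frame-0",
    sitting_perct=100.0,
    standing_perct=0.0,
    walking_perct=0.0,
)

record = LecturerActivityFrameRecognitions(
    lecturer_activity_frame_recognition_id="LLAFR00001",
    lecturer_meta_id=meta,
    frame_recognition_details=[detail],  # stored as embedded documents by djongo
    fps=30.0,
)
record.save()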
@@ -2,7 +2,7 @@ from rest_framework import serializers
from FirstApp.serializers import LecturerSerializer, SubjectSerializer
from LectureSummarizingApp.models import LectureAudioSummary

from .models import RegisterTeacher, LecturerActivityFrameRecognitions
from .models import LecturerAudioText, LecturerVideoMetaData, LecturerVideo, LectureRecordedVideo
@@ -44,3 +44,35 @@ class LecturerVideoMetaDataSerializer(serializers.ModelSerializer):
    class Meta:
        model = LecturerVideoMetaData
        fields = '__all__'
# lecturer activity frame recognition serializer
class LecturerActivityFrameRecognitionsSerializer(serializers.ModelSerializer):
    lecturer_meta_id = LecturerVideoMetaDataSerializer()
    frame_recognition_details = serializers.SerializerMethodField()

    # this method serializes the embedded 'frame_recognition_details' field
    def get_frame_recognition_details(self, obj):
        return_data = []

        for frame_recognition in obj.frame_recognition_details:
            recognition = {}

            recognition["frame_name"] = frame_recognition.frame_name
            recognition["sitting_perct"] = frame_recognition.sitting_perct
            recognition["standing_perct"] = frame_recognition.standing_perct
            recognition["walking_perct"] = frame_recognition.walking_perct

            return_data.append(recognition)

        # return the serialized data
        return return_data

    class Meta:
        model = LecturerActivityFrameRecognitions
        fields = '__all__'
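For orientation, one serialized record would take roughly this shape. All values are illustrative, and the nested lecturer_meta_id fields depend on LecturerVideoMetaDataSerializer (abridged here):

# Illustrative output shape only; values are made up.
example_record = {
    "id": 1,
    "lecturer_activity_frame_recognition_id": "LLAFR00001",
    "fps": 30.0,
    "lecturer_meta_id": {"id": 7},  # nested metadata fields, abridged
    "frame_recognition_details": [
        {"frame_name": "frame-0", "sitting_perct": 100.0,
         "standing_perct": 0.0, "walking_perct": 0.0},
        {"frame_name": "frame-1", "sitting_perct": 0.0,
         "standing_perct": 100.0, "walking_perct": 0.0},
    ],
}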
@@ -336,23 +336,23 @@
<div class="sidebar-heading">

</div>

{##}
{#<!-- Nav Item - Pages Collapse Menu -->#}
{#<li class="nav-item">#}
{#<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapsePages"#}
{#   aria-expanded="true" aria-controls="collapsePages">#}
{#<i class="fas fa-fw fa-folder"></i>#}
{#<span>Pages</span>#}
{#</a>#}
{#<div id="collapsePages" class="collapse" aria-labelledby="headingPages" data-parent="#accordionSidebar">#}
{#<div class="bg-white py-2 collapse-inner rounded">#}
{#<!-- <h6 class="collapse-header">Login Screens:</h6>-->#}
{#<a class="collapse-item" href="index.html">Dashboard</a>#}
{#<a class="collapse-item" href="/lecturer/lecture-video">Video Page</a>#}
{##}
{#</div>#}
{#</div>#}
{#</li>#}

<!-- Divider -->
<hr class="sidebar-divider d-none d-md-block">
...
{% extends 'FirstApp/template.html' %}

<!DOCTYPE html>
<html lang="en">

<body id="page-top">
...
@@ -24,6 +24,9 @@ urlpatterns = [
    path('lecture-video', views.lecVideo),

    # path('Video', views.hello)

    # delete this path later
    path('test-frame-recognitions', views.testFrameRecognitions),

    ##### LECTURER ACTIVITY SECTION #####

    # API to retrieve activity recognition
    url(r'^activities/$', api.ActivityRecognitionAPI.as_view()),
@@ -31,6 +34,9 @@ urlpatterns = [
    # API to retrieve lecturer video meta data results
    url(r'^get-lecturer-video-results/$', api.GetLectureVideoResultsAPI.as_view()),

    # API to retrieve lecturer video frame recognitions
    url(r'^get-lecturer-video-frame-recognitions/$', api.StudentLecturerIntegratedAPI.as_view()),

    ##### END OF LECTURER ACTIVITY SECTION #####
...
@@ -187,3 +187,6 @@ def lecVideo(request):
    # for audioPath in audiopaths:
    #     audio = tAudio()


def testFrameRecognitions(request):
    return render(request, "MonitorLecturerApp/test_frame_recognitions.html")
\ No newline at end of file