Commit 0568d7fa authored by SohanDanushka

Merge branch 'QA_RELEASE' into db_and_monitoring

parents 18054e0d e083d4cf
@@ -4,4 +4,7 @@
<option name="languageLevel" value="ES6" />
</component>
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.7" project-jdk-type="Python SDK" />
<component name="PyCharmProfessionalAdvertiser">
<option name="shown" value="true" />
</component>
</project>
\ No newline at end of file
from django.contrib import admin
from .models import Student, Attendance
# Register your models here.
admin.site.register(Student)
admin.site.register(Attendance)
from django.http import HttpResponse, JsonResponse, Http404
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from .models import Student, Subject, Attendance
from .serializers import StudentSerializer, SubjectSerializer, AttendanceSerializer, FileSerializer
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.parsers import MultiPartParser, FormParser
from . import record
class StudentAPIView(APIView):
def get(self, request):
students = Student.objects.all()
serializer = StudentSerializer(students, many=True)
return Response(serializer.data)
def post(self, request):
serializer = StudentSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class StudentDetails(APIView):
def get_object(self, pk):
        try:
            return Student.objects.get(studentId=pk)
        except Student.DoesNotExist:
            # raise Http404 so DRF returns a proper 404 response; returning an
            # HttpResponse here would be handed back to callers as if it were a Student
            raise Http404
def get(self, request, pk):
student = self.get_object(pk)
serializer = StudentSerializer(student)
return Response(serializer.data)
def put(self, request, pk):
student = self.get_object(pk)
serializer = StudentSerializer(student, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk):
student = self.get_object(pk)
        student.delete()
return HttpResponse(status=status.HTTP_204_NO_CONTENT)
@api_view(['GET', 'POST'])
def student_list(request):
if request.method == 'GET':
students = Student.objects.all()
serializer = StudentSerializer(students, many=True)
return Response(serializer.data)
elif request.method == 'POST':
serializer = StudentSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'PUT', 'DELETE'])
def student_detail(request, pk):
try:
student = Student.objects.get(studentId=pk)
except Student.DoesNotExist:
return HttpResponse(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = StudentSerializer(student)
return Response(serializer.data)
elif request.method == 'PUT':
serializer = StudentSerializer(student, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
student.delete()
return HttpResponse(status=status.HTTP_204_NO_CONTENT)
@api_view(['GET', 'POST'])
def subject_list(request):
if request.method == 'GET':
subjects = Subject.objects.all()
serializer = SubjectSerializer(subjects, many=True)
return Response(serializer.data)
elif request.method == 'POST':
serializer = SubjectSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@csrf_exempt
def subject_detail(request, pk):
try:
subject = Subject.objects.get(subjectId=pk)
    except Subject.DoesNotExist:
return HttpResponse(status=404)
if request.method == 'GET':
serializer = SubjectSerializer(subject)
return JsonResponse(serializer.data)
elif request.method == 'PUT':
data = JSONParser().parse(request)
serializer = SubjectSerializer(subject, data=data)
if serializer.is_valid():
serializer.save()
return JsonResponse(serializer.data)
return JsonResponse(serializer.errors, status=400)
elif request.method == 'DELETE':
subject.delete()
return HttpResponse(status=204)
@api_view(['GET', 'POST'])
def attendance_list(request):
if request.method == 'GET':
attendance = Attendance.objects.all()
serializer = AttendanceSerializer(attendance, many=True)
return Response(serializer.data)
elif request.method == 'POST':
serializer = AttendanceSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class FileView(APIView):
# parser_classes = (MultiPartParser, FormParser)
def post(self, request, *args, **kwargs):
file_serializer = FileSerializer(data=request.data)
if file_serializer.is_valid():
file_serializer.save()
return Response(file_serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(file_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
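# Illustrative usage of the upload endpoint above (an assumption: this app's URLs
# are mounted under /attendance/, as the fetch() call in Initiate_lecture.html suggests):
#
# import requests
# with open('report.pdf', 'rb') as f:
#     resp = requests.post('http://127.0.0.1:8000/attendance/upload/',
#                          files={'file': f}, data={'remark': 'week1'})
# print(resp.status_code, resp.json())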
# this API will initiate the lecture
class InitiateLecture(APIView):
def get(self, request):
record.initiate()
return Response({
"response": "success"
})
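# The front end triggers this endpoint with
#     fetch('http://127.0.0.1:8000/attendance/process-initiate-lecture')
# as wired up in Initiate_lecture.html below.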
# Generated by Django 2.2.12 on 2020-09-23 11:08
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('FirstApp', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='File',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file', models.FileField(upload_to='')),
('remark', models.CharField(max_length=20)),
('timestamp', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Student',
fields=[
('studentId', models.CharField(max_length=10, primary_key=True, serialize=False)),
('studentFirstName', models.CharField(max_length=100)),
('studentLastName', models.CharField(max_length=100)),
('password', models.CharField(max_length=100)),
('year', models.CharField(max_length=100)),
('semester', models.CharField(max_length=100)),
('batch', models.CharField(max_length=100)),
('faculty', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Lecture',
fields=[
('lectureID', models.CharField(max_length=10, primary_key=True, serialize=False)),
('startTime', models.DateField()),
('endTime', models.DateField()),
('day', models.CharField(max_length=20)),
('subject', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='FirstApp.Subject')),
],
),
migrations.CreateModel(
name='Attendance',
fields=[
('attendanceID', models.CharField(max_length=10, primary_key=True, serialize=False)),
('date', models.DateField()),
('attendance', models.BooleanField()),
('feedback', models.CharField(blank=True, max_length=50, null=True)),
('student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='AttendanceApp.Student')),
('subject', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='FirstApp.Subject')),
],
),
]
# from django.db import models
from djongo import models
from FirstApp.MongoModels import Subject
class Student(models.Model):
studentId = models.CharField(primary_key=True, max_length=10)
studentFirstName = models.CharField(max_length=100)
studentLastName = models.CharField(max_length=100)
password = models.CharField(max_length=100)
year = models.CharField(max_length=100)
semester = models.CharField(max_length=100)
batch = models.CharField(max_length=100)
faculty = models.CharField(max_length=100)
def __str__(self):
return self.studentId
# class Subject(models.Model):
# subjectId = models.CharField(primary_key=True, max_length=10)
# subjectName = models.CharField(max_length=100)
# LecturerInCharge = models.CharField(max_length=100)
#
# def __str__(self):
# return self.subjectId
class Attendance(models.Model):
attendanceID = models.CharField(primary_key=True, max_length=10)
student = models.ForeignKey(Student, on_delete=models.CASCADE)
subject = models.ForeignKey(Subject, on_delete=models.CASCADE)
date = models.DateField()
attendance = models.BooleanField()
feedback = models.CharField(max_length=50, null=True, blank=True)
def __str__(self):
return self.attendanceID
class File(models.Model):
file = models.FileField(blank=False, null=False)
remark = models.CharField(max_length=20)
timestamp = models.DateTimeField(auto_now_add=True)
class Lecture(models.Model):
lectureID = models.CharField(primary_key=True, max_length=10)
subject = models.ForeignKey(Subject, on_delete=models.CASCADE)
startTime = models.DateField()
endTime = models.DateField()
day = models.CharField(max_length=20)
from django.db import models
# Create your models here.
import numpy as np
import os
import cv2
filename = 'video.avi'
frames_per_second = 24.0
res = '720p'
# Set resolution for the video capture
# Function adapted from https://kirr.co/0l6qmh
def change_res(cap, width, height):
cap.set(3, width)
cap.set(4, height)
# Standard Video Dimensions Sizes
STD_DIMENSIONS = {
"480p": (640, 480),
"720p": (1280, 720),
"1080p": (1920, 1080),
"4k": (3840, 2160),
}
# grab resolution dimensions and set video capture to it.
def get_dims(cap, res='1080p'):
width, height = STD_DIMENSIONS["480p"]
if res in STD_DIMENSIONS:
width,height = STD_DIMENSIONS[res]
        ## change the current capture device
## to the resulting resolution
change_res(cap, width, height)
return width, height
# Video Encoding, might require additional installs
# Types of Codecs: http://www.fourcc.org/codecs.php
VIDEO_TYPE = {
'avi': cv2.VideoWriter_fourcc(*'XVID'),
#'mp4': cv2.VideoWriter_fourcc(*'H264'),
'mp4': cv2.VideoWriter_fourcc(*'XVID'),
}
def get_video_type(filename):
filename, ext = os.path.splitext(filename)
if ext in VIDEO_TYPE:
return VIDEO_TYPE[ext]
return VIDEO_TYPE['avi']
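# e.g. get_video_type('video.avi') and get_video_type('video.mp4') both resolve to
# the XVID fourcc defined above; any unknown extension falls back to the 'avi' entry.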
def initiate():
cap = cv2.VideoCapture(0)
    # use the module-level frames_per_second constant rather than a hard-coded rate
    out = cv2.VideoWriter(filename, get_video_type(filename), frames_per_second, get_dims(cap, res))
    while True:
        ret, frame = cap.read()
        if not ret:
            # stop cleanly if the capture device stops returning frames
            break
        out.write(frame)
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
cap.release()
out.release()
cv2.destroyAllWindows()
\ No newline at end of file
from rest_framework import serializers
from FirstApp.serializers import SubjectSerializer
from .models import Student, Subject, Attendance, File
class StudentSerializer(serializers.ModelSerializer):
class Meta:
model = Student
fields = '__all__'
#
# class SubjectSerializer(serializers.ModelSerializer):
# class Meta:
# model = Subject
# fields = '__all__'
class AttendanceSerializer(serializers.ModelSerializer):
subject = SubjectSerializer()
class Meta:
model = Attendance
fields = '__all__'
class FileSerializer(serializers.ModelSerializer):
    class Meta:
model = File
fields = ('file', 'remark', 'timestamp')
{% extends 'FirstApp/template.html' %}
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Title</title>
</head>
<body>
{% block javascript %}
{% load static %}
<!-- Bootstrap core JavaScript-->
<script src="{% static 'FirstApp/vendor/jquery/jquery.min.js' %}"></script>
<script src="{% static 'FirstApp/vendor/bootstrap/js/bootstrap.bundle.min.js' %}"></script>
<!-- Page level plugins -->
<script src="{% static 'FirstApp/vendor/datatables/jquery.dataTables.min.js' %}"></script>
<script src="{% static 'FirstApp/vendor/datatables/dataTables.bootstrap4.min.js' %}"></script>
<!-- Page level custom scripts -->
<script src="{% static 'FirstApp/js/demo/datatables-demo.js' %}"></script>
<!-- Core plugin JavaScript-->
<script src="{% static 'FirstApp/vendor/jquery-easing/jquery.easing.min.js' %}"></script>
<!-- Load TensorFlow.js -->
<script src="https://unpkg.com/@tensorflow/tfjs"></script>
<!-- Load Posenet -->
<script src="https://unpkg.com/@tensorflow-models/posenet">
</script>
<script type="text/javascript">
$(document).ready(function() {
$('#initiate_btn').click(function() {
fetch('http://127.0.0.1:8000/attendance/process-initiate-lecture')
.then((res) => res.json())
.then((out) => alert(out.response))
.catch((err) => alert('error: ' + err))
});
})
</script>
{% endblock %}
{% block 'container-fluid' %}
<div class="container">
<div class="row p-4">
<div class="col-lg-12">
<div class="text-center">
<div class="card">
<div class="card-header">
<h4 class="card-title">Starting the lecture....</h4>
</div>
<div class="card-body">
<button type="button" class="btn btn-success" id="initiate_btn">Initiate Lecture</button>
</div>
</div>
</div>
</div>
</div>
</div>
{% endblock %}
</body>
</html>
\ No newline at end of file
from django.urls import path, re_path, include
from .api import student_list, student_detail, subject_list, subject_detail, attendance_list, StudentAPIView, \
StudentDetails
from django.conf.urls import url
from .api import FileView, InitiateLecture
from . import views
urlpatterns = [
    path('', views.first),
path('students/', student_list),
path('students/<str:pk>', student_detail),
path('subjects/', subject_list),
path('subjects/<str:pk>', subject_detail),
path('attendance/', attendance_list),
path('initiate-lecture', views.initiate_lecture),
# class based
path('student/', StudentAPIView.as_view()),
path('student/<str:pk>', StudentDetails.as_view()),
url(r'^upload/$', FileView.as_view(), name='file-upload'),
# this url will initiate the lecture
url(r'^process-initiate-lecture/$', InitiateLecture.as_view())
]
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def first(request):
return HttpResponse('<h1>Hello Attendance App</h1>')
def initiate_lecture(request):
return render(request, "AttendanceApp/Initiate_lecture.html")
\ No newline at end of file
@@ -111,6 +111,36 @@ class LectureVideo(models.Model):
return self.lecture_video_id
class Landmarks(models.Model):
landmark = models.CharField(max_length=15)
class Meta:
abstract = True
# lecture video time landmarks table
class LectureVideoTimeLandmarks(models.Model):
lecture_video_time_landmarks_id = models.CharField(max_length=15)
lecture_video_id = models.ForeignKey(LectureVideo, on_delete=models.CASCADE)
    time_landmarks = models.ArrayField(model_container=Landmarks)
def __str__(self):
return self.lecture_video_time_landmarks_id
# lecture video frame landmarks table
class LectureVideoFrameLandmarks(models.Model):
lecture_video_frame_landmarks_id = models.CharField(max_length=15)
lecture_video_id = models.ForeignKey(LectureVideo, on_delete=models.CASCADE)
    frame_landmarks = models.ArrayField(model_container=Landmarks)
def __str__(self):
return self.lecture_video_frame_landmarks_id
# ACTIVITY section
# lecture activity table
class LectureActivity(models.Model):
lecture_activity_id = models.CharField(max_length=10)
@@ -124,6 +154,60 @@ class LectureActivity(models.Model):
return self.lecture_activity_id
# this abstract class will define the lecture activity frame group percentages
class LectureActivityFrameGroupPercentages(models.Model):
    # max_digits=4 so a full 100.0 percent fits; max_digits=3 caps values at 99.9
    phone_perct = models.DecimalField(default=0.0, max_digits=4, decimal_places=1)
    listen_perct = models.DecimalField(default=0.0, max_digits=4, decimal_places=1)
    note_perct = models.DecimalField(default=0.0, max_digits=4, decimal_places=1)
class Meta:
abstract = True
# this abstract class will define the details for an activity frame group
class LectureActivityFrameGroupDetails(models.Model):
frame_group = models.CharField(max_length=10)
frame_group_percentages = models.EmbeddedField(
model_container=LectureActivityFrameGroupPercentages
)
class Meta:
abstract = True
# this class will contain the activity frame groupings
class LectureActivityFrameGroupings(models.Model):
lecture_activity_frame_groupings_id = models.CharField(max_length=15, default="")
lecture_activity_id = models.ForeignKey(LectureActivity, on_delete=models.CASCADE)
frame_group_details = models.ArrayField(model_container=LectureActivityFrameGroupDetails)
def __str__(self):
return self.lecture_activity_frame_groupings_id
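# Illustrative only: with djongo, one LectureActivityFrameGroupings document might be
# stored roughly as below (the id and frame-group keys are made up for the example):
# {
#     "lecture_activity_frame_groupings_id": "LAFG00001",
#     "lecture_activity_id": <pk of a LectureActivity row>,
#     "frame_group_details": [
#         {"frame_group": "0-10", "frame_group_percentages":
#             {"phone_perct": 10.0, "listen_perct": 80.0, "note_perct": 10.0}},
#     ]
# }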
# this abstract class will contain lecture activity frame recognition details
class LectureActivityFrameRecognitionDetails(models.Model):
frame_name = models.CharField(max_length=15)
phone_perct = models.FloatField()
listen_perct = models.FloatField()
note_perct = models.FloatField()
class Meta:
abstract = True
# this class will contain lecture activity frame recognitions
class LectureActivityFrameRecognitions(models.Model):
lecture_activity_frame_recognition_id = models.CharField(max_length=15)
lecture_activity_id = models.ForeignKey(LectureActivity, on_delete=models.CASCADE)
    frame_recognition_details = models.ArrayField(model_container=LectureActivityFrameRecognitionDetails)
def __str__(self):
return self.lecture_activity_frame_recognition_id
# EMOTIONS section
# Lecture emotion report
class LectureEmotionReport(models.Model):
@@ -141,8 +225,132 @@ class LectureEmotionReport(models.Model):
return self.lecture_emotion_id
# this abstract class will define the lecture emotion frame group percentages
class LectureEmotionFrameGroupPercentages(models.Model):
    # max_digits=4 so a full 100.0 percent fits; max_digits=3 caps values at 99.9
    happy_perct = models.DecimalField(default=0.0, max_digits=4, decimal_places=1)
    sad_perct = models.DecimalField(default=0.0, max_digits=4, decimal_places=1)
    angry_perct = models.DecimalField(default=0.0, max_digits=4, decimal_places=1)
    disgust_perct = models.DecimalField(default=0.0, max_digits=4, decimal_places=1)
    surprise_perct = models.DecimalField(default=0.0, max_digits=4, decimal_places=1)
    neutral_perct = models.DecimalField(default=0.0, max_digits=4, decimal_places=1)
class Meta:
abstract = True
# this abstract class will define the details for an emotion frame group
class LectureEmotionFrameGroupDetails(models.Model):
frame_group = models.CharField(max_length=10)
frame_group_percentages = models.EmbeddedField(
model_container=LectureEmotionFrameGroupPercentages
)
class Meta:
abstract = True
# this class will contain the emotion frame groupings
class LectureEmotionFrameGroupings(models.Model):
lecture_emotion_frame_groupings_id = models.CharField(max_length=15, default="")
lecture_emotion_id = models.ForeignKey(LectureEmotionReport, on_delete=models.CASCADE)
frame_group_details = models.ArrayField(model_container=LectureEmotionFrameGroupDetails)
def __str__(self):
return self.lecture_emotion_frame_groupings_id
# this abstract class will contain lecture emotion frame recognition details
class LectureEmotionFrameRecognitionDetails(models.Model):
frame_name = models.CharField(max_length=15)
happy_perct = models.FloatField()
sad_perct = models.FloatField()
angry_perct = models.FloatField()
surprise_perct = models.FloatField()
neutral_perct = models.FloatField()
class Meta:
abstract = True
# this class will contain lecture emotion frame recognitions
class LectureEmotionFrameRecognitions(models.Model):
lecture_emotion_frame_recognition_id = models.CharField(max_length=15)
lecture_emotion_id = models.ForeignKey(LectureEmotionReport, on_delete=models.CASCADE)
    frame_recognition_details = models.ArrayField(model_container=LectureEmotionFrameRecognitionDetails)
def __str__(self):
return self.lecture_emotion_frame_recognition_id
# POSE section
# lecture pose estimation
class LecturePoseEstimation(models.Model):
lecture_pose_id = models.CharField(max_length=10)
lecture_video_id = models.ForeignKey(LectureVideo, on_delete=models.CASCADE)
\ No newline at end of file
class LectureGazeEstimation(models.Model):
lecture_gaze_id = models.CharField(max_length=10)
lecture_video_id = models.ForeignKey(LectureVideo, on_delete=models.CASCADE)
    # max_digits=4 so a full 100.0 percent fits; max_digits=3 caps values at 99.9
    looking_up_and_right_perct = models.DecimalField(default=0.0, max_digits=4, decimal_places=1)
    looking_up_and_left_perct = models.DecimalField(default=0.0, max_digits=4, decimal_places=1)
    looking_down_and_right_perct = models.DecimalField(default=0.0, max_digits=4, decimal_places=1)
    looking_down_and_left_perct = models.DecimalField(default=0.0, max_digits=4, decimal_places=1)
    looking_front_perct = models.DecimalField(default=0.0, max_digits=4, decimal_places=1)
def __str__(self):
return self.lecture_gaze_id
# this abstract class will define the lecture gaze frame group percentages
class LectureGazeFrameGroupPercentages(models.Model):
    # max_digits=4 so a full 100.0 percent fits; max_digits=3 caps values at 99.9
    upright_perct = models.DecimalField(default=0.0, max_digits=4, decimal_places=1)
    upleft_perct = models.DecimalField(default=0.0, max_digits=4, decimal_places=1)
    downright_perct = models.DecimalField(default=0.0, max_digits=4, decimal_places=1)
    downleft_perct = models.DecimalField(default=0.0, max_digits=4, decimal_places=1)
    front_perct = models.DecimalField(default=0.0, max_digits=4, decimal_places=1)
class Meta:
abstract = True
# this abstract class will define the details for a gaze frame group
class LectureGazeFrameGroupDetails(models.Model):
frame_group = models.CharField(max_length=10)
frame_group_percentages = models.EmbeddedField(
model_container=LectureGazeFrameGroupPercentages
)
class Meta:
abstract = True
# this class will contain the gaze frame groupings
class LectureGazeFrameGroupings(models.Model):
lecture_gaze_frame_groupings_id = models.CharField(max_length=15, default="")
lecture_gaze_id = models.ForeignKey(LectureGazeEstimation, on_delete=models.CASCADE)
frame_group_details = models.ArrayField(model_container=LectureGazeFrameGroupDetails)
def __str__(self):
return self.lecture_gaze_frame_groupings_id
# this abstract class will contain lecture gaze frame recognition details
class LectureGazeFrameRecognitionDetails(models.Model):
frame_name = models.CharField(max_length=15)
upright_perct = models.FloatField()
upleft_perct = models.FloatField()
downright_perct = models.FloatField()
downleft_perct = models.FloatField()
front_perct = models.FloatField()
class Meta:
abstract = True
# this class will contain lecture gaze frame recognitions
class LectureGazeFrameRecognitions(models.Model):
lecture_gaze_frame_recognition_id = models.CharField(max_length=15)
lecture_gaze_id = models.ForeignKey(LectureGazeEstimation, on_delete=models.CASCADE)
    frame_recognition_details = models.ArrayField(model_container=LectureGazeFrameRecognitionDetails)
def __str__(self):
return self.lecture_gaze_frame_recognition_id
\ No newline at end of file
@@ -11,4 +11,5 @@ admin.site.register(LecturerSubject)
admin.site.register(LecturerCredentials)
admin.site.register(FacultyTimetable)
admin.site.register(LectureVideo)
admin.site.register(LectureActivity)
admin.site.register(LectureGazeEstimation)
\ No newline at end of file
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from .MongoModels import *
from rest_framework.views import *
from .ImageOperations import saveImage
from .logic import head_pose_estimation
from .logic import video_extraction
from .logic import video_extraction as ve
from .logic import activity_recognition as ar
from .logic import posenet_calculation as pc
from . import emotion_detector as ed
from .logic import id_generator as ig
from .logic import pdf_file_generator as pdf
from .logic import head_gaze_estimation as hge
from .models import Teachers, Video, VideoMeta, RegisterUser
from .serializers import *
import datetime
# to create images
class ImageViewSet(APIView):
@@ -41,6 +47,7 @@ class VideoExtractionViewSet(APIView):
response = video_extraction.VideoExtractor(request.data)
return Response({"response": response})
# lecture emotions view set
class LectureEmotionViewSet(APIView):
@@ -75,6 +82,7 @@ class LectureViewSet(APIView):
).save()
return Response({"response": request})
# API for Faculties
class FacultyViewSet(APIView):
@@ -90,6 +98,7 @@ class FacultyViewSet(APIView):
).save()
return Response(status=201, data={"response": "successfully added"})
# API for subjects
class SubjectViewSet(APIView):
@@ -99,7 +108,6 @@ class SubjectViewSet(APIView):
return Response(serializer.data)
def post(self, request):
serializer = SubjectSerializer(data=request.data)
        if serializer.is_valid(raise_exception=True):
serializer.create(validated_data=request.data)
@@ -118,7 +126,6 @@ class LecturerViewSet(APIView):
return Response(serializer.data)
def post(self, request):
serializer = LecturerSerializer(data=request.data)
        if serializer.is_valid(raise_exception=True):
serializer.create(validated_data=request.data)
@@ -137,7 +144,6 @@ class LecturerSubjectViewSet(APIView):
return Response(serializer.data)
def post(self, request):
serializer = LecturerSubjectSerializer(data=request.data)
        if serializer.is_valid(raise_exception=True):
serializer.create(validated_data=request.data)
@@ -156,7 +162,6 @@ class FacultyTimetableViewSet(APIView):
return Response(serializer.data)
def post(self, request):
serializer = FacultyTimetableSerializer(data=request.data)
        if serializer.is_valid(raise_exception=True):
serializer.create(validated_data=request.data)
@@ -175,7 +180,6 @@ class LectureVideoViewSet(APIView):
return Response(serializer.data)
def post(self, request):
serializer = LectureVideoSerializer(data=request.data)
        if serializer.is_valid(raise_exception=True):
serializer.create(validated_data=request.data)
@@ -194,7 +198,7 @@ class GetLectureVideoViewSet(APIView):
lecturer_video = LectureVideo.objects.filter(lecturer_id=lecturer, date=date)
serializer = LectureVideoSerializer(lecturer_video, many=True)
        # lecture_video_id = serializer.data[index]['lecture_video_id']
        lecture_video_id = serializer.data[0]['lecture_video_id']
print('lecture video id: ', lecture_video_id)
activities = LectureActivity.objects.filter(lecture_video_id__lecture_video_id=lecture_video_id)
isActivityFound = (len(activities) > 0)
@@ -205,6 +209,34 @@
})
# this API will retrieve lecture video details for lecturer Home Page
class GetLectureVideoViewSetForHome(APIView):
def get(self, request):
lecturer = request.query_params.get('lecturer')
date = request.query_params.get('date')
counter = int(request.query_params.get('counter'))
lecturer_video = LectureVideo.objects.filter(lecturer_id=lecturer, date=date)
serializer = LectureVideoSerializer(lecturer_video, many=True)
response = {}
        # to check whether there is more than one lecture video for the query
if len(serializer.data) > 1:
lecture_video_id = serializer.data[counter]['lecture_video_id']
response = serializer.data[counter]
else:
lecture_video_id = serializer.data[0]['lecture_video_id']
response = serializer.data[0]
return Response({
"response": response
})
# ACTIVITY
# API for lecture activities
class LectureActivityViewSet(APIView):
@@ -215,7 +247,6 @@ class LectureActivityViewSet(APIView):
return Response(serializer.data)
def post(self, request):
serializer = LectureActivitySerializer(data=request.data)
        if serializer.is_valid(raise_exception=True):
serializer.create(validated_data=request.data)
@@ -248,7 +279,7 @@ class LectureActivityProcess(APIView):
def get(self, request):
video_name = request.query_params.get('lecture_video_name')
        video_id = int(request.query_params.get('lecture_video_id'))
percentages = ar.activity_recognition(video_name)
self.activity(video_id, percentages)
return Response({"response": True})
@@ -257,21 +288,39 @@
pass
def activity(self, lec_video_id, percentages):
        # lec_video = LectureVideo.objects.filter(lecture_video_id=lec_video_id)
        lec_video = LectureVideo.objects.filter(id=lec_video_id)
        lec_video_serializer = LectureVideoSerializer(lec_video, many=True)
        lec_video_data = lec_video_serializer.data[0]
        last_lec_activity = LectureActivity.objects.order_by('lecture_activity_id').last()
new_lecture_activity_id = ig.generate_new_id(last_lec_activity.lecture_activity_id)
# creating a new lecture activity
LectureActivity(
lecture_activity_id=new_lecture_activity_id,
            lecture_video_id_id=lec_video_id,
talking_perct=percentages['talking_perct'],
phone_perct=percentages['phone_perct'],
listening_perct=percentages['listening_perct'],
writing_perct=percentages['writing_perct']
).save()
# get the video name
video_name = lec_video_data['video_name']
# then save the frame recognitions to the database
_ = ar.save_frame_recognition(video_name)
# save the time landmarks and frame landmarks
ve.save_time_landmarks(video_name)
frame_landmarks, frame_group_dict = ve.save_frame_landmarks(video_name)
# then save the activity frame groupings
ar.save_frame_groupings(video_name, frame_landmarks, frame_group_dict)
class GetLectureActivityDetections(APIView):
@@ -330,13 +379,63 @@ class GetLectureActivityRecognitionsForFrames(APIView):
def get(self, request):
video_name = request.query_params.get('video_name')
# finding the existence of Lecture activity frame recognition record
isExist = LectureActivityFrameRecognitions.objects.filter(lecture_activity_id__lecture_video_id__video_name=video_name).exists()
if (isExist):
lecture_activity_frame_recognitions = LectureActivityFrameRecognitions.objects.filter(lecture_activity_id__lecture_video_id__video_name=video_name)
lecture_activity_frame_recognitions_ser = LectureActivityFrameRecognitionsSerializer(lecture_activity_frame_recognitions, many=True)
lecture_activity_frame_recognitions_data = lecture_activity_frame_recognitions_ser.data[0]
frame_detections = lecture_activity_frame_recognitions_data['frame_recognition_details']
return Response({
"response": frame_detections
})
else:
# perform the action of saving frame recognitions to database
frame_detections = ar.save_frame_recognition(video_name)
return Response({
"response": frame_detections
})
# API to create reports for Activity
class GenerateActivityReport(APIView):
def get(self, request):
subject = request.query_params.get('subject')
lecturer = int(request.query_params.get('lecturer'))
date = request.query_params.get('date')
# retrieve the subject name
subject_query = Subject.objects.filter(subject_code=subject)
subject_serializer = SubjectSerializer(subject_query, many=True)
subject_name = subject_serializer.data[0]['name']
# retrieve the lecturer name
# lecturer_query = Lecturer.objects.filter(lecturer_id=lecturer)
lecturer_query = Lecturer.objects.filter(id=lecturer)
lecturer_serializer = LecturerSerializer(lecturer_query, many=True)
lecturer_lname = lecturer_serializer.data[0]['lname']
lecturer_fname = lecturer_serializer.data[0]['fname']
lecturer_fullname = lecturer_fname + " " + lecturer_lname
        # set up the report details dictionary (renamed from 'object' to avoid
        # shadowing the Python built-in)
        report_details = {}
        report_details['subject_name'] = subject_name
        report_details['lecturer_name'] = lecturer_fullname
        report_details['date'] = date
        pdf.generate_pdf_file(report_details)
return Response({
"response": "success"
})
###### EMOTIONS section #####
@@ -358,6 +457,7 @@ class GetLectureEmotionAvailability(APIView):
"isActivityFound": isActivityFound
})
# to process lecture emotions for a lecture video
class LectureEmotionProcess(APIView):
@@ -375,6 +475,7 @@ class LectureEmotionProcess(APIView):
def save_emotion_report(self, lec_video_id, percentages):
        lec_video = LectureVideo.objects.get(lecture_video_id=lec_video_id)
        # serialize the single instance directly; many=True on a .get() result would fail
        lec_video_serializer = LectureVideoSerializer(lec_video)
        lec_video_data = lec_video_serializer.data
last_lec_emotion = LectureEmotionReport.objects.order_by('lecture_emotion_id').last()
new_lecture_emotion_id = ig.generate_new_id(last_lec_emotion.lecture_emotion_id)
@@ -389,6 +490,20 @@
surprise_perct=percentages.surprise_perct
).save()
# get the video name
video_name = lec_video_data['video_name']
# then save the frame recognition details to the database
_ = ed.save_frame_recognitions(video_name)
# retrieve the frame landmarks and frame group dictionary
frame_landmarks, frame_group_dict = ve.getFrameLandmarks(video_name, "Emotion")
# then save emotion frame groupings
ed.save_frame_groupings(video_name, frame_landmarks, frame_group_dict)
# to get a lecture emotion report
class GetLectureEmotionReportViewSet(APIView):
@@ -442,11 +557,32 @@ class GetLectureEmotionRecognitionsForFrames(APIView):
def get(self, request):
video_name = request.query_params.get('video_name')
# finding the existence of Lecture emotion frame recognition record
isExist = LectureEmotionFrameRecognitions.objects.filter(
lecture_emotion_id__lecture_video_id__video_name=video_name).exists()
if (isExist):
lecture_emotion_frame_recognitions = LectureEmotionFrameRecognitions.objects.filter(
lecture_emotion_id__lecture_video_id__video_name=video_name)
lecture_emotion_frame_recognitions_ser = LectureEmotionFrameRecognitionsSerializer(
lecture_emotion_frame_recognitions, many=True)
lecture_emotion_frame_recognitions_data = lecture_emotion_frame_recognitions_ser.data[0]
frame_detections = lecture_emotion_frame_recognitions_data['frame_recognition_details']
return Response({
"response": frame_detections
})
else:
# save the frame recognitions into the database
frame_detections = ed.save_frame_recognitions(video_name)
return Response({
"response": frame_detections
})
##### POSE #####
@@ -455,11 +591,12 @@ class GetLectureVideoForPose(APIView):
def get(self, request):
lecturer = request.query_params.get('lecturer')
date = request.query_params.get('date')
index = int(request.query_params.get('index'))
lecturer_video = LectureVideo.objects.filter(lecturer_id=lecturer, date=date)
serializer = LectureVideoSerializer(lecturer_video, many=True)
return Response({
"response": serializer.data
"response": serializer.data[index]
})
@@ -514,3 +651,622 @@ class ProcessIndividualStudentPoseEstimation(APIView):
})
##### GAZE ESTIMATION SECTION #####
class GetLectureGazeEstimationAvailaibility(APIView):
def get(self, request):
lecturer = request.query_params.get('lecturer')
date = request.query_params.get('date')
index = int(request.query_params.get('index'))
lecturer_video = LectureVideo.objects.filter(lecturer_id=lecturer, date=date)
serializer = LectureVideoSerializer(lecturer_video, many=True)
lecture_video_id = serializer.data[index]['lecture_video_id']
gaze_estimation = LectureGazeEstimation.objects.filter(lecture_video_id__lecture_video_id=lecture_video_id)
isGazeEstimationFound = (len(gaze_estimation) > 0)
return Response({
"response": serializer.data[index],
"isGazeEstimationFound": isGazeEstimationFound
})
# the API to process lecture gaze estimation
class ProcessLectureGazeEstimation(APIView):
def get(self, request):
video_name = request.query_params.get('lecture_video_name')
video_id = request.query_params.get('lecture_video_id')
percentages = hge.process_gaze_estimation(video_name)
self.estimate_gaze(video_id, percentages)
return Response({"response": True})
def post(self, request):
pass
def estimate_gaze(self, lec_video_id, percentages):
lec_video = LectureVideo.objects.get(lecture_video_id=lec_video_id)
last_lec_gaze = LectureGazeEstimation.objects.order_by('lecture_gaze_id').last()
        # serialize the single instance directly; many=True on a .get() result would fail
        lec_video_serializer = LectureVideoSerializer(lec_video)
        lec_video_data = lec_video_serializer.data
new_lecture_gaze_id = "LG000001" if (last_lec_gaze is None) else ig.generate_new_id(
last_lec_gaze.lecture_gaze_id)
# creating a new lecture gaze estimation
LectureGazeEstimation(
lecture_gaze_id=new_lecture_gaze_id,
lecture_video_id=lec_video,
looking_up_and_right_perct=percentages['head_up_right_perct'],
looking_up_and_left_perct=percentages['head_up_left_perct'],
looking_down_and_right_perct=percentages['head_down_right_perct'],
looking_down_and_left_perct=percentages['head_down_left_perct'],
looking_front_perct=percentages['head_front_perct']
).save()
# get the video name
video_name = lec_video_data['video_name']
# then save the frame recognitions to the database
_ = hge.save_frame_detections(video_name)
# get the frame landmarks and frame group dictionary
frame_landmarks, frame_group_dict = ve.getFrameLandmarks(video_name, "Gaze")
# then save the gaze frame groupings to the database
hge.save_frame_groupings(video_name, frame_landmarks, frame_group_dict)
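# Note: ig.generate_new_id comes from .logic.id_generator, which is not part of this
# diff. A minimal sketch of what it plausibly does (an assumption, not the actual
# implementation): split the previous id into its alphabetic prefix and zero-padded
# counter, then increment the counter.
#
# def generate_new_id(previous_id):
#     prefix = previous_id.rstrip('0123456789')    # "LG000001" -> "LG"
#     counter = previous_id[len(prefix):]          # -> "000001"
#     return prefix + str(int(counter) + 1).zfill(len(counter))   # -> "LG000002"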
# the API to retrieve lecture gaze estimation
class GetLectureGazeEstimationViewSet(APIView):
def get(self, request):
lecture_video_id = request.query_params.get('lecture_video_id')
lecture_video_name = request.query_params.get('lecture_video_name')
# retrieve the extracted frames
extracted = hge.getExtractedFrames(lecture_video_name)
lecture_gaze_estimations = LectureGazeEstimation.objects.filter(
lecture_video_id__lecture_video_id=lecture_video_id)
serializer = LectureGazeEstimationSerializer(lecture_gaze_estimations, many=True)
return Response({
"response": serializer.data,
"extracted": extracted
})
# the API to retrieve Gaze estimation for frames
class GetLectureGazeEstimationForFrames(APIView):
def get(self, request):
video_name = request.query_params.get('video_name')
# finding the existence of Lecture gaze frame recognition record
isExist = LectureGazeFrameRecognitions.objects.filter(
lecture_gaze_id__lecture_video_id__video_name=video_name).exists()
if (isExist):
lecture_gaze_frame_recognitions = LectureGazeFrameRecognitions.objects.filter(
lecture_gaze_id__lecture_video_id__video_name=video_name)
lecture_gaze_frame_recognitions_ser = LectureGazeFrameRecognitionsSerializer(
lecture_gaze_frame_recognitions, many=True)
lecture_gaze_frame_recognitions_data = lecture_gaze_frame_recognitions_ser.data[0]
frame_detections = lecture_gaze_frame_recognitions_data['frame_recognition_details']
return Response({
"response": frame_detections
})
else:
# save recognition details into the database
frame_detections = hge.save_frame_detections(video_name)
return Response({
"response": frame_detections
})
##### VIDEO RESULTS SECTION #####
# this API finds the lectures which are yet to be processed
class LectureProcessAvailability(APIView):
def get(self, request):
lecturer = request.query_params.get('lecturer')
lecturer_videos = LectureVideo.objects.filter(lecturer_id=lecturer)
serializer = LectureVideoSerializer(lecturer_videos, many=True)
data = serializer.data
for video in data:
print('video name: ', video['video_name'])
return Response({
"response": "hello"
})
##### VIEW STUDENT BEHAVIOR SUMMARY SECTION #####
# this API will retrieve student behavior summary for specified time period
class GetStudentBehaviorSummaryForPeriod(APIView):
def get(self, request):
option = request.query_params.get('option')
lecturer = request.query_params.get('lecturer')
int_lecturer = int(lecturer)
int_option = int(option)
# int_option = 150
isRecordFound = False
activity_percentages = {}
emotion_percentages = {}
gaze_estimation_percentages = {}
        individual_lec_activities = []
individual_lec_emotions = []
individual_lec_gaze_estimations = []
activity_labels = []
emotion_labels = []
gaze_estimation_labels = []
current_date = datetime.datetime.now().date()
option_date = datetime.timedelta(days=int_option)
previous_date = current_date - option_date
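        # e.g. option '30' on 2020-09-23 gives previous_date 2020-08-24, so only
        # lectures from the last 30 days pass the date filters below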
# retrieving lecture activities
lec_activity = LectureActivity.objects.filter(
lecture_video_id__date__gte=previous_date,
lecture_video_id__date__lte=current_date,
lecture_video_id__lecturer=lecturer
)
if len(lec_activity) > 0:
isRecordFound = True
activity_serializer = LectureActivitySerializer(lec_activity, many=True)
activity_data = activity_serializer.data
            activity_percentages, individual_lec_activities, activity_labels = ar.get_student_activity_summary_for_period(activity_data)
# retrieving lecture emotions
lec_emotion = LectureEmotionReport.objects.filter(
lecture_video_id__date__gte=previous_date,
lecture_video_id__date__lte=current_date,
lecture_video_id__lecturer=lecturer
)
        if len(lec_emotion) > 0:
            # mark that a record was found, mirroring the activity branch above
            isRecordFound = True
            emotion_serializer = LectureEmotionSerializer(lec_emotion, many=True)
emotion_data = emotion_serializer.data
emotion_percentages, individual_lec_emotions, emotion_labels = ed.get_student_emotion_summary_for_period(emotion_data)
# retrieving lecture gaze estimations
lec_gaze_estimation = LectureGazeEstimation.objects.filter(
lecture_video_id__date__gte=previous_date,
lecture_video_id__date__lte=current_date,
lecture_video_id__lecturer=lecturer
)
        # if there is gaze estimation data
        if len(lec_gaze_estimation) > 0:
            # mark that a record was found, mirroring the activity branch above
            isRecordFound = True
            gaze_estimation_serializer = LectureGazeEstimationSerializer(lec_gaze_estimation, many=True)
gaze_estimation_data = gaze_estimation_serializer.data
gaze_estimation_percentages, individual_lec_gaze_estimations, gaze_estimation_labels = hge.get_student_gaze_estimation_summary_for_period(gaze_estimation_data)
return Response({
"activity_response": activity_percentages,
"emotion_response": emotion_percentages,
"gaze_estimation_response": gaze_estimation_percentages,
"individual_activities": individual_lec_activties,
"individual_emotions": individual_lec_emotions,
"individual_gaze_estimations": individual_lec_gaze_estimations,
"activity_labels": activity_labels,
"emotion_labels": emotion_labels,
"gaze_estimation_labels": gaze_estimation_labels,
"isRecordFound": isRecordFound
})
# this API will retrieve lecture video summary time landmarks
class GetLectureVideoSummaryTimeLandmarks(APIView):
def get(self, request):
video_name = request.query_params.get('video_name')
# checking for the existing time landmarks details
isExist = LectureVideoTimeLandmarks.objects.filter(lecture_video_id__video_name=video_name).exists()
if (isExist):
time_landmarks = []
lec_video_time_landmarks = LectureVideoTimeLandmarks.objects.filter(lecture_video_id__video_name=video_name)
lec_video_time_landmarks_ser = LectureVideoTimeLandmarksSerializer(lec_video_time_landmarks, many=True)
lec_video_time_landmarks_data = lec_video_time_landmarks_ser.data[0]
retrieved_landmarks = lec_video_time_landmarks_data["time_landmarks"]
for landmark in retrieved_landmarks:
time_landmarks.append(landmark['landmark'])
# return the response
return Response({
"response": time_landmarks
})
# else:
#
#
# last_lec_video_time_landmarks = LectureVideoTimeLandmarks.objects.order_by('lecture_video_time_landmarks_id').last()
# new_lecture_video_time_landmarks_id = "LVTL00001" if (last_lec_video_time_landmarks is None) else \
# ig.generate_new_id(last_lec_video_time_landmarks.lecture_video_time_landmarks_id)
#
#
# # retrieve lecture video details
# lec_video = LectureVideo.objects.filter(video_name=video_name)
# lec_video_ser = LectureVideoSerializer(lec_video, many=True)
# lec_video_id = lec_video_ser.data[0]['id']
#
#
# # save the landmark details in the db
# time_landmarks = ve.getTimeLandmarks(video_name)
#
# db_time_landmarks = []
#
# # loop through the time landmarks
# for landmark in time_landmarks:
# landmark_obj = Landmarks()
# landmark_obj.landmark = landmark
#
# db_time_landmarks.append(landmark_obj)
#
#
# new_lec_video_time_landmarks = LectureVideoTimeLandmarks()
# new_lec_video_time_landmarks.lecture_video_time_landmarks_id = new_lecture_video_time_landmarks_id
# new_lec_video_time_landmarks.lecture_video_id_id = lec_video_id
# new_lec_video_time_landmarks.time_landmarks = db_time_landmarks
#
# new_lec_video_time_landmarks.save()
#
# return Response({
# "response": time_landmarks
# })
# this API will retrieve lecture activity summary
class GetLectureActivitySummary(APIView):
def get(self, request):
video_name = request.query_params.get('video_name')
# checking the existence of lecture activity frame grouping records in the db
isExist = LectureActivityFrameGroupings.objects.filter(lecture_activity_id__lecture_video_id__video_name=video_name).exists()
if (isExist):
# frame_landmarks, frame_group_dict = ve.getFrameLandmarks(video_name)
frame_group_percentages = {}
frame_landmarks = []
# retrieve frame landmarks from db
lec_video_frame_landmarks = LectureVideoFrameLandmarks.objects.filter(lecture_video_id__video_name=video_name)
lec_video_frame_landmarks_ser = LectureVideoFrameLandmarksSerializer(lec_video_frame_landmarks, many=True)
lec_video_frame_landmarks_data = lec_video_frame_landmarks_ser.data[0]
retrieved_frame_landmarks = lec_video_frame_landmarks_data["frame_landmarks"]
for landmark in retrieved_frame_landmarks:
frame_landmarks.append(landmark['landmark'])
lec_activity_frame_groupings = LectureActivityFrameGroupings.objects.filter(lecture_activity_id__lecture_video_id__video_name=video_name)
lec_activity_frame_groupings_ser = LectureActivityFrameGroupingsSerializer(lec_activity_frame_groupings, many=True)
lec_activity_frame_groupings_data = lec_activity_frame_groupings_ser.data[0]
frame_group_details = lec_activity_frame_groupings_data["frame_group_details"]
# create the new dictionary
for group in frame_group_details:
frame_group_percentages[group['frame_group']] = group['frame_group_percentages']
class_labels = ['phone_perct', 'listen_perct', 'note_perct']
return Response({
"frame_landmarks": frame_landmarks,
"frame_group_percentages": frame_group_percentages,
"activity_labels": class_labels
})
# else:
#
# # retrieve the previous lecture video frame landmarks details
# last_lec_video_frame_landmarks = LectureVideoFrameLandmarks.objects.order_by(
# 'lecture_video_frame_landmarks_id').last()
# new_lecture_video_frame_landmarks_id = "LVFL00001" if (last_lec_video_frame_landmarks is None) else \
# ig.generate_new_id(last_lec_video_frame_landmarks.lecture_video_frame_landmarks_id)
#
#
# frame_landmarks, frame_group_dict = ve.getFrameLandmarks(video_name, "Activity")
# frame_group_percentages, activity_labels = ar.activity_frame_groupings(video_name, frame_landmarks, frame_group_dict)
#
#
# # retrieve lecture video details
# lec_video = LectureVideo.objects.filter(video_name=video_name)
# lec_video_ser = LectureVideoSerializer(lec_video, many=True)
# lec_video_id = lec_video_ser.data[0]['id']
#
#
# # save the frame landmarks details into db (temp method)
# db_frame_landmarks = []
#
# for landmark in frame_landmarks:
# landmark_obj = Landmarks()
# landmark_obj.landmark = landmark
#
# db_frame_landmarks.append(landmark_obj)
#
#
# new_lec_video_frame_landmarks = LectureVideoFrameLandmarks()
# new_lec_video_frame_landmarks.lecture_video_frame_landmarks_id = new_lecture_video_frame_landmarks_id
# new_lec_video_frame_landmarks.lecture_video_id_id = lec_video_id
# new_lec_video_frame_landmarks.frame_landmarks = db_frame_landmarks
#
# new_lec_video_frame_landmarks.save()
#
#
#
# # save the frame group details into db (temp method)
#
# last_lec_activity_frame_grouping = LectureActivityFrameGroupings.objects.order_by('lecture_activity_frame_groupings_id').last()
# new_lecture_activity_frame_grouping_id = "LAFG00001" if (last_lec_activity_frame_grouping is None) else \
# ig.generate_new_id(last_lec_activity_frame_grouping.lecture_activity_frame_groupings_id)
#
# # retrieve the lecture activity id
# lec_activity = LectureActivity.objects.filter(lecture_video_id__video_name=video_name)
# lec_activity_ser = LectureActivitySerializer(lec_activity, many=True)
# lec_activity_id = lec_activity_ser.data[0]['id']
#
# # create the frame group details
# frame_group_details = []
#
# for key in frame_group_percentages.keys():
# # create an object of type 'LectureActivityFrameGroupDetails'
# lec_activity_frame_group_details = LectureActivityFrameGroupDetails()
# lec_activity_frame_group_details.frame_group = key
# lec_activity_frame_group_details.frame_group_percentages = frame_group_percentages[key]
#
# frame_group_details.append(lec_activity_frame_group_details)
#
#
# new_lec_activity_frame_groupings = LectureActivityFrameGroupings()
# new_lec_activity_frame_groupings.lecture_activity_frame_groupings_id = new_lecture_activity_frame_grouping_id
# new_lec_activity_frame_groupings.lecture_activity_id_id = lec_activity_id
# new_lec_activity_frame_groupings.frame_group_details = frame_group_details
#
# # save
# new_lec_activity_frame_groupings.save()
#
#
# return Response({
# "frame_landmarks": frame_landmarks,
# "frame_group_percentages": frame_group_percentages,
# "activity_labels": activity_labels
# })
# this API will retrieve lecture emotion summary
class GetLectureEmotionSummary(APIView):
def get(self, request):
video_name = request.query_params.get('video_name')
# checking the existence of lecture activity frame grouping records in the db
isExist = LectureEmotionFrameGroupings.objects.filter(lecture_emotion_id__lecture_video_id__video_name=video_name).exists()
if (isExist):
frame_group_percentages = {}
frame_landmarks = []
# retrieve frame landmarks from db
lec_video_frame_landmarks = LectureVideoFrameLandmarks.objects.filter(lecture_video_id__video_name=video_name)
lec_video_frame_landmarks_ser = LectureVideoFrameLandmarksSerializer(lec_video_frame_landmarks, many=True)
lec_video_frame_landmarks_data = lec_video_frame_landmarks_ser.data[0]
retrieved_frame_landmarks = lec_video_frame_landmarks_data["frame_landmarks"]
# creating a new list to display in the frontend
for landmark in retrieved_frame_landmarks:
frame_landmarks.append(landmark['landmark'])
# retrieve emotion frame groupings details
lec_emotion_frame_groupings = LectureEmotionFrameGroupings.objects.filter(lecture_emotion_id__lecture_video_id__video_name=video_name)
lec_emotion_frame_groupings_ser = LectureEmotionFrameGroupingsSerializer(lec_emotion_frame_groupings, many=True)
lec_emotion_frame_groupings_data = lec_emotion_frame_groupings_ser.data[0]
frame_group_details = lec_emotion_frame_groupings_data["frame_group_details"]
# create the new dictionary
for group in frame_group_details:
frame_group_percentages[group['frame_group']] = group['frame_group_percentages']
class_labels = ['happy_perct', 'sad_perct', 'angry_perct', 'surprise_perct', 'neutral_perct']
return Response({
"frame_landmarks": frame_landmarks,
"frame_group_percentages": frame_group_percentages,
"emotion_labels": class_labels
})
# else:
#
# frame_landmarks = []
#
# # retrieve frame landmarks from db
# lec_video_frame_landmarks = LectureVideoFrameLandmarks.objects.filter(
# lecture_video_id__video_name=video_name)
# lec_video_frame_landmarks_ser = LectureVideoFrameLandmarksSerializer(lec_video_frame_landmarks, many=True)
# lec_video_frame_landmarks_data = lec_video_frame_landmarks_ser.data[0]
#
# retrieved_frame_landmarks = lec_video_frame_landmarks_data["frame_landmarks"]
#
# # creating a new list to display in the frontend
# for landmark in retrieved_frame_landmarks:
# frame_landmarks.append(int(landmark['landmark']))
#
#
# l, frame_group_dict = ve.getFrameLandmarks(video_name, "Emotion")
# frame_group_percentages, emotion_labels = ed.emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict)
#
#
#
# # save the frame group details into db (temp method)
#
# last_lec_emotion_frame_grouping = LectureEmotionFrameGroupings.objects.order_by('lecture_emotion_frame_groupings_id').last()
# new_lecture_emotion_frame_grouping_id = "LEFG00001" if (last_lec_emotion_frame_grouping is None) else \
# ig.generate_new_id(last_lec_emotion_frame_grouping.lecture_emotion_frame_groupings_id)
#
# # retrieve the lecture emotion id
# lec_emotion = LectureEmotionReport.objects.filter(lecture_video_id__video_name=video_name)
# lec_emotion_ser = LectureEmotionSerializer(lec_emotion, many=True)
# lec_emotion_id = lec_emotion_ser.data[0]['id']
#
# # create the frame group details
# frame_group_details = []
#
# for key in frame_group_percentages.keys():
# # create an object of type 'LectureActivityFrameGroupDetails'
# lec_emotion_frame_group_details = LectureEmotionFrameGroupDetails()
# lec_emotion_frame_group_details.frame_group = key
# lec_emotion_frame_group_details.frame_group_percentages = frame_group_percentages[key]
#
# frame_group_details.append(lec_emotion_frame_group_details)
#
#
# new_lec_emotion_frame_groupings = LectureEmotionFrameGroupings()
# new_lec_emotion_frame_groupings.lecture_emotion_frame_groupings_id = new_lecture_emotion_frame_grouping_id
# new_lec_emotion_frame_groupings.lecture_emotion_id_id = lec_emotion_id
# new_lec_emotion_frame_groupings.frame_group_details = frame_group_details
#
# # save
# new_lec_emotion_frame_groupings.save()
#
#
# return Response({
# "frame_landmarks": frame_landmarks,
# "frame_group_percentages": frame_group_percentages,
# "emotion_labels": emotion_labels
# })
# this API will retrieve lecture gaze summary
class GetLectureGazeSummary(APIView):
def get(self, request):
video_name = request.query_params.get('video_name')
# checking the existence of lecture activity frame grouping records in the db
isExist = LectureGazeFrameGroupings.objects.filter(lecture_gaze_id__lecture_video_id__video_name=video_name).exists()
if (isExist):
# frame_landmarks, frame_group_dict = ve.getFrameLandmarks(video_name)
frame_group_percentages = {}
frame_landmarks = []
# retrieve frame landmarks from db
lec_video_frame_landmarks = LectureVideoFrameLandmarks.objects.filter(lecture_video_id__video_name=video_name)
lec_video_frame_landmarks_ser = LectureVideoFrameLandmarksSerializer(lec_video_frame_landmarks, many=True)
lec_video_frame_landmarks_data = lec_video_frame_landmarks_ser.data[0]
retrieved_frame_landmarks = lec_video_frame_landmarks_data["frame_landmarks"]
for landmark in retrieved_frame_landmarks:
frame_landmarks.append(landmark['landmark'])
# retrieve the frame groupings
lec_gaze_frame_groupings = LectureGazeFrameGroupings.objects.filter(lecture_gaze_id__lecture_video_id__video_name=video_name)
lec_gaze_frame_groupings_ser = LectureGazeFrameGroupingsSerializer(lec_gaze_frame_groupings, many=True)
lec_gaze_frame_groupings_data = lec_gaze_frame_groupings_ser.data[0]
# take the frame group details out of it
frame_group_details = lec_gaze_frame_groupings_data["frame_group_details"]
# create the new dictionary
for group in frame_group_details:
frame_group_percentages[group['frame_group']] = group['frame_group_percentages']
class_labels = ['upright_perct', 'upleft_perct', 'downright_perct', 'downleft_perct', 'front_perct']
return Response({
"frame_landmarks": frame_landmarks,
"frame_group_percentages": frame_group_percentages,
"gaze_labels": class_labels
})
else:
frame_landmarks = []
# retrieve frame landmarks from db
lec_video_frame_landmarks = LectureVideoFrameLandmarks.objects.filter(
lecture_video_id__video_name=video_name)
lec_video_frame_landmarks_ser = LectureVideoFrameLandmarksSerializer(lec_video_frame_landmarks, many=True)
lec_video_frame_landmarks_data = lec_video_frame_landmarks_ser.data[0]
retrieved_frame_landmarks = lec_video_frame_landmarks_data["frame_landmarks"]
# creating a new list to display in the frontend
for landmark in retrieved_frame_landmarks:
frame_landmarks.append(int(landmark['landmark']))
l, frame_group_dict = ve.getFrameLandmarks(video_name, "Gaze")
print('frame group dict: ', frame_group_dict)
frame_group_percentages, gaze_labels = hge.gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dict)
# save the frame group details into db (temp method)
last_lec_gaze_frame_grouping = LectureGazeFrameGroupings.objects.order_by('lecture_gaze_frame_groupings_id').last()
new_lecture_gaze_frame_grouping_id = "LGFG00001" if (last_lec_gaze_frame_grouping is None) else \
ig.generate_new_id(last_lec_gaze_frame_grouping.lecture_gaze_frame_groupings_id)
# retrieve the lecture activity id
lec_gaze = LectureGazeEstimation.objects.filter(lecture_video_id__video_name=video_name)
lec_gaze_ser = LectureGazeEstimationSerializer(lec_gaze, many=True)
lec_gaze_id = lec_gaze_ser.data[0]['id']
# create the frame group details
frame_group_details = []
for key in frame_group_percentages.keys():
# create an object of type 'LectureActivityFrameGroupDetails'
lec_gaze_frame_group_details = LectureGazeFrameGroupDetails()
lec_gaze_frame_group_details.frame_group = key
lec_gaze_frame_group_details.frame_group_percentages = frame_group_percentages[key]
frame_group_details.append(lec_gaze_frame_group_details)
new_lec_gaze_frame_groupings = LectureGazeFrameGroupings()
new_lec_gaze_frame_groupings.lecture_gaze_frame_groupings_id = new_lecture_gaze_frame_grouping_id
new_lec_gaze_frame_groupings.lecture_gaze_id_id = lec_gaze_id
new_lec_gaze_frame_groupings.frame_group_details = frame_group_details
# save
new_lec_gaze_frame_groupings.save()
return Response({
"frame_landmarks": frame_landmarks,
"frame_group_percentages": frame_group_percentages,
"gaze_labels": gaze_labels
})
\ No newline at end of file
......@@ -5,11 +5,17 @@ from keras.preprocessing import image
import cv2
import os
import numpy as np
from .MongoModels import *
from . models import VideoMeta
from . logic import custom_sorter as cs
from .logic import id_generator as ig
# emotion recognition method
from .serializers import LectureEmotionSerializer
def emotion_recognition(classifier, face_classifier, image):
label = ""
class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
......@@ -66,7 +72,6 @@ def detect_emotion(video):
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_classifier.detectMultiScale(gray,1.3,5)
print('number of faces: ', len(faces))
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
......@@ -104,13 +109,9 @@ def detect_emotion(video):
elif (label == 'Sad'):
count_sad += 1
# path = os.path.join(BASE_DIR, 'static\\images\\Sad')
# cv2.imwrite(os.path.join(path, 'Sad-{0}.jpg'.format(count)), frame)
elif (label == 'Surprise'):
count_surprise += 1
# path = os.path.join(BASE_DIR, 'static\\images\\Surprise')
# cv2.imwrite(os.path.join(path, 'Surprise-{0}.jpg'.format(count)), frame)
label_position = (x, y)
# cv2.putText(frame, label, label_position, cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 3)
......@@ -118,13 +119,9 @@ def detect_emotion(video):
else:
cv2.putText(frame, 'No Face Found', (20, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 3)
# cv2.imshow('Emotion Detector',frame)
count_frames += 1
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
# setting up the counted values
meta_data.frame_count = count_frames
meta_data.happy_count = count_happy
meta_data.sad_count = count_sad
......@@ -328,11 +325,11 @@ def get_frame_emotion_recognition(video_name):
# calculating the percentages for the frame
happy_perct = float(happy_count / detection_count) * 100
sad_perct = float(sad_count / detection_count) * 100
angry_perct = float(angry_count / detection_count) * 100
neutral_perct = float(neutral_count / detection_count) * 100
surprise_perct = float(surprise_count / detection_count) * 100
happy_perct = float(happy_count / detection_count) * 100 if detection_count > 0 else 0
sad_perct = float(sad_count / detection_count) * 100 if detection_count > 0 else 0
angry_perct = float(angry_count / detection_count) * 100 if detection_count > 0 else 0
neutral_perct = float(neutral_count / detection_count) * 100 if detection_count > 0 else 0
surprise_perct = float(surprise_count / detection_count) * 100 if detection_count > 0 else 0
# this dictionary will be returned
frame_details['happy_perct'] = happy_perct
......@@ -348,4 +345,291 @@ def get_frame_emotion_recognition(video_name):
sorted_activity_frame_recognitions = cs.custom_object_sorter(frame_emotion_recognitions)
# return the detected frame percentages
return sorted_activity_frame_recognitions
\ No newline at end of file
return sorted_activity_frame_recognitions
# this method will retrieve student activity summary for given time period
def get_student_emotion_summary_for_period(emotions):
# declare variables to add percentage values
happy_perct_combined = 0.0
sad_perct_combined = 0.0
angry_perct_combined = 0.0
disgust_perct_combined = 0.0
surprise_perct_combined = 0.0
neutral_perct_combined = 0.0
# get the number of emotion records to calculate the average
no_of_emotions = len(emotions)
individual_lec_emotions = []
emotion_labels = ["happy_perct", "sad_perct", "angry_perct", "disgust_perct", "surprise_perct", "neutral_perct"]
# iterate through the activities
for emotion in emotions:
individual_emotion = {}
individual_emotion["happy_perct"] = float(emotion['happy_perct'])
individual_emotion["sad_perct"] = float(emotion['sad_perct'])
individual_emotion["angry_perct"] = float(emotion['angry_perct'])
individual_emotion["disgust_perct"] = float(emotion['disgust_perct'])
individual_emotion["surprise_perct"] = float(emotion['surprise_perct'])
individual_emotion["neutral_perct"] = float(emotion['neutral_perct'])
happy_perct_combined += float(emotion['happy_perct'])
sad_perct_combined += float(emotion['sad_perct'])
angry_perct_combined += float(emotion['angry_perct'])
disgust_perct_combined += float(emotion['disgust_perct'])
surprise_perct_combined += float(emotion['surprise_perct'])
neutral_perct_combined += float(emotion['neutral_perct'])
# append to the list
individual_lec_emotions.append(individual_emotion)
# calculate the average percentages
happy_average_perct = round((happy_perct_combined / no_of_emotions), 1) if no_of_emotions > 0 else 0
sad_average_perct = round((sad_perct_combined / no_of_emotions), 1) if no_of_emotions > 0 else 0
angry_average_perct = round((angry_perct_combined / no_of_emotions), 1) if no_of_emotions > 0 else 0
disgust_average_perct = round((disgust_perct_combined / no_of_emotions), 1) if no_of_emotions > 0 else 0
surprise_average_perct = round((surprise_perct_combined / no_of_emotions), 1) if no_of_emotions > 0 else 0
neutral_average_perct = round((neutral_perct_combined / no_of_emotions), 1) if no_of_emotions > 0 else 0
percentages = {}
percentages["happy_perct"] = happy_average_perct
percentages["sad_perct"] = sad_average_perct
percentages["angry_perct"] = angry_average_perct
percentages["disgust_perct"] = disgust_average_perct
percentages["surprise_perct"] = surprise_average_perct
percentages["neutral_perct"] = neutral_average_perct
return percentages, individual_lec_emotions, emotion_labels
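# Illustrative usage sketch (hypothetical values, not from a real lecture):
#   emotions = [{'happy_perct': 40, 'sad_perct': 20, 'angry_perct': 10,
#                'disgust_perct': 0, 'surprise_perct': 10, 'neutral_perct': 20},
#               {'happy_perct': 60, 'sad_perct': 20, 'angry_perct': 10,
#                'disgust_perct': 0, 'surprise_perct': 10, 'neutral_perct': 0}]
#   percentages, individual, labels = get_student_emotion_summary_for_period(emotions)
#   # percentages['happy_perct'] == 50.0 for the two records above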
# this method will retrieve activity frame groupings for a lecture
def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
# load the models
face_classifier = cv2.CascadeClassifier(
os.path.join(BASE_DIR, 'FirstApp\\classifiers\\haarcascade_frontalface_default.xml'))
classifier_path = os.path.join(BASE_DIR, 'FirstApp\\classifiers\\Emotion_little_vgg.h5')
classifier = load_model(classifier_path)
# initializing the count variables
frame_count = 0
# class labels
class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
# get the frame differences for each frame group
frame_group_diff = {}
# loop through the frame group dictionary
for key in frame_group_dict.keys():
split_values = key.split("-")
value1 = int(split_values[0])
value2 = int(split_values[1])
diff = value2 - value1
# assign the difference
frame_group_diff[key] = diff if diff > 0 else 1
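# e.g. a group keyed "0-30" gets a difference of 30, while a degenerate
# group such as "30-30" is clamped to 1 so it never yields a zero divisor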
# looping through the frames
for frame in os.listdir(EXTRACTED_DIR):
# getting the frame folder
FRAME_FOLDER = os.path.join(EXTRACTED_DIR, frame)
# initializing the variables
happy_count = 0
sad_count = 0
angry_count = 0
surprise_count = 0
neutral_count = 0
detection_count = 0
# looping through the detections in each frame
for detections in os.listdir(FRAME_FOLDER):
# checking whether the image contains only one person
if "frame" not in detections:
# get the label for this image
IMAGE_PATH = os.path.join(FRAME_FOLDER, detections)
image = cv2.imread(IMAGE_PATH)
# run the model and get the emotion label
label = emotion_recognition(classifier, face_classifier, image)
# increment the count based on the label
if label == class_labels[0]:
angry_count += 1
if label == class_labels[1]:
happy_count += 1
if label == class_labels[2]:
neutral_count += 1
if label == class_labels[3]:
sad_count += 1
if label == class_labels[4]:
surprise_count += 1
# increment the detection count
detection_count += 1
# finding the time landmark that the current frame is in
for index, i in enumerate(frame_landmarks):
j = index + 1
# checking whether the next index is within the range
if j < len(frame_landmarks):
next_value = frame_landmarks[j]
# checking the correct time landmark range
if (frame_count >= i) & (frame_count < next_value):
frame_name = "{}-{}".format(i, next_value)
frame_group_dict[frame_name]['happy_count'] += happy_count
frame_group_dict[frame_name]['sad_count'] += sad_count
frame_group_dict[frame_name]['angry_count'] += angry_count
frame_group_dict[frame_name]['surprise_count'] += surprise_count
frame_group_dict[frame_name]['neutral_count'] += neutral_count
frame_group_dict[frame_name]['detection_count'] += detection_count
# increment the frame count
frame_count += 1
# calculate the percentage values
for key in frame_group_dict.keys():
frame_group_details = frame_group_dict[key]
frame_group_happy_count = frame_group_details['happy_count']
frame_group_sad_count = frame_group_details['sad_count']
frame_group_angry_count = frame_group_details['angry_count']
frame_group_surprise_count = frame_group_details['surprise_count']
frame_group_neutral_count = frame_group_details['neutral_count']
group_detection_count = frame_group_details['detection_count']
# print('frame group phone count: ', frame_group_phone_count)
# print('frame group listen count: ', frame_group_listen_count)
# print('frame group note count: ', frame_group_note_count)
# print('frame group detection count: ', group_detection_count)
frame_diff = int(frame_group_diff[key])
# print('frame difference: ', frame_diff)
frame_group_happy_perct = float(frame_group_happy_count / group_detection_count) * 100 if group_detection_count > 0 else 0
frame_group_sad_perct = float(frame_group_sad_count / group_detection_count) * 100 if group_detection_count > 0 else 0
frame_group_angry_perct = float(frame_group_angry_count / group_detection_count) * 100 if group_detection_count > 0 else 0
frame_group_surprise_perct = float(frame_group_surprise_count / group_detection_count) * 100 if group_detection_count > 0 else 0
frame_group_neutral_perct = float(frame_group_neutral_count / group_detection_count) * 100 if group_detection_count > 0 else 0
# assign the values to the same dictionary
frame_group_dict[key]['happy_perct'] = round(frame_group_happy_perct, 1)
frame_group_dict[key]['sad_perct'] = round(frame_group_sad_perct, 1)
frame_group_dict[key]['angry_perct'] = round(frame_group_angry_perct, 1)
frame_group_dict[key]['surprise_perct'] = round(frame_group_surprise_perct, 1)
frame_group_dict[key]['neutral_perct'] = round(frame_group_neutral_perct, 1)
# removing irrelevant items from the dictionary
frame_group_dict[key].pop('happy_count')
frame_group_dict[key].pop('sad_count')
frame_group_dict[key].pop('angry_count')
frame_group_dict[key].pop('surprise_count')
frame_group_dict[key].pop('neutral_count')
frame_group_dict[key].pop('detection_count')
# print('frame group dict: ', frame_group_dict)
emotion_labels = ['happy_perct', 'sad_perct', 'angry_perct', 'surprise_perct', 'neutral_perct']
# return the dictionary
return frame_group_dict, emotion_labels
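# Illustrative call sketch (assuming landmarks and a zero-initialized group
# dictionary prepared earlier in the pipeline):
#   frame_landmarks = [0, 30, 60]
#   frame_group_dict = {'0-30': {'happy_count': 0, 'sad_count': 0, 'angry_count': 0,
#                                'surprise_count': 0, 'neutral_count': 0, 'detection_count': 0},
#                       '30-60': {...}}  # same zeroed keys per group
#   groupings, labels = emotion_frame_groupings('lecture01.mp4', frame_landmarks, frame_group_dict)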
# this section will handle some database operations
def save_frame_recognitions(video_name):
# retrieve the lecture emotion id
lec_emotion = LectureEmotionReport.objects.filter(lecture_video_id__video_name=video_name)
lec_emotion_ser = LectureEmotionSerializer(lec_emotion, many=True)
lec_emotion_data = lec_emotion_ser.data[0]
lec_emotion_id = lec_emotion_data['id']
# create a new lecture activity frame detections id
last_lec_emotion_frame_recognitions = LectureEmotionFrameRecognitions.objects.order_by(
'lecture_emotion_frame_recognition_id').last()
new_lecture_emotion_frame_recognitions_id = "LEFR00001" if (
last_lec_emotion_frame_recognitions is None) else \
ig.generate_new_id(last_lec_emotion_frame_recognitions.lecture_emotion_frame_recognition_id)
# calculate the frame detections
frame_detections = get_frame_emotion_recognition(video_name)
frame_recognition_details = []
# save the new lecture activity frame recognitions
for detection in frame_detections:
lec_emotion_frame_recognition_details = LectureEmotionFrameRecognitionDetails()
lec_emotion_frame_recognition_details.frame_name = detection['frame_name']
lec_emotion_frame_recognition_details.happy_perct = detection['happy_perct']
lec_emotion_frame_recognition_details.sad_perct = detection['sad_perct']
lec_emotion_frame_recognition_details.angry_perct = detection['angry_perct']
lec_emotion_frame_recognition_details.surprise_perct = detection['surprise_perct']
lec_emotion_frame_recognition_details.neutral_perct = detection['neutral_perct']
frame_recognition_details.append(lec_emotion_frame_recognition_details)
lec_emotion_frame_recognitions = LectureEmotionFrameRecognitions()
lec_emotion_frame_recognitions.lecture_emotion_frame_recognition_id = new_lecture_emotion_frame_recognitions_id
lec_emotion_frame_recognitions.lecture_emotion_id_id = lec_emotion_id
lec_emotion_frame_recognitions.frame_recognition_details = frame_recognition_details
lec_emotion_frame_recognitions.save()
# now return the frame recognitions
return frame_detections
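# Note: ig.generate_new_id is assumed here to increment the numeric suffix of
# the previous id while keeping the prefix, e.g. "LEFR00001" -> "LEFR00002"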
# this method will save the emotion frame groupings to the database
def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
frame_group_percentages, emotion_labels = emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict)
# save the frame group details into db
last_lec_emotion_frame_grouping = LectureEmotionFrameGroupings.objects.order_by('lecture_emotion_frame_groupings_id').last()
new_lecture_emotion_frame_grouping_id = "LEFG00001" if (last_lec_emotion_frame_grouping is None) else \
ig.generate_new_id(last_lec_emotion_frame_grouping.lecture_emotion_frame_groupings_id)
# retrieve the lecture emotion id
lec_emotion = LectureEmotionReport.objects.filter(lecture_video_id__video_name=video_name)
lec_emotion_ser = LectureEmotionSerializer(lec_emotion, many=True)
lec_emotion_id = lec_emotion_ser.data[0]['id']
# create the frame group details
frame_group_details = []
for key in frame_group_percentages.keys():
# create an object of type 'LectureActivityFrameGroupDetails'
lec_emotion_frame_group_details = LectureEmotionFrameGroupDetails()
lec_emotion_frame_group_details.frame_group = key
lec_emotion_frame_group_details.frame_group_percentages = frame_group_percentages[key]
frame_group_details.append(lec_emotion_frame_group_details)
new_lec_emotion_frame_groupings = LectureEmotionFrameGroupings()
new_lec_emotion_frame_groupings.lecture_emotion_frame_groupings_id = new_lecture_emotion_frame_grouping_id
new_lec_emotion_frame_groupings.lecture_emotion_id_id = lec_emotion_id
new_lec_emotion_frame_groupings.frame_group_details = frame_group_details
# save
new_lec_emotion_frame_groupings.save()
......@@ -5,7 +5,10 @@ import numpy as np
import cv2
import os
import shutil
from . custom_sorter import *
from .custom_sorter import *
from ..MongoModels import *
from ..serializers import *
from . import id_generator as ig
def activity_recognition(video_path):
......@@ -51,35 +54,34 @@ def activity_recognition(video_path):
note_taking_count = 0
listening_count = 0
# video activity didrectory
# video activity directory
VIDEO_ACTIVITY_DIR = os.path.join(ACTIVITY_DIR, video_path)
# creating the directory for the video
if (os.path.isdir(VIDEO_ACTIVITY_DIR)):
shutil.rmtree(VIDEO_ACTIVITY_DIR)
# create the video directory
os.mkdir(VIDEO_ACTIVITY_DIR)
# if (os.path.isdir(VIDEO_ACTIVITY_DIR)):
# shutil.rmtree(VIDEO_ACTIVITY_DIR)
#
# # create the video directory
# os.mkdir(VIDEO_ACTIVITY_DIR)
while (frame_count < no_of_frames):
ret, image = video.read()
FRAME_DIR = os.path.join(VIDEO_ACTIVITY_DIR, "frame-{}".format(frame_count))
frame_name = "frame-{}.png".format(frame_count)
FRAME_IMG = os.path.join(FRAME_DIR, frame_name)
if (os.path.isdir(FRAME_DIR)):
shutil.rmtree(FRAME_DIR)
# frame_name = "frame-{}.png".format(frame_count)
#
# FRAME_IMG = os.path.join(FRAME_DIR, frame_name)
#
# if (os.path.isdir(FRAME_DIR)):
# shutil.rmtree(FRAME_DIR)
# create the new frame directory
os.mkdir(FRAME_DIR)
# os.mkdir(FRAME_DIR)
image = cv2.resize(image, size)
# image = ImageOps.fit(image, size, Image.ANTIALIAS)
detections = person_detection(image, net)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imwrite(FRAME_IMG, image)
# cv2.imwrite(FRAME_IMG, image)
# if there are any person detections
if (len(detections) > 0):
......@@ -112,28 +114,27 @@ def activity_recognition(video_path):
note_taking_count += 1
# saving the detection for the particular frame
detection_name = "detection-{}.png".format(detection_count)
detection_image_path = os.path.join(FRAME_DIR, detection_name)
# converting detected image into grey-scale
detection = cv2.cvtColor(detection, cv2.COLOR_BGR2GRAY)
cv2.imwrite(detection_image_path, detection)
# detection_name = "detection-{}.png".format(detection_count)
# detection_image_path = os.path.join(FRAME_DIR, detection_name)
#
# # converting detected image into grey-scale
# detection = cv2.cvtColor(detection, cv2.COLOR_BGR2GRAY)
#
# cv2.imwrite(detection_image_path, detection)
detection_count += 1
frame_count += 1
# after extracting the frames, save the changes to static content
p = os.popen("python manage.py collectstatic", "w")
p.write("yes")
# p = os.popen("python manage.py collectstatic", "w")
# p.write("yes")
# calculating the percentages for each label
phone_perct = float(phone_checking_count / total_detections) * 100
talking_perct = float(talking_count / total_detections) * 100
note_perct = float(note_taking_count / total_detections) * 100
listening_perct = float(listening_count / total_detections) * 100
phone_perct = float(phone_checking_count / total_detections) * 100 if total_detections > 0 else 0
talking_perct = float(talking_count / total_detections) * 100 if total_detections > 0 else 0
note_perct = float(note_taking_count / total_detections) * 100 if total_detections > 0 else 0
listening_perct = float(listening_count / total_detections) * 100 if total_detections > 0 else 0
# assigning the percentages to the dictionary
percentages["phone_perct"] = phone_perct
......@@ -141,7 +142,6 @@ def activity_recognition(video_path):
percentages["writing_perct"] = note_perct
percentages["listening_perct"] = listening_perct
return percentages
......@@ -163,8 +163,6 @@ def person_detection(image, net):
person_count = 0
# load the input image and construct an input blob for the image
# by resizing to a fixed 300x300 pixels and then normalizing it
# (note: normalization is done via the authors of the MobileNet SSD
......@@ -213,10 +211,8 @@ def person_detection(image, net):
# retrieving the extracted frames and detections for a given video
def getExtractedFrames(folder_name):
image_list = []
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(folder_name))
......@@ -241,9 +237,9 @@ def getExtractedFrames(folder_name):
else:
return "No extracted frames were found"
# get detections for a given frame name
def get_detections(video_name, frame_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
FRAME_DIR = os.path.join(EXTRACTED_DIR, frame_name)
......@@ -258,7 +254,6 @@ def get_detections(video_name, frame_name):
# get detections for a given class name
def get_detections_for_label(video_name, label_index):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
......@@ -329,7 +324,6 @@ def get_detections_for_label(video_name, label_index):
# to get the student evaluations
def get_student_activity_evaluation(video_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
......@@ -402,7 +396,6 @@ def get_frame_activity_recognition(video_name):
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_04.h5")
np.set_printoptions(suppress=True)
# load the model
......@@ -474,7 +467,6 @@ def get_frame_activity_recognition(video_name):
# increment the detection count
detection_count += 1
# calculating the percentages for the frame
phone_checking_perct = float(phone_checking_count / detection_count) * 100 if detection_count > 0 else 0
listening_perct = float(listening_count / detection_count) * 100 if detection_count > 0 else 0
......@@ -571,4 +563,267 @@ def get_individual_student_evaluation(video_name, student_name):
percentages['writing_perct'] = writing_perct
percentages['listening_perct'] = listening_perct
return percentages
\ No newline at end of file
return percentages
# this method will retrieve student activity summary for given time period
def get_student_activity_summary_for_period(activities):
# declare variables to add percentage values
phone_checking_perct_combined = 0.0
listening_perct_combined = 0.0
note_taking_perct_combined = 0.0
# get the number of activities to calculate the average
no_of_activities = len(activities)
individual_lec_activities = []
activity_labels = ["phone_perct", "listening_perct", "writing_perct"]
# iterate through the activities
for activity in activities:
individual_activity = {}
individual_activity["phone_perct"] = float(activity['phone_perct'])
individual_activity["listening_perct"] = float(activity['listening_perct'])
individual_activity["writing_perct"] = float(activity['writing_perct'])
phone_checking_perct_combined += float(activity['phone_perct'])
listening_perct_combined += float(activity['listening_perct'])
note_taking_perct_combined += float(activity['writing_perct'])
# append to the list
individual_lec_activities.append(individual_activity)
# calculate the average percentages
phone_checking_average_perct = round((phone_checking_perct_combined / no_of_activities), 1) if no_of_activities > 0 else 0
listening_average_perct = round((listening_perct_combined / no_of_activities), 1) if no_of_activities > 0 else 0
note_taking_average_perct = round((note_taking_perct_combined / no_of_activities), 1) if no_of_activities > 0 else 0
percentages = {}
percentages["phone_perct"] = phone_checking_average_perct
percentages["listening_perct"] = listening_average_perct
percentages["writing_perct"] = note_taking_average_perct
return percentages, individual_lec_activities, activity_labels
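# Illustrative usage sketch (hypothetical values):
#   activities = [{'phone_perct': 20, 'listening_perct': 70, 'writing_perct': 10},
#                 {'phone_perct': 40, 'listening_perct': 50, 'writing_perct': 10}]
#   percentages, individual, labels = get_student_activity_summary_for_period(activities)
#   # percentages == {'phone_perct': 30.0, 'listening_perct': 60.0, 'writing_perct': 10.0}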
# this method will retrieve activity frame groupings for a lecture
def activity_frame_groupings(video_name, frame_landmarks, frame_group_dict):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_03.h5")
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_04.h5")
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_05.h5")
np.set_printoptions(suppress=True)
# load the model
model = tensorflow.keras.models.load_model(CLASSIFIER_DIR)
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
size = (224, 224)
# initializing the count variables
frame_count = 0
# class labels
class_labels = ['Phone checking', 'Listening', 'Note taking']
# get the frame differences for each frame group
frame_group_diff = {}
# loop through the frame group dictionary
for key in frame_group_dict.keys():
split_values = key.split("-")
value1 = int(split_values[0])
value2 = int(split_values[1])
diff = value2 - value1
# assign the difference
frame_group_diff[key] = diff if diff > 0 else 1
# looping through the frames
for frame in os.listdir(EXTRACTED_DIR):
# getting the frame folder
FRAME_FOLDER = os.path.join(EXTRACTED_DIR, frame)
# initializing the variables
phone_count = 0
note_count = 0
listen_count = 0
detection_count = 0
# looping through the detections in each frame
for detections in os.listdir(FRAME_FOLDER):
# checking whether the image contains only one person
if "frame" not in detections:
# get the label for this image
IMAGE_PATH = os.path.join(FRAME_FOLDER, detections)
image = cv2.imread(IMAGE_PATH)
image = cv2.resize(image, size)
image_array = np.asarray(image)
normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
# Load the image into the array
data[0] = normalized_image_array
# run the inference
prediction = model.predict(data)
# get the predicted label
label = class_labels[prediction.argmax()]
# increment the count based on the label
if label == class_labels[0]:
phone_count += 1
elif label == class_labels[1]:
listen_count += 1
elif label == class_labels[2]:
note_count += 1
# increment the detection count
detection_count += 1
# finding the time landmark that the current frame is in
for index, i in enumerate(frame_landmarks):
j = index + 1
# checking whether the next index is within the range
if j < len(frame_landmarks):
next_value = frame_landmarks[j]
# checking the correct time landmark range
if (frame_count >= i) & (frame_count < next_value):
frame_name = "{}-{}".format(i, next_value)
frame_group_dict[frame_name]['phone_count'] += phone_count
frame_group_dict[frame_name]['listen_count'] += listen_count
frame_group_dict[frame_name]['note_count'] += note_count
frame_group_dict[frame_name]['detection_count'] += detection_count
# increment the frame count
frame_count += 1
# calculate the percentage values
for key in frame_group_dict.keys():
frame_group_details = frame_group_dict[key]
frame_group_phone_count = frame_group_details['phone_count']
frame_group_listen_count = frame_group_details['listen_count']
frame_group_note_count = frame_group_details['note_count']
group_detection_count = frame_group_details['detection_count']
# print('frame group phone count: ', frame_group_phone_count)
# print('frame group listen count: ', frame_group_listen_count)
# print('frame group note count: ', frame_group_note_count)
# print('frame group detection count: ', group_detection_count)
frame_diff = int(frame_group_diff[key])
# print('frame difference: ', frame_diff)
frame_group_phone_perct = float(frame_group_phone_count / group_detection_count) * 100 if group_detection_count > 0 else 0
frame_group_listen_perct = float(frame_group_listen_count / group_detection_count) * 100 if group_detection_count > 0 else 0
frame_group_note_perct = float(frame_group_note_count / group_detection_count) * 100 if group_detection_count > 0 else 0
# assign the values to the same dictionary
frame_group_dict[key]['phone_perct'] = round(frame_group_phone_perct, 1)
frame_group_dict[key]['listen_perct'] = round(frame_group_listen_perct, 1)
frame_group_dict[key]['note_perct'] = round(frame_group_note_perct, 1)
# removing irrelevant items from the dictionary
frame_group_dict[key].pop('phone_count')
frame_group_dict[key].pop('listen_count')
frame_group_dict[key].pop('note_count')
frame_group_dict[key].pop('detection_count')
# print('frame group dict: ', frame_group_dict)
activity_labels = ['phone_perct', 'listen_perct', 'note_perct']
# return the dictionary
return frame_group_dict, activity_labels
# this section will handle saving activity entities to the database
def save_frame_recognition(video_name):
# retrieve the lecture activity id
lec_activity = LectureActivity.objects.filter(lecture_video_id__video_name=video_name)
lec_activity_ser = LectureActivitySerializer(lec_activity, many=True)
lec_activity_data = lec_activity_ser.data[0]
lec_activity_id = lec_activity_data['id']
# create a new lecture activity frame detections id
last_lec_activity_frame_recognitions = LectureActivityFrameRecognitions.objects.order_by(
'lecture_activity_frame_recognition_id').last()
new_lecture_activity_frame_recognitions_id = "LAFR00001" if (last_lec_activity_frame_recognitions is None) else \
ig.generate_new_id(last_lec_activity_frame_recognitions.lecture_activity_frame_recognition_id)
# calculate the frame detections
frame_detections = get_frame_activity_recognition(video_name)
frame_recognition_details = []
# save the new lecture activity frame recognitions
for detection in frame_detections:
lec_activity_frame_recognition_details = LectureActivityFrameRecognitionDetails()
lec_activity_frame_recognition_details.frame_name = detection['frame_name']
lec_activity_frame_recognition_details.phone_perct = detection['phone_perct']
lec_activity_frame_recognition_details.listen_perct = detection['listening_perct']
lec_activity_frame_recognition_details.note_perct = detection['note_perct']
frame_recognition_details.append(lec_activity_frame_recognition_details)
lec_activity_frame_recognitions = LectureActivityFrameRecognitions()
lec_activity_frame_recognitions.lecture_activity_frame_recognition_id = new_lecture_activity_frame_recognitions_id
lec_activity_frame_recognitions.lecture_activity_id_id = lec_activity_id
lec_activity_frame_recognitions.frame_recognition_details = frame_recognition_details
lec_activity_frame_recognitions.save()
# now return the frame detections
return frame_detections
# this method will save the activity frame groupings to the database
def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
frame_group_percentages, activity_labels = activity_frame_groupings(video_name, frame_landmarks,
frame_group_dict)
# save the frame group details into db
last_lec_activity_frame_grouping = LectureActivityFrameGroupings.objects.order_by(
'lecture_activity_frame_groupings_id').last()
new_lecture_activity_frame_grouping_id = "LAFG00001" if (last_lec_activity_frame_grouping is None) else \
ig.generate_new_id(last_lec_activity_frame_grouping.lecture_activity_frame_groupings_id)
# retrieve the lecture activity id
lec_activity = LectureActivity.objects.filter(lecture_video_id__video_name=video_name)
lec_activity_ser = LectureActivitySerializer(lec_activity, many=True)
lec_activity_id = lec_activity_ser.data[0]['id']
# create the frame group details
frame_group_details = []
for key in frame_group_percentages.keys():
# create an object of type 'LectureActivityFrameGroupDetails'
lec_activity_frame_group_details = LectureActivityFrameGroupDetails()
lec_activity_frame_group_details.frame_group = key
lec_activity_frame_group_details.frame_group_percentages = frame_group_percentages[key]
frame_group_details.append(lec_activity_frame_group_details)
new_lec_activity_frame_groupings = LectureActivityFrameGroupings()
new_lec_activity_frame_groupings.lecture_activity_frame_groupings_id = new_lecture_activity_frame_grouping_id
new_lec_activity_frame_groupings.lecture_activity_id_id = lec_activity_id
new_lec_activity_frame_groupings.frame_group_details = frame_group_details
# save
new_lec_activity_frame_groupings.save()
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 29 17:52:00 2020
@author: hp
"""
import cv2
import numpy as np
import os
def get_face_detector(modelFile="models/res10_300x300_ssd_iter_140000.caffemodel",
configFile="models/deploy.prototxt"):
"""
Get the face detection caffe model of OpenCV's DNN module
Parameters
----------
modelFile : string, optional
Path to the model file; note that the function currently overrides this with the copy under "FirstApp\classifiers".
configFile : string, optional
Path to the config file; likewise overridden with "FirstApp\classifiers\deploy.prototxt".
Returns
-------
model : dnn_Net
"""
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers")
modelFile = os.path.join(CLASSIFIER_DIR, "res10_300x300_ssd_iter_140000.caffemodel")
configFile = os.path.join(CLASSIFIER_DIR, "deploy.prototxt")
model = cv2.dnn.readNetFromCaffe(configFile, modelFile)
return model
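# Minimal usage sketch (assuming an image already loaded with cv2.imread):
#   model = get_face_detector()
#   faces = find_faces(img, model)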
def find_faces(img, model):
"""
Find the faces in an image
Parameters
----------
img : np.uint8
Image to find faces from
model : dnn_Net
Face detection model
Returns
-------
faces : list
List of coordinates of the faces detected in the image
"""
h, w = img.shape[:2]
blob = cv2.dnn.blobFromImage(cv2.resize(img, (300, 300)), 1.0,
(300, 300), (104.0, 177.0, 123.0))
model.setInput(blob)
res = model.forward()
faces = []
for i in range(res.shape[2]):
confidence = res[0, 0, i, 2]
if confidence > 0.5:
box = res[0, 0, i, 3:7] * np.array([w, h, w, h])
(x, y, x1, y1) = box.astype("int")
faces.append([x, y, x1, y1])
return faces
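# Each returned entry is a box [x, y, x1, y1]: (x, y) is the top-left and
# (x1, y1) the bottom-right corner, rescaled to the original image size;
# detections with confidence 0.5 or below are discarded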
def draw_faces(img, faces):
"""
Draw faces on image
Parameters
----------
img : np.uint8
Image to draw faces on
faces : List of face coordinates
Coordinates of faces to draw
Returns
-------
None.
"""
for x, y, x1, y1 in faces:
cv2.rectangle(img, (x, y), (x1, y1), (0, 0, 255), 3)
\ No newline at end of file
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 29 19:47:08 2020
@author: hp
"""
import cv2
import numpy as np
import tensorflow as tf
from tensorflow import keras
import os
def get_landmark_model():
"""
Get the facial landmark model.
Original repository: https://github.com/yinguobing/cnn-facial-landmark
The model is loaded from 'FirstApp\classifiers\pose_model' under the project
base directory; the function takes no parameters.
Returns
-------
model : Tensorflow model
Facial landmarks model
"""
# define the location of the pose model
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
saved_model = os.path.join(BASE_DIR, "FirstApp\\classifiers\\pose_model")
model = keras.models.load_model(saved_model)
return model
def get_square_box(box):
"""Get a square box out of the given box, by expanding it."""
left_x = box[0]
top_y = box[1]
right_x = box[2]
bottom_y = box[3]
box_width = right_x - left_x
box_height = bottom_y - top_y
# Check if box is already a square. If not, make it a square.
diff = box_height - box_width
delta = int(abs(diff) / 2)
if diff == 0: # Already a square.
return box
elif diff > 0: # Height > width, a slim box.
left_x -= delta
right_x += delta
if diff % 2 == 1:
right_x += 1
else: # Width > height, a short box.
top_y -= delta
bottom_y += delta
if diff % 2 == 1:
bottom_y += 1
# Make sure box is always square.
assert ((right_x - left_x) == (bottom_y - top_y)), 'Box is not square.'
return [left_x, top_y, right_x, bottom_y]
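# Worked example: a slim box [10, 20, 30, 60] (width 20, height 40) is widened
# by delta = 10 on each side, giving [0, 20, 40, 60], a 40x40 square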
def move_box(box, offset):
"""Move the box to direction specified by vector offset"""
left_x = box[0] + offset[0]
top_y = box[1] + offset[1]
right_x = box[2] + offset[0]
bottom_y = box[3] + offset[1]
return [left_x, top_y, right_x, bottom_y]
def detect_marks(img, model, face):
"""
Find the facial landmarks in an image from the faces
Parameters
----------
img : np.uint8
The image in which landmarks are to be found
model : Tensorflow model
Loaded facial landmark model
face : list
Face coordinates (x, y, x1, y1) in which the landmarks are to be found
Returns
-------
marks : numpy array
facial landmark points
"""
offset_y = int(abs((face[3] - face[1]) * 0.1))
box_moved = move_box(face, [0, offset_y])
facebox = get_square_box(box_moved)
# reassigning the facebox values
facebox[0] = facebox[0] if facebox[0] > 0 else 0
facebox[1] = facebox[1] if facebox[1] > 0 else 0
facebox[2] = facebox[2] if facebox[2] > 0 else 0
facebox[3] = facebox[3] if facebox[3] > 0 else 0
# draw a bounding box
cv2.rectangle(img, (facebox[0], facebox[1]), (facebox[2], facebox[3]), (0, 255, 0), 2)
face_img = img[facebox[1]: facebox[3],
facebox[0]: facebox[2]]
marks = np.zeros((68, 2))
# if the list length is more than 0
if len(face_img) > 0:
face_img = cv2.resize(face_img, (128, 128))
face_img = cv2.cvtColor(face_img, cv2.COLOR_BGR2RGB)
# Actual detection.
predictions = model.signatures["predict"](
tf.constant([face_img], dtype=tf.uint8))
# Convert predictions to landmarks.
marks = np.array(predictions['output']).flatten()[:136]
marks = np.reshape(marks, (-1, 2))
marks *= (facebox[2] - facebox[0])
marks[:, 0] += facebox[0]
marks[:, 1] += facebox[1]
marks = marks.astype(np.uint)
# return marks
# return detected facial marks and face coordinates
return marks, facebox
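# Minimal usage sketch combining the detector and landmark models:
#   face_model = get_face_detector()
#   landmark_model = get_landmark_model()
#   faces = find_faces(img, face_model)
#   marks, facebox = detect_marks(img, landmark_model, faces[0])
#   draw_marks(img, marks)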
def draw_marks(image, marks, color=(0, 255, 0)):
"""
Draw the facial landmarks on an image
Parameters
----------
image : np.uint8
Image on which landmarks are to be drawn.
marks : list or numpy array
Facial landmark points
color : tuple, optional
Color to which landmarks are to be drawn with. The default is (0, 255, 0).
Returns
-------
None.
"""
for mark in marks:
cv2.circle(image, (mark[0], mark[1]), 2, color, -1, cv2.LINE_AA)
\ No newline at end of file
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 31 03:00:36 2020
@author: hp
"""
from decimal import Decimal
from . custom_sorter import *
import cv2
import numpy as np
import math
from . face_detector import get_face_detector, find_faces
from . face_landmarks import get_landmark_model, detect_marks
import os
import shutil
import math
from ..MongoModels import *
from ..serializers import *
from . import id_generator as ig
def get_2d_points(img, rotation_vector, translation_vector, camera_matrix, val):
"""Return the 3D points present as 2D for making annotation box"""
point_3d = []
dist_coeffs = np.zeros((4, 1))
rear_size = val[0]
rear_depth = val[1]
point_3d.append((-rear_size, -rear_size, rear_depth))
point_3d.append((-rear_size, rear_size, rear_depth))
point_3d.append((rear_size, rear_size, rear_depth))
point_3d.append((rear_size, -rear_size, rear_depth))
point_3d.append((-rear_size, -rear_size, rear_depth))
front_size = val[2]
front_depth = val[3]
point_3d.append((-front_size, -front_size, front_depth))
point_3d.append((-front_size, front_size, front_depth))
point_3d.append((front_size, front_size, front_depth))
point_3d.append((front_size, -front_size, front_depth))
point_3d.append((-front_size, -front_size, front_depth))
point_3d = np.array(point_3d, dtype=np.float64).reshape(-1, 3)
# Map to 2d img points
(point_2d, _) = cv2.projectPoints(point_3d,
rotation_vector,
translation_vector,
camera_matrix,
dist_coeffs)
point_2d = np.int32(point_2d.reshape(-1, 2))
return point_2d
def draw_annotation_box(img, rotation_vector, translation_vector, camera_matrix,
rear_size=300, rear_depth=0, front_size=500, front_depth=400,
color=(255, 255, 0), line_width=2):
"""
Draw a 3D annotation box on the face for head pose estimation
Parameters
----------
img : np.uint8
Original Image.
rotation_vector : Array of float64
Rotation Vector obtained from cv2.solvePnP
translation_vector : Array of float64
Translation Vector obtained from cv2.solvePnP
camera_matrix : Array of float64
The camera matrix
rear_size : int, optional
Size of rear box. The default is 300.
rear_depth : int, optional
The default is 0.
front_size : int, optional
Size of front box. The default is 500.
front_depth : int, optional
Front depth. The default is 400.
color : tuple, optional
The color with which to draw annotation box. The default is (255, 255, 0).
line_width : int, optional
line width of lines drawn. The default is 2.
Returns
-------
None.
"""
rear_size = 1
rear_depth = 0
front_size = img.shape[1]
front_depth = front_size * 2
val = [rear_size, rear_depth, front_size, front_depth]
point_2d = get_2d_points(img, rotation_vector, translation_vector, camera_matrix, val)
# Draw all the lines
cv2.polylines(img, [point_2d], True, color, line_width, cv2.LINE_AA)
cv2.line(img, tuple(point_2d[1]), tuple(
point_2d[6]), color, line_width, cv2.LINE_AA)
cv2.line(img, tuple(point_2d[2]), tuple(
point_2d[7]), color, line_width, cv2.LINE_AA)
cv2.line(img, tuple(point_2d[3]), tuple(
point_2d[8]), color, line_width, cv2.LINE_AA)
def head_pose_points(img, rotation_vector, translation_vector, camera_matrix):
"""
Get the points to estimate head pose sideways
Parameters
----------
img : np.uint8
Original Image.
rotation_vector : Array of float64
Rotation Vector obtained from cv2.solvePnP
translation_vector : Array of float64
Translation Vector obtained from cv2.solvePnP
camera_matrix : Array of float64
The camera matrix
Returns
-------
(x, y) : tuple
Coordinates of line to estimate head pose
"""
rear_size = 1
rear_depth = 0
front_size = img.shape[1]
front_depth = front_size * 2
val = [rear_size, rear_depth, front_size, front_depth]
point_2d = get_2d_points(img, rotation_vector, translation_vector, camera_matrix, val)
y = (point_2d[5] + point_2d[8]) // 2
x = point_2d[2]
return (x, y)
# this method will perform gaze estimation for a lecture
def process_gaze_estimation(video_path):
# get the base directory
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
VIDEO_PATH = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_path))
GAZE_DIR = os.path.join(BASE_DIR, "static\\FirstApp\\gaze")
# create a folder with the same name as the video
VIDEO_DIR = os.path.join(GAZE_DIR, video_path)
# define a dictionary to return the percentage values
percentages = {}
# checking whether the video directory exist
if os.path.isdir(VIDEO_DIR):
shutil.rmtree(VIDEO_DIR)
# create the new directory
os.mkdir(VIDEO_DIR)
# load the face detection model
face_model = get_face_detector()
# load the facial landmark model
landmark_model = get_landmark_model()
cap = cv2.VideoCapture(VIDEO_PATH)
ret, img = cap.read()
size = img.shape
font = cv2.FONT_HERSHEY_SIMPLEX
# 3D model points.
model_points = np.array([
(0.0, 0.0, 0.0), # Nose tip
(0.0, -330.0, -65.0), # Chin
(-225.0, 170.0, -135.0), # Left eye left corner
(225.0, 170.0, -135.0), # Right eye right corner
(-150.0, -150.0, -125.0), # Left Mouth corner
(150.0, -150.0, -125.0) # Right mouth corner
])
# setting up the count variables
head_front_count = 0
head_up_right_count = 0
head_up_left_count = 0
head_down_right_count = 0
head_down_left_count = 0
# define a variable to count the frames
frame_count = 0
face_count = 0
# set a threshold angle
# THRESHOLD = 15
THRESHOLD = 22
# THRESHOLD = 30
# THRESHOLD = 45
# THRESHOLD = 48
# Camera internals
focal_length = size[1]
center = (size[1] / 2, size[0] / 2)
camera_matrix = np.array(
[[focal_length, 0, center[0]],
[0, focal_length, center[1]],
[0, 0, 1]], dtype="double"
)
# iterate the video frames
while True:
ret, img = cap.read()
if ret == True:
faces = find_faces(img, face_model)
# print('no of faces found: ', len(faces))
student_count = 0
# iterate through each detected face
for face in faces:
# declaring boolean variables
isLookingUp = False
isLookingDown = False
isLookingRight = False
isLookingLeft = False
isLookingFront = False
# deriving the student name to display in the image
student_name = 'student-{}'.format(student_count)
# retrieving the facial landmarks and face bounding box coordinates
marks, facebox = detect_marks(img, landmark_model, face)
# mark_detector.draw_marks(img, marks, color=(0, 255, 0))
image_points = np.array([
marks[30], # Nose tip
marks[8], # Chin
marks[36], # Left eye left corner
marks[45], # Right eye right corner
marks[48], # Left Mouth corner
marks[54] # Right mouth corner
], dtype="double")
dist_coeffs = np.zeros((4, 1)) # Assuming no lens distortion
(success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix,
dist_coeffs, flags=cv2.SOLVEPNP_UPNP)
# Project a 3D point (0, 0, 1000.0) onto the image plane.
# We use this to draw a line sticking out of the nose
(nose_end_point2D, jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector,
translation_vector, camera_matrix, dist_coeffs)
p1 = (int(image_points[0][0]), int(image_points[0][1]))
p2 = (int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
x1, x2 = head_pose_points(img, rotation_vector, translation_vector, camera_matrix)
# measuring the angles
try:
m = (p2[1] - p1[1]) / (p2[0] - p1[0])
ang1 = int(math.degrees(math.atan(m)))
except ZeroDivisionError:
ang1 = 90
try:
m = (x2[1] - x1[1]) / (x2[0] - x1[0])
ang2 = int(math.degrees(math.atan(-1 / m)))
except ZeroDivisionError:
ang2 = 90
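# ang1 comes from the slope of the projected nose line p1 -> p2 and captures
# the vertical tilt, while ang2 uses the perpendicular slope of the sideways
# pose line x1 -> x2 and captures the horizontal turn; a vertical line has an
# undefined slope, hence the fallback to 90 degrees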
# print('angle 1: {}, angle 2: {}'.format(ang1, ang2))
# checking for angle 1
if ang1 >= THRESHOLD:
# cv2.putText(img, 'looking down', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
isLookingDown = True
elif ang1 <= -THRESHOLD:
# cv2.putText(img, 'looking up', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
isLookingUp = True
else:
# cv2.putText(img, 'looking front', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
isLookingFront = True
# checking for angle 2
if ang2 >= THRESHOLD:
# cv2.putText(img, 'looking right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
isLookingRight = True
elif ang2 <= -THRESHOLD:
# cv2.putText(img, 'looking left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
isLookingLeft = True
# checking for vertical and horizontal directions
if isLookingDown & isLookingRight:
cv2.putText(img, 'looking down and right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_down_right_count += 1
elif isLookingDown & isLookingLeft:
cv2.putText(img, 'looking down and left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_down_left_count += 1
elif isLookingUp & isLookingRight:
cv2.putText(img, 'looking up and right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_up_right_count += 1
elif isLookingUp & isLookingLeft:
cv2.putText(img, 'looking up and left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_up_left_count += 1
elif isLookingFront:
cv2.putText(img, 'Head front', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_front_count += 1
# indicate the student name
cv2.putText(img, student_name, (facebox[2], facebox[3]), font, 2, (255, 255, 128), 3)
# increment the face count
face_count += 1
# naming the new image
image_name = "frame-{}.png".format(frame_count)
# new image path
image_path = os.path.join(VIDEO_DIR, image_name)
# save the new image
cv2.imwrite(image_path, img)
# increment the frame count
frame_count += 1
else:
break
# after extracting the frames, save the changes to static content
p = os.popen("python manage.py collectstatic", "w")
p.write("yes")
# calculate percentages
head_up_right_perct = (Decimal(head_up_right_count) / Decimal(face_count)) * 100 if face_count > 0 else 0
head_up_left_perct = (Decimal(head_up_left_count) / Decimal(face_count)) * 100 if face_count > 0 else 0
head_down_right_perct = (Decimal(head_down_right_count) / Decimal(face_count)) * 100 if face_count > 0 else 0
head_down_left_perct = (Decimal(head_down_left_count) / Decimal(face_count)) * 100 if face_count > 0 else 0
head_front_perct = (Decimal(head_front_count) / Decimal(face_count)) * 100 if face_count > 0 else 0
# collect the percentages to a dictionary
percentages['head_up_right_perct'] = head_up_right_perct
percentages['head_up_left_perct'] = head_up_left_perct
percentages['head_down_right_perct'] = head_down_right_perct
percentages['head_down_left_perct'] = head_down_left_perct
percentages['head_front_perct'] = head_front_perct
cv2.destroyAllWindows()
cap.release()
# return the dictionary
return percentages
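# Illustrative call sketch (assuming the video exists under assets\FirstApp\videos):
#   percentages = process_gaze_estimation('lecture01.mp4')
#   # e.g. percentages['head_front_perct'] gives the share of detections facing front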
# this method will retrieve extracted frames
def getExtractedFrames(lecture_video_name):
image_list = []
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\gaze\\{}".format(lecture_video_name))
# listing all the images in the directory
for image_path in os.listdir(EXTRACTED_DIR):
image_list.append(image_path)
# checking for the number of frames
if (len(image_list) > 0):
image_list = custom_sort(image_list)
return image_list
else:
return "No extracted frames were found"
# this method will retrieve lecture gaze estimation for each frame
def get_lecture_gaze_esrimation_for_frames(video_name):
# get the base directory
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
VIDEO_PATH = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
# play the video
video = cv2.VideoCapture(VIDEO_PATH)
frame_rate = video.get(cv2.CAP_PROP_FPS)
frame_detections = []
face_model = get_face_detector()
landmark_model = get_landmark_model()
cap = cv2.VideoCapture(VIDEO_PATH)
ret, img = cap.read()
size = img.shape
font = cv2.FONT_HERSHEY_SIMPLEX
# 3D model points.
model_points = np.array([
(0.0, 0.0, 0.0), # Nose tip
(0.0, -330.0, -65.0), # Chin
(-225.0, 170.0, -135.0), # Left eye left corner
(225.0, 170.0, -135.0), # Right eye right corner
(-150.0, -150.0, -125.0), # Left Mouth corner
(150.0, -150.0, -125.0) # Right mouth corner
])
# define a variable to count the frames
frame_count = 0
# set a threshold angle
# THRESHOLD = 15
THRESHOLD = 22
# THRESHOLD = 30
# THRESHOLD = 45
# THRESHOLD = 48
# Camera internals
focal_length = size[1]
center = (size[1] / 2, size[0] / 2)
camera_matrix = np.array(
[[focal_length, 0, center[0]],
[0, focal_length, center[1]],
[0, 0, 1]], dtype="double"
)
# iterate the video frames
while True:
ret, img = cap.read()
if ret == True:
# setting up the count variables
head_front_count = 0
head_up_right_count = 0
head_up_left_count = 0
head_down_right_count = 0
head_down_left_count = 0
face_count = 0
# find the number of faces
faces = find_faces(img, face_model)
student_count = 0
# iterate through each detected face
for face in faces:
# declaring boolean variables
isLookingUp = False
isLookingDown = False
isLookingRight = False
isLookingLeft = False
isLookingFront = False
# deriving the student name to display in the image
student_name = 'student-{}'.format(student_count)
# retrieving the facial landmarks and face bounding box coordinates
marks, facebox = detect_marks(img, landmark_model, face)
# mark_detector.draw_marks(img, marks, color=(0, 255, 0))
image_points = np.array([
marks[30], # Nose tip
marks[8], # Chin
marks[36], # Left eye left corner
marks[45], # Right eye right corner
marks[48], # Left Mouth corner
marks[54] # Right mouth corner
], dtype="double")
dist_coeffs = np.zeros((4, 1)) # Assuming no lens distortion
(success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix,
dist_coeffs, flags=cv2.SOLVEPNP_UPNP)
# Project a 3D point (0, 0, 1000.0) onto the image plane.
# We use this to draw a line sticking out of the nose
(nose_end_point2D, jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector,
translation_vector, camera_matrix, dist_coeffs)
p1 = (int(image_points[0][0]), int(image_points[0][1]))
p2 = (int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
x1, x2 = head_pose_points(img, rotation_vector, translation_vector, camera_matrix)
# measuring the angles
try:
m = (p2[1] - p1[1]) / (p2[0] - p1[0])
ang1 = int(math.degrees(math.atan(m)))
except ZeroDivisionError:
ang1 = 90
try:
m = (x2[1] - x1[1]) / (x2[0] - x1[0])
ang2 = int(math.degrees(math.atan(-1 / m)))
except ZeroDivisionError:
ang2 = 90
# print('angle 1: {}, angle 2: {}'.format(ang1, ang2))
# checking for angle 1
if ang1 >= THRESHOLD:
# cv2.putText(img, 'looking down', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
isLookingDown = True
elif ang1 <= -THRESHOLD:
# cv2.putText(img, 'looking up', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
isLookingUp = True
else:
# cv2.putText(img, 'looking front', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
isLookingFront = True
# checking for angle 2
if ang2 >= THRESHOLD:
# cv2.putText(img, 'looking right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
isLookingRight = True
elif ang2 <= -THRESHOLD:
# cv2.putText(img, 'looking left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
isLookingLeft = True
# checking for vertical and horizontal directions
if isLookingDown & isLookingRight:
head_down_right_count += 1
elif isLookingDown & isLookingLeft:
head_down_left_count += 1
elif isLookingUp & isLookingRight:
head_up_right_count += 1
elif isLookingUp & isLookingLeft:
head_up_left_count += 1
elif isLookingFront:
head_front_count += 1
# increment the face count
face_count += 1
# the percentages will be calculated here
head_up_right_perct = (Decimal(head_up_right_count) / Decimal(face_count)) * 100 if (face_count != 0) else 0
head_up_left_perct = (Decimal(head_up_left_count) / Decimal(face_count)) * 100 if (face_count != 0) else 0
head_down_right_perct = (Decimal(head_down_right_count) / Decimal(face_count)) * 100 if (face_count != 0) else 0
head_down_left_perct = (Decimal(head_down_left_count) / Decimal(face_count)) * 100 if (face_count != 0) else 0
head_front_perct = (Decimal(head_front_count) / Decimal(face_count)) * 100 if (face_count != 0) else 0
# the dictionary
percentages = {}
# collect the percentages to a dictionary
percentages['frame_name'] = "frame-{}".format(frame_count)
percentages['upright_perct'] = head_up_right_perct
percentages['upleft_perct'] = head_up_left_perct
percentages['downright_perct'] = head_down_right_perct
percentages['downleft_perct'] = head_down_left_perct
percentages['front_perct'] = head_front_perct
# append the calculated percentages to the frame_detections
frame_detections.append(percentages)
frame_count += 1
else:
break
return frame_detections, frame_rate
# this method will get the student gaze estimation summary for period
def get_student_gaze_estimation_summary_for_period(gaze_estimation_data):
# declare variables to add percentage values
looking_up_right_perct_combined = 0.0
looking_up_left_perct_combined = 0.0
looking_down_right_perct_combined = 0.0
looking_down_left_perct_combined = 0.0
looking_front_perct_combined = 0.0
# get the number of gaze estimation records to calculate the average
no_of_gaze_estimations = len(gaze_estimation_data)
individual_lec_gaze_estimations = []
gaze_estimation_labels = ["looking_up_and_right_perct", "looking_up_and_left_perct", "looking_down_and_right_perct", "looking_down_and_left_perct", "looking_front_perct"]
# iterate through the activities
for gaze_estimation in gaze_estimation_data:
individual_gaze_estimation = {}
individual_gaze_estimation["looking_up_and_right_perct"] = float(gaze_estimation['looking_up_and_right_perct'])
individual_gaze_estimation["looking_up_and_left_perct"] = float(gaze_estimation['looking_up_and_left_perct'])
individual_gaze_estimation["looking_down_and_right_perct"] = float(gaze_estimation['looking_down_and_right_perct'])
individual_gaze_estimation["looking_down_and_left_perct"] = float(gaze_estimation['looking_down_and_left_perct'])
individual_gaze_estimation["looking_front_perct"] = float(gaze_estimation['looking_front_perct'])
looking_up_right_perct_combined += float(gaze_estimation['looking_up_and_right_perct'])
looking_up_left_perct_combined += float(gaze_estimation['looking_up_and_left_perct'])
looking_down_right_perct_combined += float(gaze_estimation['looking_down_and_right_perct'])
looking_down_left_perct_combined += float(gaze_estimation['looking_down_and_left_perct'])
looking_front_perct_combined += float(gaze_estimation['looking_front_perct'])
# append to the list
individual_lec_gaze_estimations.append(individual_gaze_estimation)
# calculate the average percentages
looking_up_right_average_perct = round((looking_up_right_perct_combined / no_of_gaze_estimations), 1) if no_of_gaze_estimations > 0 else 0
looking_up_left_average_perct = round((looking_up_left_perct_combined / no_of_gaze_estimations), 1) if no_of_gaze_estimations > 0 else 0
looking_down_right_average_perct = round((looking_down_right_perct_combined / no_of_gaze_estimations), 1) if no_of_gaze_estimations > 0 else 0
looking_down_left_average_perct = round((looking_down_left_perct_combined / no_of_gaze_estimations), 1) if no_of_gaze_estimations > 0 else 0
looking_front_average_perct = round((looking_front_perct_combined / no_of_gaze_estimations), 1) if no_of_gaze_estimations > 0 else 0
percentages = {}
percentages["looking_up_and_right_perct"] = looking_up_right_average_perct
percentages["looking_up_and_left_perct"] = looking_up_left_average_perct
percentages["looking_down_and_right_perct"] = looking_down_right_average_perct
percentages["looking_down_and_left_perct"] = looking_down_left_average_perct
percentages["looking_front_perct"] = looking_front_average_perct
return percentages, individual_lec_gaze_estimations, gaze_estimation_labels
# this method will get the lecture gaze estimation frame groupings
def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dict):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\gaze\\{}".format(video_name))
VIDEO_PATH = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
print('video path: ', VIDEO_PATH)
# load the face detection model
face_model = get_face_detector()
# load the facial landmark model
landmark_model = get_landmark_model()
cap = cv2.VideoCapture(VIDEO_PATH)
ret, img = cap.read()
size = img.shape
# 3D model points.
model_points = np.array([
(0.0, 0.0, 0.0), # Nose tip
(0.0, -330.0, -65.0), # Chin
(-225.0, 170.0, -135.0), # Left eye left corner
(225.0, 170.0, -135.0), # Right eye right corner
(-150.0, -150.0, -125.0), # Left Mouth corner
(150.0, -150.0, -125.0) # Right mouth corner
])
# set a threshold angle
# THRESHOLD = 15
THRESHOLD = 22
# THRESHOLD = 30
# THRESHOLD = 45
# THRESHOLD = 48
# Camera internals
focal_length = size[1]
center = (size[1] / 2, size[0] / 2)
camera_matrix = np.array(
[[focal_length, 0, center[0]],
[0, focal_length, center[1]],
[0, 0, 1]], dtype="double"
)
# initializing the count variables
frame_count = 0
# get the frame differences for each frame group
frame_group_diff = {}
# loop through the frame group dictionary
for key in frame_group_dict.keys():
split_values = key.split("-")
value1 = int(split_values[0])
value2 = int(split_values[1])
diff = value2 - value1
# assign the difference
frame_group_diff[key] = diff if diff > 0 else 1
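# e.g. the key "300-600" splits into value1 = 300 and value2 = 600, giving
# frame_group_diff["300-600"] = 300; a degenerate key such as "1500-1500"
# is clamped to 1 so later divisions cannot hit zero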
# looping through the frames
while True:
# read the next frame into 'img' so the detections below run on the
# current frame rather than the first one
ret, img = cap.read()
if ret:
# initializing the variables
# setting up the count variables
head_front_count = 0
head_up_right_count = 0
head_up_left_count = 0
head_down_right_count = 0
head_down_left_count = 0
face_count = 0
detection_count = 0
# prediction happens here
# find the number of faces
faces = find_faces(img, face_model)
# iterate through each detected face
for face in faces:
# declaring boolean variables
isLookingUp = False
isLookingDown = False
isLookingRight = False
isLookingLeft = False
isLookingFront = False
# retrieving the facial landmarks and face bounding box coordinates
marks, facebox = detect_marks(img, landmark_model, face)
# mark_detector.draw_marks(img, marks, color=(0, 255, 0))
image_points = np.array([
marks[30], # Nose tip
marks[8], # Chin
marks[36], # Left eye left corner
marks[45], # Right eye right corner
marks[48], # Left Mouth corner
marks[54] # Right mouth corner
], dtype="double")
dist_coeffs = np.zeros((4, 1)) # Assuming no lens distortion
(success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix,
dist_coeffs, flags=cv2.SOLVEPNP_UPNP)
# Project a 3D point (0, 0, 1000.0) onto the image plane.
# We use this to draw a line sticking out of the nose
(nose_end_point2D, jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector,
translation_vector, camera_matrix, dist_coeffs)
p1 = (int(image_points[0][0]), int(image_points[0][1]))
p2 = (int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
x1, x2 = head_pose_points(img, rotation_vector, translation_vector, camera_matrix)
# measuring the angles
try:
m = (p2[1] - p1[1]) / (p2[0] - p1[0])
ang1 = int(math.degrees(math.atan(m)))
except ZeroDivisionError:
# the projected nose line is vertical
ang1 = 90
try:
m = (x2[1] - x1[1]) / (x2[0] - x1[0])
ang2 = int(math.degrees(math.atan(-1 / m)))
except ZeroDivisionError:
# the head-pose line is vertical, or its slope is zero
ang2 = 90
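# ang1 measures the vertical tilt of the projected nose line (positive when
# the head points down), while ang2 uses the perpendicular slope of the
# head-pose line to capture the horizontal left/right turn; a vertical line
# has no finite slope, hence the 90-degree fallback above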
# print('angle 1: {}, angle 2: {}'.format(ang1, ang2))
# checking for angle 1
if ang1 >= THRESHOLD:
isLookingDown = True
elif ang1 <= -THRESHOLD:
isLookingUp = True
else:
isLookingFront = True
# checking for angle 2
if ang2 >= THRESHOLD:
isLookingRight = True
elif ang2 <= -THRESHOLD:
isLookingLeft = True
# checking for vertical and horizontal directions
if isLookingDown and isLookingRight:
head_down_right_count += 1
elif isLookingDown and isLookingLeft:
head_down_left_count += 1
elif isLookingUp and isLookingRight:
head_up_right_count += 1
elif isLookingUp and isLookingLeft:
head_up_left_count += 1
elif isLookingFront:
head_front_count += 1
# increment the detection count
detection_count += 1
# finding the time landmark that the current frame is in
for index, i in enumerate(frame_landmarks):
j = index + 1
# checking whether the next index is within the range
if j < len(frame_landmarks):
next_value = frame_landmarks[j]
# checking the correct time landmark range
if (frame_count >= i) and (frame_count < next_value):
frame_name = "{}-{}".format(i, next_value)
print('frame group dict: ', frame_group_dict[frame_name])
frame_group_dict[frame_name]['upright_count'] += head_up_right_count
frame_group_dict[frame_name]['upleft_count'] += head_up_left_count
frame_group_dict[frame_name]['downright_count'] += head_down_right_count
frame_group_dict[frame_name]['downleft_count'] += head_down_left_count
frame_group_dict[frame_name]['front_count'] += head_front_count
frame_group_dict[frame_name]['detection_count'] += detection_count
# increment the frame count
frame_count += 1
else:
break
# calculate the percentage values
for key in frame_group_dict.keys():
frame_group_details = frame_group_dict[key]
frame_group_upright_count = frame_group_details['upright_count']
frame_group_upleft_count = frame_group_details['upleft_count']
frame_group_downright_count = frame_group_details['downright_count']
frame_group_downleft_count = frame_group_details['downleft_count']
frame_group_front_count = frame_group_details['front_count']
print('detection count: ', frame_group_details['detection_count'])
group_detection_count = 1 if frame_group_details['detection_count'] == 0 else frame_group_details['detection_count']
frame_group_upright_perct = float(frame_group_upright_count / group_detection_count) * 100
frame_group_upleft_perct = float(frame_group_upleft_count / group_detection_count) * 100
frame_group_downright_perct = float(frame_group_downright_count / group_detection_count) * 100
frame_group_downleft_perct = float(frame_group_downleft_count / group_detection_count) * 100
frame_group_front_perct = float(frame_group_front_count / group_detection_count) * 100
# assign the values to the same dictionary
frame_group_dict[key]['upright_perct'] = round(frame_group_upright_perct, 1)
frame_group_dict[key]['upleft_perct'] = round(frame_group_upleft_perct, 1)
frame_group_dict[key]['downright_perct'] = round(frame_group_downright_perct, 1)
frame_group_dict[key]['downleft_perct'] = round(frame_group_downleft_perct, 1)
frame_group_dict[key]['front_perct'] = round(frame_group_front_perct, 1)
# removing irrelevant items from the dictionary
frame_group_dict[key].pop('upright_count')
frame_group_dict[key].pop('upleft_count')
frame_group_dict[key].pop('downright_count')
frame_group_dict[key].pop('downleft_count')
frame_group_dict[key].pop('front_count')
frame_group_dict[key].pop('detection_count')
# define the labels
labels = ['upright_perct', 'upleft_perct', 'downright_perct', 'downleft_perct', 'front_perct']
# return the dictionary
return frame_group_dict, labels
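# a minimal usage sketch, assuming a hypothetical video 'lecture01.mp4' and
# the getFrameLandmarks() helper defined elsewhere in this repository:
def _demo_gaze_frame_groupings():
    frame_landmarks, frame_group_dict = getFrameLandmarks('lecture01.mp4', 'Gaze')
    groupings, labels = gaze_estimation_frame_groupings('lecture01.mp4', frame_landmarks, frame_group_dict)
    # each frame group maps to its five rounded gaze percentages
    for group_name in groupings:
        print(group_name, {label: groupings[group_name][label] for label in labels})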
# this section will handle some database operations
def save_frame_detections(video_name):
# retrieve the lecture gaze estimation id
lec_gaze = LectureGazeEstimation.objects.filter(lecture_video_id__video_name=video_name)
lec_gaze_ser = LectureGazeEstimationSerializer(lec_gaze, many=True)
lec_gaze_data = lec_gaze_ser.data[0]
lec_gaze_id = lec_gaze_data['id']
# create a new lecture gaze frame recognitions id
last_lec_gaze_frame_recognitions = LectureGazeFrameRecognitions.objects.order_by(
'lecture_gaze_frame_recognition_id').last()
new_lecture_gaze_frame_recognitions_id = "LGFR00001" if (
last_lec_gaze_frame_recognitions is None) else \
ig.generate_new_id(last_lec_gaze_frame_recognitions.lecture_gaze_frame_recognition_id)
# calculate the frame detections
frame_detections, frame_rate = get_lecture_gaze_esrimation_for_frames(video_name)
# to be added to the field 'frame_recognition_details' in the Lecture Gaze Frame Recognitions
frame_recognition_details = []
# save the new lecture gaze frame recognitions
for detection in frame_detections:
lec_gaze_frame_recognition_details = LectureGazeFrameRecognitionDetails()
lec_gaze_frame_recognition_details.frame_name = detection['frame_name']
lec_gaze_frame_recognition_details.upright_perct = detection['upright_perct']
lec_gaze_frame_recognition_details.upleft_perct = detection['upleft_perct']
lec_gaze_frame_recognition_details.downright_perct = detection['downright_perct']
lec_gaze_frame_recognition_details.downleft_perct = detection['downleft_perct']
lec_gaze_frame_recognition_details.front_perct = detection['front_perct']
frame_recognition_details.append(lec_gaze_frame_recognition_details)
lec_gaze_frame_recognitions = LectureGazeFrameRecognitions()
lec_gaze_frame_recognitions.lecture_gaze_frame_recognition_id = new_lecture_gaze_frame_recognitions_id
lec_gaze_frame_recognitions.lecture_gaze_id_id = lec_gaze_id
lec_gaze_frame_recognitions.frame_recognition_details = frame_recognition_details
lec_gaze_frame_recognitions.save()
# now return the frame recognitions
return frame_detections
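# a minimal usage sketch, assuming gaze estimation has already been run for
# the hypothetical video 'lecture01.mp4' (so a matching LectureGazeEstimation
# record exists):
# frame_detections = save_frame_detections('lecture01.mp4')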
# this method will save gaze frame groupings to the database
def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
frame_group_percentages, gaze_labels = gaze_estimation_frame_groupings(video_name, frame_landmarks,
frame_group_dict)
# save the frame group details into db
last_lec_gaze_frame_grouping = LectureGazeFrameGroupings.objects.order_by('lecture_gaze_frame_groupings_id').last()
new_lecture_gaze_frame_grouping_id = "LGFG00001" if (last_lec_gaze_frame_grouping is None) else \
ig.generate_new_id(last_lec_gaze_frame_grouping.lecture_gaze_frame_groupings_id)
# retrieve the lecture gaze estimation id
lec_gaze = LectureGazeEstimation.objects.filter(lecture_video_id__video_name=video_name)
lec_gaze_ser = LectureGazeEstimationSerializer(lec_gaze, many=True)
lec_gaze_id = lec_gaze_ser.data[0]['id']
# create the frame group details
frame_group_details = []
for key in frame_group_percentages.keys():
# create an object of type 'LectureGazeFrameGroupDetails'
lec_gaze_frame_group_details = LectureGazeFrameGroupDetails()
lec_gaze_frame_group_details.frame_group = key
lec_gaze_frame_group_details.frame_group_percentages = frame_group_percentages[key]
frame_group_details.append(lec_gaze_frame_group_details)
new_lec_gaze_frame_groupings = LectureGazeFrameGroupings()
new_lec_gaze_frame_groupings.lecture_gaze_frame_groupings_id = new_lecture_gaze_frame_grouping_id
new_lec_gaze_frame_groupings.lecture_gaze_id_id = lec_gaze_id
new_lec_gaze_frame_groupings.frame_group_details = frame_group_details
# save
new_lec_gaze_frame_groupings.save()
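# a minimal usage sketch, assuming the frame landmarks were generated first;
# the video name is hypothetical:
# frame_landmarks, frame_group_dict = getFrameLandmarks('lecture01.mp4', 'Gaze')
# save_frame_groupings('lecture01.mp4', frame_landmarks, frame_group_dict)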
import jinja2 as ji
import pdfkit
import os
# from DemoProject import jinja2
from integrated_slpes import jinja2
def generate_pdf_file(report_details):
# templateLoader = jinja2.FileSystemLoader(searchpath="../")
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
TEMPLATE_DIR = os.path.join(BASE_DIR, "FirstApp", "templates", "FirstApp")
HTML_PATH = os.path.join(TEMPLATE_DIR, "pdf_template_1.html")
PDF_DIRECTORY = os.path.join(BASE_DIR, "FirstApp", "files")
templateLoader = ji.FileSystemLoader(TEMPLATE_DIR)
new_env = jinja2.environment()
templateEnv = ji.Environment(loader=templateLoader)
TEMPLATE_FILE = "pdf_template.html"
template = templateEnv.get_template(TEMPLATE_FILE)
print('variables: ', templateEnv.globals['dict'])
# render the template
outputText = template.render(lecturer_name=report_details['lecturer_name'], subject=report_details['subject_name'], date=report_details['date'], static=new_env.globals['static'])
# write the rendered output to pdf_template_1.html for pdfkit to convert
with open(HTML_PATH, "w") as html_file:
html_file.write(outputText)
# create a new pdf file path
NEW_PDF_PATH = os.path.join(PDF_DIRECTORY, "activity.pdf")
# 'enable-local-file-access' is a boolean wkhtmltopdf flag, so it is passed
# with no value; 'load-error-handling' keeps conversion going past missing assets
options = {'enable-local-file-access': None, 'load-error-handling': 'ignore'}
# create a new pdf file
pdfkit.from_file(HTML_PATH, NEW_PDF_PATH, options=options)
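# a minimal usage sketch; the dictionary below is hypothetical, but its key
# names are taken from the template.render() call above
def _demo_generate_pdf():
    report_details = {
        'lecturer_name': 'J. Doe',  # hypothetical value
        'subject_name': 'Software Engineering',  # hypothetical value
        'date': '2020-10-17',  # hypothetical value
    }
    generate_pdf_file(report_details)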
......@@ -117,10 +117,6 @@ def calculate_pose_estimation_for_student(video_name, student, poses):
left_upper_x = 0 if (middle_x - fraction) < 0 else (middle_x - fraction)
print('head_y: ', head_y)
print('fraction: ', fraction)
print('distance: ', distance)
print('left_upper_x: ', left_upper_x)
# extract the new image
new_img = detection_img[head_y:head_y+fraction, left_upper_x:left_upper_x+distance]
......
import os
import cv2
import shutil
import datetime
from FirstApp.MongoModels import *
from FirstApp.serializers import *
from . import id_generator as ig
def VideoExtractor(request):
......@@ -68,3 +74,223 @@ def getExtractedFrames(request):
else:
return "No extracted frames were found"
# this method will retrieve the time landmarks for a lecture video
def getTimeLandmarks(video_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
VIDEO_PATH = os.path.join(BASE_DIR, "assets", "FirstApp", "videos", video_name)
# iteration
video = cv2.VideoCapture(VIDEO_PATH)
no_of_frames = video.get(cv2.CAP_PROP_FRAME_COUNT)
fps = int(video.get(cv2.CAP_PROP_FPS))
# calculating the duration in seconds
duration = int(no_of_frames / fps)
# define the number of time gaps required
THRESHOLD_GAP = 5
# calculating the real duration
real_duration = datetime.timedelta(seconds=(duration+THRESHOLD_GAP))
# define a unit gap
unit_gap = int(duration / THRESHOLD_GAP)
initial_landmark = 0
# time_landmarks = ['0:00:00']
time_landmarks = []
time_landmarks_values = [0]
# loop through the threshold gap limit to define the time landmarks
for i in range(THRESHOLD_GAP):
initial_landmark += unit_gap
time_landmark = str(datetime.timedelta(seconds=initial_landmark))
time_landmark_value = initial_landmark
time_landmarks.append(time_landmark)
time_landmarks_values.append(time_landmark_value)
# append the final time
time_landmarks.append(str(real_duration))
time_landmarks_values.append(duration)
return time_landmarks
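# worked example: a 300 s video with THRESHOLD_GAP = 5 gives unit_gap = 60,
# so the returned landmarks are ['0:01:00', '0:02:00', '0:03:00', '0:04:00',
# '0:05:00', '0:05:05'] -- the final entry is the duration padded by the
# 5 s threshold gap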
# this method will retrieve the frame landmarks for a lecture video
def getFrameLandmarks(video_name, category):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
VIDEO_PATH = os.path.join(BASE_DIR, "assets", "FirstApp", "videos", video_name)
# iteration
video = cv2.VideoCapture(VIDEO_PATH)
no_of_frames = video.get(cv2.CAP_PROP_FRAME_COUNT)
int_no_of_frames = int(no_of_frames)
fps = int(video.get(cv2.CAP_PROP_FPS))
# list of categories
categories = ["Activity", "Emotion", "Gaze"]
# define the number of time gaps required
THRESHOLD_GAP = 5
# define a frame gap
frame_gap = int(int_no_of_frames / THRESHOLD_GAP)
initial_frame_landmark = 0
# define frame landmarks
frame_landmarks = [0]
# frame_landmarks = []
# loop through the threshold gap limit to define the time landmarks
for i in range(THRESHOLD_GAP):
initial_frame_landmark += frame_gap
frame_landmarks.append(initial_frame_landmark)
# append the final frame
frame_landmarks.append(int_no_of_frames)
# defining the frame group dictionary
frame_group_list = []
# creating frame group names
for index, landmark in enumerate(frame_landmarks):
j = index + 1
# if the next index is within the range of the list
if j < len(frame_landmarks):
next_value = frame_landmarks[j]
group_name = "{}-{}".format(landmark, next_value)
# append to the list
frame_group_list.append(group_name)
# define a dictionary to hold the frame groups
frame_group_dict = {}
# checking for the category
if category == categories[0]:
# loop through the group names to create a dictionary
for name in frame_group_list:
frame_group_dict[name] = {'phone_count': 0, 'listen_count': 0, 'note_count': 0, 'detection_count': 0}
elif category == categories[1]:
# loop through the group names to create a dictionary
for name in frame_group_list:
frame_group_dict[name] = {'happy_count': 0, 'sad_count': 0, 'angry_count': 0, 'surprise_count': 0, 'neutral_count': 0, 'detection_count': 0}
elif category == categories[2]:
# loop through the group names to create a dictionary
for name in frame_group_list:
frame_group_dict[name] = {'upright_count': 0, 'upleft_count': 0, 'downright_count': 0, 'downleft_count': 0,
'front_count': 0, 'detection_count': 0}
return frame_landmarks, frame_group_dict
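# worked example: a video with 1500 frames and THRESHOLD_GAP = 5 gives
# frame_gap = 300 and frame_landmarks = [0, 300, 600, 900, 1200, 1500, 1500];
# for the "Gaze" category the returned dictionary therefore holds the keys
# "0-300", "300-600", "600-900", "900-1200", "1200-1500" and the degenerate
# "1500-1500", each mapped to zero-initialised counters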
# this section will handle some database operations
def save_time_landmarks(video_name):
last_lec_video_time_landmarks = LectureVideoTimeLandmarks.objects.order_by('lecture_video_time_landmarks_id').last()
new_lecture_video_time_landmarks_id = "LVTL00001" if (last_lec_video_time_landmarks is None) else \
ig.generate_new_id(last_lec_video_time_landmarks.lecture_video_time_landmarks_id)
# retrieve lecture video details
lec_video = LectureVideo.objects.filter(video_name=video_name)
lec_video_ser = LectureVideoSerializer(lec_video, many=True)
lec_video_id = lec_video_ser.data[0]['id']
# save the landmark details in the db
time_landmarks = getTimeLandmarks(video_name)
db_time_landmarks = []
# loop through the time landmarks
for landmark in time_landmarks:
landmark_obj = Landmarks()
landmark_obj.landmark = landmark
db_time_landmarks.append(landmark_obj)
new_lec_video_time_landmarks = LectureVideoTimeLandmarks()
new_lec_video_time_landmarks.lecture_video_time_landmarks_id = new_lecture_video_time_landmarks_id
new_lec_video_time_landmarks.lecture_video_id_id = lec_video_id
new_lec_video_time_landmarks.time_landmarks = db_time_landmarks
new_lec_video_time_landmarks.save()
# this method will save frame landmarks to the database
def save_frame_landmarks(video_name):
# retrieve the previous lecture video frame landmarks details
last_lec_video_frame_landmarks = LectureVideoFrameLandmarks.objects.order_by(
'lecture_video_frame_landmarks_id').last()
new_lecture_video_frame_landmarks_id = "LVFL00001" if (last_lec_video_frame_landmarks is None) else \
ig.generate_new_id(last_lec_video_frame_landmarks.lecture_video_frame_landmarks_id)
frame_landmarks, frame_group_dict = getFrameLandmarks(video_name, "Activity")
# retrieve lecture video details
lec_video = LectureVideo.objects.filter(video_name=video_name)
lec_video_ser = LectureVideoSerializer(lec_video, many=True)
lec_video_id = lec_video_ser.data[0]['id']
# save the frame landmarks details into db
db_frame_landmarks = []
for landmark in frame_landmarks:
landmark_obj = Landmarks()
landmark_obj.landmark = landmark
db_frame_landmarks.append(landmark_obj)
new_lec_video_frame_landmarks = LectureVideoFrameLandmarks()
new_lec_video_frame_landmarks.lecture_video_frame_landmarks_id = new_lecture_video_frame_landmarks_id
new_lec_video_frame_landmarks.lecture_video_id_id = lec_video_id
new_lec_video_frame_landmarks.frame_landmarks = db_frame_landmarks
new_lec_video_frame_landmarks.save()
# now return the frame landmarks and the frame group dictionary
return frame_landmarks, frame_group_dict
# this method will retrieve the frame landmarks from the database
def get_frame_landmarks(video_name):
frame_landmarks = []
# retrieve frame landmarks from db
lec_video_frame_landmarks = LectureVideoFrameLandmarks.objects.filter(lecture_video_id__video_name=video_name)
lec_video_frame_landmarks_ser = LectureVideoFrameLandmarksSerializer(lec_video_frame_landmarks, many=True)
lec_video_frame_landmarks_data = lec_video_frame_landmarks_ser.data[0]
retrieved_frame_landmarks = lec_video_frame_landmarks_data["frame_landmarks"]
# creating a new list to display in the frontend
for landmark in retrieved_frame_landmarks:
frame_landmarks.append(landmark['landmark'])
# now return the frame landmarks
return frame_landmarks
\ No newline at end of file
# Generated by Django 2.2.11 on 2020-03-16 12:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('FirstApp', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='RegisterUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('firstName', models.CharField(max_length=20)),
('lastName', models.CharField(max_length=30)),
('email', models.CharField(max_length=30)),
('password', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Video',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('path', models.CharField(max_length=100)),
('duration', models.CharField(max_length=100)),
('hours', models.IntegerField()),
('minutes', models.IntegerField()),
('seconds', models.IntegerField()),
],
),
migrations.CreateModel(
name='VideoMeta',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fps', models.IntegerField()),
('frame_count', models.IntegerField()),
('happy_count', models.IntegerField()),
('sad_count', models.IntegerField()),
('angry_count', models.IntegerField()),
('neutral_count', models.IntegerField()),
('surprise_count', models.IntegerField()),
('happy_perct', models.IntegerField()),
('sad_perct', models.IntegerField()),
('angry_perct', models.IntegerField()),
('neutral_perct', models.IntegerField()),
('surprise_perct', models.IntegerField()),
],
),
]
# Generated by Django 2.2.11 on 2020-05-10 15:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('FirstApp', '0002_registeruser_video_videometa'),
]
operations = [
migrations.CreateModel(
name='LectureEmotionReport',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lecture_id', models.CharField(max_length=10)),
('happy_perct', models.FloatField()),
('sad_perct', models.FloatField()),
('angry_perct', models.FloatField()),
('surprise_perct', models.FloatField()),
('disgust_perct', models.FloatField()),
('neutral_perct', models.FloatField()),
],
),
]
# Generated by Django 2.2.11 on 2020-05-11 16:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('FirstApp', '0003_lectureemotionreport'),
]
operations = [
migrations.CreateModel(
name='Lecture',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField(auto_created=True)),
('lecture_id', models.CharField(max_length=10)),
],
),
]
# Generated by Django 2.2.11 on 2020-05-13 10:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('FirstApp', '0004_lecture'),
]
operations = [
migrations.CreateModel(
name='Faculty',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('faculty_id', models.CharField(max_length=10)),
('name', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Lecturer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lecturer_id', models.CharField(max_length=7)),
('fname', models.TextField()),
('lname', models.TextField()),
('email', models.EmailField(max_length=254)),
('telephone', models.CharField(max_length=10)),
('faculty', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='FirstApp.Faculty')),
],
),
migrations.AlterField(
model_name='lecture',
name='date',
field=models.DateTimeField(auto_created=True, default=None),
),
migrations.CreateModel(
name='Subject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('subject_code', models.TextField()),
('name', models.TextField()),
('year', models.IntegerField()),
('faculty', models.ForeignKey(default={}, on_delete=django.db.models.deletion.CASCADE, to='FirstApp.Faculty')),
],
),
migrations.CreateModel(
name='LecturerSubject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lec_subject_id', models.CharField(max_length=10)),
('lecturer_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='FirstApp.Lecturer')),
('subjects', models.ManyToManyField(to='FirstApp.Subject')),
],
),
]
# Generated by Django 2.2.11 on 2020-05-13 15:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('FirstApp', '0005_auto_20200513_1551'),
]
operations = [
migrations.CreateModel(
name='LecturerCredentials',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=15)),
('username', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='FirstApp.Lecturer')),
],
),
]
# Generated by Django 2.2.11 on 2020-08-25 11:28
import FirstApp.MongoModels
from django.db import migrations, models
import django.db.models.deletion
import djongo.models.fields
class Migration(migrations.Migration):
dependencies = [
('FirstApp', '0006_lecturercredentials'),
]
operations = [
migrations.RenameField(
model_name='lectureemotionreport',
old_name='lecture_id',
new_name='lecture_emotion_id',
),
migrations.AlterField(
model_name='lectureemotionreport',
name='angry_perct',
field=models.DecimalField(decimal_places=1, default=0.0, max_digits=3),
),
migrations.AlterField(
model_name='lectureemotionreport',
name='disgust_perct',
field=models.DecimalField(decimal_places=1, default=0.0, max_digits=3),
),
migrations.AlterField(
model_name='lectureemotionreport',
name='happy_perct',
field=models.DecimalField(decimal_places=1, default=0.0, max_digits=3),
),
migrations.AlterField(
model_name='lectureemotionreport',
name='neutral_perct',
field=models.DecimalField(decimal_places=1, default=0.0, max_digits=3),
),
migrations.AlterField(
model_name='lectureemotionreport',
name='sad_perct',
field=models.DecimalField(decimal_places=1, default=0.0, max_digits=3),
),
migrations.AlterField(
model_name='lectureemotionreport',
name='surprise_perct',
field=models.DecimalField(decimal_places=1, default=0.0, max_digits=3),
),
migrations.CreateModel(
name='LectureVideo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lecture_video_id', models.CharField(max_length=10)),
('date', models.DateField()),
('video_name', models.CharField(max_length=50)),
('video_length', models.DurationField()),
('lecturer', models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='FirstApp.Lecturer')),
('subject', models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='FirstApp.Subject')),
],
),
migrations.CreateModel(
name='LectureGazeEstimation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lecture_gaze_id', models.CharField(max_length=10)),
('lecture_video_id',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='FirstApp.LectureVideo')),
],
),
migrations.CreateModel(
name='LectureActivity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lecture_activity_id', models.CharField(max_length=10)),
('talking_perct', models.DecimalField(decimal_places=1, default=0.0, max_digits=3)),
('listening_perct', models.DecimalField(decimal_places=1, default=0.0, max_digits=3)),
('writing_perct', models.DecimalField(decimal_places=1, default=0.0, max_digits=3)),
('phone_perct', models.DecimalField(decimal_places=1, default=0.0, max_digits=3)),
('lecture_video_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='FirstApp.LectureVideo')),
],
),
migrations.CreateModel(
name='FacultyTimetable',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timetable_id', models.CharField(max_length=10)),
('timetable', djongo.models.fields.ArrayField(model_container=FirstApp.MongoModels.DateTimeTable)),
('faculty', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='FirstApp.Faculty')),
],
),
migrations.AddField(
model_name='lectureemotionreport',
name='lecture_video_id',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='FirstApp.LectureVideo'),
),
]
# Generated by Django 2.2.11 on 2020-08-25 12:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('FirstApp', '0007_auto_20200825_1658'),
]
operations = [
migrations.AddField(
model_name='lecturegazeestimation',
name='looking_down_and_left_perct',
field=models.DecimalField(decimal_places=1, default=0.0, max_digits=3),
),
migrations.AddField(
model_name='lecturegazeestimation',
name='looking_down_and_right_perct',
field=models.DecimalField(decimal_places=1, default=0.0, max_digits=3),
),
migrations.AddField(
model_name='lecturegazeestimation',
name='looking_front_perct',
field=models.DecimalField(decimal_places=1, default=0.0, max_digits=3),
),
migrations.AddField(
model_name='lecturegazeestimation',
name='looking_up_and_left_perct',
field=models.DecimalField(decimal_places=1, default=0.0, max_digits=3),
),
migrations.AddField(
model_name='lecturegazeestimation',
name='looking_up_and_right_perct',
field=models.DecimalField(decimal_places=1, default=0.0, max_digits=3),
),
]
# Generated by Django 2.2.11 on 2020-10-08 17:02
import FirstApp.MongoModels
from django.db import migrations, models
import django.db.models.deletion
import djongo.models.fields
class Migration(migrations.Migration):
dependencies = [
('FirstApp', '0008_auto_20200825_1821'),
]
operations = [
migrations.CreateModel(
name='LectureActivityFrameGroupings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lecture_activity_frame_groupings_id', models.CharField(default='', max_length=15)),
('frame_group_details', djongo.models.fields.ArrayField(model_container=FirstApp.MongoModels.LectureActivityFrameGroupDetails)),
('lecture_activity_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='FirstApp.LectureActivity')),
],
),
]
# Generated by Django 2.2.11 on 2020-10-09 09:10
import FirstApp.MongoModels
from django.db import migrations, models
import django.db.models.deletion
import djongo.models.fields
class Migration(migrations.Migration):
dependencies = [
('FirstApp', '0009_lectureactivityframegroupings'),
]
operations = [
migrations.CreateModel(
name='LectureVideoTimeLandmarks',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lecture_video_time_landmarks_id', models.CharField(max_length=15)),
('time_landmarks', djongo.models.fields.ArrayField(model_container=FirstApp.MongoModels.Landmarks)),
('lecture_video_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='FirstApp.LectureVideo')),
],
),
migrations.CreateModel(
name='LectureVideoFrameLandmarks',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lecture_video_frame_landmarks_id', models.CharField(max_length=15)),
('frame_landmarks', djongo.models.fields.ArrayField(model_container=FirstApp.MongoModels.Landmarks)),
('lecture_video_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='FirstApp.LectureVideo')),
],
),
]
# Generated by Django 2.2.11 on 2020-10-09 16:58
import FirstApp.MongoModels
from django.db import migrations, models
import django.db.models.deletion
import djongo.models.fields
class Migration(migrations.Migration):
dependencies = [
('FirstApp', '0010_lecturevideoframelandmarks_lecturevideotimelandmarks'),
]
operations = [
migrations.CreateModel(
name='LectureGazeFrameGroupings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lecture_gaze_frame_groupings_id', models.CharField(default='', max_length=15)),
('frame_group_details', djongo.models.fields.ArrayField(model_container=FirstApp.MongoModels.LectureGazeFrameGroupDetails)),
('lecture_gaze_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='FirstApp.LectureGazeEstimation')),
],
),
migrations.CreateModel(
name='LectureEmotionFrameGroupings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lecture_emotion_frame_groupings_id', models.CharField(default='', max_length=15)),
('frame_group_details', djongo.models.fields.ArrayField(model_container=FirstApp.MongoModels.LectureEmotionFrameGroupDetails)),
('lecture_emotion_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='FirstApp.LectureEmotionReport')),
],
),
]
# Generated by Django 2.2.11 on 2020-10-16 17:07
import FirstApp.MongoModels
from django.db import migrations, models
import django.db.models.deletion
import djongo.models.fields
class Migration(migrations.Migration):
dependencies = [
('FirstApp', '0011_lectureemotionframegroupings_lecturegazeframegroupings'),
]
operations = [
migrations.CreateModel(
name='LectureActivityFrameRecognitions',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lecture_activity_frame_recognition_id', models.CharField(max_length=15)),
('frame_recognition_details', djongo.models.fields.ArrayField(model_container=FirstApp.MongoModels.LectureActivityFrameRecognitionDetails)),
('lecture_activity_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='FirstApp.LectureActivity')),
],
),
]
# Generated by Django 2.2.11 on 2020-10-17 14:01
import FirstApp.MongoModels
from django.db import migrations, models
import django.db.models.deletion
import djongo.models.fields
class Migration(migrations.Migration):
dependencies = [
('FirstApp', '0012_lectureactivityframerecognitions'),
]
operations = [
migrations.CreateModel(
name='LectureEmotionFrameRecognitions',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lecture_emotion_frame_recognition_id', models.CharField(max_length=15)),
('frame_recognition_details', djongo.models.fields.ArrayField(model_container=FirstApp.MongoModels.LectureEmotionFrameRecognitionDetails)),
('lecture_emotion_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='FirstApp.LectureEmotionReport')),
],
),
]
# Generated by Django 2.2.11 on 2020-10-17 17:06
import FirstApp.MongoModels
from django.db import migrations, models
import django.db.models.deletion
import djongo.models.fields
class Migration(migrations.Migration):
dependencies = [
('FirstApp', '0013_lectureemotionframerecognitions'),
]
operations = [
migrations.CreateModel(
name='LectureGazeFrameRecognitions',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lecture_gaze_frame_recognition_id', models.CharField(max_length=15)),
('frame_recognition_details', djongo.models.fields.ArrayField(model_container=FirstApp.MongoModels.LectureGazeFrameRecognitionDetails)),
('lecture_gaze_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='FirstApp.LectureGazeEstimation')),
],
),
]
......@@ -201,6 +201,52 @@ class LectureVideoSerializer(serializers.ModelSerializer):
fields = '__all__'
# lecture video time landmarks serializer
class LectureVideoTimeLandmarksSerializer(serializers.ModelSerializer):
lecture_video_id = LectureVideoSerializer()
time_landmarks = serializers.SerializerMethodField()
def get_time_landmarks(self, obj):
return_data = []
for time_landmark in obj.time_landmarks:
landmark_details = {}
landmark_details["landmark"] = time_landmark.landmark
return_data.append(landmark_details)
return return_data
class Meta:
model = LectureVideoTimeLandmarks
fields = '__all__'
# lecture video frame landmarks serializer
class LectureVideoFrameLandmarksSerializer(serializers.ModelSerializer):
lecture_video_id = LectureVideoSerializer()
frame_landmarks = serializers.SerializerMethodField()
def get_frame_landmarks(self, obj):
return_data = []
for frame_landmark in obj.frame_landmarks:
landmark_details = {}
landmark_details["landmark"] = frame_landmark.landmark
return_data.append(landmark_details)
return return_data
class Meta:
model = LectureVideoFrameLandmarks
fields = '__all__'
# Lecture activity serializer
class LectureActivitySerializer(serializers.ModelSerializer):
lecture_video_id = LectureVideoSerializer()
......@@ -210,6 +256,64 @@ class LectureActivitySerializer(serializers.ModelSerializer):
fields = '__all__'
# Lecture Activity Frame Group Serializer
class LectureActivityFrameGroupingsSerializer(serializers.ModelSerializer):
lecture_activity_id = LectureActivitySerializer()
frame_group_details = serializers.SerializerMethodField()
def get_frame_group_details(self, obj):
return_data = []
for frame_group in obj.frame_group_details:
group_details = {}
group_details["frame_group_percentages"] = {}
group_details["frame_group"] = frame_group.frame_group
group_details["frame_group_percentages"]["phone_perct"] = frame_group.frame_group_percentages.phone_perct
group_details["frame_group_percentages"]["listen_perct"] = frame_group.frame_group_percentages.listen_perct
group_details["frame_group_percentages"]["note_perct"] = frame_group.frame_group_percentages.note_perct
return_data.append(group_details)
return return_data
class Meta:
model = LectureActivityFrameGroupings
fields = '__all__'
# lecture activity frame recognition serializer
class LectureActivityFrameRecognitionsSerializer(serializers.ModelSerializer):
lecture_activity_id = LectureActivitySerializer()
frame_recognition_details = serializers.SerializerMethodField()
# this method will be used to serialize the 'frame_recognition_details' field
def get_frame_recognition_details(self, obj):
return_data = []
for frame_recognition in obj.frame_recognition_details:
recognition = {}
recognition["frame_name"] = frame_recognition.frame_name
recognition["phone_perct"] = frame_recognition.phone_perct
recognition["listen_perct"] = frame_recognition.listen_perct
recognition["note_perct"] = frame_recognition.note_perct
return_data.append(recognition)
# return the data
return return_data
class Meta:
model = LectureActivityFrameRecognitions
fields = '__all__'
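# a minimal sketch of consuming this serializer, assuming DRF is configured
# and 'recognitions' is a hypothetical queryset of LectureActivityFrameRecognitions:
# serialized = LectureActivityFrameRecognitionsSerializer(recognitions, many=True)
# serialized.data[0]['frame_recognition_details']
# -> [{'frame_name': ..., 'phone_perct': ..., 'listen_perct': ..., 'note_perct': ...}, ...]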
# EMOTIONS section
# lecture emotions serializer
class LectureEmotionSerializer(serializers.ModelSerializer):
......@@ -220,9 +324,146 @@ class LectureEmotionSerializer(serializers.ModelSerializer):
fields = '__all__'
# Lecture emotion Frame Group Serializer
class LectureEmotionFrameGroupingsSerializer(serializers.ModelSerializer):
lecture_emotion_id = LectureEmotionSerializer()
frame_group_details = serializers.SerializerMethodField()
def get_frame_group_details(self, obj):
return_data = []
for frame_group in obj.frame_group_details:
group_details = {}
group_details["frame_group_percentages"] = {}
group_details["frame_group"] = frame_group.frame_group
group_details["frame_group_percentages"]["happy_perct"] = frame_group.frame_group_percentages.happy_perct
group_details["frame_group_percentages"]["sad_perct"] = frame_group.frame_group_percentages.sad_perct
group_details["frame_group_percentages"]["angry_perct"] = frame_group.frame_group_percentages.angry_perct
group_details["frame_group_percentages"]["disgust_perct"] = frame_group.frame_group_percentages.disgust_perct
group_details["frame_group_percentages"]["surprise_perct"] = frame_group.frame_group_percentages.surprise_perct
group_details["frame_group_percentages"]["neutral_perct"] = frame_group.frame_group_percentages.neutral_perct
return_data.append(group_details)
return return_data
class Meta:
model = LectureEmotionFrameGroupings
fields = '__all__'
# lecture emotion frame recognition serializer
class LectureEmotionFrameRecognitionsSerializer(serializers.ModelSerializer):
lecture_emotion_id = LectureEmotionSerializer()
frame_recognition_details = serializers.SerializerMethodField()
# this method will be used to serialize the 'frame_recognition_details' field
def get_frame_recognition_details(self, obj):
return_data = []
for frame_recognition in obj.frame_recognition_details:
recognition = {}
recognition["frame_name"] = frame_recognition.frame_name
recognition["happy_perct"] = frame_recognition.happy_perct
recognition["sad_perct"] = frame_recognition.sad_perct
recognition["angry_perct"] = frame_recognition.angry_perct
recognition["surprise_perct"] = frame_recognition.surprise_perct
recognition["neutral_perct"] = frame_recognition.neutral_perct
return_data.append(recognition)
# return the data
return return_data
class Meta:
model = LectureEmotionFrameRecognitions
fields = '__all__'
# lecture video meta serializer
class VideoMetaSerializer(serializers.ModelSerializer):
class Meta:
model = VideoMeta
fields = '__all__'
# lecture gaze serializer
class LectureGazeEstimationSerializer(serializers.ModelSerializer):
lecture_video_id = LectureVideoSerializer()
class Meta:
model = LectureGazeEstimation
fields = '__all__'
# Lecture gaze Frame Group Serializer
class LectureGazeFrameGroupingsSerializer(serializers.ModelSerializer):
lecture_gaze_id = LectureGazeEstimationSerializer()
frame_group_details = serializers.SerializerMethodField()
def get_frame_group_details(self, obj):
return_data = []
for frame_group in obj.frame_group_details:
group_details = {}
group_details["frame_group_percentages"] = {}
group_details["frame_group"] = frame_group.frame_group
group_details["frame_group_percentages"]["upright_perct"] = frame_group.frame_group_percentages.upright_perct
group_details["frame_group_percentages"]["upleft_perct"] = frame_group.frame_group_percentages.upleft_perct
group_details["frame_group_percentages"]["downright_perct"] = frame_group.frame_group_percentages.downright_perct
group_details["frame_group_percentages"]["downleft_perct"] = frame_group.frame_group_percentages.downleft_perct
group_details["frame_group_percentages"]["front_perct"] = frame_group.frame_group_percentages.front_perct
return_data.append(group_details)
return return_data
class Meta:
model = LectureGazeFrameGroupings
fields = '__all__'
# lecture gaze frame recognition serializer
class LectureGazeFrameRecognitionsSerializer(serializers.ModelSerializer):
lecture_gaze_id = LectureGazeEstimationSerializer()
frame_recognition_details = serializers.SerializerMethodField()
# this method will be used to serialize the 'frame_recognition_details' field
def get_frame_recognition_details(self, obj):
return_data = []
for frame_recognition in obj.frame_recognition_details:
recognition = {}
recognition["frame_name"] = frame_recognition.frame_name
recognition["upright_perct"] = frame_recognition.upright_perct
recognition["upleft_perct"] = frame_recognition.upleft_perct
recognition["downright_perct"] = frame_recognition.downright_perct
recognition["downleft_perct"] = frame_recognition.downleft_perct
recognition["front_perct"] = frame_recognition.front_perct
return_data.append(recognition)
# return the data
return return_data
class Meta:
model = LectureGazeFrameRecognitions
fields = '__all__'
\ No newline at end of file
......@@ -29,6 +29,7 @@
var global_lecture_video_id = '';
var global_video_name = '';
var global_lecturer_subject_index = 0;
var global_lecture_date = '';
//jquery
$(document).ready(function () {
......@@ -72,6 +73,8 @@
real_class = '.' + real_class;
let date = e.target.parentNode.parentNode.firstChild.innerHTML;
//assign the date
global_lecture_date = date;
fetch('http://127.0.0.1:8000/get-lecture-video/?lecturer=' + global_lecturer + '&date=' + date + '&index=' + global_lecturer_subject_index)
.then((res) => res.json())
......@@ -79,7 +82,7 @@
.catch((error) => alert('an error occurred: ' + error));
});
//this function will display the timetable for the lecturer
function createTimeTable(timetable, subject, lecturer) {
$('#loader').attr('hidden', true);
$('#timetable').attr('hidden', false);
......@@ -146,16 +149,19 @@
if (lectureVideo.isActivityFound) {
e.target.parentNode.parentNode.lastChild.innerHTML = '<button type="button" class="btn btn-primary">Results</button>';
e.target.parentNode.parentNode.lastChild.innerHTML = '<button type="button" class="btn btn-primary" id="result_btn">Results</button>';
} else {
e.target.parentNode.parentNode.lastChild.innerHTML = '<button type="button" class="btn btn-success">Process</button>';
}
//enable the 'generate report' button
$('#generate_report_before').attr('disabled', false);
$('#video_modal').modal();
}
//binding a click event for 'btn-primary' buttons
$(document).on('click', '.btn-primary', function (e) {
$(document).on('click', '#result_btn', function (e) {
//removing the previous frames (if there is any)
$('#main_frames').remove();
......@@ -215,15 +221,15 @@
//sending the POST request to process the lecture activities
fetch('http://127.0.0.1:8000/process-lecture-activity/?lecture_video_name=' + global_video_name + '&lecture_video_id=' + global_lecture_video_id)
.then((res) => res.json())
.then((out) => handleResponse(out.response, e))
.then((out) => handleActivityResponse(out.response, e))
.catch((error) => alert('error: ' + error));
});
//this is to change the button from 'process' to 'results'
function handleResponse(response, e) {
//this is to detect the response gained from the activity recognition process
function handleActivityResponse(response, e) {
//change the button, if the response is positive
if (response) {
e.target.parentNode.parentNode.lastChild.innerHTML = '<button type="button" class="btn btn-primary">Results</button>';
e.target.parentNode.parentNode.lastChild.innerHTML = '<button type="button" class="btn btn-primary" id="result_btn">Results</button>';
}
}
......@@ -736,7 +742,7 @@
//to handle the 'integrate' modal
$('#integrate_activity').click(function () {
$('#integrate_gaze').click(function () {
//define the student video src
let video_src = "{% static '' %}FirstApp/videos/" + global_video_name;
......@@ -768,7 +774,8 @@
response.map((frame) => {
let frame_name = frame.frame_name;
let phone_perct = Math.round(frame.phone_perct, 0);
let listen_perct = Math.round(frame.listening_perct, 0);
let listen_perct = Math.round(frame.listen_perct, 0);
{#let listen_perct = Math.round(frame.listening_perct, 0);#}
let note_perct = Math.round(frame.note_perct, 0);
//append to the html string
......@@ -852,7 +859,7 @@
$('#listening_instant_value').width(listening_number + '%');
*/
}, 1000);
}, 33);
//check for the current class
if (classes === play_class) {
......@@ -878,6 +885,29 @@
});
//this method will call the API to generate activity report
$('#generate_report_btn').click(function () {
//call the fetch API
//hide the message
$('#generate_report_message').hide();
fetch('http://127.0.0.1:8000/lecture-activity-report-generation/?lecturer=' + global_lecturer + '&subject=' + global_subject + '&date=' + global_lecture_date)
.then((res) => res.json())
.then((out) => {
//show the loader and loading message
$('#generate_report_loader').attr('hidden', true);
$('#generate_report_loading_message').attr('hidden', true);
$('#generateReportModal').modal('hide');
})
.catch((err) => alert('error: ' + err))
});
});
......@@ -901,9 +931,12 @@
{% load static %}
<!-- Page Heading -->
<div class="d-sm-flex align-items-center justify-content-between mb-4">
<h1 class="h3 mb-0 text-gray-800">Student Activity Recognition</h1>
</div>
{# <div class="d-sm-flex align-items-center justify-content-between mb-4">#}
{# <h1 class="h3 mb-0 text-gray-800">Student Activity Recognition</h1>#}
{# <button type="button" data-target="#generateReportModal" data-toggle="modal" class="d-none d-sm-inline-block btn btn-sm btn-primary shadow-sm" id="generate_report_before" disabled><i#}
{# class="fas fa-download fa-sm text-white-50"></i> Generate Report</button>#}
{# </div>#}
<!--first row -->
<div class="row p-2">
......@@ -1183,63 +1216,90 @@
<!--2nd column -->
<div class="col-lg-6">
<!--card content -->
<div class="card shadow mb-4">
<!--card header -->
<div class="card-header py-3">
<h5 class="m-0 font-weight-bold text-primary">Frame Detections</h5>
</div>
{# <div class="col-lg-6">#}
{# <!--card content -->#}
{# <div class="card shadow mb-4">#}
{# <!--card header -->#}
{# <div class="card-header py-3">#}
{# <h5 class="m-0 font-weight-bold text-primary">Frame Detections</h5>#}
{# </div>#}
{##}
{# <!--card body -->#}
{# <div class="text-center p-4" id="detection_frames">#}
{##}
{# <!--no content message-->#}
{# <div class="text-center p-2" id="no_detection_message_content">#}
{# <span class="font-italic">No frame is selected</span>#}
{# </div>#}
{##}
{# <div class="text-left m-3" id="detection_number_area" hidden>#}
{# <p>No of detections: <span id="no_of_detections"></span></p>#}
{# </div>#}
{# <!--the detection loader -->#}
{# <div class="text-center p-2" id="detection_loader" hidden>#}
{# <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"#}
{# alt="Loader">#}
{# </div>#}
{# </div>#}
{# </div>#}
<!--card body -->
<div class="text-center p-4" id="detection_frames">
<!--no content message-->
<div class="text-center p-2" id="no_detection_message_content">
<span class="font-italic">No frame is selected</span>
</div>
<div class="text-left m-3" id="detection_number_area" hidden>
<p>No of detections: <span id="no_of_detections"></span></p>
</div>
<!--the detection loader -->
<div class="text-center p-2" id="detection_loader" hidden>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}"
alt="Loader">
</div>
</div>
</div>
<!--detection person card -->
{# <div class="card shadow mb-4">#}
{# <!--card header -->#}
{# <div class="card-header py-3">#}
{# <h5 class="m-0 font-weight-bold text-primary">Detected Students (by activity#}
{# type)</h5>#}
{# </div>#}
{##}
{# <!--card body -->#}
{# <div class="text-center p-4" id="detection_students">#}
{# <!--activity type line -->#}
{# <div class="text-center p-2" id="activity_type" hidden>#}
{# <p>Activity Type: <span class="font-weight-bold" id="activity_type_text"></span>#}
{# </p>#}
{# </div>#}
{##}
{# <!--no content message-->#}
{# <div class="text-center p-2" id="no_detection_student_content">#}
{# <span class="font-italic">No activity type is selected</span>#}
{# </div>#}
{##}
{# <!--the detection student loader -->#}
{# <div class="text-center p-2" id="detection_student_loader" hidden>#}
{# <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"#}
{# alt="Loader">#}
{# </div>#}
{##}
{# </div>#}
{# </div>#}
{# </div>#}
<!--2nd column -->
<div class="col-lg-6">
<!--card -->
<div class="card shadow mb-4">
<!--card header -->
<div class="card-header py-3">
<h5 class="m-0 font-weight-bold text-primary">Detected Students (by activity
type)</h5>
<div class="card-header">
<h5 class="m-0 font-weight-bold text-primary">Integrated Evaluation</h5>
</div>
<!--card body -->
<div class="text-center p-4" id="detection_students">
<!--activity type line -->
<div class="text-center p-2" id="activity_type" hidden>
<p>Activity Type: <span class="font-weight-bold" id="activity_type_text"></span>
</p>
</div>
<!--no content message-->
<div class="text-center p-2" id="no_detection_student_content">
<span class="font-italic">No activity type is selected</span>
<div class="card-body">
<div class="text-center" id="integrate_message">
<span class="font-italic">The integrated version student and lecturer evaluations will display here.</span>
</div>
<!--the detection student loader -->
<div class="text-center p-2" id="detection_student_loader" hidden>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}"
alt="Loader">
<!--button -->
<div class="text-right m-4">
<button type="button" class="btn btn-outline-success" id="integrate_gaze">
Process
</button>
</div>
</div>
</div>
</div>
<!--end of 2nd column -->
</div>
......@@ -1250,63 +1310,37 @@
<!--1st column -->
<div class="col-lg-6">
<!--card -->
<div class="card shadow mb-4">
<!--card header -->
<div class="card-header">
<h5 class="m-0 font-weight-bold text-primary">Evaluated Students</h5>
</div>
<!--card body -->
<div class="card-body" id="evaluation_students">
<!--no content message-->
<div class="text-center p-2" id="no_evaluated_student_content">
<span class="font-italic">Press 'Evaluate' button to evaluate students</span>
</div>
<!--the detection student loader -->
<div class="text-center p-2" id="evaluate_student_loader" hidden>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}"
alt="Loader">
</div>
<!--end of student detection loader -->
</div>
</div>
{# <!--card -->#}
{# <div class="card shadow mb-4">#}
{# <!--card header -->#}
{# <div class="card-header">#}
{# <h5 class="m-0 font-weight-bold text-primary">Evaluated Students</h5>#}
{# </div>#}
{##}
{# <!--card body -->#}
{# <div class="card-body" id="evaluation_students">#}
{##}
{# <!--no content message-->#}
{# <div class="text-center p-2" id="no_evaluated_student_content">#}
{# <span class="font-italic">Press 'Evaluate' button to evaluate students</span>#}
{# </div>#}
{##}
{# <!--the detection student loader -->#}
{# <div class="text-center p-2" id="evaluate_student_loader" hidden>#}
{# <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"#}
{# alt="Loader">#}
{# </div>#}
{# <!--end of student detection loader -->#}
{##}
{##}
{# </div>#}
{##}
{# </div>#}
</div>
<!--end of 1st column -->
<!--2nd column -->
<div class="col-lg-6">
<!--card -->
<div class="card shadow mb-4">
<!--card header -->
<div class="card-header">
<h5 class="m-0 font-weight-bold text-primary">Integrated Evaluation</h5>
</div>
<!--card body -->
<div class="card-body">
<div class="text-center" id="integrate_message">
<span class="font-italic">The integrated version student and lecturer evaluations will display here.</span>
</div>
<!--button -->
<div class="text-right m-4">
<button type="button" class="btn btn-outline-success" id="integrate_activity">
Process
</button>
</div>
</div>
</div>
</div>
<!--end of 2nd column -->
</div>
<!--end of 3rd row -->
......@@ -1451,6 +1485,37 @@
</div>
<!-- generate report Modal-->
<div class="modal fade" id="generateReportModal" tabindex="-1" role="dialog" aria-labelledby="generateReportModal"
aria-hidden="true">
<div class="modal-dialog" role="document">
<div class="modal-content">
<div class="modal-header">
<h5 class="modal-title" id="exampleModalLabel">Generate Activity Report</h5>
<button class="close" type="button" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body">
<div id="generate_report_message">
Are you sure you want to generate the report without going through the content?
</div>
<div class="text-center" id="generate_report_loader" hidden>
<img src="{% static 'FirstApp/images/ajax-loader-1.gif' %}" alt="loader">y
</div>
<div class="text-center" id="generate_report_loading_message" hidden>
<span class="font-italic">This will take some time</span>
</div>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-primary" data-dismiss="modal" id="generate_report_btn">Yes</button>
<button type="button" class="btn btn-danger" data-dismiss="modal">No</button>
</div>
</div>
</div>
</div>
<!--integrate modal -->
<div class="modal fade" id="integrate_modal" tabindex="-1" role="dialog" aria-labelledby="integrate_modal">
<div class="modal-dialog" style="max-width: 1300px">
......@@ -1548,12 +1613,12 @@
<span class="font-italic">No video was found</span>
</div>
<video width="500" height="300" id="lecturer_video" controls>
<source src="#"
type="video/mp4">
Your browser does not support the video tag.
</video>
<!-- video -->
{# <video width="500" height="300" id="lecturer_video" controls>#}
{# <source src="#"#}
{# type="video/mp4">#}
{# Your browser does not support the video tag.#}
{# </video>#}
</div>
<!--end of lecture video section -->
......
......@@ -3,6 +3,8 @@
<head>
{% load static %}
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
......@@ -12,11 +14,11 @@
<title>SB Admin 2 - Charts</title>
<!-- Custom fonts for this template-->
<link href="vendor/fontawesome-free/css/all.min.css" rel="stylesheet" type="text/css">
<link href="{% static 'FirstApp/vendor/fontawesome-free/css/all.min.css' %}" rel="stylesheet" type="text/css">
<link href="https://fonts.googleapis.com/css?family=Nunito:200,200i,300,300i,400,400i,600,600i,700,700i,800,800i,900,900i" rel="stylesheet">
<!-- Custom styles for this template-->
<link href="css/sb-admin-2.min.css" rel="stylesheet">
<link href="{% static 'FirstApp/css/sb-admin-2.min.css' %}" rel="stylesheet">
</head>
......
......@@ -195,7 +195,6 @@
activity.response.map((act, index) => {
//setting the percentage values
alert('happy perct: ' + act);
$('#happy_perct').text(act.happy_perct + '%');
$('#sad_perct').text(act.sad_perct + '%');
$('#anger_perct').text(act.angry_perct + '%');
......@@ -1151,10 +1150,10 @@
aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div>
</div>
<!--evaluate button -->
<button type="button" class="btn btn-danger float-right"
id="evaluate_button">Evaluate
</button>
{# <!--evaluate button -->#}
{# <button type="button" class="btn btn-danger float-right"#}
{# id="evaluate_button">Evaluate#}
{# </button>#}
</div>
......@@ -1215,63 +1214,63 @@
<!--2nd column -->
<div class="col-lg-6">
<!--card content -->
<div class="card shadow mb-4">
<!--card header -->
<div class="card-header py-3">
<h5 class="m-0 font-weight-bold text-primary">Frame Detections</h5>
</div>
<!--card body -->
<div class="text-center p-4" id="detection_frames">
<!--no content message-->
<div class="text-center p-2" id="no_detection_message_content">
<span class="font-italic">No frame is selected</span>
</div>
<div class="text-left m-3" id="detection_number_area" hidden>
<p>No of detections: <span id="no_of_detections"></span></p>
</div>
<!--the detection loader -->
<div class="text-center p-2" id="detection_loader" hidden>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}"
alt="Loader">
</div>
</div>
</div>
<!--detection person card -->
<div class="card shadow mb-4">
<!--card header -->
<div class="card-header py-3">
<h5 class="m-0 font-weight-bold text-primary">Detected Students (by emotion
type)</h5>
</div>
<!--card body -->
<div class="text-center p-4" id="detection_students">
<!--activity type line -->
<div class="text-center p-2" id="activity_type" hidden>
<p>Activity Type: <span class="font-weight-bold" id="activity_type_text"></span>
</p>
</div>
<!--no content message-->
<div class="text-center p-2" id="no_detection_student_content">
<span class="font-italic">No activity type is selected</span>
</div>
<!--the detection student loader -->
<div class="text-center p-2" id="detection_student_loader" hidden>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}"
alt="Loader">
</div>
</div>
</div>
</div>
{# <div class="col-lg-6">#}
{# <!--card content -->#}
{# <div class="card shadow mb-4">#}
{# <!--card header -->#}
{# <div class="card-header py-3">#}
{# <h5 class="m-0 font-weight-bold text-primary">Frame Detections</h5>#}
{# </div>#}
{##}
{# <!--card body -->#}
{# <div class="text-center p-4" id="detection_frames">#}
{##}
{# <!--no content message-->#}
{# <div class="text-center p-2" id="no_detection_message_content">#}
{# <span class="font-italic">No frame is selected</span>#}
{# </div>#}
{##}
{# <div class="text-left m-3" id="detection_number_area" hidden>#}
{# <p>No of detections: <span id="no_of_detections"></span></p>#}
{# </div>#}
{# <!--the detection loader -->#}
{# <div class="text-center p-2" id="detection_loader" hidden>#}
{# <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"#}
{# alt="Loader">#}
{# </div>#}
{# </div>#}
{# </div>#}
{##}
{# <!--detection person card -->#}
{# <div class="card shadow mb-4">#}
{# <!--card header -->#}
{# <div class="card-header py-3">#}
{# <h5 class="m-0 font-weight-bold text-primary">Detected Students (by emotion#}
{# type)</h5>#}
{# </div>#}
{##}
{# <!--card body -->#}
{# <div class="text-center p-4" id="detection_students">#}
{# <!--activity type line -->#}
{# <div class="text-center p-2" id="activity_type" hidden>#}
{# <p>Activity Type: <span class="font-weight-bold" id="activity_type_text"></span>#}
{# </p>#}
{# </div>#}
{##}
{# <!--no content message-->#}
{# <div class="text-center p-2" id="no_detection_student_content">#}
{# <span class="font-italic">No activity type is selected</span>#}
{# </div>#}
{##}
{# <!--the detection student loader -->#}
{# <div class="text-center p-2" id="detection_student_loader" hidden>#}
{# <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"#}
{# alt="Loader">#}
{# </div>#}
{##}
{# </div>#}
{# </div>#}
{# </div>#}
</div>
......@@ -1281,36 +1280,36 @@
<div class="row p-2">
<!--1st column -->
<div class="col-lg-6">
<!--card -->
<div class="card shadow mb-4">
<!--card header -->
<div class="card-header">
<h5 class="m-0 font-weight-bold text-primary">Evaluated Students</h5>
</div>
<!--card body -->
<div class="card-body" id="evaluation_students">
<!--no content message-->
<div class="text-center p-2" id="no_evaluated_student_content">
<span class="font-italic">Press 'Evaluate' button to evaluate students</span>
</div>
<!--the detection student loader -->
<div class="text-center p-2" id="evaluate_student_loader" hidden>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}"
alt="Loader">
</div>
<!--end of student detection loader -->
</div>
</div>
</div>
{# <div class="col-lg-6">#}
{# <!--card -->#}
{# <div class="card shadow mb-4">#}
{# <!--card header -->#}
{# <div class="card-header">#}
{# <h5 class="m-0 font-weight-bold text-primary">Evaluated Students</h5>#}
{# </div>#}
{##}
{# <!--card body -->#}
{# <div class="card-body" id="evaluation_students">#}
{##}
{# <!--no content message-->#}
{# <div class="text-center p-2" id="no_evaluated_student_content">#}
{# <span class="font-italic">Press 'Evaluate' button to evaluate students</span>#}
{# </div>#}
{##}
{# <!--the detection student loader -->#}
{# <div class="text-center p-2" id="evaluate_student_loader" hidden>#}
{# <img src="{% static 'FirstApp/images/ajax-loader.gif' %}"#}
{# alt="Loader">#}
{# </div>#}
{# <!--end of student detection loader -->#}
{##}
{##}
{# </div>#}
{##}
{# </div>#}
{##}
{##}
{# </div>#}
<!--end of 1st column -->
......
......@@ -73,7 +73,7 @@
let date = e.target.parentNode.parentNode.firstChild.innerHTML;
fetch('http://127.0.0.1:8000/get-lecture-video/?lecturer=' + global_lecturer + '&date=' + date + '&index=' + global_lecturer_subject_index)
fetch('http://127.0.0.1:8000/get-lecture-video-gaze-estimation-availability/?lecturer=' + global_lecturer + '&date=' + date + '&index=' + global_lecturer_subject_index)
.then((res) => res.json())
.then((out) => displayLectureVideoDetails(out, object))
.catch((error) => alert('an error occurred: ' + error));
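Note: reading the date by walking parentNode twice is fragile; a sturdier sketch (assuming the date sits in the first cell of the clicked table row, which this hunk does not show) would be:
// a sketch, not the project's code: locate the enclosing row and read its
// first cell instead of chaining parentNode accesses
let row = e.target.closest('tr');
let clicked_date = row ? row.cells[0].innerText.trim() : '';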
......@@ -144,17 +144,19 @@
global_video_name = video.video_name;
if (lectureVideo.isActivityFound) {
e.target.parentNode.parentNode.lastChild.innerHTML = '<button type="button" class="btn btn-primary">Results</button>';
if (lectureVideo.isGazeEstimationFound) {
e.target.parentNode.parentNode.lastChild.innerHTML = '<button type="button" class="btn btn-primary" id="result_btn">Results</button>';
} else {
e.target.parentNode.parentNode.lastChild.innerHTML = '<button type="button" class="btn btn-success">Process</button>';
{#e.target.parentNode.parentNode.lastChild.innerHTML = "<span class='font-italic font-weight-bold text-danger'>Needs to be processed</span>";#}
e.target.parentNode.parentNode.lastChild.innerHTML = '<button type="button" class="btn btn-success" id="process_gaze">Process</button>';
}
$('#video_modal').modal();
}
//binding a click event for 'btn-primary' buttons
$(document).on('click', '.btn-primary', function (e) {
$(document).on('click', '#result_btn', function (e) {
//removing the previous frames (if there is any)
$('#main_frames').remove();
......@@ -164,7 +166,7 @@
//hiding the temporary text
$('#temporary_text').attr('hidden', true);
fetch('http://127.0.0.1:8000/get-lecture-activity/?lecture_video_id=' + global_lecture_video_id + '&lecture_video_name=' + global_video_name)
fetch('http://127.0.0.1:8000/get-lecture-gaze-estimation/?lecture_video_id=' + global_lecture_video_id + '&lecture_video_name=' + global_video_name)
.then((res) => res.json())
.then((out) => {
let frames = createFrames(out);
......@@ -179,7 +181,7 @@
});
//displaying the gaze estimation percentages in the progress bars
function displayActivity(activity) {
function displayGazeEstimation(gaze) {
//hiding the loader (for the frames)
$('#frame_loader').attr('hidden', true);
......@@ -190,29 +192,31 @@
$('video').attr({'hidden': false, 'src': src});
activity.response.map((act, index) => {
gaze.response.map((act, index) => {
//setting the percentage values
$('#phone_perct').text(act.phone_perct + '%');
$('#talking_perct').text(act.talking_perct + '%');
$('#listening_perct').text(act.listening_perct + '%');
$('#writing_perct').text(act.writing_perct + '%');
$('#looking_up_right_perct').text(act.looking_up_and_right_perct + '%');
$('#looking_up_left_perct').text(act.looking_up_and_left_perct + '%');
$('#looking_down_right_perct').text(act.looking_down_and_right_perct + '%');
$('#looking_down_left_perct').text(act.looking_down_and_left_perct + '%');
$('#looking_front_perct').text(act.looking_front_perct + '%');
//setting the width in the progress bars
$('#phone_width').width(act.phone_perct + '%');
$('#talking_width').width(act.talking_perct + '%');
$('#listening_width').width(act.listening_perct + '%');
$('#writing_width').width(act.writing_perct + '%');
$('#looking_up_right_width').width(act.looking_up_and_right_perct + '%');
$('#looking_up_left_width').width(act.looking_up_and_left_perct + '%');
$('#looking_down_right_width').width(act.looking_down_and_right_perct + '%');
$('#looking_down_left_width').width(act.looking_down_and_left_perct + '%');
$('#looking_front_width').width(act.looking_front_perct + '%');
});
//display the progress bar area
$('.progress_area').attr('hidden', false);
}
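For reference, this function assumes a payload shaped roughly as follows; this is a sketch, with field names taken from the accessors above and the wrapper key from gaze.response:
// hypothetical example payload, inferred from the fields read above
const exampleGaze = {
    response: [{
        looking_up_and_right_perct: 12.5,
        looking_up_and_left_perct: 8.0,
        looking_down_and_right_perct: 20.0,
        looking_down_and_left_perct: 9.5,
        looking_front_perct: 50.0
    }]
};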
//to handle the 'btn-success' (process) button
$(document).on('click', '.btn-success', function (e) {
//to handle the process button
$(document).on('click', '#process_gaze', function (e) {
//sending the POST request to process the lecture activities
fetch('http://127.0.0.1:8000/process-lecture-activity/?lecture_video_name=' + global_video_name + '&lecture_video_id=' + global_lecture_video_id)
//sending the GET request to process the lecture gaze estimations
fetch('http://127.0.0.1:8000/process-lecture-gaze-estimation/?lecture_video_name=' + global_video_name + '&lecture_video_id=' + global_lecture_video_id)
.then((res) => res.json())
.then((out) => handleResponse(out.response, e))
.catch((error) => alert('error: ' + error));
......@@ -222,7 +226,7 @@
function handleResponse(response, e) {
//change the button, if the response is positive
if (response) {
e.target.parentNode.parentNode.lastChild.innerHTML = '<button type="button" class="btn btn-primary">Results</button>';
e.target.parentNode.parentNode.lastChild.innerHTML = '<button type="button" class="btn btn-primary" id="result_btn" >Results</button>';
}
}
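The button swap above fires only on a truthy response flag, so the processing endpoint is expected to answer with something like the following (illustrative only):
// illustrative payload: handleResponse only checks that `response` is truthy
const exampleProcessResult = { response: true };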
......@@ -236,15 +240,14 @@
//loop through the frames
res.extracted.map((image) => {
let img_src = "";
let len = image.detections.length;
if (count === 0) {
main_frame_content += "<li class='list-group-item text-center' id='image_0'>";
img_src = "<img src='{% static '' %}FirstApp/activity/" + global_video_name + "/" + res.extracted[0].frame + "/" + res.extracted[0].detections[0] + "' width='400' height='400'>";
img_src = "<img src='{% static '' %}FirstApp/gaze/" + global_video_name + "/" + res.extracted[0] + "' width='400' height='400'>";
} else {
main_frame_content += "<li class='list-group-item other-frames' id='image_" + count + "' hidden>";
img_src = "<img src='{% static '' %}FirstApp/activity/" + global_video_name + "/" + image.frame + "/" + image.detections[len - 1] + "' class='img-link' width='400' height='400'>";
img_src = "<img src='{% static '' %}FirstApp/gaze/" + global_video_name + "/" + image + "' class='img-link' width='400' height='400'>";
}
......@@ -261,12 +264,176 @@
$('#myActivityRange').attr({'min': 0, 'max': count});
//display the progress bars
displayActivity(res);
displayGazeEstimation(res);
return main_frame_content;
}
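Worth noting: the two endpoints hand back differently shaped extracted arrays, which is why the image paths changed above. A sketch of both shapes, with illustrative names:
// activity: objects carrying per-frame detection image names
//   res.extracted = [{ frame: 'frame-0', detections: ['det-0.png', ...] }, ...]
// gaze: plain frame image names
//   res.extracted = ['frame-0.png', 'frame-1.png', ...]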
//to handle the 'integrate' modal
$('#integrate_activity').click(function () {
//define the student video src
let video_src = "{% static '' %}FirstApp/videos/" + global_video_name;
//assign the video src
$('#student_video').attr('src', video_src);
$('#integrate_modal').modal();
//fetch data from the API
fetch('http://127.0.0.1:8000/get-lecture-gaze-estimation-for-frame/?video_name=' + global_video_name)
.then((res) => res.json())
.then((out) => displayGazeEstimationForFrame(out.response))
.catch((err) => alert('error: ' + err));
});
//this function will load the gaze estimation for frames
function displayGazeEstimationForFrame(response) {
//hide the loader
$('#student_video_progress_loader').attr('hidden', true);
//show the progress bars
$('#student_video_progress').attr('hidden', false);
//creating the html string
let htmlString = "";
//creating the html string, iteratively
response.map((frame) => {
let frame_name = frame.frame_name;
let look_up_right = Math.round(frame.upright_perct);
let look_up_left = Math.round(frame.upleft_perct);
let look_down_right = Math.round(frame.downright_perct);
let look_down_left = Math.round(frame.downleft_perct);
let look_front = Math.round(frame.front_perct);
//append to the html string
//looking up and right
htmlString += "<div class='progress_area' id='progress_" +frame_name+ "' hidden>";
htmlString += "<h4 class='small font-weight-bold'>Looking up and right</h4>";
htmlString += "<span class='float-right' id='look_up_right_instant_" +frame_name+ "'>" +look_up_right+ "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-warning' role='progressbar' id='look_up_right_instant_value_" +frame_name+ "' style='width: " +look_up_right+ "%' aria-valuenow='40' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>";
//looking up and left
htmlString += "<h4 class='small font-weight-bold'>Looking up and left</h4>";
htmlString += "<span class='float-right' id='look_up_left_instant_" +frame_name+ "'>" +look_up_left+ "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar' role='progressbar' id='look_up_left_instant_value_" +frame_name+ "' style='width: " +look_up_left+ "%' aria-valuenow='0' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>";
//looking down and right
htmlString += "<h4 class='small font-weight-bold'>Looking down and right</h4>";
htmlString += "<span class='float-right' id='look_down_right_instant_" +frame_name+ "'>" +look_down_right+ "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-info' role='progressbar' id='look_down_right_instant_value_" +frame_name+ "' style='width: " +look_down_right+ "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>";
//looking down and left
htmlString += "<h4 class='small font-weight-bold'>Looking down and left</h4>";
htmlString += "<span class='float-right' id='look_down_left_instant_" +frame_name+ "'>" +look_down_left+ "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-info' role='progressbar' id='look_down_left_instant_value_" +frame_name+ "' style='width: " +look_down_left+ "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>";
//looking front
htmlString += "<h4 class='small font-weight-bold'>Looking front</h4>";
htmlString += "<span class='float-right' id='look_front_instant_" +frame_name+ "'>" +look_front+ "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-info' role='progressbar' id='look_front_instant_value_" +frame_name+ "' style='width: " +look_front+ "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>";
//ending the progress area
htmlString += "</div>";
});
//append the html
$('#student_video_column').append(htmlString);
}
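Each entry in response is expected to look roughly like this; a sketch inferred from the fields read above, where the frame-N naming matters because the play handler later looks up progress_frame-N:
// hypothetical frame entry, matching the fields read above
const exampleFrame = {
    frame_name: 'frame-0',
    upright_perct: 12.5,
    upleft_perct: 8.0,
    downright_perct: 20.0,
    downleft_perct: 9.5,
    front_perct: 50.0
};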
//to handle the 'integrate' play button
$('#play_integrate_button').click(function () {
let video = $('video')[0];
let test_video = document.getElementsByTagName('video')[0];
let play_class = 'btn btn-outline-danger play';
let pause_class = 'btn btn-outline-danger pause';
let count = 0;
let classes = $(this).attr('class');
let video_interval = setInterval(() => {
{#let talking_number = Math.round(Math.random() * 100, 0);#}
{#let phone_number = Math.round(Math.random() * 100, 0);#}
{#let note_number = Math.round(Math.random() * 100, 0);#}
{#let listening_number = Math.round(Math.random() * 100, 0);#}
//get the relevant progress area
let progress_area = "progress_frame-" + count;
let progress_area_id = "#" + progress_area;
//find the corresponding progress area
let progress_area_html = document.getElementById(progress_area);
//display the retrieved progress area
$(progress_area_id).attr('hidden', false);
//replace the current progress area with the selected one
$('#student_video_progress').html(progress_area_html);
//increment the count
count++;
//setting the values
{#$('#looking_up_right_instant_perct').text(talking_number + '%');#}
{#$('#looking_up_left_instant_perct').text(phone_number + '%');#}
{#$('#looking_down_right_instant_perct').text(note_number + '%');#}
{#$('#looking_down_left_instant_perct').text(listening_number + '%');#}
{#$('#looking_front_instant_perct').text(listening_number + '%');#}
{##}
{#//setting the width#}
{#$('#talking_instant_value').width(talking_number + '%');#}
{#$('#phone_checking_instant_value').width(phone_number + '%');#}
{#$('#note_taking_instant_value').width(note_number + '%');#}
{#$('#listening_instant_value').width(listening_number + '%');#}
}, 33);
//check for the current class
if (classes === play_class) {
$(this).text('Pause');
$(this).attr('class', pause_class);
video.play();
} else if (classes === pause_class) {
$(this).text('Play');
$(this).attr('class', play_class);
video.pause();
}
//function to run when the video has ended
video.onended = function (e) {
//stop changing the activity values
clearInterval(video_interval);
}
});
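The 33 ms interval approximates one frame at ~30 fps, but it drifts and keeps firing while the video is paused (only onended clears it). A sketch of an alternative that keys the progress area off the video's own clock instead, assuming the frames were sampled at ~30 fps:
// a sketch, not the project's code: derive the frame index from the video
// clock, so pausing the video automatically pauses the progress areas too
const v = document.querySelector('video');
v.addEventListener('timeupdate', () => {
    const idx = Math.floor(v.currentTime * 30);
    const area = document.getElementById('progress_frame-' + idx);
    if (area) {
        $(area).attr('hidden', false);
        $('#student_video_progress').html(area);
    }
});
This also removes the need for the clearInterval bookkeeping in onended.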
//declaring the variable for setInterval function
let timeVar = null;
......@@ -422,10 +589,9 @@
if (student_count === 0) {
images += "<li class='list-group-item frame-0' id='image_0_" +title+ "'>";
}
else {
images += "<li class='list-group-item other-student-frames' id='image_" +student_count+ "_" +title+ "' hidden>";
images += "<li class='list-group-item frame-0' id='image_0_" + title + "'>";
} else {
images += "<li class='list-group-item other-student-frames' id='image_" + student_count + "_" + title + "' hidden>";
}
images += "<img src='{% static '' %}FirstApp/Activity/" + global_video_name + "/" + frame.frame + "/" + student + "' width='200' height='200'>";
......@@ -443,10 +609,10 @@
htmlString += "<div class='slidecontainer'>";
htmlString += "<div class='row m-3'></div>";
htmlString += "<div class='row'>";
htmlString += "<span><i class='fas fa-play play-pause-icon-student-frames' id='icon_" +title+ "'></i></span>";
htmlString += "<span><i class='fas fa-play play-pause-icon-student-frames' id='icon_" + title + "'></i></span>";
htmlString += "</div>";
htmlString += "<input type='range' min='1' max='100' value='0' class='slider' id='slider_" +title+ "'>";
htmlString += "<p>No of frames: <span id='demo_" +title+ "'></span></p>";
htmlString += "<input type='range' min='1' max='100' value='0' class='slider' id='slider_" + title + "'>";
htmlString += "<p>No of frames: <span id='demo_" + title + "'></span></p>";
htmlString += "</div>";
htmlString += "</div>";
});
......@@ -500,7 +666,7 @@
output.innerHTML = new_slider_value.toString();
let selectedImage = '#image_' +Number(value)+ '_' + title;
let selectedImage = '#image_' + Number(value) + '_' + title;
//displaying the relevant image
$('#image_0_' + title).html($(selectedImage).html());
......@@ -707,53 +873,66 @@
<!--this area will display the progress bars -->
<div class="progress_area" hidden>
<!--talking with friends -->
<!--Looking up and right -->
<a href="#" class="btn btn-link labels" data-number="1">
<h4 class="small font-weight-bold">Talking with friends</h4>
<h4 class="small font-weight-bold">Looking up and right</h4>
</a>
<span class="float-right" id="talking_perct">40%</span>
<span class="float-right" id="looking_up_right_perct">40%</span>
<div class="progress mb-4">
<div class="progress-bar bg-danger" role="progressbar"
id="talking_width"
id="looking_up_right_width"
style="width: 20%"
aria-valuenow="20" aria-valuemin="0" aria-valuemax="100"></div>
</div>
<!--phone checking -->
<!--looking up and left -->
<a href="#" class="btn btn-link labels" data-number="0">
<h4 class="small font-weight-bold">Phone checking</h4>
<h4 class="small font-weight-bold">Looking up and left</h4>
</a>
<span class="float-right" id="phone_perct">45%</span>
<span class="float-right" id="looking_up_left_perct">45%</span>
<div class="progress mb-4">
<div class="progress-bar bg-warning" role="progressbar"
id="phone_width"
id="looking_up_left_width"
style="width: 40%"
aria-valuenow="40" aria-valuemin="0" aria-valuemax="100"></div>
</div>
<!--note taking -->
<!--looking down and right -->
<a href="#" class="btn btn-link labels" data-number="2">
<h4 class="small font-weight-bold">Writing</h4>
<h4 class="small font-weight-bold">Looking down and right</h4>
</a>
<span class="float-right" id="writing_perct">50%</span>
<span class="float-right" id="looking_down_right_perct">50%</span>
<div class="progress mb-4">
<div class="progress-bar" role="progressbar" id="writing_width"
<div class="progress-bar" role="progressbar"
id="looking_down_right_width"
style="width: 60%"
aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div>
</div>
<!--listening-->
<!--Looking down and left-->
<a href="#" class="btn btn-link labels">
<h4 class="small font-weight-bold">Looking down and left</h4>
</a>
<span class="float-right" id="looking_down_left_perct">60%</span>
<div class="progress mb-4">
<div class="progress-bar bg-info" role="progressbar"
id="looking_down_left_width" style="width: 80%"
aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div>
</div>
<!--Looking front-->
<a href="#" class="btn btn-link labels">
<h4 class="small font-weight-bold">Listening</h4>
<h4 class="small font-weight-bold">Looking Front</h4>
</a>
<span class="float-right" id="listening_perct">60%</span>
<span class="float-right" id="looking_front_perct">60%</span>
<div class="progress mb-4">
<div class="progress-bar bg-info" role="progressbar"
id="listening_width" style="width: 80%"
id="looking_front_width" style="width: 80%"
aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div>
</div>
</div>
<!-- end of progress area -->
</div>
......@@ -813,55 +992,29 @@
<!--2nd column -->
<div class="col-lg-6">
<!--card content -->
<div class="card shadow mb-4">
<!--card header -->
<div class="card-header py-3">
<h5 class="m-0 font-weight-bold text-primary">Frame Detections</h5>
</div>
<!--card body -->
<div class="text-center p-4" id="detection_frames">
<!--no content message-->
<div class="text-center p-2" id="no_detection_message_content">
<span class="font-italic">No frame is selected</span>
</div>
<div class="text-left m-3" id="detection_number_area" hidden>
<p>No of detections: <span id="no_of_detections"></span></p>
</div>
<!--the detection loader -->
<div class="text-center p-2" id="detection_loader" hidden>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}"
alt="Loader">
</div>
</div>
</div>
<!--detection person card -->
<!--card -->
<div class="card shadow mb-4">
<!--card header -->
<div class="card-header py-3">
<h5 class="m-0 font-weight-bold text-primary">Detected Students</h5>
<div class="card-header">
<h5 class="m-0 font-weight-bold text-primary">Integrated Evaluation</h5>
</div>
<!--card body -->
<div class="text-center p-4" id="detection_students">
<!--no content message-->
<div class="text-center p-2" id="no_detection_student_content">
<span class="font-italic">No activity type is selected</span>
<div class="card-body">
<div class="text-center" id="integrate_message">
<span class="font-italic">The integrated version student and lecturer evaluations will display here.</span>
</div>
<!--the detection student loader -->
<div class="text-center p-2" id="detection_student_loader" hidden>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}"
alt="Loader">
<!--button -->
<div class="text-right m-4">
<button type="button" class="btn btn-outline-success" id="integrate_activity">
Process
</button>
</div>
</div>
</div>
</div>
<!--end of 2nd column -->
</div>
......@@ -943,6 +1096,157 @@
</div>
</div>
</div>
<!-- end of logout modal -->
<!--integrate modal -->
<div class="modal fade" id="integrate_modal" tabindex="-1" role="dialog" aria-labelledby="integrate_modal">
<div class="modal-dialog" style="max-width: 1300px">
<div class="modal-content">
<!--modal header -->
<div class="modal-header">
<h4 class="modal-title">Integrated Version</h4>
</div>
<!--modal body -->
<div class="modal-body">
<div class="container-fluid">
<div class="row">
<!--1st column -->
<div class="col-md-6" id="student_video_column" style="border-right: 1px solid black">
<div class="text-center">
<span class="h3 font-italic font-weight-bold">Student Behavior</span>
</div>
<!--display student video -->
<div class="text-center m-3" id="student_video_section">
<video width="500" height="300" id="student_video" controls>
<source src="#"
type="video/mp4">
Your browser does not support the video tag.
</video>
</div>
<!--end of student video section -->
<!-- ajax loader section -->
<div class="text-center mt-3" id="student_video_progress_loader">
<img src="{% static 'FirstApp/images/ajax-loader-1.gif' %}" alt="loader">
</div>
<!--progress bar section -->
<div class="progress_area" id="student_video_progress" hidden>
<!--Looking up and right -->
<a href="#" class="btn btn-link labels" data-number="1">
<h4 class="small font-weight-bold">Looking up and right</h4>
</a>
<span class="float-right" id="looking_up_right_instant_perct">40%</span>
<div class="progress mb-4">
<div class="progress-bar bg-danger" role="progressbar"
id="looking_up_right_instant_width"
style="width: 20%"
aria-valuenow="20" aria-valuemin="0" aria-valuemax="100"></div>
</div>
<!--looking up and left -->
<a href="#" class="btn btn-link labels" data-number="0">
<h4 class="small font-weight-bold">Looking up and left</h4>
</a>
<span class="float-right" id="looking_up_left_instant_perct">45%</span>
<div class="progress mb-4">
<div class="progress-bar bg-warning" role="progressbar"
id="looking_up_left_instant_width"
style="width: 40%"
aria-valuenow="40" aria-valuemin="0" aria-valuemax="100"></div>
</div>
<!--looking down and right -->
<a href="#" class="btn btn-link labels" data-number="2">
<h4 class="small font-weight-bold">Looking down and right</h4>
</a>
<span class="float-right" id="looking_down_right_instant_perct">50%</span>
<div class="progress mb-4">
<div class="progress-bar" role="progressbar" id="looking_down_right_instant_width"
style="width: 60%"
aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div>
</div>
<!--Looking down and left-->
<a href="#" class="btn btn-link labels">
<h4 class="small font-weight-bold">Looking down and left</h4>
</a>
<span class="float-right" id="looking_down_left_instant_perct">60%</span>
<div class="progress mb-4">
<div class="progress-bar bg-info" role="progressbar"
id="looking_down_left_instant_width" style="width: 80%"
aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div>
</div>
<!--Looking front-->
<a href="#" class="btn btn-link labels">
<h4 class="small font-weight-bold">Looking Front</h4>
</a>
<span class="float-right" id="looking_front_instant_perct">60%</span>
<div class="progress mb-4">
<div class="progress-bar bg-info" role="progressbar"
id="looking_front_instant_width" style="width: 80%"
aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div>
</div>
</div>
<!--end of progress bar section -->
</div>
<!--end of 1st column -->
<!--2nd column -->
<div class="col-md-6">
<div class="text-center">
<span class="h3 font-italic font-weight-bold">Lecturer Performance</span>
</div>
<!--display lecture video -->
<div class="text-center m-3" id="lecturer_video_section">
<!--temporary text -->
<div class="text-center" id="temp_lecturer_text">
<span class="font-italic">No video was found</span>
</div>
{# <video width="500" height="300" id="lecturer_video" controls>#}
{# <source src="#"#}
{# type="video/mp4">#}
{# Your browser does not support the video tag.#}
{# </video>#}
</div>
<!--end of lecture video section -->
</div>
<!--end of 2nd column -->
</div>
<!--end of 1st row -->
<!--2nd row -->
<div class="row">
<!--play button -->
<div class="col-md-12">
<div class="text-center p-3">
<button type="button" class="btn btn-outline-danger play"
id="play_integrate_button">Play
</button>
</div>
</div>
</div>
<!--end of 2nd row -->
</div>
</div>
<!-- modal footer -->
<div class="modal-footer">
<button type="button" data-dismiss="modal" class="btn btn-danger text-white">Close</button>
</div>
</div>
</div>
</div>
<!--end of integrate modal -->
{% endblock %}
</body>
......
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<meta name="description" content="">
<meta name="author" content="">
<title>SLPES</title>
<link rel="shortcut icon" href="file:///D:/SLIIT/Year 4/CDAP/project/2020-101/assets/FirstApp/images/favicon.ico" type="image/x-icon"/>
<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css"
integrity="sha384-Vkoo8x4CGsO3+Hhxv8T/Q5PaXtkKtu6ug5TOeNV6gBiFeWPGFN9MuhOf23Q9Ifjh" crossorigin="anonymous">
<link rel="stylesheet" href="file:///D:/SLIIT/Year 4/CDAP/project/2020-101/assets/FirstApp/css/sb-admin-2.min.css" type="text/css">
<link rel="stylesheet" href="file:///D:/SLIIT/Year 4/CDAP/project/2020-101/assets/FirstApp/css/slider.css" type="text/css">
<link href="file:///D:/SLIIT/Year 4/CDAP/project/2020-101/assets/FirstApp/vendor/fontawesome-free/css/all.min.css" rel="stylesheet" type="text/css">
<link href="https://fonts.googleapis.com/css?family=Nunito:200,200i,300,300i,400,400i,600,600i,700,700i,800,800i,900,900i"
rel="stylesheet">
<link rel="stylesheet" href="file:///D:/SLIIT/Year 4/CDAP/project/2020-101/assets/FirstApp/css/all.min.css" type="text/css">
<link href="file:///D:/SLIIT/Year 4/CDAP/project/2020-101/assets/FirstApp/vendor/datatables/dataTables.bootstrap4.min.css" rel="stylesheet" type="text/css">
</head>
<body id="page-top">
<!-- main content -->
<div id="wrapper">
<div id="content-wrapper" class="d-flex flex-column">
<!-- Main Content -->
<div id="content">
<!-- Topbar -->
<nav class="navbar navbar-expand navbar-light bg-white topbar mb-4 static-top shadow">
<!-- Sidebar Toggle (Topbar) -->
<button id="sidebarToggleTop" class="btn btn-link d-md-none rounded-circle mr-3">
<i class="fa fa-bars"></i>
</button>
<!-- Topbar Search -->
<!--form class="d-none d-sm-inline-block form-inline mr-auto ml-md-3 my-2 my-md-0 mw-100 navbar-search">
<div class="input-group">
<input type="text" class="form-control bg-light border-0 small" placeholder="Search for..."
aria-label="Search" aria-describedby="basic-addon2">
<div class="input-group-append">
<button class="btn btn-primary" type="button">
<i class="fas fa-search fa-sm"></i>
</button>
</div>
</div>
</form-->
<!-- logo -->
<div id="logo" class="text-left">
<img src="file:///D:/SLIIT/Year 4/CDAP/project/2020-101/assets/FirstApp/images/logo.PNG" alt="Logo" width="80" height="60">
</div>
<div class="m-4">
<h5>Student and Lecturer</h5>
<h5>Performance Enhancement System</h5>
</div>
<!-- Topbar Navbar -->
<ul class="navbar-nav ml-auto">
<!-- Nav Item - Search Dropdown (Visible Only XS) -->
<li class="nav-item dropdown no-arrow d-sm-none">
<a class="nav-link dropdown-toggle" href="#" id="searchDropdown" role="button"
data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
<i class="fas fa-search fa-fw"></i>
</a>
<!-- Dropdown - Messages -->
<div class="dropdown-menu dropdown-menu-right p-3 shadow animated--grow-in"
aria-labelledby="searchDropdown">
<form class="form-inline mr-auto w-100 navbar-search">
<div class="input-group">
<input type="text" class="form-control bg-light border-0 small"
placeholder="Search for..." aria-label="Search"
aria-describedby="basic-addon2">
<div class="input-group-append">
{# <button class="btn btn-primary" type="button">#}
{# <i class="fas fa-search fa-sm"></i>#}
{# </button>#}
</div>
</div>
</form>
</div>
</li>
<!-- Nav Item - Alerts -->
<li class="nav-item dropdown no-arrow mx-1">
<a class="nav-link dropdown-toggle" href="#" id="alertsDropdown" role="button"
data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
{# <i class="fas fa-bell fa-fw"></i>#}
<!-- Counter - Alerts -->
{# <span class="badge badge-danger badge-counter">3+</span>#}
</a>
<!-- Dropdown - Alerts -->
<div class="dropdown-list dropdown-menu dropdown-menu-right shadow animated--grow-in"
aria-labelledby="alertsDropdown">
<h6 class="dropdown-header">
Alerts Center
</h6>
<a class="dropdown-item d-flex align-items-center" href="#">
<div class="mr-3">
<div class="icon-circle bg-primary">
<i class="fas fa-file-alt text-white"></i>
</div>
</div>
<div>
<div class="small text-gray-500">December 12, 2019</div>
<span class="font-weight-bold">A new monthly report is ready to download!</span>
</div>
</a>
<a class="dropdown-item d-flex align-items-center" href="#">
<div class="mr-3">
<div class="icon-circle bg-success">
<i class="fas fa-donate text-white"></i>
</div>
</div>
<div>
<div class="small text-gray-500">December 7, 2019</div>
$290.29 has been deposited into your account!
</div>
</a>
<a class="dropdown-item d-flex align-items-center" href="#">
<div class="mr-3">
<div class="icon-circle bg-warning">
<i class="fas fa-exclamation-triangle text-white"></i>
</div>
</div>
<div>
<div class="small text-gray-500">December 2, 2019</div>
Spending Alert: We've noticed unusually high spending for your account.
</div>
</a>
<a class="dropdown-item text-center small text-gray-500" href="#">Show All Alerts</a>
</div>
</li>
<!-- Nav Item - Messages -->
<li class="nav-item dropdown no-arrow mx-1">
<a class="nav-link dropdown-toggle" href="#" id="messagesDropdown" role="button"
data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
{# <i class="fas fa-envelope fa-fw"></i>#}
<!-- Counter - Messages -->
{# <span class="badge badge-danger badge-counter">7</span>#}
</a>
<!-- Dropdown - Messages -->
<div class="dropdown-list dropdown-menu dropdown-menu-right shadow animated--grow-in"
aria-labelledby="messagesDropdown">
<h6 class="dropdown-header">
Message Center
</h6>
<a class="dropdown-item d-flex align-items-center" href="#">
<div class="dropdown-list-image mr-3">
<img class="rounded-circle" src="https://source.unsplash.com/fn_BT9fwg_E/60x60"
alt="">
<div class="status-indicator bg-success"></div>
</div>
<div class="font-weight-bold">
<div class="text-truncate">Hi there! I am wondering if you can help me with a
problem I've been having.
</div>
<div class="small text-gray-500">Emily Fowler · 58m</div>
</div>
</a>
<a class="dropdown-item d-flex align-items-center" href="#">
<div class="dropdown-list-image mr-3">
<img class="rounded-circle" src="https://source.unsplash.com/AU4VPcFN4LE/60x60"
alt="">
<div class="status-indicator"></div>
</div>
<div>
<div class="text-truncate">I have the photos that you ordered last month, how
would you like them sent to you?
</div>
<div class="small text-gray-500">Jae Chun · 1d</div>
</div>
</a>
<a class="dropdown-item d-flex align-items-center" href="#">
<div class="dropdown-list-image mr-3">
<img class="rounded-circle" src="https://source.unsplash.com/CS2uCrpNzJY/60x60"
alt="">
<div class="status-indicator bg-warning"></div>
</div>
<div>
<div class="text-truncate">Last month's report looks great, I am very happy with
the progress so far, keep up the good work!
</div>
<div class="small text-gray-500">Morgan Alvarez · 2d</div>
</div>
</a>
<a class="dropdown-item d-flex align-items-center" href="#">
<div class="dropdown-list-image mr-3">
<img class="rounded-circle" src="https://source.unsplash.com/Mv9hjnEUHR4/60x60"
alt="">
<div class="status-indicator bg-success"></div>
</div>
<div>
<div class="text-truncate">Am I a good boy? The reason I ask is because someone
told me that people say this to all dogs, even if they aren't good...
</div>
<div class="small text-gray-500">Chicken the Dog · 2w</div>
</div>
</a>
<a class="dropdown-item text-center small text-gray-500" href="#">Read More Messages</a>
</div>
</li>
<div class="topbar-divider d-none d-sm-block"></div>
<!-- Nav Item - User Information -->
<li class="nav-item dropdown no-arrow">
<img src="file:///D:/SLIIT/Year 4/CDAP/project/2020-101/assets/FirstApp/images/sliit.png" alt="sliit" width="200" height="70">
<!-- Dropdown - User Information -->
<div class="dropdown-menu dropdown-menu-right shadow animated--grow-in"
aria-labelledby="userDropdown">
<a class="dropdown-item" href="#">
<i class="fas fa-user fa-sm fa-fw mr-2 text-gray-400"></i>
Profile
</a>
<a class="dropdown-item" href="#">
<i class="fas fa-cogs fa-sm fa-fw mr-2 text-gray-400"></i>
Settings
</a>
<a class="dropdown-item" href="#">
<i class="fas fa-list fa-sm fa-fw mr-2 text-gray-400"></i>
Activity Log
</a>
<div class="dropdown-divider"></div>
<a class="dropdown-item" href="#" data-toggle="modal" data-target="#logoutModal">
<i class="fas fa-sign-out-alt fa-sm fa-fw mr-2 text-gray-400"></i>
Logout
</a>
</div>
</li>
</ul>
</nav>
<!-- End of Topbar -->
<!-- beginning of container-fluid -->
<div class="container-fluid">
<!-- Page Heading -->
{# <div class="d-sm-flex align-items-center justify-content-between mb-4">#}
{# <h1 class="mb-0 text-gray-800">Activity Recognition</h1>#}
{# </div>#}
<div class="text-center">
<h1 class="mb-0">Activity Recognition</h1>
</div>
<hr class="p-2">
<!--1st row -->
<div class="row">
<!-- 1st column -->
<div class="col-lg-6">
<!-- card -->
<div class="card shadow mb-4 bg-white">
<div class="card-header">
<h5 class="m-0 font-weight-bold text-primary">Lecturer Information</h5>
</div>
<!-- card body -->
<div class="card-body">
<table class="table table-borderless">
<tr>
<td class="font-weight-bold h4">Name</td>
<td>{{ lecturer_name }}</td>
</tr>
<tr>
<td class="font-weight-bold h4">Subject</td>
<td>{{ subject }}</td>
</tr>
<tr>
<td class="font-weight-bold h4">Date</td>
<td>{{ date }}</td>
</tr>
</table>
</div>
</div>
</div>
<!-- end of 1st column -->
<!-- 2nd column -->
<div class="col-lg-6">
<!-- card -->
<div class="card shadow mb-4 bg-white">
<div class="card-header">
<h5 class="m-0 font-weight-bold text-primary">Activity Information</h5>
</div>
<!-- card body -->
<div class="card-body">
<div class="progress_area">
<!--talking with friends -->
<h4 class="small font-weight-bold">Talking with friends</h4>
<span class="float-right" id="talking_perct">40%</span>
<div class="progress mb-4">
<div class="progress-bar bg-danger" role="progressbar"
id="talking_width"
style="width: 20%"
aria-valuenow="20" aria-valuemin="0" aria-valuemax="100"></div>
</div>
<!--phone checking -->
<h4 class="small font-weight-bold">Phone checking</h4>
<span class="float-right" id="phone_perct">45%</span>
<div class="progress mb-4">
<div class="progress-bar bg-warning" role="progressbar"
id="phone_width"
style="width: 40%"
aria-valuenow="40" aria-valuemin="0" aria-valuemax="100"></div>
</div>
<!--note taking -->
<h4 class="small font-weight-bold">Writing</h4>
<span class="float-right" id="writing_perct">50%</span>
<div class="progress mb-4">
<div class="progress-bar" role="progressbar" id="writing_width"
style="width: 60%"
aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div>
</div>
<!--listening-->
<h4 class="small font-weight-bold">Listening</h4>
<span class="float-right" id="listening_perct">60%</span>
<div class="progress mb-4">
<div class="progress-bar bg-info" role="progressbar"
id="listening_width" style="width: 80%"
aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div>
</div>
</div>
</div>
</div>
</div>
<!-- end of 2nd column -->
</div>
<!-- end of 1st row -->
<!-- 2nd row -->
<!-- Area Chart -->
<div class="col-xl-8 col-lg-7">
<div class="card shadow mb-4">
<!-- Card Header - Dropdown -->
<div class="card-header py-3 d-flex flex-row align-items-center justify-content-between">
<h6 class="m-0 font-weight-bold text-primary">Activity Overview</h6>
<div class="dropdown no-arrow">
<a class="dropdown-toggle" href="#" role="button" id="dropdownMenuLink"
data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
<i class="fas fa-ellipsis-v fa-sm fa-fw text-gray-400"></i>
</a>
<div class="dropdown-menu dropdown-menu-right shadow animated--fade-in"
aria-labelledby="dropdownMenuLink">
<div class="dropdown-header">Dropdown Header:</div>
<a class="dropdown-item" href="#">Action</a>
<a class="dropdown-item" href="#">Another action</a>
<div class="dropdown-divider"></div>
<a class="dropdown-item" href="#">Something else here</a>
</div>
</div>
</div>
<!-- Card Body -->
<div class="card-body">
<div class="chart-area">
<canvas id="myAreaChart"></canvas>
</div>
</div>
</div>
</div>
<!-- end of 2nd row -->
</div>
<!-- end of container-fluid -->
</div>
<!--end of content -->
</div>
<!--end of content-wrapper -->
</div>
<!--end of wrapper -->
<script src="https://code.jquery.com/jquery-3.4.1.slim.min.js"
integrity="sha384-J6qa4849blE2+poT4WnyKhv5vZF5SrPo0iEjwBvKU7imGFAV0wwj1yYfoRSJoZ+n"
crossorigin="anonymous"></script>
<script src="https://cdn.jsdelivr.net/npm/popper.js@1.16.0/dist/umd/popper.min.js"
integrity="sha384-Q6E9RHvbIyZFJoft+2mJbHaEWldlvI9IOYy5n3zV9zzTtmI3UksdQRVvoxMfooAo"
crossorigin="anonymous"></script>
<script src="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/js/bootstrap.min.js"
integrity="sha384-wfSDF2E50Y2D1uUdj0O3uMBJnjuUD4Ih7YwaYd1iqfktj0Uod8GCExl3Og8ifwB6"
crossorigin="anonymous"></script>
<script src="file:///D:/SLIIT/Year 4/CDAP/project/2020-101/assets/FirstApp/vendor/jquery/jquery.js"></script>
<script src="file:///D:/SLIIT/Year 4/CDAP/project/2020-101/assets/FirstApp/vendor/bootstrap/js/bootstrap.bundle.min.js"></script>
<!-- Core plugin JavaScript-->
<script src="file:///D:/SLIIT/Year 4/CDAP/project/2020-101/assets/FirstApp/vendor/jquery-easing/jquery.easing.min.js"></script>
<!-- Custom scripts for all pages-->
<script src="file:///D:/SLIIT/Year 4/CDAP/project/2020-101/assets/FirstApp/js/sb-admin-2.min.js"></script>
<!-- Page level plugins -->
<script src="file:///D:/SLIIT/Year 4/CDAP/project/2020-101/assets/FirstApp/vendor/chart.js/Chart.min.js"></script>
<!-- Page level custom scripts -->
<script src="file:///D:/SLIIT/Year 4/CDAP/project/2020-101/assets/FirstApp/js/demo/chart-area-demo.js"></script>
<script src="file:///D:/SLIIT/Year 4/CDAP/project/2020-101/assets/FirstApp/js/demo/chart-pie-demo.js"></script>
</body>
</html>
\ No newline at end of file
......@@ -20,6 +20,9 @@
<link rel="stylesheet" href="{% static 'FirstApp/css/all.min.css' %}">
<link href="{% static 'FirstApp/vendor/datatables/dataTables.bootstrap4.min.css' %}" rel="stylesheet">
<!-- this link will import process workflow CSS -->
<link href="{% static 'FirstApp/css/process-worflow.css' %}" rel="stylesheet" type="text/css">
</head>
{% endblock %}
......@@ -72,7 +75,7 @@
<div id="collapseTwo" class="collapse" aria-labelledby="headingTwo" data-parent="#accordionSidebar">
<div class="bg-white py-2 collapse-inner rounded">
<h6 class="collapse-header">Components:</h6>
<a class="collapse-item" href="/pose">Pose</a>
{# <a class="collapse-item" href="/pose">Pose</a>#}
<a class="collapse-item" href="/gaze">Gaze</a>
<a class="collapse-item" href="/emotion">Emotion</a>
<a class="collapse-item" href="/activity">Activity</a>
......@@ -80,6 +83,33 @@
</div>
</li>
<!-- Nav Item - Pages Collapse Menu -->
<li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseThree" aria-expanded="true" aria-controls="collapseThree">
<i class="fas fa-fw fa-cog"></i>
<span>Lecture</span>
</a>
<div id="collapseThree" class="collapse" aria-labelledby="headingThree" data-parent="#accordionSidebar">
<div class="bg-white py-2 collapse-inner rounded">
<h6 class="collapse-header">Components:</h6>
<a class="collapse-item" href="/summary/lecture">Summarization</a>
</div>
</div>
</li>
<li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseFour" aria-expanded="true" aria-controls="collapseThree">
<i class="fas fa-fw fa-cog"></i>
<span>Attendance</span>
</a>
<div id="collapseFour" class="collapse" aria-labelledby="headingThree" data-parent="#accordionSidebar">
<div class="bg-white py-2 collapse-inner rounded">
<h6 class="collapse-header">Components:</h6>
<a class="collapse-item" href="/attendance/initiate-lecture">initiate lecture</a>
</div>
</div>
</li>
<!-- Nav Item - Utilities Collapse Menu -->
<li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseUtilities" aria-expanded="true" aria-controls="collapseUtilities">
......@@ -89,10 +119,10 @@
<div id="collapseUtilities" class="collapse" aria-labelledby="headingUtilities" data-parent="#accordionSidebar">
<div class="bg-white py-2 collapse-inner rounded">
<h6 class="collapse-header">Custom Utilities:</h6>
<a class="collapse-item" href="/extract">Video Extractor</a>
{# <a class="collapse-item" href="/extract">Video Extractor</a>#}
<a class="collapse-item" href="/video_result">Video Results</a>
<a class="collapse-item" href="utilities-animation.html">Animations</a>
<a class="collapse-item" href="utilities-other.html">Other</a>
{# <a class="collapse-item" href="utilities-animation.html">Animations</a>#}
{# <a class="collapse-item" href="utilities-other.html">Other</a>#}
</div>
</div>
</li>
......@@ -100,44 +130,44 @@
<!-- Divider -->
<hr class="sidebar-divider">
<!-- Heading -->
<div class="sidebar-heading">
Addons
</div>
{# <!-- Heading -->#}
{# <div class="sidebar-heading">#}
{# Addons#}
{# </div>#}
<!-- Nav Item - Pages Collapse Menu -->
<li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapsePages" aria-expanded="true" aria-controls="collapsePages">
<i class="fas fa-fw fa-folder"></i>
<span>Pages</span>
</a>
<div id="collapsePages" class="collapse" aria-labelledby="headingPages" data-parent="#accordionSidebar">
<div class="bg-white py-2 collapse-inner rounded">
<h6 class="collapse-header">Login Screens:</h6>
<a class="collapse-item" href="/login">Login</a>
<a class="collapse-item" href="/register">Register</a>
<a class="collapse-item" href="/forgot-password">Forgot Password</a>
<div class="collapse-divider"></div>
<h6 class="collapse-header">Other Pages:</h6>
<a class="collapse-item" href="/404">404 Page</a>
<a class="collapse-item" href="/blank">Blank Page</a>
</div>
</div>
</li>
<!-- Nav Item - Charts -->
<li class="nav-item">
<a class="nav-link" href="charts.html">
<i class="fas fa-fw fa-chart-area"></i>
<span>Charts</span></a>
</li>
<!-- Nav Item - Tables -->
<li class="nav-item">
<a class="nav-link" href="/tables">
<i class="fas fa-fw fa-table"></i>
<span>Tables</span></a>
</li>
{# <li class="nav-item">#}
{# <a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapsePages" aria-expanded="true" aria-controls="collapsePages">#}
{# <i class="fas fa-fw fa-folder"></i>#}
{# <span>Pages</span>#}
{# </a>#}
{# <div id="collapsePages" class="collapse" aria-labelledby="headingPages" data-parent="#accordionSidebar">#}
{# <div class="bg-white py-2 collapse-inner rounded">#}
{# <h6 class="collapse-header">Login Screens:</h6>#}
{# <a class="collapse-item" href="/login">Login</a>#}
{# <a class="collapse-item" href="/register">Register</a>#}
{# <a class="collapse-item" href="/forgot-password">Forgot Password</a>#}
{# <div class="collapse-divider"></div>#}
{# <h6 class="collapse-header">Other Pages:</h6>#}
{# <a class="collapse-item" href="/404">404 Page</a>#}
{# <a class="collapse-item" href="/blank">Blank Page</a>#}
{# </div>#}
{# </div>#}
{# </li>#}
{# <!-- Nav Item - Charts -->#}
{# <li class="nav-item">#}
{# <a class="nav-link" href="charts.html">#}
{# <i class="fas fa-fw fa-chart-area"></i>#}
{# <span>Charts</span></a>#}
{# </li>#}
{##}
{# <!-- Nav Item - Tables -->#}
{# <li class="nav-item">#}
{# <a class="nav-link" href="/tables">#}
{# <i class="fas fa-fw fa-table"></i>#}
{# <span>Tables</span></a>#}
{# </li>#}
<!-- Divider -->
<hr class="sidebar-divider d-none d-md-block">
......@@ -164,144 +194,144 @@
</button>
<!-- Topbar Search -->
<form class="d-none d-sm-inline-block form-inline mr-auto ml-md-3 my-2 my-md-0 mw-100 navbar-search">
<div class="input-group">
<input type="text" class="form-control bg-light border-0 small" placeholder="Search for..." aria-label="Search" aria-describedby="basic-addon2">
<div class="input-group-append">
<button class="btn btn-primary" type="button">
<i class="fas fa-search fa-sm"></i>
</button>
</div>
</div>
</form>
{# <form class="d-none d-sm-inline-block form-inline mr-auto ml-md-3 my-2 my-md-0 mw-100 navbar-search">#}
{# <div class="input-group">#}
{# <input type="text" class="form-control bg-light border-0 small" placeholder="Search for..." aria-label="Search" aria-describedby="basic-addon2">#}
{# <div class="input-group-append">#}
{# <button class="btn btn-primary" type="button">#}
{# <i class="fas fa-search fa-sm"></i>#}
{# </button>#}
{# </div>#}
{# </div>#}
{# </form>#}
<!-- Topbar Navbar -->
<ul class="navbar-nav ml-auto">
<!-- Nav Item - Search Dropdown (Visible Only XS) -->
<li class="nav-item dropdown no-arrow d-sm-none">
<a class="nav-link dropdown-toggle" href="#" id="searchDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
<i class="fas fa-search fa-fw"></i>
</a>
<!-- Dropdown - Messages -->
<div class="dropdown-menu dropdown-menu-right p-3 shadow animated--grow-in" aria-labelledby="searchDropdown">
<form class="form-inline mr-auto w-100 navbar-search">
<div class="input-group">
<input type="text" class="form-control bg-light border-0 small" placeholder="Search for..." aria-label="Search" aria-describedby="basic-addon2">
<div class="input-group-append">
<button class="btn btn-primary" type="button">
<i class="fas fa-search fa-sm"></i>
</button>
</div>
</div>
</form>
</div>
</li>
<!-- Nav Item - Alerts -->
<li class="nav-item dropdown no-arrow mx-1">
<a class="nav-link dropdown-toggle" href="#" id="alertsDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
<i class="fas fa-bell fa-fw"></i>
<!-- Counter - Alerts -->
<span class="badge badge-danger badge-counter">3+</span>
</a>
<!-- Dropdown - Alerts -->
<div class="dropdown-list dropdown-menu dropdown-menu-right shadow animated--grow-in" aria-labelledby="alertsDropdown">
<h6 class="dropdown-header">
Alerts Center
</h6>
<a class="dropdown-item d-flex align-items-center" href="#">
<div class="mr-3">
<div class="icon-circle bg-primary">
<i class="fas fa-file-alt text-white"></i>
</div>
</div>
<div>
<div class="small text-gray-500">December 12, 2019</div>
<span class="font-weight-bold">A new monthly report is ready to download!</span>
</div>
</a>
<a class="dropdown-item d-flex align-items-center" href="#">
<div class="mr-3">
<div class="icon-circle bg-success">
<i class="fas fa-donate text-white"></i>
</div>
</div>
<div>
<div class="small text-gray-500">December 7, 2019</div>
$290.29 has been deposited into your account!
</div>
</a>
<a class="dropdown-item d-flex align-items-center" href="#">
<div class="mr-3">
<div class="icon-circle bg-warning">
<i class="fas fa-exclamation-triangle text-white"></i>
</div>
</div>
<div>
<div class="small text-gray-500">December 2, 2019</div>
Spending Alert: We've noticed unusually high spending for your account.
</div>
</a>
<a class="dropdown-item text-center small text-gray-500" href="#">Show All Alerts</a>
</div>
</li>
{# <li class="nav-item dropdown no-arrow d-sm-none">#}
{# <a class="nav-link dropdown-toggle" href="#" id="searchDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">#}
{# <i class="fas fa-search fa-fw"></i>#}
{# </a>#}
{# <!-- Dropdown - Messages -->#}
{# <div class="dropdown-menu dropdown-menu-right p-3 shadow animated--grow-in" aria-labelledby="searchDropdown">#}
{# <form class="form-inline mr-auto w-100 navbar-search">#}
{# <div class="input-group">#}
{# <input type="text" class="form-control bg-light border-0 small" placeholder="Search for..." aria-label="Search" aria-describedby="basic-addon2">#}
{# <div class="input-group-append">#}
{# <button class="btn btn-primary" type="button">#}
{# <i class="fas fa-search fa-sm"></i>#}
{# </button>#}
{# </div>#}
{# </div>#}
{# </form>#}
{# </div>#}
{# </li>#}
{##}
{# <!-- Nav Item - Alerts -->#}
{# <li class="nav-item dropdown no-arrow mx-1">#}
{# <a class="nav-link dropdown-toggle" href="#" id="alertsDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">#}
{# <i class="fas fa-bell fa-fw"></i>#}
{# <!-- Counter - Alerts -->#}
{# <span class="badge badge-danger badge-counter">3+</span>#}
{# </a>#}
{# <!-- Dropdown - Alerts -->#}
{# <div class="dropdown-list dropdown-menu dropdown-menu-right shadow animated--grow-in" aria-labelledby="alertsDropdown">#}
{# <h6 class="dropdown-header">#}
{# Alerts Center#}
{# </h6>#}
{# <a class="dropdown-item d-flex align-items-center" href="#">#}
{# <div class="mr-3">#}
{# <div class="icon-circle bg-primary">#}
{# <i class="fas fa-file-alt text-white"></i>#}
{# </div>#}
{# </div>#}
{# <div>#}
{# <div class="small text-gray-500">December 12, 2019</div>#}
{# <span class="font-weight-bold">A new monthly report is ready to download!</span>#}
{# </div>#}
{# </a>#}
{# <a class="dropdown-item d-flex align-items-center" href="#">#}
{# <div class="mr-3">#}
{# <div class="icon-circle bg-success">#}
{# <i class="fas fa-donate text-white"></i>#}
{# </div>#}
{# </div>#}
{# <div>#}
{# <div class="small text-gray-500">December 7, 2019</div>#}
{# $290.29 has been deposited into your account!#}
{# </div>#}
{# </a>#}
{# <a class="dropdown-item d-flex align-items-center" href="#">#}
{# <div class="mr-3">#}
{# <div class="icon-circle bg-warning">#}
{# <i class="fas fa-exclamation-triangle text-white"></i>#}
{# </div>#}
{# </div>#}
{# <div>#}
{# <div class="small text-gray-500">December 2, 2019</div>#}
{# Spending Alert: We've noticed unusually high spending for your account.#}
{# </div>#}
{# </a>#}
{# <a class="dropdown-item text-center small text-gray-500" href="#">Show All Alerts</a>#}
{# </div>#}
{# </li>#}
<!-- Nav Item - Messages -->
<li class="nav-item dropdown no-arrow mx-1">
<a class="nav-link dropdown-toggle" href="#" id="messagesDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
<i class="fas fa-envelope fa-fw"></i>
<!-- Counter - Messages -->
<span class="badge badge-danger badge-counter">7</span>
</a>
<!-- Dropdown - Messages -->
<div class="dropdown-list dropdown-menu dropdown-menu-right shadow animated--grow-in" aria-labelledby="messagesDropdown">
<h6 class="dropdown-header">
Message Center
</h6>
<a class="dropdown-item d-flex align-items-center" href="#">
<div class="dropdown-list-image mr-3">
<img class="rounded-circle" src="https://source.unsplash.com/fn_BT9fwg_E/60x60" alt="">
<div class="status-indicator bg-success"></div>
</div>
<div class="font-weight-bold">
<div class="text-truncate">Hi there! I am wondering if you can help me with a problem I've been having.</div>
<div class="small text-gray-500">Emily Fowler · 58m</div>
</div>
</a>
<a class="dropdown-item d-flex align-items-center" href="#">
<div class="dropdown-list-image mr-3">
<img class="rounded-circle" src="https://source.unsplash.com/AU4VPcFN4LE/60x60" alt="">
<div class="status-indicator"></div>
</div>
<div>
<div class="text-truncate">I have the photos that you ordered last month, how would you like them sent to you?</div>
<div class="small text-gray-500">Jae Chun · 1d</div>
</div>
</a>
<a class="dropdown-item d-flex align-items-center" href="#">
<div class="dropdown-list-image mr-3">
<img class="rounded-circle" src="https://source.unsplash.com/CS2uCrpNzJY/60x60" alt="">
<div class="status-indicator bg-warning"></div>
</div>
<div>
<div class="text-truncate">Last month's report looks great, I am very happy with the progress so far, keep up the good work!</div>
<div class="small text-gray-500">Morgan Alvarez · 2d</div>
</div>
</a>
<a class="dropdown-item d-flex align-items-center" href="#">
<div class="dropdown-list-image mr-3">
<img class="rounded-circle" src="https://source.unsplash.com/Mv9hjnEUHR4/60x60" alt="">
<div class="status-indicator bg-success"></div>
</div>
<div>
<div class="text-truncate">Am I a good boy? The reason I ask is because someone told me that people say this to all dogs, even if they aren't good...</div>
<div class="small text-gray-500">Chicken the Dog · 2w</div>
</div>
</a>
<a class="dropdown-item text-center small text-gray-500" href="#">Read More Messages</a>
</div>
</li>
{# <li class="nav-item dropdown no-arrow mx-1">#}
{# <a class="nav-link dropdown-toggle" href="#" id="messagesDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">#}
{# <i class="fas fa-envelope fa-fw"></i>#}
{# <!-- Counter - Messages -->#}
{# <span class="badge badge-danger badge-counter">7</span>#}
{# </a>#}
{# <!-- Dropdown - Messages -->#}
{# <div class="dropdown-list dropdown-menu dropdown-menu-right shadow animated--grow-in" aria-labelledby="messagesDropdown">#}
{# <h6 class="dropdown-header">#}
{# Message Center#}
{# </h6>#}
{# <a class="dropdown-item d-flex align-items-center" href="#">#}
{# <div class="dropdown-list-image mr-3">#}
{# <img class="rounded-circle" src="https://source.unsplash.com/fn_BT9fwg_E/60x60" alt="">#}
{# <div class="status-indicator bg-success"></div>#}
{# </div>#}
{# <div class="font-weight-bold">#}
{# <div class="text-truncate">Hi there! I am wondering if you can help me with a problem I've been having.</div>#}
{# <div class="small text-gray-500">Emily Fowler · 58m</div>#}
{# </div>#}
{# </a>#}
{# <a class="dropdown-item d-flex align-items-center" href="#">#}
{# <div class="dropdown-list-image mr-3">#}
{# <img class="rounded-circle" src="https://source.unsplash.com/AU4VPcFN4LE/60x60" alt="">#}
{# <div class="status-indicator"></div>#}
{# </div>#}
{# <div>#}
{# <div class="text-truncate">I have the photos that you ordered last month, how would you like them sent to you?</div>#}
{# <div class="small text-gray-500">Jae Chun · 1d</div>#}
{# </div>#}
{# </a>#}
{# <a class="dropdown-item d-flex align-items-center" href="#">#}
{# <div class="dropdown-list-image mr-3">#}
{# <img class="rounded-circle" src="https://source.unsplash.com/CS2uCrpNzJY/60x60" alt="">#}
{# <div class="status-indicator bg-warning"></div>#}
{# </div>#}
{# <div>#}
{# <div class="text-truncate">Last month's report looks great, I am very happy with the progress so far, keep up the good work!</div>#}
{# <div class="small text-gray-500">Morgan Alvarez · 2d</div>#}
{# </div>#}
{# </a>#}
{# <a class="dropdown-item d-flex align-items-center" href="#">#}
{# <div class="dropdown-list-image mr-3">#}
{# <img class="rounded-circle" src="https://source.unsplash.com/Mv9hjnEUHR4/60x60" alt="">#}
{# <div class="status-indicator bg-success"></div>#}
{# </div>#}
{# <div>#}
{# <div class="text-truncate">Am I a good boy? The reason I ask is because someone told me that people say this to all dogs, even if they aren't good...</div>#}
{# <div class="small text-gray-500">Chicken the Dog · 2w</div>#}
{# </div>#}
{# </a>#}
{# <a class="dropdown-item text-center small text-gray-500" href="#">Read More Messages</a>#}
{# </div>#}
{# </li>#}
<div class="topbar-divider d-none d-sm-block"></div>
......
......@@ -5,11 +5,11 @@
<!-- Page Wrapper -->
{% block javascript %}
{% load static %}
<!-- Bootstrap core JavaScript-->
<script src="{% static 'FirstApp/vendor/jquery/jquery.min.js' %}"></script>
<script src="{% static 'FirstApp/vendor/bootstrap/js/bootstrap.bundle.min.js' %}"></script>
<!-- Page level plugins -->
<script src="{% static 'FirstApp/vendor/datatables/jquery.dataTables.min.js' %}"></script>
......@@ -18,213 +18,290 @@
<!-- Page level custom scripts -->
<script src="{% static 'FirstApp/js/demo/datatables-demo.js' %}"></script>
<!-- Core plugin JavaScript-->
<script src="{% static 'FirstApp/vendor/jquery-easing/jquery.easing.min.js' %}"></script>
<script type="text/javascript">
var global_subject = '';
var global_lecturer = '';
var global_lecture_video_id = '';
var global_video_name = '';
var global_lecturer_subject_index = 0;
var global_lecture_date = '';
var class1 = 'col-4 smpl-step-step complete';
var class2 = 'col-4 smpl-step-step active';
var class3 = 'col-4 smpl-step-step disabled';
//jquery
$(document).ready(function () {
let folder = '';
//select a particular subject
$('input[type=radio]').click(function () {
let subject_id = $(this).attr('id');
global_subject = subject_id;
let lecturer = $(this).attr('data-lecturer');
global_lecturer = lecturer;
let subject_name = $(this).attr('data-name');
$('#timetable').attr('hidden', false);
{#$('#timetable').attr('hidden', true);#}
$('#no_timetable_content').attr('hidden', true);
$('.student-detection-rows').remove();
$('#timetable_body').children().map(function () {
$(this).remove();
});
$('#no_subject_selected').attr('hidden', true);
                $('#timetable_caption').text('subject: ' + subject_name);
$('#loader').attr('hidden', false);
//fetching the timetable from the db
fetch('http://127.0.0.1:8000/timetables')
.then((res) => res.json())
.then((out) => createTimeTable(out, subject_id, lecturer))
.catch((error) => alert('this is the error: ' + error))
});
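            /*
             * Orientation note (added comments; endpoints are the ones fetched in this file):
             * the batch-processing flow below is a chain of three requests —
             *   1. a '.batch_process' button calls /process-lecture-activity
             *   2. handleActivityResponse then calls /process-lecture-emotion
             *   3. handleEmotionResponse then calls /process-lecture-gaze-estimation
             *   4. handleGazeResponse marks the final workflow step as complete
             */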
            //this function will display the timetable for the lecturer
            function createTimeTable(timetable, subject, lecturer) {
                $('#loader').attr('hidden', true);
                $('#timetable').attr('hidden', false);
                let isTimetableSubject = false;

                timetable.map((item, i) => {
                    item.timetable.map((table, index) => {
                        let lecturer_subject_index_arr = [];

                        //to get the number of subjects taught by the lecturer in a day
                        table.time_slots.forEach((slot1, ind) => {
                            let isLecturer = slot1.lecturer.id === Number(lecturer);
                            if (isLecturer) {
                                lecturer_subject_index_arr.push(ind);
                            }
                        });

                        //iterating each slot (for a given day)
                        table.time_slots.forEach((slot, in1) => {
                            let isLecturer = slot.lecturer.id === Number(lecturer);
                            let isLecSubject = slot.subject.subject_code === subject;
                            let message = '';

                            if (isLecturer && isLecSubject) {
                                let html = '';
                                let isProcessPerformed = false;
                                global_lecturer_subject_index = lecturer_subject_index_arr.findIndex((inner) => inner === in1);
                                isTimetableSubject = true;

                                //check for the lecture video status
                                let date = table.date;
                                //assign the date
                                global_lecture_date = date;

                                fetch('http://127.0.0.1:8000/get-lecture-video/?lecturer=' + global_lecturer + '&date=' + date + '&index=' + global_lecturer_subject_index)
                                    .then((res) => res.json())
                                    .then((out) => {
                                        isProcessPerformed = out.response.isActivityFound;
                                        alert('response: ' + out.response.isActivityFound);

                                        if (isProcessPerformed) {
                                            message = '<td><span class="font-italic font-weight-bold text-primary" id="message">Already processed</span></td>';
                                        } else {
                                            message = '<td><button type="button" class="btn btn-success">Process</button></td>';
                                        }

                                        html += "<tr class='lecture-details'><td class='slot_date'>" + table.date + "</td>"
                                            + "<td>" + slot.location + "</td>"
                                            + "<td>" + slot.start_time + "</td>"
                                            + "<td>" + slot.end_time + "</td>"
                                            + message
                                            + "</tr>";

                                        $('#timetable_body').append(html);
                                    })
                                    .catch((error) => alert('an error occurred: ' + error));
                            }
                        });
                    });
                });

                if (!isTimetableSubject) {
                    $('#timetable').attr('hidden', true);
                    $('#no_timetable_content').attr('hidden', false);
                }
            }

            //this function will retrieve the lecture video for a given lecture
            $(document).on('click', '.btn-info', function (e) {
                let clicked_class = e.target.className;
                let object = e;
                let real_class = clicked_class.split(' ')[1];
                real_class = '.' + real_class;

                //setting some values
                $('#no_content_message').attr('hidden', true);

                let date = e.target.parentNode.parentNode.firstChild.innerHTML;
                //assign the date
                global_lecture_date = date;

                fetch('http://127.0.0.1:8000/get-lecture-video/?lecturer=' + global_lecturer + '&date=' + date + '&index=' + global_lecturer_subject_index)
                    .then((res) => res.json())
                    .then((out) => displayLectureVideoDetails(out, object))
                    .catch((error) => alert('an error occurred: ' + error));
            });

            //function to display lecture video details
            function displayLectureVideoDetails(lectureVideo, e) {
                //get the lecture video response
                let video = lectureVideo.response;

                $('#video_name').text(video.video_name);
                $('#video_duration').text(video.video_length);
                $('#video_date').text(video.date);

                global_lecture_video_id = video.lecture_video_id;
                global_video_name = video.video_name;

                if (lectureVideo.isActivityFound) {
                    alert('I was found');
                } else {
                    alert('I am not here');
                }
            }

            //this function simulates the process workflow
            $('#simulate_process').click(function () {
                let classname = $('#step_1').attr('class');
                //declaring the variable for the setInterval function
                let timeVar = null;

                setTimeout(() => {
                    $('#step_1').attr('class', class1)
                }, 2000);

                setTimeout(() => {
                    $('#step_2').attr('class', class1)
                }, 4000);

                setTimeout(() => {
                    $('#step_3').attr('class', class1)
                }, 6000);

                setTimeout(() => {
                    $('#step_4').attr('class', class1)
                }, 8000);
            });

            //this function will handle the batch process button
            $('.batch_process').click(function () {
                let video_id = $(this).attr("data-video-id");
                let video_name = $(this).attr("data-video-name");

                //display the activity loader
                $('#activity_loader').attr("hidden", false);

                global_lecture_video_id = video_id;
                global_video_name = video_name;

                //perform activity recognition
                fetch('http://127.0.0.1:8000/process-lecture-activity/?lecture_video_name=' + global_video_name + '&lecture_video_id=' + global_lecture_video_id)
                    .then((res) => res.json())
                    .then((out) => handleActivityResponse(out.response))
                    .catch((error) => alert('error: ' + error));
            });

            //this is to detect the response gained from the activity recognition process
            function handleActivityResponse(response, e) {
                //continue the workflow, if the response is positive
                if (response) {
                    //display the activity process as completed
                    $('#step_1').attr('class', class1);
                    //hide the activity loader
                    $('#activity_loader').hide();
                    //display the emotion loader
                    $('#emotion_loader').attr('hidden', false);

                    //sending the request to process the lecture emotions
                    fetch('http://127.0.0.1:8000/process-lecture-emotion/?lecture_video_name=' + global_video_name + '&lecture_video_id=' + global_lecture_video_id)
                        .then((res) => res.json())
                        .then((out) => handleEmotionResponse(out.response))
                        .catch((error) => alert('error: ' + error));
                }
            }

            //this is to detect the response gained from the emotion recognition process
            function handleEmotionResponse(response) {
                //continue the workflow, if the response is positive
                if (response) {
                    //display the emotion process as completed
                    $('#step_2').attr('class', class1);
                    //hide the emotion loader
                    $('#emotion_loader').hide();
                    //display the gaze loader
                    $('#gaze_loader').attr('hidden', false);

                    //sending the get request to process the lecture gaze estimations
                    fetch('http://127.0.0.1:8000/process-lecture-gaze-estimation/?lecture_video_name=' + global_video_name + '&lecture_video_id=' + global_lecture_video_id)
                        .then((res) => res.json())
                        .then((out) => handleGazeResponse(out.response))
                        .catch((error) => alert('error: ' + error));
                }
            }

            //this is to detect the response gained from the gaze estimation process
            function handleGazeResponse(response) {
                //mark the workflow as finished, if the response is positive
                if (response) {
                    //display the gaze estimation process as completed
                    $('#step_3').attr('class', class1);
                    //hide the gaze loader
                    $('#gaze_loader').hide();
                    alert('good');
                }
            }
        });
</script>
......@@ -248,488 +325,158 @@
<!-- Page Heading -->
<div class="d-sm-flex align-items-center justify-content-between mb-4">
<h1 class="h3 mb-0 text-gray-800">Video Results</h1>
<h1 class="h3 mb-0 text-gray-800">Lecture Video Results</h1>
</div>
<!--first row -->
<div class="row p-2">
<!--first column -->
<div class="col-lg-6">
<div class="card shadow mb-4">
<!--card header -->
<div class="card-header py-3">
<h5 class="m-0 font-weight-bold text-primary">Lecturer Subjects</h5>
</div>
<!--card body -->
<div class="card-body">
{% if lecturer_subjects.count == 0 %}
<div class="text-center">
<span class="font-italic">No subjects</span>
</div>
{% else %}
<div class="table-responsive">
<table class="table table-bordered" id="datatable">
<thead>
<tr>
<th></th>
<th>Subject Name</th>
<th>Year</th>
</tr>
</thead>
<tbody>
{% for subject in subjects %}
<tr class="subjects not_clicked" id="{{ subject.0.subject_code }}">
<td>
<div class="radio">
<label><input type="radio" id="{{ subject.0.subject_code }}" name="subject_radio" data-name="{{ subject.0.name }}"></label>
</div>
</td>
<td>{{ subject.0.name}}</td>
<td>{{ subject.0.year }}</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% endif %}
</div>
</div>
</div>
<!--end of first column -->
<!--second column (timetable column) -->
<div class="col-lg-6">
<div class="col-lg-8" style="overflow-x: scroll">
<div class="card shadow mb-4">
<!--card header -->
<div class="card-header py-3">
<h5 class="m-0 font-weight-bold text-primary">View timetable</h5>
<h5 class="m-0 font-weight-bold text-primary">Waiting List</h5>
</div>
<!--card body -->
<div class="card-body">
                                <!--loading gif -->
                                <div class="text-center" id="loader" hidden>
                                    <img src="{% static 'FirstApp/images/ajax-loader.gif' %}" alt="Loader">
                                </div>
                                <!--message for an empty waiting list -->
                                {% if due_lectures.count == 0 %}
                                    <div class="text-center" id="no_subject_selected">
                                        <span class="font-italic">No lecture is to be processed</span>
                                    </div>
                                {% endif %}
                                <!--message when the subject is not in the timetable -->
                                <div class="text-center" id="no_timetable_content" hidden>
                                    <span class="font-italic">Not included in the timetable</span>
                                </div>
</div>
</div>
</div>
<!--end of first column -->
</div>
<div class="row p-2">
<!--first column-->
<div class="col-lg-6">
<!-- card content -->
<div class="card shadow mb-4">
<div class="card-header py-3">
<h5 class="m-0 font-weight-bold text-primary">List of Lecture Videos</h5>
</div>
<div class="card-body">
<table class="table table-bordered">
<table class="table table-striped" id="timetable">
{# <caption id="timetable_caption"></caption>#}
<thead>
<tr>
                                    <th>Date</th>
                                    <th>Subject</th>
                                    <th>Start Time</th>
                                    <th>End Time</th>
<th></th>
</tr>
</thead>
                                <tbody id="timetable_body">
{% for lecture in due_lectures %}
<tr>
<td class="font-weight-bolder">{{ lecture.date }}</td>
{# <td>{{ lecture.subject }}</td>#}
<td class="font-weight-bolder">{{ lecture.subject_name }}</td>
<td class="font-weight-bolder">{{ lecture.start_time }}</td>
<td class="font-weight-bolder">{{ lecture.end_time }}</td>
<td>
<button type="button" class="btn btn-success batch_process" data-video-id="{{ lecture.video_id }}" data-video-name="{{ lecture.video_name }}" id="{{ lecture.subject }}">Process</button>
{# <span class="font-italic font-weight-bolder text-success">Processing</span>#}
</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
</div>
</div>
</div>
<!-- end of 2nd row -->
<!--3rd row -->
<div class="row p-2">
<!-- progress row -->
<div class="row p-2" id="progress_row">
<!--1st column -->
<!--second column-->
<div class="col-lg-6">
<!--first column-->
<div class="col-lg-12">
<!-- card content -->
<div class="card shadow mb-4">
<div class="card-header py-3">
<h5 class="m-0 font-weight-bold text-primary">Gaze Estimation</h5>
<div class="card-header py-3 text-center">
<h5 class="m-0 font-weight-bold text-primary">Process Workflow</h5>
</div>
<div class="card-body">
                            <div class="container">
                                <div class="row smpl-step" style="border-bottom: 0; min-width: 500px;">
                                    <!-- step 1 -->
                                    <div class="col-4 smpl-step-step disabled" id="step_1">
                                        <div class="text-center smpl-step-num font-weight-bolder">Step 1</div>
                                        <div class="progress">
                                            <div class="progress-bar"></div>
                                        </div>
                                        <a class="smpl-step-icon text-center"><i class="fa fa-chart-line"
                                                style="font-size: 40px; padding-top: 10px; color: white"></i></a>
                                        <div class="smpl-step-info text-center">
                                            <span class="font-italic font-weight-bolder">Perform Activity Recognition</span>
                                            <br />
                                            <img src="{% static 'FirstApp/images/ajax-loader.gif' %}" alt="Loader" class="mt-2" id="activity_loader" hidden>
                                        </div>
                                    </div>
                                    <!-- end of step 1 -->
                                    <!-- step 2 -->
                                    <div class="col-4 smpl-step-step disabled" id="step_2">
                                        <div class="text-center smpl-step-num font-weight-bolder">Step 2</div>
                                        <div class="progress">
                                            <div class="progress-bar"></div>
                                        </div>
                                        <a class="smpl-step-icon text-center"><i class="fa fa-user"
                                                style="font-size: 50px; padding-top: 10px; color: white"></i></a>
                                        <div class="smpl-step-info text-center">
                                            <span class="font-italic font-weight-bolder">Study Student Emotions</span>
                                            <br />
                                            <img src="{% static 'FirstApp/images/ajax-loader.gif' %}" alt="Loader" class="mt-2" id="emotion_loader" hidden>
                                        </div>
                                    </div>
                                    <!-- end of step 2 -->
                                    <!-- step 3 -->
                                    <div class="col-4 smpl-step-step disabled" id="step_3">
                                        <div class="text-center smpl-step-num font-weight-bolder">Step 3</div>
                                        <div class="progress">
                                            <div class="progress-bar"></div>
                                        </div>
                                        <a class="smpl-step-icon">
                                            <i class="fa fa-eye"
                                               style="font-size: 60px; padding-left: 7px; padding-top: 5px; color: white;"></i>
                                        </a>
                                        <div class="smpl-step-info text-center">
                                            <span class="font-italic font-weight-bolder">See students' Gazes</span>
                                            <br />
                                            <img src="{% static 'FirstApp/images/ajax-loader.gif' %}" alt="Loader" class="mt-2" id="gaze_loader" hidden>
                                        </div>
                                    </div>
                                    <!-- end of step 3 -->
                                </div>
{# <!-- simulation button row -->#}
{# <div class="row">#}
{# <button type="button" class="btn btn-outline-danger" id="simulate_process">Simulate</button>#}
{# </div>#}
</div>
<!-- end of container -->
</div>
</div>
</div>
                <!-- end of 1st column -->
</div>
            <!-- end of progress row -->
</div>
{% endblock %}
......@@ -781,24 +528,24 @@
<!-- Logout Modal-->
<div class="modal fade" id="logoutModal" tabindex="-1" role="dialog" aria-labelledby="exampleModalLabel"
aria-hidden="true">
<div class="modal-dialog" role="document">
<div class="modal-content">
<div class="modal-header">
<h5 class="modal-title" id="exampleModalLabel">Ready to Leave?</h5>
<button class="close" type="button" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body">Select "Logout" below if you are ready to end your current session.</div>
<div class="modal-footer">
<button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
<a class="btn btn-primary" href="/logout">Logout</a>
</div>
<div class="modal fade" id="logoutModal" tabindex="-1" role="dialog" aria-labelledby="exampleModalLabel"
aria-hidden="true">
<div class="modal-dialog" role="document">
<div class="modal-content">
<div class="modal-header">
<h5 class="modal-title" id="exampleModalLabel">Ready to Leave?</h5>
<button class="close" type="button" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body">Select "Logout" below if you are ready to end your current session.</div>
<div class="modal-footer">
<button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
<a class="btn btn-primary" href="/logout">Logout</a>
</div>
</div>
</div>
</div>
{% endblock %}
</body>
......
......@@ -41,6 +41,9 @@ urlpatterns = [
# tables view
path('tables', views.tables),
# test view (delete later)
path('test', views.test),
url(r'^register', views.RegisterViewSet),
# re_path('video/?video_name<str:video_name>', views.video),
url(r'^teachers/', views.teachersList.as_view()),
......@@ -75,7 +78,6 @@ urlpatterns = [
# timetable API
url(r'^timetable', api.FacultyTimetableViewSet.as_view()),
##### VIDEO Section #####
# lecture video API
......@@ -84,6 +86,8 @@ urlpatterns = [
# lecture video API (to retrieve a lecture)
url(r'^get-lecture-video/$', api.GetLectureVideoViewSet.as_view()),
# lecture video API (to retrieve a lecture)
url(r'^get-lecture-video-for-home/$', api.GetLectureVideoViewSetForHome.as_view()),
##### ACTIVITIES API #####
......@@ -112,6 +116,9 @@ urlpatterns = [
url(r'^get-lecture-activity-individual-student-evaluation/$',
api.GetLectureActivityIndividualStudentEvaluation.as_view()),
# lecture activity report generation
url(r'^lecture-activity-report-generation/$',
api.GenerateActivityReport.as_view()),
###### EMOTION Section #####
# getting lecture emotion record availability
......@@ -133,7 +140,6 @@ urlpatterns = [
# lecture emotion detection for frames API (to retrieve detections for each frame in lecture video)
url(r'^get-lecture-emotion-for-frame/$', api.GetLectureEmotionRecognitionsForFrames.as_view()),
###### POSE Section #####
# lecture video API (for Pose estimation)
url(r'^get-lecture-video-for-pose/$', api.GetLectureVideoForPose.as_view()),
......@@ -147,6 +153,41 @@ urlpatterns = [
# lecture video individual student process pose estimation API (for Pose estimation)
url(r'^process-lecture-video-individual-pose-estimation', api.ProcessIndividualStudentPoseEstimation.as_view()),
##### GAZE Section #####
# lecture video Gaze estimation
url(r'^get-lecture-video-gaze-estimation-availability/$', api.GetLectureGazeEstimationAvailaibility.as_view()),
# process a lecture Gaze estimation
url(r'^process-lecture-gaze-estimation/$', api.ProcessLectureGazeEstimation.as_view()),
# retrieve a Lecture Gaze estimation
url(r'^get-lecture-gaze-estimation/$', api.GetLectureGazeEstimationViewSet.as_view()),
# lecture gaze estimation for frames API (to retrieve detections for each frame in lecture video)
url(r'^get-lecture-gaze-estimation-for-frame/$', api.GetLectureGazeEstimationForFrames.as_view()),
#####===== DATA VISUALIZATION =====#####
##### VIEW STUDENT BEHAVIOR SUMMARY SECTION #####
# retrieves student behavior summary for specified time period
url(r'^get-student-behavior-summary-for-period/$', api.GetStudentBehaviorSummaryForPeriod.as_view()),
# retrieves lecture video summary time landmarks
url(r'^get-lecture-video-summary-time-landmarks/$', api.GetLectureVideoSummaryTimeLandmarks.as_view()),
# retrieves lecture activity summary
url(r'^get-lecture-activity-summary/$', api.GetLectureActivitySummary.as_view()),
# retrieves lecture activity summary
url(r'^get-lecture-emotion-summary/$', api.GetLectureEmotionSummary.as_view()),
# retrieves lecture activity summary
url(r'^get-lecture-gaze-summary/$', api.GetLectureGazeSummary.as_view()),
# routers
# path('', include(router.urls)),
path('api-auth/', include('rest_framework.urls', namespace='rest_framework'))
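    # A client-side sketch (illustrative only) of how the processing endpoints
    # are chained; the paths are the ones fetched by the front-end templates,
    # the parameter values are placeholders, and the host assumes the local
    # development server used elsewhere in this project.
    #
    #   import requests
    #
    #   BASE = 'http://127.0.0.1:8000'
    #   params = {'lecture_video_name': 'Lecture01.mp4', 'lecture_video_id': '1'}
    #
    #   for endpoint in ('/process-lecture-activity/',
    #                    '/process-lecture-emotion/',
    #                    '/process-lecture-gaze-estimation/'):
    #       res = requests.get(BASE + endpoint, params=params)
    #       print(endpoint, res.json())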
......
......@@ -20,7 +20,7 @@ from . logic import video_extraction
from . forms import *
import cv2
import os
from datetime import datetime, timedelta
# hashing
......@@ -113,27 +113,85 @@ class LectureViewSet(APIView):
def hello(request):
username = request.user.username
# retrieve the lecturer
lecturer = request.session['lecturer']
# retrieve the lecturer's timetable slots
lecturer_timetable = FacultyTimetable.objects.filter()
# serialize the timetable
lecturer_timetable_serialized = FacultyTimetableSerializer(lecturer_timetable, many=True)
lecturer_details = []
# loop through the serialized timetable
for timetable in lecturer_timetable_serialized.data:
# retrieve daily timetable
daily_timetable = timetable['timetable']
# loop through the daily timetable
for day_timetable in daily_timetable:
date = ''
lecture_index = 0
# loop through each timeslots
for slots in day_timetable:
if slots == "date":
date = day_timetable[slots]
elif slots == "time_slots":
slot = day_timetable[slots]
# loop through each slot
for lecture in slot:
# check whether the lecturer is the current lecturer
if lecturer == lecture['lecturer']['id']:
lecturer_lecture_details = {}
lecturer_lecture_details['date'] = date
lecturer_lecture_details['start_time'] = lecture['start_time']
lecturer_lecture_details['end_time'] = lecture['end_time']
lecturer_lecture_details['subject_name'] = lecture['subject']['name']
lecturer_lecture_details['index'] = lecture_index
lecturer_lecture_details['lecturer'] = lecture['lecturer']['id']
# append to the lecturer_details
lecturer_details.append(lecturer_lecture_details)
# increment the index
lecture_index += 1
# sorting the dates in lecturer_details list
# for details in lecturer_details:
lecturer_details.sort(key=lambda date: datetime.strptime(str(date['date']), "%Y-%m-%d"), reverse=True)
obj = {'Message': 'Student and Lecturer Performance Enhancement System', 'username': username}
folder = os.path.join(BASE_DIR, os.path.join('static\\FirstApp\\videos'))
videoPaths = [os.path.join(folder, file) for file in os.listdir(folder)]
videos = []
durations = []
for videoPath in videoPaths:
video = Video()
cap = cv2.VideoCapture(videoPath)
fps = cap.get(cv2.CAP_PROP_FPS) # OpenCV2 version 2 used "CV_CAP_PROP_FPS"
frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
duration = int(frame_count / fps)
durations.append(duration)
videoName = os.path.basename(videoPath)
# videoName = videos.append(os.path.basename(videoPath))
        durationObj = timedelta(seconds=duration)
video.path = videoPath
video.name = videoName
video.duration = str(durationObj)
videos.append(video)
context = {'object': obj, 'Videos': videos, 'durations': durations, 'template_name': 'FirstApp/template.html', 'lecturer_details': lecturer_details, "lecturer": lecturer}
return render(request, 'FirstApp/Home.html', context)
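# For reference, each dict appended to lecturer_details above has this shape
# (the values here are illustrative only):
#   {'date': '2020-09-22', 'start_time': '08:00:00', 'end_time': '10:00:00',
#    'subject_name': 'Software Engineering', 'index': 0, 'lecturer': 1}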
def view404(request):
......@@ -302,64 +360,87 @@ def child(request):
# displaying video results
@login_required(login_url='/login')
def video_result(request):
try:
# retrieving data from the db
lecturer = request.session['lecturer']
to_do_lecture_list = []
due_lecture_list = []
lecturer_videos = LectureVideo.objects.filter(lecturer_id=lecturer)
serializer = LectureVideoSerializer(lecturer_videos, many=True)
data = serializer.data
# iterate through the existing lecture videos for the lecturer
for video in data:
video_id = video['id']
date = video['date']
subject = video['subject']['id']
# check whether the video id exist in the Activity Recognition table
lec_activity = LectureActivity.objects.filter(lecture_video_id_id=video_id).exists()
if lec_activity == False:
to_do_lecture_list.append({
"lecturer": lecturer,
"date": date,
"subject": subject,
"video_id": video['id'],
"video_name": video['video_name']
})
# once the lectures that needs to be processed are found out, extract the corresponding timetable details
# retrieve the lecturer's timetable slots
lecturer_timetable = FacultyTimetable.objects.filter()
# serialize the timetable
lecturer_timetable_serialized = FacultyTimetableSerializer(lecturer_timetable, many=True)
# loop through the serialized timetable
for timetable in lecturer_timetable_serialized.data:
# retrieve daily timetable
daily_timetable = timetable['timetable']
# loop through the daily timetable
for day_timetable in daily_timetable:
# print('day timetable" ', day_timetable)
# loop through the to-do lecture list
for item in to_do_lecture_list:
isDate = item['date'] == str(day_timetable['date'])
# isLecturer = item['lecturer'] ==
# check for the particular lecture on the day
if isDate:
slots = day_timetable['time_slots']
# loop through the slots
for slot in slots:
# check for the lecturer and subject
isLecturer = item['lecturer'] == slot['lecturer']['id']
isSubject = item['subject'] == slot['subject']['id']
if isLecturer & isSubject:
obj = {}
obj['date'] = item['date']
obj['subject'] = slot['subject']['subject_code']
obj['subject_name'] = slot['subject']['name']
obj['start_time'] = slot['start_time']
obj['end_time'] = slot['end_time']
obj['video_id'] = item['video_id']
obj['video_name'] = item['video_name']
# append to the list
due_lecture_list.append(obj)
lecturer_subjects = LecturerSubject.objects.filter(lecturer_id_id=lecturer)
lec_sub_serilizer = LecturerSubjectSerializer(lecturer_subjects, many=True)
subject_list = []
subjects = lec_sub_serilizer.data[0]['subjects']
for sub in subjects:
subject = Subject.objects.filter(id=sub)
subject_serialized = SubjectSerializer(subject, many=True)
subject_list.append(subject_serialized.data)
folder = os.path.join(BASE_DIR, os.path.join('static\\FirstApp\\videos'))
videoPaths = [os.path.join(folder, file) for file in os.listdir(folder)]
videos = []
durations = []
# setting up the first video details
first_video_path = videoPaths[0]
first_video = Video()
cap = cv2.VideoCapture(first_video_path)
fps = cap.get(cv2.CAP_PROP_FPS) # OpenCV2 version 2 used "CV_CAP_PROP_FPS"
frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
duration = int(frame_count / fps)
videoName = os.path.basename(first_video_path)
        durationObj = timedelta(seconds=duration)
first_video.path = first_video_path
first_video.name = videoName
first_video.duration = str(durationObj)
for videoPath in videoPaths:
video = Video()
cap = cv2.VideoCapture(videoPath)
fps = cap.get(cv2.CAP_PROP_FPS) # OpenCV2 version 2 used "CV_CAP_PROP_FPS"
frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
duration = int(frame_count / fps)
durations.append(duration)
videoName = os.path.basename(videoPath)
            durationObj = timedelta(seconds=duration)
video.path = videoPath
video.name = videoName
video.duration = str(durationObj)
videos.append(video)
context = {'Videos': videos, 'firstVideo': first_video, 'durations': durations, 'lecturer_subjects': lecturer_subjects, 'subjects': subject_list,
'template_name': 'FirstApp/template.html'}
    except Exception as exc:
print('what is wrong?: ', exc)
return redirect('/500')
    return render(request, "FirstApp/video_results.html",
                  {"lecturer": lecturer, "due_lectures": due_lecture_list})
# view for emotion page
......@@ -437,6 +518,7 @@ def view500(request):
def tables(request):
return render(request, "FirstApp/tables.html")
@login_required(login_url='/login')
def activity(request):
try:
......@@ -459,4 +541,8 @@ def activity(request):
except Exception as exc:
return redirect('/500')
return render(request, "FirstApp/activity.html", {"lecturer_subjects": lecturer_subjects, "subjects": subject_list, "lecturer": lecturer})
\ No newline at end of file
return render(request, "FirstApp/activity.html", {"lecturer_subjects": lecturer_subjects, "subjects": subject_list, "lecturer": lecturer})
def test(request):
return render(request, "FirstApp/pdf_template.html")
\ No newline at end of file
import nltk

read_lines = [line.rstrip('\n') for line in open("audioToText01.txt", "r")]
# sent_tokenize expects a single string, so join the lines before tokenizing
text = " ".join(read_lines)
sentences_list = nltk.sent_tokenize(text)

word_search = "important"
sentences_with_word = []
for sentence in sentences_list:
    if sentence.count(word_search) > 0:
        sentences_with_word.append(sentence)

words_search = ["exam", "assignment"]
word_sentence_dictionary = {"exam": [], "assignment": []}
for word in words_search:
    sentences_with_word = []
    for sentence in sentences_list:
        if sentence.count(word) > 0:
            sentences_with_word.append(sentence)
    word_sentence_dictionary[word] = sentences_with_word
\ No newline at end of file
import spacy
from spacy.lang.pt.stop_words import STOP_WORDS
from sklearn.feature_extraction.text import CountVectorizer
import pt_core_news_sm
nlp = pt_core_news_sm.load()
with open("audioToText01.txt", "r", encoding="utf-8") as f:
text = " ".join(f.readlines())
doc = nlp(text)
corpus = [sent.text.lower() for sent in doc.sents ]
cv = CountVectorizer(stop_words=list(STOP_WORDS))
cv_fit=cv.fit_transform(corpus)
word_list = cv.get_feature_names()
count_list = cv_fit.toarray().sum(axis=0)
word_frequency = dict(zip(word_list,count_list))
val=sorted(word_frequency.values())
higher_word_frequencies = [word for word,freq in word_frequency.items() if freq in val[-3:]]
print("\nWords with higher frequencies: ", higher_word_frequencies)
# gets relative frequency of words
higher_frequency = val[-1]
for word in word_frequency.keys():
word_frequency[word] = (word_frequency[word]/higher_frequency)
sentence_rank={}
for sent in doc.sents:
for word in sent :
if word.text.lower() in word_frequency.keys():
if sent in sentence_rank.keys():
sentence_rank[sent]+=word_frequency[word.text.lower()]
else:
sentence_rank[sent]=word_frequency[word.text.lower()]
top_sentences=(sorted(sentence_rank.values())[::-1])
top_sent=top_sentences[:3]
summary=[]
for sent,strength in sentence_rank.items():
if strength in top_sent:
summary.append(sent)
else:
continue
# write the summary out once; opening the file inside the loop with mode 'w'
# would truncate it on every iteration and keep only the last sentence
with open('Summary01.txt', 'w') as file:
    for i in summary:
        file.write(str(i))
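# The ranking above is a frequency-based extractive summary: word counts are
# normalised by the most frequent word, each sentence is scored by summing the
# normalised frequencies of its words, and the three top-scoring sentences are
# kept as the summary.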
\ No newline at end of file
......@@ -4,4 +4,7 @@ from django.contrib import admin
from LectureSummarizingApp.models import *
admin.site.register(LectureAudio)
admin.site.register(LectureAudioNoiseRemoved)
admin.site.register(LectureSpeechToText)
admin.site.register(LectureNotices)
admin.site.register(LectureAudioSummary)
from rest_framework.views import APIView
from rest_framework.response import Response
from LectureSummarizingApp.models import LectureAudio
from LectureSummarizingApp.serializer import LectureAudioSerializer
from LectureSummarizingApp.models import LectureAudio, LectureAudioNoiseRemoved, LectureSpeechToText, \
LectureAudioSummary, LectureNotices
from LectureSummarizingApp.serializer import LectureAudioSerializer, LectureAudioNoiseRemovedSerializer, \
LectureSpeechToTextSerializer, LectureAudioSummarySerializer, LectureNoticesSerializer
from . import speech_to_text as stt
# this API will retrieve lecture audio details
class LectureAudioAPI(APIView):
def get(self, request):
        lecture_audio = LectureAudio.objects.all().order_by('lecturer_date')
lecture_audio_serializer = LectureAudioSerializer(lecture_audio, many=True)
return Response(lecture_audio_serializer.data)
class audioNoiseRemovedList(APIView):
def get(self, request):
lecture_audio_noise_removed = LectureAudioNoiseRemoved.objects.all()
serializer = LectureAudioNoiseRemovedSerializer(lecture_audio_noise_removed, many=True)
return Response(serializer.data)
def post(self, request):
LectureAudioNoiseRemoved(
lecture_audio_noise_removed_id=request.data["lecture_audio_noise_removed_id"],
lecture_audio_id=request.data["lecture_audio_id"],
lecturer_date=request.data["lecturer_date"],
lecture_audio_name=request.data["lecture_audio_name"],
lecture_audio_length=request.data["lecture_audio_length"]
).save()
return Response({"response": request.data})
class audioToTextList(APIView):
def get(self, request):
lecture_speech_to_text_id = LectureSpeechToText.objects.all()
serializer = LectureSpeechToTextSerializer(lecture_speech_to_text_id, many=True)
# return Response(serializer.data)
video_name = request.query_params.get("video_name")
print('video name: ', video_name)
stt.speech_to_text(video_name)
        return Response({
            "response": "successful"
        })
def post(self, request):
# video_name = request.data["video_name"]
#
# print('video name: ', video_name)
#
# stt.speech_to_text(video_name)
LectureSpeechToText(
lecture_speech_to_text_id=request.data["lecture_speech_to_text_id"],
lecture_audio_id=request.data["lecture_audio_id"],
audio_original_text=request.data["audio_original_text"]
).save()
return Response({"response": request.data})
class lectureSummaryList(APIView):
def get(self, request):
lecture_audio_summary_id = LectureAudioSummary.objects.all()
serializer = LectureAudioSummarySerializer(lecture_audio_summary_id, many=True)
return Response(serializer.data)
def post(self, request):
LectureAudioSummary(
lecture_speech_to_text_id=request.data["lecture_speech_to_text_id"],
lecture_audio_id=request.data["lecture_audio_id"],
audio_original_text=request.data["audio_original_text"],
audio_summary=request.data["audio_summary"]
).save()
return Response({"response": request.data})
class lectureNoticeList(APIView):
def get(self, request):
lecture_notice_id = LectureNotices.objects.all()
serializer = LectureNoticesSerializer(lecture_notice_id, many=True)
return Response(serializer.data)
def post(self, request):
LectureNotices(
lecture_notice_id=request.data["lecture_notice_id"],
lecture_audio_id=request.data["lecture_audio_id"],
notice_text=request.data["notice_text"]
).save()
return Response({"response": request.data})
error
\ No newline at end of file
# Generated by Django 2.2.12 on 2020-09-22 18:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('FirstApp', '0001_initial'),
('LectureSummarizingApp', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='LectureAudioNoiseRemoved',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lecture_audio_id', models.CharField(max_length=10)),
('lecturer_date', models.DateField()),
('lecture_audio_name', models.CharField(max_length=50)),
('lecture_audio_length', models.DurationField()),
('lecturer', models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='FirstApp.Lecturer')),
('subject', models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='FirstApp.Subject')),
],
),
]
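# Standard workflow for applying this and the following migrations (general
# Django usage, not specific to this commit):
#   python manage.py migrate LectureSummarizingApp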
# Generated by Django 2.2.12 on 2020-09-22 18:32
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('LectureSummarizingApp', '0002_lectureaudionoiseremoved'),
]
operations = [
migrations.RenameField(
model_name='lectureaudionoiseremoved',
old_name='lecture_audio_id',
new_name='lecture_audio_noise_removed_id',
),
]
# Generated by Django 2.2.12 on 2020-09-23 04:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('LectureSummarizingApp', '0003_auto_20200923_0002'),
]
operations = [
migrations.RemoveField(
model_name='lectureaudionoiseremoved',
name='lecturer',
),
migrations.RemoveField(
model_name='lectureaudionoiseremoved',
name='subject',
),
migrations.AddField(
model_name='lectureaudionoiseremoved',
name='lecture_audio_id',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='LectureSummarizingApp.LectureAudio'),
),
migrations.CreateModel(
name='LectureSpeechToText',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lecture_speech_to_text_id', models.CharField(max_length=10)),
('audio_original_text', models.TextField()),
('lecture_audio_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='LectureSummarizingApp.LectureAudio')),
],
),
migrations.CreateModel(
name='LectureNotices',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lecture_notice_id', models.CharField(max_length=10)),
('notice_text', models.TextField()),
('lecture_audio_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='LectureSummarizingApp.LectureAudio')),
],
),
]
......@@ -16,6 +16,27 @@ class LectureAudio (models.Model):
def __str__(self):
return self.lecture_audio_id
class LectureAudioNoiseRemoved (models.Model):
lecture_audio_noise_removed_id = models.CharField(max_length=10)
lecture_audio_id = models.ForeignKey(LectureAudio, on_delete=models.CASCADE, default=0)
lecturer_date = models.DateField()
lecture_audio_name = models.CharField(max_length=50)
lecture_audio_length = models.DurationField()
def __str__(self):
return self.lecture_audio_noise_removed_id
class LectureSpeechToText (models.Model):
lecture_speech_to_text_id = models.CharField(max_length=10)
lecture_audio_id = models.ForeignKey(LectureAudio, on_delete=models.CASCADE)
audio_original_text = models.TextField()
def __str__(self):
return self.lecture_speech_to_text_id
class LectureAudioSummary (models.Model):
lecture_audio_summary_id = models.CharField(max_length=10)
lecture_audio_id = models.ForeignKey(LectureAudio, on_delete=models.CASCADE)
......@@ -24,3 +45,13 @@ class LectureAudioSummary (models.Model):
def __str__(self):
return self.lecture_audio_summary_id
class LectureNotices (models.Model):
lecture_notice_id = models.CharField(max_length=10)
lecture_audio_id = models.ForeignKey(LectureAudio, on_delete=models.CASCADE)
notice_text = models.TextField()
def __str__(self):
return self.lecture_notice_id
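# A minimal ORM sketch (assumed usage; identifiers are illustrative) showing
# how these models hang off LectureAudio through their ForeignKey fields:
#
#   audio = LectureAudio.objects.first()
#   LectureSpeechToText.objects.create(
#       lecture_speech_to_text_id="LST_01",
#       lecture_audio_id=audio,  # the FK field itself is named *_id here
#       audio_original_text="Welcome to today's lecture...",
#   )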
import librosa
from pysndfx import AudioEffectsChain
import numpy as np
import math
import python_speech_features
import scipy as sp
from scipy import signal
import soundfile
def read_file(file_name):
sample_file = file_name
sample_directory = 'lectures/'
sample_path = sample_directory + sample_file
# generating audio time series and a sampling rate (int)
y, sr = librosa.load(sample_path)
return y, sr
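# Note: librosa.load returns a mono float time series resampled to 22 050 Hz
# by default, together with that sampling rate — which is what the shelf
# filters below expect.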
# '''CENTROID'''
#
# def reduce_noise_centroid_s(y, sr):
#
# cent = librosa.feature.spectral_centroid(y=y, sr=sr)
# threshold_h = np.max(cent)
# threshold_l = np.min(cent)
# less_noise = AudioEffectsChain().lowshelf(gain=-12.0, frequency=threshold_l, slope=0.5).highshelf(gain=-12.0, frequency=threshold_h, slope=0.5).limiter(gain=6.0)
# y_cleaned = less_noise(y)
# return y_cleaned
'''MFCC'''
def mffc_highshelf(y, sr):
mfcc = python_speech_features.base.mfcc(y)
mfcc = python_speech_features.base.logfbank(y)
mfcc = python_speech_features.base.lifter(mfcc)
sum_of_squares = []
index = -1
for r in mfcc:
sum_of_squares.append(0)
index = index + 1
for n in r:
sum_of_squares[index] = sum_of_squares[index] + n**2
strongest_frame = sum_of_squares.index(max(sum_of_squares))
hz = python_speech_features.base.mel2hz(mfcc[strongest_frame])
max_hz = max(hz)
min_hz = min(hz)
speech_booster = AudioEffectsChain().highshelf(frequency=min_hz*(-1)*1.2, gain=-12.0, slope=0.6).limiter(gain=8.0)
y_speach_boosted = speech_booster(y)
return (y_speach_boosted)
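# The heuristic shared by both shelf filters: score each MFCC analysis frame
# by its sum of squares, take the strongest frame, map its coefficients back
# to Hz with mel2hz, and anchor a shelf filter at the extracted frequency
# bounds before limiting the boosted signal.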
def mfcc_lowshelf(y, sr):
mfcc = python_speech_features.base.mfcc(y)
mfcc = python_speech_features.base.logfbank(y)
mfcc = python_speech_features.base.lifter(mfcc)
sum_of_squares = []
index = -1
for r in mfcc:
sum_of_squares.append(0)
index = index + 1
for n in r:
sum_of_squares[index] = sum_of_squares[index] + n**2
strongest_frame = sum_of_squares.index(max(sum_of_squares))
hz = python_speech_features.base.mel2hz(mfcc[strongest_frame])
max_hz = max(hz)
min_hz = min(hz)
speech_booster = AudioEffectsChain().lowshelf(frequency=min_hz*(-1), gain=12.0, slope=0.5)
y_speach_boosted = speech_booster(y)
return (y_speach_boosted)
def trim_silence(y):
y_trimmed, index = librosa.effects.trim(y, top_db=20, frame_length=2, hop_length=500)
trimmed_length = librosa.get_duration(y) - librosa.get_duration(y_trimmed)
return y_trimmed, trimmed_length
def enhance(y):
apply_audio_effects = AudioEffectsChain().lowshelf(gain=10.0, frequency=260, slope=0.1).reverb(reverberance=25, hf_damping=5, room_scale=5, stereo_depth=50, pre_delay=20, wet_gain=0, wet_only=False)#.normalize()
y_enhanced = apply_audio_effects(y)
return y_enhanced
def output_file(destination ,filename, y, sr, ext=""):
destination = destination + filename[:-4] + ext + '.wav'
librosa.output.write_wav(destination, y, sr)
lectures = ['Lecture01.wav']
for s in lectures:
    filename = s
    y, sr = read_file(filename)

    # y_reduced_centroid_s = reduce_noise_centroid_s(y, sr)
    y_reduced_mfcc_lowshelf = mfcc_lowshelf(y, sr)
    y_reduced_mfcc_highshelf = mfcc_highshelf(y, sr)

    # trimming silences (pass the processed signals, not the functions)
    # y_reduced_centroid_s, time_trimmed = trim_silence(y_reduced_centroid_s)
    y_reduced_mfcc_up, time_trimmed = trim_silence(y_reduced_mfcc_lowshelf)
    y_reduced_mfcc_down, time_trimmed = trim_silence(y_reduced_mfcc_highshelf)

    # output_file('lectures_trimmed_noise_reduced/', filename, y_reduced_centroid_s, sr, '_ctr_s')
    output_file('lectures_trimmed_noise_reduced/', filename, y_reduced_mfcc_up, sr, '_mfcc_up')
    # output_file('lectures_trimmed_noise_reduced/', filename, y_reduced_mfcc_down, sr, '_mfcc_down')
    # output_file('lectures_trimmed_noise_reduced/', filename, y, sr, '_org')
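# Note: this loop runs at import time; wrapping it in an
# `if __name__ == '__main__':` guard would keep imports of this module
# side-effect free (left as-is here to preserve the current behaviour).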
......@@ -14,9 +14,37 @@ class LectureAudioSerializer(serializers.ModelSerializer):
        fields = '__all__'


class LectureAudioNoiseRemovedSerializer(serializers.ModelSerializer):
    lecture_audio_id = LectureAudioSerializer()

    class Meta:
        model = LectureAudioNoiseRemoved
        fields = '__all__'


class LectureSpeechToTextSerializer(serializers.ModelSerializer):
    # lecture_speech_to_text_id = LectureAudioNoiseRemovedSerializer()
    lecture_audio_id = LectureAudioSerializer()

    class Meta:
        model = LectureSpeechToText
        fields = '__all__'


class LectureAudioSummarySerializer(serializers.ModelSerializer):
    # lecture_audio_noise_removed_id = LectureSpeechToTextSerializer()
    lecture_audio_id = LectureAudioSerializer()

    class Meta:
        model = LectureAudioSummary
        fields = '__all__'


class LectureNoticesSerializer(serializers.ModelSerializer):
    # lecture_audio_noise_removed_id = LectureSpeechToTextSerializer()
    lecture_audio_id = LectureAudioSerializer()

    class Meta:
        # model = LectureAudioSummary
        model = LectureNotices
        fields = '__all__'
\ No newline at end of file
import speech_recognition as sr
import os
def speech_to_text(video_name):
    r = sr.Recognizer()
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    # os.path.join keeps the path portable instead of hard-coding "\\" separators
    VIDEO_PATH = os.path.join(BASE_DIR, "lectures", video_name)
    with sr.AudioFile(VIDEO_PATH) as source:
        audio = r.listen(source)
    # the bare except is narrowed to the recognizer's two documented failure modes
    with open('audioToText01.txt', 'w') as file:
        try:
            text = r.recognize_google(audio)
            file.write(text)
        except (sr.UnknownValueError, sr.RequestError):
            file.write('error')
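# Usage (a sketch; assumes the WAV file sits in this module's lectures/ folder):
# speech_to_text('Lecture01.wav')  # transcript is written to audioToText01.txt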
\ No newline at end of file
{% extends 'FirstApp/template.html' %}
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Summarization</title>
</head>
<body>
{% block javascript %}
{% load static %}
<!-- Bootstrap core JavaScript-->
<script src="{% static 'FirstApp/vendor/jquery/jquery.min.js' %}"></script>
<script src="{% static 'FirstApp/vendor/bootstrap/js/bootstrap.bundle.min.js' %}"></script>
<!-- Page level plugins -->
<script src="{% static 'FirstApp/vendor/datatables/jquery.dataTables.min.js' %}"></script>
<script src="{% static 'FirstApp/vendor/datatables/dataTables.bootstrap4.min.js' %}"></script>
<!-- Page level custom scripts -->
<script src="{% static 'FirstApp/js/demo/datatables-demo.js' %}"></script>
<!-- Core plugin JavaScript-->
<script src="{% static 'FirstApp/vendor/jquery-easing/jquery.easing.min.js' %}"></script>
<!-- Load TensorFlow.js -->
<script src="https://unpkg.com/@tensorflow/tfjs"></script>
<!-- Load Posenet -->
<script src="https://unpkg.com/@tensorflow-models/posenet">
</script>
<script type="text/javascript">
    $(document).ready(function() {

        // speech to text
        $('.audio_to_text_process').click(function() {
            alert('Processing');
            // call the fetch API
            fetch('http://127.0.0.1:8000/summary/lecture-audio-to-text/?video_name=Lecture01.wav')
                .then((res) => res.json())
                .then((out) => alert(out.response))
                .catch((err) => alert('error: ' + err));
        });

        // background noise removal (endpoint per this app's urls.py; the
        // original handler pointed at the audio-to-text URL by mistake)
        $('.audio_process').click(function() {
            alert('Processing');
            // call the fetch API
            fetch('http://127.0.0.1:8000/summary/lecture-audio-noise-removed/?video_name=Lecture01.wav')
                .then((res) => res.json())
                .then((out) => alert(out.response))
                .catch((err) => alert('error: ' + err));
        });

        // to summary (endpoint per this app's urls.py)
        $('.to_summary').click(function() {
            alert('Processing');
            // call the fetch API
            fetch('http://127.0.0.1:8000/summary/lecture-summary/?video_name=Lecture01.wav')
                .then((res) => res.json())
                .then((out) => alert(out.response))
                .catch((err) => alert('error: ' + err));
        });
    });
</script>
{% endblock %}
<div id="wrapper">
<div id="content-wrapper" class="d-flex flex-column">
<div id="content">
{% block 'container-fluid' %}
<div class="container-fluid">
{% load static %}
<div class="d-sm-flex align-items-center justify-content-between mb-4">
<h1 class="h3 mb-0 text-gray-800">Lecture Summarization</h1>
</div>
<!--first row -->
<div class="row p-2">
<!--first column -->
<div class="col-lg-6" style="overflow-x: scroll">
<div class="card shadow mb-4">
<!--card header -->
<div class="card-header py-3">
<h5 class="m-0 font-weight-bold text-primary">Lecture Recording</h5>
</div>
<!--card body -->
<div class="card-body">
{% if not lec_audio_data %}
<div class="text-center">
<span class="font-italic">No Recordings</span>
</div>
{% else %}
<div class="table-responsive">
<table class="table table-bordered" id="datatable-recordings">
<thead>
<tr>
<th>Module</th>
<th>Date</th>
<th>Recording Name</th>
<th></th>
</tr>
</thead>
<tbody>
{% for lec_audio in lec_audio_data %}
<tr class="recordings not_clicked" id="{{ lec_audio.lecture_audio_id }}">
<!-- <td>-->
<!-- <div class="radio">-->
<!-- <label><input type="radio"-->
<!-- id="{{ lec_audio.lecture_audio_id }}"-->
<!-- name="recording_radio"-->
<!-- data-name="{{ lec_audio.lecture_audio_name }}"-->
<!-- ></label>-->
<!-- </div>-->
<!-- </td>-->
<td>{{ lec_audio.subject.name }}</td>
<td>{{ lec_audio.lecturer_date }}</td>
<td>{{ lec_audio.lecture_audio_name }}</td>
<td>
<button TYPE="button" class="btn btn-success audio_process">Process
</button>
</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% endif %}
</div>
</div>
</div>
<!-- end of 1st column -->
<!-- 2nd column -->
<div class="col-lg-6" style="overflow-x: scroll">
<div class="card shadow mb-4">
<!--card header -->
<div class="card-header py-3">
<h5 class="m-0 font-weight-bold text-primary">Lecture Recording (Noise-Removed)</h5>
</div>
<!--card body -->
<div class="card-body">
{% if not noiseless_data %}
<div class="text-center">
<span class="font-italic">No Recordings</span>
</div>
{% else %}
<div class="table-responsive">
<table class="table table-bordered" id="datatable-noiseless">
<thead>
<tr>
<!-- <th></th>-->
<th>Module</th>
<th>Date</th>
<th>Recording Name</th>
<th></th>
</tr>
</thead>
<tbody>
{% for noiseless_audio in noiseless_data %}
<tr class="recordings not_clicked" id="{{ noiseless_audio.lecture_audio_id }}">
<!-- <td>-->
<!-- <div class="radio">-->
<!-- <label><input type="radio"-->
<!-- id="{{ noiseless_audio.lecture_audio_id }}"-->
<!-- name="recording_radio"-->
<!-- data-name="{{ noiseless_audio.lecture_audio_name }}"-->
<!-- ></label>-->
<!-- </div>-->
<!-- </td>-->
<td>{{ noiseless_audio.lecture_audio_id.subject.name }}</td>
<td>{{ noiseless_audio.lecture_audio_id.lecturer_date }}</td>
<td>{{ noiseless_audio.lecture_audio_name }}</td>
<td>
<button type="button"
class="btn btn-success audio_to_text_process">Convert
</button>
</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% endif %}
</div>
</div>
</div>
<!-- end of 2nd column -->
</div>
<!-- end of 1st row-->
<!--2ND row -->
<div class="row p-2">
<!--first column -->
<div class="col-lg-6" style="overflow-x: scroll">
<div class="card shadow mb-4">
<!--card header -->
<div class="card-header py-3">
<h5 class="m-0 font-weight-bold text-primary">Converted Lecture (Text)</h5>
</div>
<!--card body -->
<div class="card-body">
{% if not lecture_text_data %}
<div class="text-center">
<span class="font-italic">No Recordings</span>
</div>
{% else %}
<div class="table-responsive">
<table class="table table-bordered" id="datatable-text">
<thead>
<tr>
<th>Module</th>
<th>Date</th>
<th>Text ID</th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
{% for lec_text in lecture_text_data %}
<tr class="recordings not_clicked" id="{{ lec_text.lecture_audio_id }}">
<!-- <td>-->
<!-- <div class="radio">-->
<!-- <label><input type="radio"-->
<!-- id="{{ lec_text.lecture_audio_id }}"-->
<!-- name="recording_radio"-->
<!-- data-name="{{ lec_text.lecture_audio_name }}"-->
<!-- ></label>-->
<!-- </div>-->
<!-- </td>-->
<td>{{ lec_text.lecture_audio_id.subject.name }}</td>
<td>{{ lec_text.lecture_audio_id.lecturer_date }}</td>
<td>{{ lec_text.lecture_speech_to_text_id }}</td>
<td>
<button TYPE="button" class="btn btn-success to_summary">Summary
</button>
</td>
<td>
<button TYPE="button" class="btn btn-danger get_notices">Notices
</button>
</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% endif %}
</div>
</div>
</div>
<!-- end of 1st column -->
</div>
<!-- end of 2nd row-->
<!--3rd row -->
<div class="row p-2">
<!--first column -->
<div class="col-lg-6" style="overflow-x: scroll">
<div class="card shadow mb-4">
<!--card header -->
<div class="card-header py-3">
<h5 class="m-0 font-weight-bold text-primary">Lecture Summary</h5>
</div>
<!--card body -->
<div class="card-body">
{% if not lec_summary_data %}
<div class="text-center">
<span class="font-italic">No Summaries</span>
</div>
{% else %}
<div class="table-responsive">
<table class="table table-bordered" id="datatable-summary">
<thead>
<tr>
<th>Module</th>
<th>Date</th>
<th>Summary</th>
</tr>
</thead>
<tbody>
{% for lec_summary in lec_summary_data %}
<tr class="recordings not_clicked" id="{{ lec_summary.lecture_audio_id }}">
<!-- <td>-->
<!-- <div class="radio">-->
<!-- <label><input type="radio"-->
<!-- id="{{ lec_summary.lecture_audio_id }}"-->
<!-- name="recording_radio"-->
<!-- data-name="{{ lec_summary.lecture_audio_name }}"-->
<!-- ></label>-->
<!-- </div>-->
<!-- </td>-->
<td>{{ lec_summary.lecture_audio_id.subject.name }}</td>
<td>{{ lec_summary.lecture_audio_id.lecturer_date }}</td>
<td>{{ lec_summary.lecture_audio_summary_id }}</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% endif %}
</div>
</div>
</div>
<!-- end of 1st column -->
<!-- 2nd column -->
<div class="col-lg-6" style="overflow-x: scroll">
<div class="card shadow mb-4">
<!--card header -->
<div class="card-header py-3">
<h5 class="m-0 font-weight-bold text-primary">Lecture Notices</h5>
</div>
<!--card body -->
<div class="card-body">
{% if not lec_notice_data %}
<div class="text-center">
<span class="font-italic">No Recordings</span>
</div>
{% else %}
<div class="table-responsive">
<table class="table table-bordered" id="datatable-notices">
<thead>
<tr>
<th>Module</th>
<th>Date</th>
<th>Notices</th>
</tr>
</thead>
<tbody>
{% for lec_notice in lec_notice_data %}
<tr class="recordings not_clicked" id="{{ lec_notice.lecture_audio_id }}">
<!-- <td>-->
<!-- <div class="radio">-->
<!-- <label><input type="radio"-->
<!-- id="{{ lec_notice.lecture_audio_id }}"-->
<!-- name="recording_radio"-->
<!-- data-name="{{ noiseless_audio.lecture_audio_name }}"-->
<!-- ></label>-->
<!-- </div>-->
<!-- </td>-->
<td>{{ lec_notice.lecture_audio_id.subject.name }}</td>
<td>{{ lec_notice.lecture_audio_id.lecturer_date }}</td>
<td>{{ lec_notice.lecture_notice_id }}</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% endif %}
</div>
</div>
</div>
<!-- end of 2nd column -->
{% endblock %}
</div>
</div>
</div>
</body>
</html>
\ No newline at end of file
......@@ -8,6 +8,7 @@ router = routers.DefaultRouter()
# router.register(r'^register', views.register)
urlpatterns = [
path('lecture', views.summarization),
# path('', views.hello),
# path('login', views.login),
# path('register', views.register),
......@@ -25,7 +26,15 @@ urlpatterns = [
# # path('Video', views.hello)
# API to retrieve lecture audio
url(r'^get-lecture-audio/$', api.LectureAudioAPI.as_view()),
url(r'^lecture-audio/$', api.LectureAudioAPI.as_view()),
url(r'^lecture-audio-noise-removed/$', api.audioNoiseRemovedList.as_view()),
url(r'^lecture-audio-to-text/$', api.audioToTextList.as_view()),
url(r'^lecture-summary/$', api.lectureSummaryList.as_view()),
url(r'^lecture-notices/$', api.lectureNoticeList.as_view()),
# # API to retrieve audio analysis
# url(r'^get-audio-analysis', api.GetLectureAudioAnalysis.as_view()),
......
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, render
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import viewsets
from .models import LectureAudio, LectureAudioNoiseRemoved, LectureSpeechToText, LectureAudioSummary, LectureNotices
from .serializer import LectureAudioSerializer, LectureAudioNoiseRemovedSerializer, LectureAudioSummarySerializer, \
LectureSpeechToTextSerializer, LectureNoticesSerializer
# Create your views here.
def summarization(request):
    lec_audio = LectureAudio.objects.all()
    lec_audio_serializer = LectureAudioSerializer(lec_audio, many=True)
    data = lec_audio_serializer.data

    lec_noiseless_audio = LectureAudioNoiseRemoved.objects.all()
    lec_noiseless_audio_ser = LectureAudioNoiseRemovedSerializer(lec_noiseless_audio, many=True)
    noiseless_data = lec_noiseless_audio_ser.data

    lec_text = LectureSpeechToText.objects.all()
    lec_text_ser = LectureSpeechToTextSerializer(lec_text, many=True)
    lecture_text_data = lec_text_ser.data

    lec_summary = LectureAudioSummary.objects.all()
    lec_summary_ser = LectureAudioSummarySerializer(lec_summary, many=True)
    lec_summary_data = lec_summary_ser.data

    lec_notice = LectureNotices.objects.all()
    lec_notice_ser = LectureNoticesSerializer(lec_notice, many=True)
    lec_notice_data = lec_notice_ser.data

    return render(request, "LectureSummarizingApp/summarization.html", {
        "lec_audio_data": data,
        "noiseless_data": noiseless_data,
        "lecture_text_data": lecture_text_data,
        "lec_summary_data": lec_summary_data,
        "lec_notice_data": lec_notice_data,
    })
class audioList(APIView):

    def get(self, request):
        lecture_audio = LectureAudio.objects.all()
        serializer = LectureAudioSerializer(lecture_audio, many=True)
        return Response(serializer.data)

    def post(self, request):
        # POST is accepted but not implemented yet (request was missing
        # from the original signature, which would raise a TypeError)
        pass
class audioNoiseRemovedList(APIView):

    def get(self, request):
        lecture_audio_noise_removed = LectureAudioNoiseRemoved.objects.all()
        serializer = LectureAudioNoiseRemovedSerializer(lecture_audio_noise_removed, many=True)
        return Response(serializer.data)

    def post(self, request):
        LectureAudioNoiseRemoved(
            lecture_audio_noise_removed_id=request.data["lecture_audio_noise_removed_id"],
            # note: this ForeignKey field expects a LectureAudio instance;
            # posting a bare id would need the *_id attribute or a lookup first
            lecture_audio_id=request.data["lecture_audio_id"],
            lecturer_date=request.data["lecturer_date"],
            lecture_audio_name=request.data["lecture_audio_name"],
            lecture_audio_length=request.data["lecture_audio_length"]
        ).save()
        return Response({"response": request.data})
class audioToTextList(APIView):

    def get(self, request):
        lecture_speech_to_text_id = LectureSpeechToText.objects.all()
        serializer = LectureSpeechToTextSerializer(lecture_speech_to_text_id, many=True)
        return Response(serializer.data)

    def post(self, request):
        LectureSpeechToText(
            lecture_speech_to_text_id=request.data["lecture_speech_to_text_id"],
            lecture_audio_id=request.data["lecture_audio_id"],
            audio_original_text=request.data["audio_original_text"]
        ).save()
        return Response({"response": request.data})
class lectureSummaryList(APIView):

    def get(self, request):
        lecture_audio_summary_id = LectureAudioSummary.objects.all()
        serializer = LectureAudioSummarySerializer(lecture_audio_summary_id, many=True)
        return Response(serializer.data)

    def post(self, request):
        LectureAudioSummary(
            lecture_speech_to_text_id=request.data["lecture_speech_to_text_id"],
            lecture_audio_id=request.data["lecture_audio_id"],
            audio_original_text=request.data["audio_original_text"],
            audio_summary=request.data["audio_summary"]
        ).save()
        return Response({"response": request.data})
class lectureNoticeList(APIView):

    def get(self, request):
        lecture_notice_id = LectureNotices.objects.all()
        serializer = LectureNoticesSerializer(lecture_notice_id, many=True)
        return Response(serializer.data)

    def post(self, request):
        LectureNotices(
            lecture_notice_id=request.data["lecture_notice_id"],
            lecture_audio_id=request.data["lecture_audio_id"],
            notice_text=request.data["notice_text"]
        ).save()
        return Response({"response": request.data})
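# A quick way to exercise these endpoints (a sketch, not part of the app):
# assumes the dev server is running at 127.0.0.1:8000 with this app mounted
# under summary/ as in urls.py, and that the `requests` package is installed.
#
# import requests
#
# base = "http://127.0.0.1:8000/summary/"
# for endpoint in ("lecture-audio/", "lecture-audio-noise-removed/",
#                  "lecture-audio-to-text/", "lecture-summary/", "lecture-notices/"):
#     print(endpoint, requests.get(base + endpoint).json())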
\ No newline at end of file
from django.templatetags.static import static
from django.urls import reverse
from jinja2 import Environment
def environment(**options):
    env = Environment(**options)
    env.globals.update({
        'static': static,
        'url': reverse,
    })
    return env
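# To activate this environment, settings.py's TEMPLATES needs a Jinja2 backend
# pointing at the factory above. A minimal sketch (the dotted module path is an
# assumption for illustration):
#
# TEMPLATES = [
#     {
#         "BACKEND": "django.template.backends.jinja2.Jinja2",
#         "APP_DIRS": True,
#         "OPTIONS": {
#             "environment": "LectureSummarizingApp.jinja2_env.environment",
#         },
#     },
# ]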
\ No newline at end of file
......@@ -23,5 +23,6 @@ urlpatterns = [
path('', include('FirstApp.urls')),
path('attendance/', include('AttendanceApp.urls')),
path('lecturer/', include('MonitorLecturerApp.urls')),
# path('lecturer/', include('MonitorLecturerApp.urls')),
path('summary/', include('LectureSummarizingApp.urls'))
]
This is a sample text file
\ No newline at end of file