Commit 2dfede4d authored by SohanDanushka

Merge remote-tracking branch 'origin/QA_RELEASE' into db_and_monitoring_IT17097284

parents 16102ea7 dc8d49fd
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
from imutils.video import VideoStream
import imutils
import cv2,os,urllib.request
import numpy as np
from django.conf import settings
from time import sleep
import random
face_detection_videocam = cv2.CascadeClassifier(os.path.join(
settings.BASE_DIR,'opencv_haarcascade_data/haarcascade_frontalface_default.xml'))
face_detection_webcam = cv2.CascadeClassifier(os.path.join(
settings.BASE_DIR,'opencv_haarcascade_data/haarcascade_frontalface_default.xml'))
# load our serialized face detector model from disk
prototxtPath = os.path.sep.join([settings.BASE_DIR, "face_detector/deploy.prototxt"])
weightsPath = os.path.sep.join([settings.BASE_DIR,"face_detector/res10_300x300_ssd_iter_140000.caffemodel"])
faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)
maskNet = load_model(os.path.join(settings.BASE_DIR,'face_detector/mask_detector.model'))
class IPWebCam(object):
def __init__(self):
self.url = "http://192.168.8.100:8080/shot.jpg"
self._count = 0
def __del__(self):
cv2.destroyAllWindows()
def get_frame(self):
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))
imgResp = urllib.request.urlopen(self.url)
imgNp = np.array(bytearray(imgResp.read()),dtype=np.uint8)
img= cv2.imdecode(imgNp,-1)
# We are using Motion JPEG, but OpenCV defaults to capture raw images,
# so we must encode it into JPEG in order to correctly display the
# video stream
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces_detected = face_detection_webcam.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
for (x, y, w, h) in faces_detected:
cv2.rectangle(img, pt1=(x, y), pt2=(x + w, y + h), color=(255, 0, 0), thickness=2)
resize = cv2.resize(img, (640, 480), interpolation = cv2.INTER_LINEAR)
frame_flip = cv2.flip(resize,1)
ret, jpeg = cv2.imencode('.jpg', frame_flip)
# capture a frame at a fixed interval and save it to disk so that face recognition can be run on it later
sleep(3)
cv2.imwrite("%d.jpg" % self._count, img)
self._count += 1
return jpeg.tobytes()
class MaskDetect(object):
def __init__(self):
self.vs = VideoStream(src=0).start()
def __del__(self):
cv2.destroyAllWindows()
def detect_and_predict_mask(self,frame, faceNet, maskNet):
# grab the dimensions of the frame and then construct a blob
# from it
(h, w) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),
(104.0, 177.0, 123.0))
# pass the blob through the network and obtain the face detections
faceNet.setInput(blob)
detections = faceNet.forward()
# initialize our list of faces, their corresponding locations,
# and the list of predictions from our face mask network
faces = []
locs = []
preds = []
# loop over the detections
for i in range(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated with
# the detection
confidence = detections[0, 0, i, 2]
# filter out weak detections by ensuring the confidence is
# greater than the minimum confidence
if confidence > 0.5:
# compute the (x, y)-coordinates of the bounding box for
# the object
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
# ensure the bounding boxes fall within the dimensions of
# the frame
(startX, startY) = (max(0, startX), max(0, startY))
(endX, endY) = (min(w - 1, endX), min(h - 1, endY))
# extract the face ROI, convert it from BGR to RGB channel
# ordering, resize it to 224x224, and preprocess it
face = frame[startY:endY, startX:endX]
face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
face = cv2.resize(face, (224, 224))
face = img_to_array(face)
face = preprocess_input(face)
# add the face and bounding boxes to their respective
# lists
faces.append(face)
locs.append((startX, startY, endX, endY))
# only make predictions if at least one face was detected
if len(faces) > 0:
# for faster inference we'll make batch predictions on *all*
# faces at the same time rather than one-by-one predictions
# in the above `for` loop
faces = np.array(faces, dtype="float32")
preds = maskNet.predict(faces, batch_size=32)
# return a 2-tuple of the face locations and their corresponding
# predictions
return (locs, preds)
def get_frame(self):
frame = self.vs.read()
frame = imutils.resize(frame, width=650)
frame = cv2.flip(frame, 1)
# detect faces in the frame and determine if they are wearing a
# face mask or not
(locs, preds) = self.detect_and_predict_mask(frame, faceNet, maskNet)
# loop over the detected face locations and their corresponding
# predictions
for (box, pred) in zip(locs, preds):
# unpack the bounding box and predictions
(startX, startY, endX, endY) = box
(mask, withoutMask) = pred
# determine the class label and color we'll use to draw
# the bounding box and text
label = "Mask" if mask > withoutMask else "No Mask"
color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
# include the probability in the label
label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)
# display the label and bounding box rectangle on the output
# frame
cv2.putText(frame, label, (startX, startY - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
ret, jpeg = cv2.imencode('.jpg', frame)
return jpeg.tobytes()
import cv2
import os
import numpy as np
#This module contains all common functions that are called in the tester.py file
#Given an image, the function below returns the rectangles for the detected faces along with the grayscale image
def faceDetection(test_img):
gray_img=cv2.cvtColor(test_img,cv2.COLOR_BGR2GRAY)#convert color image to grayscale
gray_img=cv2.normalize(gray_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
gray_img=(255*gray_img).astype(np.uint8)
gray_img=cv2.fastNlMeansDenoising(gray_img);
face_haar_cascade=cv2.CascadeClassifier('HaarCascade/haarcascade_frontalface_default.xml')#Load haar classifier
faces=face_haar_cascade.detectMultiScale(gray_img,scaleFactor=1.32,minNeighbors=5)#detectMultiScale returns rectangles
return faces,gray_img
#Given a directory, the function below returns the face region of the grayscale image for each image along with its label/ID
def labels_for_training_data(directory):
faces=[]
faceID=[]
for path,subdirnames,filenames in os.walk(directory):
for filename in filenames:
if filename.startswith("."):
print("Skipping system file")#Skipping files that startwith .
continue
id=os.path.basename(path)#fetching subdirectory names
img_path=os.path.join(path,filename)#fetching image path
print("img_path:",img_path)
print("id:",id)
test_img=cv2.imread(img_path)#loading each image one by one
if test_img is None:
print("Image not loaded properly")
continue
faces_rect,gray_img=faceDetection(test_img)#Calling faceDetection function to return faces detected in particular image
if len(faces_rect)!=1:
continue #Since we are assuming only single person images are being fed to classifier
(x,y,w,h)=faces_rect[0]
roi_gray=gray_img[y:y+h,x:x+w]#cropping region of interest i.e. face area from grayscale image
faces.append(roi_gray)
faceID.append(int(id))
return faces,faceID
def train_classifier(faces,faceID):
face_recognizer=cv2.face.LBPHFaceRecognizer_create()
face_recognizer.train(faces,np.array(faceID))
return face_recognizer
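# Illustrative usage sketch (not part of the original file): labels_for_training_data() assumes the
# training directory uses numeric student-ID sub-folders, since the sub-folder name is cast with
# int(id) and used as the label. A hypothetical layout and end-to-end training call would be:
#
#   trainingImages/
#       0/  img1.jpg, img2.jpg, ...   -> label 0
#       1/  img1.jpg, img2.jpg, ...   -> label 1
#
#   faces, faceID = labels_for_training_data('trainingImages')
#   face_recognizer = train_classifier(faces, faceID)
#   face_recognizer.write('trainingData.yml')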
#Below function draws bounding boxes around detected face in image
def draw_rect(test_img,face):
(x,y,w,h)=face
cv2.rectangle(test_img,(x,y),(x+w,y+h),(255,0,0),thickness=4)
#Below function writes name of person for detected label
def put_text(test_img,text,x,y):
cv2.putText(test_img,text,(x,y),cv2.FONT_HERSHEY_DUPLEX,1,(255,0,0),3)
#Save video frames
def extractAndSaveFrames():
vidcap = cv2.VideoCapture('IT17098960.mp4')
success, image = vidcap.read()
count = 0
while success:
cv2.imwrite("frame%d.jpg" % count, image) # save frame as JPEG file
success, image = vidcap.read()
print('Read a new frame: ', success)
count += 1
@@ -31,18 +31,18 @@
</script>
<script type="text/javascript">
$(document).ready(function() {
$('#initiate_btn').click(function() {
fetch('http://127.0.0.1:8000/attendance/process-initiate-lecture')
.then((res) => res.json())
.then((out) => alert(out.response))
.catch((err) => alert('error: ' + err))
});
})
</script>
<script>
function toggleLectureLive() {
var x = document.getElementById("liveStreamLecture");
var y = document.getElementById("liveStreamLectureStartButton");
if (x.style.display === "none") {
x.style.display = "block";
y.style.display = "block";
} else {
x.style.display = "none";
y.style.display = "none";
}
}
</script>
{% endblock %}
@@ -55,11 +55,19 @@
<div class="text-center">
<div class="card">
<div class="card-header">
<h4 class="card-title">Starting the lecture....</h4>
<h4 class="card-title">Lecture Live</h4>
</div>
<div class="card-body">
<button type="button" class="btn btn-success" id="initiate_btn">Initiate Lecture</button>
<button type="button" class="btn btn-success" id="initiate_btn" onclick="toggleLectureLive()">Show Live Stream</button>
</div>
<div style="vertical-align: middle; border-style: none; background-color: #055270; height: 500px; width: 100%">
<div class="row justify-content-center">
<img id="liveStreamLecture" style="display: none; height: inherit; margin-bottom: -25px;" src="{% url 'webcam_feed' %}">
</div>
<div class="row justify-content-center">
<button style="display: none; width: 70px; height: 70px;" id="liveStreamLectureStartButton" class="btn btn-warning btn-circle"><i class="fas fa-video"></i></button>
</div>
</div>
</div>
</div>
import cv2
from .faceRecognition import faceDetection, train_classifier, labels_for_training_data, draw_rect, put_text
#This module takes images stored on disk and performs face recognition
test_img=cv2.imread('TestImages/frame0.jpg')#test_img path
faces_detected,gray_img=faceDetection(test_img)
print("faces_detected:",faces_detected)
#Comment out the lines below when running this program a second time, since the trained model is saved to trainingData.yml in the directory
faces,faceID=labels_for_training_data('trainingImages')
face_recognizer=train_classifier(faces,faceID)
face_recognizer.write('trainingData.yml')
# For subsequent runs
face_recognizer=cv2.face.LBPHFaceRecognizer_create()
face_recognizer.read('trainingData.yml')#use this to load training data for subsequent runs
name={0: "IT17138000", 1: "IT17100908", 2: "IT17098960"}#creating dictionary containing names for each label
file = open("attendance.txt", "w")
for face in faces_detected:
(x,y,w,h)=face
roi_gray=gray_img[y:y+h,x:x+w]
label,confidence=face_recognizer.predict(roi_gray)#predicting the label of given image
print("confidence:",confidence)
print("label:",label)
draw_rect(test_img,face)
predicted_name=name[label]
if(confidence>90):#If confidence (distance) is more than 90, the match is unreliable, so don't print the predicted name on the image
continue
file.write(predicted_name)
put_text(test_img,predicted_name,x,y)
file.close()
resized_img=cv2.resize(test_img,(700,700))
cv2.imshow("face dtecetion tutorial",resized_img)
cv2.waitKey(0)#Waits indefinitely until a key is pressed
cv2.destroyAllWindows()
@@ -17,7 +17,7 @@ urlpatterns = [
path('student/', StudentAPIView.as_view()),
path('student/<str:pk>', StudentDetails.as_view()),
url(r'^upload/$', FileView.as_view(), name='file-upload'),
path('webcam_feed', views.webcam_feed, name='webcam_feed'),
# this url will initiate the lecture
url(r'^process-initiate-lecture/$', InitiateLecture.as_view())
]
from django.shortcuts import render
from django.http.response import StreamingHttpResponse
from AttendanceApp.camera import IPWebCam
def initiate_lecture(request):
return render(request, "AttendanceApp/Initiate_lecture.html")
\ No newline at end of file
return render(request, "AttendanceApp/Initiate_lecture.html")
def gen(camera):
while True:
frame = camera.get_frame()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
def webcam_feed(request):
return StreamingHttpResponse(gen(IPWebCam()),
content_type='multipart/x-mixed-replace; boundary=frame')
\ No newline at end of file
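# Illustrative sketch (not in the original views file above): the MaskDetect class defined in
# AttendanceApp/camera.py could be exposed as a second MJPEG endpoint by reusing the same gen()
# generator and StreamingHttpResponse pattern shown above, e.g. with a hypothetical 'mask_feed' view:
#
#   from AttendanceApp.camera import MaskDetect
#
#   def mask_feed(request):
#       return StreamingHttpResponse(gen(MaskDetect()),
#                                    content_type='multipart/x-mixed-replace; boundary=frame')
#
#   # with a matching URL entry, e.g. path('mask_feed', views.mask_feed, name='mask_feed')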
import cv2
import os
import re
import base64
import shutil
def saveImage(response):
dataUrlPattern = re.compile('data:image/(png|jpeg);base64,(.*)$')
base_path = os.path.join(os.path.abspath(__file__))
root_dir = os.path.dirname(os.path.dirname(base_path))
new_dir_name = "static\\FirstApp\\images\\{}".format(response["imageName"])
new_dir = os.path.join(root_dir, new_dir_name)
if (os.path.isdir(new_dir)):
# delete the previous directory
shutil.rmtree(new_dir)
# create the new directory
os.mkdir(new_dir)
count = 0
for url in response["ImageURLS"]:
url = dataUrlPattern.match(url).group(2)
encoded = url.encode()
image = base64.b64decode(encoded)
imageName = response["imageName"] + '_img_' + format(count) + '.png'
new_file = os.path.join(new_dir, imageName)
count += 1
# saving the images (method 1)
with open(new_file, "wb") as f:
f.write(image)
# respond 'yes' to the command line prompt
p = os.popen('python manage.py collectstatic', "w")
p.write("yes")
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
"""
this file is responsible for creating the API classes so that
frontend could communicate with the backend, through JSON-based communication
each class is extended by djangorestframework APIVIEW class
each class contains methods to represent the HTTP methods received through the requests
in this case the GET method was mostly used.
each method will return an HttpResponse that allows its data to be rendered into
arbitrary media types.
"""
from MonitorLecturerApp.models import LectureRecordedVideo, LecturerVideoMetaData
from MonitorLecturerApp.serializers import LectureRecordedVideoSerializer, LecturerVideoMetaDataSerializer
from .MongoModels import *
from rest_framework.views import *
from .ImageOperations import saveImage
from .logic import head_pose_estimation
from .logic import video_extraction
from .logic import activity_recognition as ar
from .logic import posenet_calculation as pc
from . import emotion_detector as ed
from .logic import id_generator as ig
from .logic import pdf_file_generator as pdf
from .logic import head_gaze_estimation as hge
from .logic import video_extraction as ve
from .models import Teachers, Video, VideoMeta, RegisterUser
from .MongoModels import *
from .serializers import *
import datetime
# to create images
class ImageViewSet(APIView):
def post(self, request):
saveImage(request.data)
return Response({"response": "successful"})
# to perform pose estimation on images
class GazeEstimationViewSet(APIView):
def post(self, request):
response = head_pose_estimation.estimatePose(request.data)
return Response({"response": response})
# to perform video extraction
class VideoExtractionViewSet(APIView):
def get(self, request):
response = video_extraction.getExtractedFrames(request.query_params)
return Response({"response": response})
def post(self, request):
response = video_extraction.VideoExtractor(request.data)
return Response({"response": response})
# lecture emotions view set
class LectureEmotionViewSet(APIView):
def get(self, request):
emotions = LectureEmotionReport.objects.all().order_by('lecture_id')
serializer = LectureEmotionSerializer(emotions, many=True)
return Response({"response": serializer.data})
def post(self, request):
LectureEmotionReport(
lecture_id=request.data["lecture_id"],
happy_perct=request.data["happy_perct"],
sad_perct=request.data["sad_perct"],
angry_perct=request.data["angry_perct"],
surprise_perct=request.data["surprise_perct"],
disgust_perct=request.data["disgust_perct"],
neutral_perct=request.data["neutral_perct"]
).save()
return Response({"response": request.data})
class LectureViewSet(APIView):
def get(self, request):
@@ -189,20 +148,33 @@ class LectureVideoViewSet(APIView):
status=status.HTTP_400_BAD_REQUEST)
# this API will retrieve the details of a lecture video
class GetLectureVideoViewSet(APIView):
def get(self, request):
# get the lecturer id from the request
lecturer = request.query_params.get('lecturer')
# get the lecture date from the request
date = request.query_params.get('date')
# get the item number from the request
index = int(request.query_params.get('index'))
# retrieve the lecture video from the db
lecturer_video = LectureVideo.objects.filter(lecturer_id=lecturer, date=date)
# serialize the object
serializer = LectureVideoSerializer(lecturer_video, many=True)
# get the lecture video id
lecture_video_id = serializer.data[0]['lecture_video_id']
print('lecture video id: ', lecture_video_id)
# retrieve the lecture activities that exist for the given video
activities = LectureActivity.objects.filter(lecture_video_id__lecture_video_id=lecture_video_id)
# record whether any lecture activities were found
isActivityFound = (len(activities) > 0)
# return the response
return Response({
"response": serializer.data[index],
"isActivityFound": isActivityFound
@@ -223,21 +195,22 @@ class GetLectureVideoViewSetForHome(APIView):
# to check whether there is only one lecture video for the query
# if there are more than one, send only the specified lecture video
if len(serializer.data) > 1:
lecture_video_id = serializer.data[counter]['lecture_video_id']
response = serializer.data[counter]
# else, send the only lecture video
else:
lecture_video_id = serializer.data[0]['lecture_video_id']
response = serializer.data[0]
# return the response
return Response({
"response": response
})
# ACTIVITY
##### ACTIVITY section #####
# API for lecture activities
class LectureActivityViewSet(APIView):
@@ -261,20 +234,15 @@ class GetLectureActivityViewSet(APIView):
def get(self, request):
lecture_video_id = request.query_params.get('lecture_video_id')
lecture_video_name = request.query_params.get('lecture_video_name')
# retrieve the extracted frames
extracted = ar.getExtractedFrames(lecture_video_name)
lecture_activities = LectureActivity.objects.filter(lecture_video_id__lecture_video_id=lecture_video_id)
serializer = LectureActivitySerializer(lecture_activities, many=True)
return Response({
"response": serializer.data,
"extracted": extracted
})
# API to process lecture activity
# API to process lecture activity and save to DB
class LectureActivityProcess(APIView):
def get(self, request):
@@ -284,8 +252,6 @@ class LectureActivityProcess(APIView):
self.activity(video_id, percentages)
return Response({"response": True})
def post(self, request):
pass
def activity(self, lec_video_id, percentages):
# lec_video = LectureVideo.objects.filter(lecture_video_id=lec_video_id)
@@ -319,60 +285,6 @@ class LectureActivityProcess(APIView):
ar.save_frame_groupings(video_name, frame_landmarks, frame_group_dict)
class GetLectureActivityDetections(APIView):
def get(self, request):
video_name = request.query_params.get('video_name')
frame_name = request.query_params.get('frame_name')
detections = ar.get_detections(video_name, frame_name)
return Response({
"detections": detections
})
# the API class for getting student detections for a label
class GetLectureActvityDetectionsForLabel(APIView):
def get(self, request):
video_name = request.query_params.get('video_name')
label = request.query_params.get('label')
labelled_detections, detected_people = ar.get_detections_for_label(video_name, label)
return Response({
"response": labelled_detections,
"people": detected_people
})
# the API class for getting student activity evaluations
class GetLectureActivityStudentEvaluation(APIView):
def get(self, request):
video_name = request.query_params.get('video_name')
labelled_detections, detected_people = ar.get_student_activity_evaluation(video_name)
return Response({
"response": labelled_detections,
"people": detected_people
})
# the API class to retrieve individual student evaluation (activity)
class GetLectureActivityIndividualStudentEvaluation(APIView):
def get(self, request):
video_name = request.query_params.get('video_name')
student_name = request.query_params.get('student_name')
meta_data = ar.get_individual_student_evaluation(video_name, student_name)
return Response({
"response": meta_data
})
# API to retrieve activity detections for frames
class GetLectureActivityRecognitionsForFrames(APIView):
@@ -511,45 +423,16 @@ class GetLectureEmotionReportViewSet(APIView):
def get(self, request):
lecture_video_id = request.query_params.get('lecture_video_id')
lecture_video_name = request.query_params.get('lecture_video_name')
lecture_emotions = LectureEmotionReport.objects.filter(lecture_video_id__lecture_video_id=lecture_video_id)
serializer = LectureEmotionSerializer(lecture_emotions, many=True)
print(len(serializer.data))
return Response({
"response": serializer.data,
})
# the API class for getting student emotion evaluations
class GetLectureEmotionStudentEvaluations(APIView):
def get(self, request):
video_name = request.query_params.get('video_name')
labelled_detections, detected_people = ed.get_student_emotion_evaluations(video_name)
return Response({
"response": labelled_detections,
"people": detected_people
})
# the API class to retrieve individual student evaluation (emotion)
class GetLectureEmotionIndividualStudentEvaluation(APIView):
def get(self, request):
video_name = request.query_params.get('video_name')
student_name = request.query_params.get('student_name')
meta_data = ed.get_individual_student_evaluation(video_name, student_name)
serialized = VideoMetaSerializer(meta_data)
return Response({
"response": serialized.data
})
# API to retrieve emotion detections for frames
class GetLectureEmotionRecognitionsForFrames(APIView):
@@ -582,73 +465,6 @@ class GetLectureEmotionRecognitionsForFrames(APIView):
})
##### POSE #####
class GetLectureVideoForPose(APIView):
def get(self, request):
lecturer = request.query_params.get('lecturer')
date = request.query_params.get('date')
index = int(request.query_params.get('index'))
lecturer_video = LectureVideo.objects.filter(lecturer_id=lecturer, date=date)
serializer = LectureVideoSerializer(lecturer_video, many=True)
return Response({
"response": serializer.data[index]
})
# API to retrieve the extracted frames for a lecture video
class GetLectureVideoExtractedFrames(APIView):
def get(self, request):
lecture_video_id = request.query_params.get('lecture_video_id')
lecture_video_name = request.query_params.get('lecture_video_name')
# retrieve the extracted frames
extracted = ar.getExtractedFrames(lecture_video_name)
# lecture_activities = LectureActivity.objects.filter(lecture_video_id__lecture_video_id=lecture_video_id)
# serializer = LectureActivitySerializer(lecture_activities, many=True)
return Response({
# "response": serializer.data,
"extracted": extracted
})
# API to retrieve individual student detections for pose estimation
class GetLectureVideoIndividualStudentFrames(APIView):
def get(self, request):
video_name = request.query_params.get('video_name')
labelled_detections, detected_people = pc.get_pose_estimations(video_name)
return Response({
"response": labelled_detections,
"people": detected_people
})
# API to process pose estimation for an individual student
class ProcessIndividualStudentPoseEstimation(APIView):
authentication_classes = [BasicAuthentication]
permission_classes = [IsAuthenticated, IsAdminUser]
def get(self, request):
pass
# POST method
def post(self, request):
video_name = request.data['video_name']
student = request.data['student']
poses = request.data['poses']
pc.calculate_pose_estimation_for_student(video_name, student, poses)
return Response({
"response": video_name
})
##### GAZE ESTIMATION SECTION #####
class GetLectureGazeEstimationAvailaibility(APIView):
@@ -725,8 +541,6 @@ class GetLectureGazeEstimationViewSet(APIView):
def get(self, request):
lecture_video_id = request.query_params.get('lecture_video_id')
lecture_video_name = request.query_params.get('lecture_video_name')
lecture_gaze_estimations = LectureGazeEstimation.objects.filter(
lecture_video_id__lecture_video_id=lecture_video_id)
serializer = LectureGazeEstimationSerializer(lecture_gaze_estimations, many=True)
@@ -1270,4 +1084,188 @@ class GetLectureGazeSummary(APIView):
"frame_landmarks": frame_landmarks,
"frame_group_percentages": frame_group_percentages,
"gaze_labels": gaze_labels
})
\ No newline at end of file
})
# =====OTHERS=====
# this API will retrieve the respective lecturer video name
class GetLecturerRecordedVideo(APIView):
def get(self, request):
lecturer = request.query_params.get('lecturer')
subject = request.query_params.get('subject')
date = request.query_params.get('date')
# retrieve data
lec_recorded_video = LectureRecordedVideo.objects.filter(lecturer_id=lecturer, subject__subject_code=subject, lecturer_date=date)
lec_recorded_video_ser = LectureRecordedVideoSerializer(lec_recorded_video, many=True)
lec_recorded_video_data = lec_recorded_video_ser.data[0]
# extract the lecturer video name
video_name = lec_recorded_video_data['lecture_video_name']
print('lecturer recorded video name: ', video_name)
# return the response
return Response({
"video_name": video_name
})
# this API will get lecture activity correlations
class GetLectureActivityCorrelations(APIView):
def get(self, request):
# this variable defines the number of days to be considered for activity correlations
option = request.query_params.get('option')
# the lecturer id
lecturer = request.query_params.get('lecturer')
int_option = int(option)
# get the current date
current_date = datetime.datetime.now().date()
option_date = datetime.timedelta(days=int_option)
# subtract the given time period from the current date
previous_date = current_date - option_date
# this list contains the student activities for each lecture
individual_lec_activities = []
# this list will contain the student activity-lecturer posture activity correlations
activity_correlations = []
# retrieving lecture activities
lec_activity = LectureActivity.objects.filter(
lecture_video_id__date__gte=previous_date,
lecture_video_id__date__lte=current_date,
lecture_video_id__lecturer=lecturer
)
if len(lec_activity) > 0:
isRecordFound = True
activity_serializer = LectureActivitySerializer(lec_activity, many=True)
activity_data = activity_serializer.data
_, individual_lec_activities, _ = ar.get_student_activity_summary_for_period(activity_data)
# retrieving lecturer recorded activities
lec_recorded_activity = LecturerVideoMetaData.objects.filter(
lecturer_video_id__lecturer_date__gte=previous_date,
lecturer_video_id__lecturer_date__lte=current_date,
lecturer_video_id__lecturer=lecturer
)
if len(lec_recorded_activity) > 0:
lec_recorded_activity_ser = LecturerVideoMetaDataSerializer(lec_recorded_activity, many=True)
lec_recorded_activity_data = lec_recorded_activity_ser.data
activity_correlations = ar.get_activity_correlations(individual_lec_activities, lec_recorded_activity_data)
print('activity correlations: ', activity_correlations)
return Response({
"correlations": activity_correlations
})
# this API will get lecture emotion correlations
class GetLectureEmotionCorrelations(APIView):
def get(self, request):
option = request.query_params.get('option')
lecturer = request.query_params.get('lecturer')
int_option = int(option)
current_date = datetime.datetime.now().date()
option_date = datetime.timedelta(days=int_option)
previous_date = current_date - option_date
individual_lec_emotions = []
emotion_correlations = []
# retrieving lecture emotions
lec_emotion = LectureEmotionReport.objects.filter(
lecture_video_id__date__gte=previous_date,
lecture_video_id__date__lte=current_date,
lecture_video_id__lecturer=lecturer
)
# if there are lecture emotions
if len(lec_emotion) > 0:
emotion_serializer = LectureEmotionSerializer(lec_emotion, many=True)
emotion_data = emotion_serializer.data
_, individual_lec_emotions, _ = ed.get_student_emotion_summary_for_period(emotion_data)
# retrieving lecturer recorded activities
lec_recorded_activity = LecturerVideoMetaData.objects.filter(
lecturer_video_id__lecturer_date__gte=previous_date,
lecturer_video_id__lecturer_date__lte=current_date,
lecturer_video_id__lecturer=lecturer
)
# if there are any recorded lectures
if len(lec_recorded_activity) > 0:
lec_recorded_activity_ser = LecturerVideoMetaDataSerializer(lec_recorded_activity, many=True)
lec_recorded_activity_data = lec_recorded_activity_ser.data
emotion_correlations = ed.get_emotion_correlations(individual_lec_emotions, lec_recorded_activity_data)
return Response({
"correlations": emotion_correlations
})
# this API will get lecture gaze correlations
class GetLectureGazeCorrelations(APIView):
def get(self, request):
option = request.query_params.get('option')
lecturer = request.query_params.get('lecturer')
int_option = int(option)
current_date = datetime.datetime.now().date()
option_date = datetime.timedelta(days=int_option)
previous_date = current_date - option_date
individual_lec_gaze = []
gaze_correlations = []
# retrieving lecture gaze estimations
lec_gaze = LectureGazeEstimation.objects.filter(
lecture_video_id__date__gte=previous_date,
lecture_video_id__date__lte=current_date,
lecture_video_id__lecturer=lecturer
)
# if there are gaze estimations
if len(lec_gaze) > 0:
gaze_serializer = LectureGazeEstimationSerializer(lec_gaze, many=True)
gaze_data = gaze_serializer.data
_, individual_lec_gaze, _ = hge.get_student_gaze_estimation_summary_for_period(gaze_data)
# retrieving lecturer recorded activities
lec_recorded_activity = LecturerVideoMetaData.objects.filter(
lecturer_video_id__lecturer_date__gte=previous_date,
lecturer_video_id__lecturer_date__lte=current_date,
lecturer_video_id__lecturer=lecturer
)
# if there are any recorded lectures
if len(lec_recorded_activity) > 0:
lec_recorded_activity_ser = LecturerVideoMetaDataSerializer(lec_recorded_activity, many=True)
lec_recorded_activity_data = lec_recorded_activity_ser.data
# find the correlations between lecture gaze estimations and recorded lecture
gaze_correlations = hge.get_gaze_correlations(individual_lec_gaze, lec_recorded_activity_data)
return Response({
"correlations": gaze_correlations
})
"""
this file contain the relevant methods to implement the student emotion recognition logic
main methods include
* the execution of emotion recognition model and saving the results into the database,
* retrieving the emotion recognition details for lectures within a given time period
* calculating the emotion recognition details for each frame, for a given lecture
* calculating the emotion recognition details for frame groups, for a given lecture
* calculating the emotion recognition correlations with the lecturer posture activities
"""
from tensorflow.keras.models import load_model
from time import sleep
from keras.preprocessing.image import img_to_array
from keras.preprocessing import image
import cv2
import os
import numpy as np
@@ -11,17 +25,27 @@ from . models import VideoMeta
from . logic import custom_sorter as cs
from .logic import id_generator as ig
from .logic import activity_recognition as ar
from .logic import utilities as ut
from .serializers import LectureEmotionSerializer
import pandas as pd
# emotion recognition method
from .serializers import LectureEmotionSerializer
# this method accepts:
# classifier: emotion recognition classifier (VGG model)
# face_classifier: face detection classifier (Haar-Cascade)
# image: image to be processed
# returns:
# label: the emotion recognition label
def emotion_recognition(classifier, face_classifier, image):
# this label will contain the recognized emotion label
label = ""
class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# the detected faces in the image
faces = face_classifier.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
@@ -42,6 +66,13 @@ def emotion_recognition(classifier, face_classifier, image):
return label
# this method will perform emotion recognition for a lecture
# this method accepts:
# video: the lecture video name
# returns:
# meta_data: the student emotion recognition details for the lecture video
def detect_emotion(video):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video))
@@ -120,135 +151,18 @@ def detect_emotion(video):
# for testing purposes
print('ending the emotion recognition process')
# return the data
return meta_data
# to retrieve student evaluation for emotions
def get_student_emotion_evaluations(video_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# face_classifier = cv2.CascadeClassifier(
# os.path.join(BASE_DIR, 'FirstApp\classifiers\haarcascade_frontalface_default.xml'))
# classifier_path = os.path.join(BASE_DIR, 'FirstApp\classifiers\Emotion_little_vgg.h5')
# classifier = load_model(classifier_path)
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
detections = []
frames = []
# this method will recognize the student emotions for each frame
# this method will accept:
# video_name: the lecture video name
for frame_folder in os.listdir(EXTRACTED_DIR):
# returns:
# sorted_emotion_frame_recognitions: the list of sorted student emotion recognitions for each frame
FRAME_DIR = os.path.join(EXTRACTED_DIR, frame_folder)
frame_details = {}
frame_details['frame'] = frame_folder
# for each detection in the frame directory
detected_images = []
for detection in os.listdir(FRAME_DIR):
if "frame" not in detection:
DETECTION_PATH = os.path.join(FRAME_DIR, detection)
image = cv2.imread(DETECTION_PATH)
# label = emotion_recognition(classifier, face_classifier, image)
detected_images.append(detection)
detections.append(detection)
frame_details['detections'] = detected_images
frames.append(frame_details)
sorted_frames = cs.custom_object_sorter(frames)
set_detections = set(detections)
list_set_detections = list(set_detections)
sorted_list_set_detections = cs.custom_sort(list_set_detections)
return sorted_frames, sorted_list_set_detections
# this method will retrieve individual student evaluations
def get_individual_student_evaluation(video_name, student_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
face_classifier = cv2.CascadeClassifier(
os.path.join(BASE_DIR, 'FirstApp\classifiers\haarcascade_frontalface_default.xml'))
classifier_path = os.path.join(BASE_DIR, 'FirstApp\classifiers\Emotion_little_vgg.h5')
classifier = load_model(classifier_path)
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
# the object of type 'VideoMeta'
meta_data = VideoMeta()
# the class labels
class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
# taking a count on each label
count_frames = 0
count_angry = 0
count_happy = 0
count_sad = 0
count_neutral = 0
count_surprise = 0
for frame in os.listdir(EXTRACTED_DIR):
# getting the frame folder
FRAME_FOLDER = os.path.join(EXTRACTED_DIR, frame)
for detections in os.listdir(FRAME_FOLDER):
# only take the images with the student name
if detections == student_name:
# get the label for this image
IMAGE_PATH = os.path.join(FRAME_FOLDER, detections)
image = cv2.imread(IMAGE_PATH)
label = emotion_recognition(classifier, face_classifier, image)
# check for the label of the image
if (label == 'Angry'):
count_angry += 1
# path = os.path.join(BASE_DIR, 'static\\images\\Anger')
# cv2.imwrite(os.path.join(path, 'Anger-{0}.jpg'.format(count)), frame)
elif (label == 'Happy'):
count_happy += 1
# path = os.path.join(BASE_DIR, 'static\\images\\Happy')
# cv2.imwrite(os.path.join(path, 'Happy-{0}.jpg'.format(count)), frame)
elif (label == 'Neutral'):
count_neutral += 1
# path = os.path.join(BASE_DIR, 'static\\images\\Neutral')
# cv2.imwrite(os.path.join(path, 'Neutral-{0}.jpg'.format(count)), frame)
elif (label == 'Sad'):
count_sad += 1
# path = os.path.join(BASE_DIR, 'static\\images\\Sad')
# cv2.imwrite(os.path.join(path, 'Sad-{0}.jpg'.format(count)), frame)
elif (label == 'Surprise'):
count_surprise += 1
# path = os.path.join(BASE_DIR, 'static\\images\\Surprise')
# cv2.imwrite(os.path.join(path, 'Surprise-{0}.jpg'.format(count)), frame)
# incrementing the frame_count
count_frames += 1
# setting up the counted values
meta_data.frame_count = count_frames
meta_data.happy_count = count_happy
meta_data.sad_count = count_sad
meta_data.angry_count = count_angry
meta_data.neutral_count = count_neutral
meta_data.surprise_count = count_surprise
# calculating the percentages
meta_data.calcPercentages()
return meta_data
# this method will perform emotion recognition for each frame of the lecture video
def get_frame_emotion_recognition(video_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
@@ -284,6 +198,7 @@ def get_frame_emotion_recognition(video_name):
# for testing purposes
print('starting the emotion frame recognition process')
# looping through the frames
while (frame_count < no_of_frames):
ret, image = cap.read()
@@ -359,17 +274,25 @@ def get_frame_emotion_recognition(video_name):
frame_count += 1
# sort the recognitions based on the frame number
sorted_activity_frame_recognitions = cs.custom_object_sorter(frame_emotion_recognitions)
sorted_emotion_frame_recognitions = cs.custom_object_sorter(frame_emotion_recognitions)
# for testing purposes
print('ending the emotion frame recognition process')
# return the detected frame percentages
return sorted_activity_frame_recognitions
return sorted_emotion_frame_recognitions
# this method will retrieve student activity summary for given time period
# this method will get the student emotion recognition summary for period
# this method accepts the following parameter
# emotions: the database records retrieved within the given time period
# returns:
# percentages: average percentages for each student emotion recognition label
# individual_lec_emotions: contains the lecture emotion recognition details for each individual lecture
# emotion_labels: the emotion labels
def get_student_emotion_summary_for_period(emotions):
# declare variables to add percentage values
@@ -383,8 +306,10 @@ def get_student_emotion_summary_for_period(emotions):
# get the number of emotion records to calculate the average
no_of_emotions = len(emotions)
# this list will contain the emotion recognition details for each lecture
individual_lec_emotions = []
# emotion labels
emotion_labels = ["happy_perct", "sad_perct", "angry_perct", "disgust_perct", "surprise_perct", "neutral_perct"]
# iterate through the emotion records
@@ -417,6 +342,7 @@
surprise_average_perct = round((surprise_perct_combined / no_of_emotions), 1)
neutral_average_perct = round((neutral_perct_combined / no_of_emotions), 1)
# this dictionary will contain the student emotion average percentage values
percentages = {}
percentages["happy_perct"] = happy_average_perct
percentages["sad_perct"] = sad_average_perct
@@ -425,12 +351,21 @@
percentages["surprise_perct"] = surprise_average_perct
percentages["neutral_perct"] = neutral_average_perct
# return the values
return percentages, individual_lec_emotions, emotion_labels
# this method will retrieve activity frame groupings for a lecture
def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# this method will get the lecture student emotion frame groupings
# this method accepts:
# video_name: the lecture video name
# frame_landmarks: the specific frames in the extracted set of frames from the lecture video
# frame_group_dict: the dictionary which contains the frame groups and the relevant student emotion labels for each frame group
# returns:
# frame_group_dict: the modified frame group dictionary
# emotion_labels: student emotion labels
def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
@@ -447,13 +382,11 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(config_file, model_file)
# capture the video
cap = cv2.VideoCapture(VIDEO_DIR)
no_of_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# initializing the count variables
frame_count = 0
@@ -558,15 +491,7 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
frame_group_neutral_count = frame_group_details['neutral_count']
group_detection_count = frame_group_details['detection_count']
# print('frame group phone count: ', frame_group_phone_count)
# print('frame group listen count: ', frame_group_listen_count)
# print('frame group note count: ', frame_group_note_count)
# print('frame group detection count: ', group_detection_count)
frame_diff = int(frame_group_diff[key])
# print('frame difference: ', frame_diff)
# calculate the frame group emotion percentages
frame_group_happy_perct = float(frame_group_happy_count / group_detection_count) * 100
frame_group_sad_perct = float(frame_group_sad_count / group_detection_count) * 100
frame_group_angry_perct = float(frame_group_angry_count / group_detection_count) * 100
@@ -596,7 +521,15 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
return frame_group_dict, emotion_labels
# this section will handle some database operations
# THIS SECTION WILL HANDLE SOME DATABASE OPERATIONS
# this method will save frame detections to the database
# this method will accept
# video_name: lecture video name to be processed
# returns
# frame_detections: the student emotion frame detections
def save_frame_recognitions(video_name):
# for testing purposes
@@ -646,7 +579,12 @@ def save_frame_recognitions(video_name):
return frame_detections
# this method will save the emotion frame groupings to the database
# this method accepts:
# video_name: the lecture video name
# frame_landmarks: the specific frames in the extracted set of frames from the lecture video
# frame_group_dict: the dictionary which contains the frame groups and the relevant student emotion labels for each frame group
def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# for testing purposes
@@ -686,3 +624,87 @@ def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# save
new_lec_emotion_frame_groupings.save()
# this method will get student emotion correlations
# this method accepts:
# individual_lec_emotions: the student emotion details for each individual lecture
# lec_recorded_activity_data: the lecturer posture recognition details
# returns:
# correlations: the lecture student emotions and lecturer posture recognition correlations
def get_emotion_correlations(individual_lec_emotions, lec_recorded_activity_data):
# this variable will be used to store the correlations
correlations = []
limit = 10
data_index = ['lecture-{}'.format(i + 1) for i in range(len(individual_lec_emotions))]
# student emotion labels
student_emotion_labels = ['Happy', 'Sad', 'Angry', 'Surprise', 'Neutral']
lecturer_activity_labels = ['seated', 'standing', 'walking']
# lecturer recorded data list (lecturer)
sitting_perct_list = []
standing_perct_list = []
walking_perct_list = []
# lecture emotion data list (student)
happy_perct_list = []
sad_perct_list = []
angry_perct_list = []
surprise_perct_list = []
neutral_perct_list = []
# loop through the lecturer recorded data (lecturer)
for data in lec_recorded_activity_data:
sitting_perct_list.append(int(data['seated_count']))
standing_perct_list.append(int(data['standing_count']))
walking_perct_list.append(int(data['walking_count']))
# loop through the lecture emotion data (student)
for data in individual_lec_emotions:
happy_perct_list.append(int(data['happy_perct']))
sad_perct_list.append(int(data['sad_perct']))
angry_perct_list.append(int(data['angry_perct']))
surprise_perct_list.append(int(data['surprise_perct']))
neutral_perct_list.append(int(data['neutral_perct']))
corr_data = {'Happy': happy_perct_list, 'Sad': sad_perct_list, 'Angry': angry_perct_list, 'Surprise': surprise_perct_list, 'Neutral': neutral_perct_list,
'seated': sitting_perct_list, 'standing': standing_perct_list, 'walking': walking_perct_list}
# create the dataframe
df = pd.DataFrame(corr_data, index=data_index)
# calculate the correlation
pd_series = ut.get_top_abs_correlations(df, limit)
print('====correlated variables=====')
print(pd_series)
for i in range(limit):
# this dictionary will get the pandas.Series object's indices and values separately
corr_dict = {}
index = pd_series.index[i]
# check whether the first index is a student emotion
isStudentEmotion = index[0] in student_emotion_labels
# check whether the second index is a lecturer activity
isLecturerAct = index[1] in lecturer_activity_labels
# if the pair is a student emotion and a lecturer activity, add it to the dictionary
if isStudentEmotion and isLecturerAct:
corr_dict['index'] = index
corr_dict['value'] = pd_series.values[i]
# append the dictionary to the 'correlations' list
correlations.append(corr_dict)
# return the list
return correlations
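# Illustrative sketch (assumption, not shown in this diff): ut.get_top_abs_correlations() lives in
# .logic.utilities and is assumed to return the top-n absolute pairwise correlations of the
# DataFrame as a pandas Series indexed by (column, column) pairs. A helper with that behaviour
# could look roughly like:
#
#   def get_top_abs_correlations(df, n=10):
#       # pairwise absolute correlations as a Series indexed by column pairs
#       au_corr = df.corr().abs().unstack()
#       # drop self-pairs and duplicate (A, B)/(B, A) pairs
#       labels_to_drop = set()
#       cols = df.columns
#       for i in range(len(cols)):
#           for j in range(i + 1):
#               labels_to_drop.add((cols[i], cols[j]))
#       au_corr = au_corr.drop(labels=labels_to_drop).sort_values(ascending=False)
#       return au_corr[:n]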
"""
this file contain the relevant methods to implement the student activity recognition logic
main methods include
* the execution of activity recognition model and saving the results into the database,
* retrieving the activity recognition details for lectures within a given time period
* calculating the activity recognition details for each frame, for a given lecture
* calculating the activity recognition details for frame groups, for a given lecture
* calculating the activity recognition correlations with the lecturer posture activities
"""
import tensorflow as tf
import tensorflow.keras
from PIL import Image, ImageOps
import numpy as np
import cv2
import os
import shutil
from .custom_sorter import *
from ..MongoModels import *
from ..serializers import *
from . import id_generator as ig
from . import utilities as ut
import pandas as pd
# this method will perform activity recognition for a lecture
# this method accepts:
# video_path: the lecture video name
# returns:
# percentages: the student activity percentages for the lecture video
def activity_recognition(video_path):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_path))
@@ -32,11 +54,12 @@ def activity_recognition(video_path):
np.set_printoptions(suppress=True)
# class_labels = ['Phone checking', 'Talking with friends', 'note taking']
# class labels
# define the student activity labels
class_labels = ['Phone checking', 'Listening', 'Note taking']
# load the model
model = tensorflow.keras.models.load_model(CLASSIFIER_DIR)
# compile the model
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
@@ -47,6 +70,8 @@ def activity_recognition(video_path):
# iteration
video = cv2.VideoCapture(VIDEO_DIR)
no_of_frames = video.get(cv2.CAP_PROP_FRAME_COUNT)
# initialize the frame count and student activity count variables
frame_count = 0
total_detections = 0
phone_checking_count = 0
@@ -56,10 +81,12 @@
# for testing purposes
print('starting the activity recognition process')
# looping through the frames
while (frame_count < no_of_frames):
ret, image = video.read()
image = cv2.resize(image, size)
# perform person detection on the extracted image
detections = person_detection(image, net)
# this is for testing purposes
@@ -69,8 +96,10 @@
# if there are any person detections
if (len(detections) > 0):
# increment the total detections in the entire video
total_detections += len(detections)
# initialize the detection count
detection_count = 0
# looping through the person detections of the frame
@@ -97,33 +126,41 @@
elif (label == class_labels[2]):
note_taking_count += 1
# increment the detection count
detection_count += 1
# increment the frame count
frame_count += 1
# calculating the percentages for each label
phone_perct = float(phone_checking_count / total_detections) * 100 if total_detections > 0 else 0
# talking_perct = float(talking_count / total_detections) * 100 if total_detections > 0 else 0
note_perct = float(note_taking_count / total_detections) * 100 if total_detections > 0 else 0
listening_perct = float(listening_count / total_detections) * 100 if total_detections > 0 else 0
# assigning the percentages to the dictionary
percentages["phone_perct"] = phone_perct
# percentages["talking_perct"] = talking_perct
percentages["writing_perct"] = note_perct
percentages["listening_perct"] = listening_perct
# for testing purposes
print('activity recognition process is over')
# return the percentages
return percentages
# this method will perform the person detection for a given image
# this method accepts:
# image: image that needs to be processed
# net: the person detection model, which is a caffe implemented deep learning model
# returns:
# detected_person: this list contains the bounding box coordinates of the person detections in the input image
def person_detection(image, net):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# set the threshold value
threshold = 0.2
detected_person = []
@@ -133,8 +170,8 @@
"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
"sofa", "train", "tvmonitor"]
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))
# initialize the person count
person_count = 0
# load the input image and construct an input blob for the image
@@ -169,224 +206,52 @@
# display the prediction
label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
# print("[INFO] {}".format(label))
# if the detected object belongs to the 'person' class
if (format(label).__contains__("person")):
startX = 0 if startX < 0 else startX
startY = 0 if startY < 0 else startY
# extract the person
person = image[startY:endY, startX:endX]
detected_person.append(person)
person_count += 1
# return the detection person list
return detected_person
# retrieving the extracted frames and detections for a given video
def getExtractedFrames(folder_name):
image_list = []
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(folder_name))
# listing all the images in the directory
for frame_folders in os.listdir(EXTRACTED_DIR):
FRAME_FOLDER = os.path.join(EXTRACTED_DIR, frame_folders)
frame_details = {}
frame_details['frame'] = frame_folders
detection_details = []
for detections in os.listdir(FRAME_FOLDER):
detection_details.append(detections)
frame_details['detections'] = detection_details
image_list.append(frame_details)
# checking for the number of frames
if (len(image_list) > 0):
image_list = custom_object_sorter(image_list)
return image_list
# this method will recognize the activity for each frame
# this method will accept:
# video_name: the lecture video name
else:
return "No extracted frames were found"
# get detections for a given frame name
def get_detections(video_name, frame_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
FRAME_DIR = os.path.join(EXTRACTED_DIR, frame_name)
detections = []
for detection in os.listdir(FRAME_DIR):
if 'frame' not in detection:
detections.append(detection)
return detections
# get detections for a given class name
def get_detections_for_label(video_name, label_index):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
np.set_printoptions(suppress=True)
model = tensorflow.keras.models.load_model(CLASSIFIER_DIR)
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
size = (224, 224)
class_labels = ['Phone checking', 'Talking with friends', 'note taking']
label_index = int(label_index)
given_label = class_labels[label_index]
detections = []
frames = []
for frame_folder in os.listdir(EXTRACTED_DIR):
FRAME_DIR = os.path.join(EXTRACTED_DIR, frame_folder)
frame_details = {}
frame_details['frame'] = frame_folder
# for each detection in the frame directory
detected_images = []
for detection in os.listdir(FRAME_DIR):
if "frame" not in detection:
DETECTION_PATH = os.path.join(FRAME_DIR, detection)
image = cv2.imread(DETECTION_PATH)
image = cv2.resize(image, size)
image_array = np.asarray(image)
normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
# Load the image into the array
data[0] = normalized_image_array
# run the inference
prediction = model.predict(data)
label = class_labels[prediction.argmax()]
# checking for equality in selected label and given label
if (label == given_label):
detected_images.append(detection)
detections.append(detection)
frame_details['detections'] = detected_images
frames.append(frame_details)
sorted_frames = custom_object_sorter(frames)
set_detections = set(detections)
list_set_detections = list(set_detections)
sorted_list_set_detections = custom_sort(list_set_detections)
return sorted_frames, sorted_list_set_detections
# to get the student evaluations
def get_student_activity_evaluation(video_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_03.h5")
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_04.h5")
np.set_printoptions(suppress=True)
model = tensorflow.keras.models.load_model(CLASSIFIER_DIR)
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
size = (224, 224)
class_labels = ['Phone checking', 'Talking with friends', 'note taking']
detections = []
frames = []
for frame_folder in os.listdir(EXTRACTED_DIR):
FRAME_DIR = os.path.join(EXTRACTED_DIR, frame_folder)
frame_details = {}
frame_details['frame'] = frame_folder
# for each detection in the frame directory
detected_images = []
for detection in os.listdir(FRAME_DIR):
if "frame" not in detection:
DETECTION_PATH = os.path.join(FRAME_DIR, detection)
image = cv2.imread(DETECTION_PATH)
image = cv2.resize(image, size)
image_array = np.asarray(image)
normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
# Load the image into the array
data[0] = normalized_image_array
# returns:
# sorted_activity_frame_recognitions: the list of sorted student activity recognitions for each frame
# run the inference
prediction = model.predict(data)
label = class_labels[prediction.argmax()]
detected_images.append(detection)
detections.append(detection)
frame_details['detections'] = detected_images
frames.append(frame_details)
sorted_frames = custom_object_sorter(frames)
set_detections = set(detections)
list_set_detections = list(set_detections)
sorted_list_set_detections = custom_sort(list_set_detections)
return sorted_frames, sorted_list_set_detections
# recognize the activity for each frame
def get_frame_activity_recognition(video_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_03.h5")
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_04.h5")
ACTIVITY_DIR = os.path.join(BASE_DIR, "static\\FirstApp\\activity")
# files required for person detection
config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
model_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.caffemodel")
# load our serialized person detection model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(config_file, model_file)
np.set_printoptions(suppress=True)
# class_labels = ['Phone checking', 'Talking with friends', 'note taking']
# class labels
class_labels = ['Phone checking', 'Listening', 'Note taking']
# load the activity recognition model
model = tensorflow.keras.models.load_model(CLASSIFIER_DIR)
# compile the model
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
......@@ -496,87 +361,16 @@ def get_frame_activity_recognition(video_name):
return sorted_activity_frame_recognitions
# this method will retrieve individual student evaluation
def get_individual_student_evaluation(video_name, student_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_03.h5")
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_04.h5")
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_05.h5")
np.set_printoptions(suppress=True)
# load the model
model = tensorflow.keras.models.load_model(CLASSIFIER_DIR)
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
size = (224, 224)
# initializing the count variables
frame_count = 0
phone_count = 0
note_count = 0
listen_count = 0
# class labels
class_labels = ['Phone checking', 'Listening', 'Note taking']
for frame in os.listdir(EXTRACTED_DIR):
# getting the frame folder
FRAME_FOLDER = os.path.join(EXTRACTED_DIR, frame)
for detections in os.listdir(FRAME_FOLDER):
# only take the images with the student name
if detections == student_name:
# get the label for this image
IMAGE_PATH = os.path.join(FRAME_FOLDER, detections)
image = cv2.imread(IMAGE_PATH)
image = cv2.resize(image, size)
image_array = np.asarray(image)
normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
# Load the image into the array
data[0] = normalized_image_array
# run the inference
prediction = model.predict(data)
label = class_labels[prediction.argmax()]
# checking for the label
if label == class_labels[0]:
phone_count += 1
elif label == class_labels[1]:
listen_count += 1
elif label == class_labels[2]:
note_count += 1
# increment the frame count
frame_count += 1
# calculating the percentages
phone_perct = float(phone_count / frame_count) * 100
writing_perct = float(note_count / frame_count) * 100
listening_perct = float(listen_count / frame_count) * 100
# this dictionary will be returned
percentages = {}
percentages['phone_perct'] = phone_perct
percentages['writing_perct'] = writing_perct
percentages['listening_perct'] = listening_perct
return percentages
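# A hedged worked example of the percentage arithmetic above, with assumed counts
# (5 phone-checking, 10 note-taking and 5 listening detections over 20 frames); the
# resulting dictionary mirrors the one returned by get_individual_student_evaluation().
_example_frame_count = 20
_example_phone_count, _example_note_count, _example_listen_count = 5, 10, 5
_example_percentages = {
    'phone_perct': float(_example_phone_count / _example_frame_count) * 100,      # 25.0
    'writing_perct': float(_example_note_count / _example_frame_count) * 100,     # 50.0
    'listening_perct': float(_example_listen_count / _example_frame_count) * 100, # 25.0
}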
# this method will retrieve the student activity summary for a given time period
# this method accepts the following parameter
# activities: the database records retrieved within the given time period
# returns:
# percentages: average percentages for each student activity recognition label
# individual_lec_activities: contain the lecture activity recognition details for each individual lecture
# activity_labels: the activity labels
def get_student_activity_summary_for_period(activities):
# declare variables to add percentage values
phone_checking_perct_combined = 0.0
......@@ -586,8 +380,10 @@ def get_student_activity_summary_for_period(activities):
# get the number of activities to calculate the average
no_of_activities = len(activities)
# this list will contain the student activity details for each lecture
individual_lec_activities = []
# activity labels
activity_labels = ["phone_perct", "listening_perct", "writing_perct"]
# iterate through the activities
......@@ -614,10 +410,20 @@ def get_student_activity_summary_for_period(activities):
percentages["listening_perct"] = listening_average_perct
percentages["writing_perct"] = note_taking_average_perct
# return the values
return percentages, individual_lec_activities, activity_labels
# this method will get the lecture student activity frame groupings
# this method accepts:
# video_name: the lecture video name
# frame_landmarks: the specific frames in the extracted set of frames from the lecture video
# frame_group_dict: the dictionary which contains the frame groups and the relevant student activity labels for each frame group
# returns:
# frame_group_dict: the modified frame group dictionary
# activity_labels: student activity labels
def activity_frame_groupings(video_name, frame_landmarks, frame_group_dict):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
......@@ -730,14 +536,6 @@ def activity_frame_groupings(video_name, frame_landmarks, frame_group_dict):
frame_group_note_count = frame_group_details['note_count']
group_detection_count = frame_group_details['detection_count']
# print('frame group phone count: ', frame_group_phone_count)
# print('frame group listen count: ', frame_group_listen_count)
# print('frame group note count: ', frame_group_note_count)
# print('frame group detection count: ', group_detection_count)
frame_diff = int(frame_group_diff[key])
# print('frame difference: ', frame_diff)
frame_group_phone_perct = float(frame_group_phone_count / group_detection_count) * 100
frame_group_listen_perct = float(frame_group_listen_count / group_detection_count) * 100
......@@ -754,14 +552,20 @@ def activity_frame_groupings(video_name, frame_landmarks, frame_group_dict):
frame_group_dict[key].pop('note_count')
frame_group_dict[key].pop('detection_count')
# print('frame group dict: ', frame_group_dict)
activity_labels = ['phone_perct', 'listen_perct', 'note_perct']
# return the dictionary
return frame_group_dict, activity_labels
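# A hedged sketch (assumed per-group key names and counts, following the *_count / *_perct
# pattern in the code above) of how a single frame group entry is transformed inside
# activity_frame_groupings(): the per-group counts are converted into percentages and the
# *_count keys are popped before the dictionary is returned.
_example_group_before = {'phone_count': 2, 'listen_count': 6, 'note_count': 2, 'detection_count': 10}
_example_group_after = {
    'phone_perct': float(_example_group_before['phone_count'] / _example_group_before['detection_count']) * 100,   # 20.0
    'listen_perct': float(_example_group_before['listen_count'] / _example_group_before['detection_count']) * 100, # 60.0
    'note_perct': float(_example_group_before['note_count'] / _example_group_before['detection_count']) * 100,     # 20.0
}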
# this section will handle saving activity entities to the database
# this method will save frame detections to the database
# this method will accept
# video_name: lecture video name to be processed
# returns
# frame_detections: the student activity frame detections
def save_frame_recognition(video_name):
# for testing purposes
......@@ -808,7 +612,12 @@ def save_frame_recognition(video_name):
return frame_detections
# this method will save the activity frame groupings to the database
# this method accepts:
# video_name: the lecture video name
# frame_landmarks: the specific frames in the extracted set of frames from the lecture video
# frame_group_dict: the dictionary which contains the frame groups and the relevant student activity labels for each frame group
def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# for testing purposes
......@@ -849,3 +658,85 @@ def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# save
new_lec_activity_frame_groupings.save()
# this method will get student activity correlations
# this method accepts:
# individual_lec_activities: the student activity details for each individual lecture
# lec_recorded_activity_data: the lecturer posture recognition details
# returns:
# correlations: the lecture student activities and lecturer posture recognition correlations
def get_activity_correlations(individual_lec_activities, lec_recorded_activity_data):
# this variable will be used to store the correlations
correlations = []
limit = 10
data_index = ['lecture-{}'.format(i+1) for i in range(len(individual_lec_activities))]
# student activity labels
student_activity_labels = ['phone checking', 'listening', 'note taking']
lecturer_activity_labels = ['seated', 'standing', 'walking']
# lecturer recorded data list (lecturer)
sitting_perct_list = []
standing_perct_list = []
walking_perct_list = []
# lecture activity data list (student)
phone_perct_list = []
listen_perct_list = []
note_perct_list = []
# loop through the lecturer recorded data (lecturer)
for data in lec_recorded_activity_data:
sitting_perct_list.append(int(data['seated_count']))
standing_perct_list.append(int(data['standing_count']))
walking_perct_list.append(int(data['walking_count']))
# loop through the lecture activity data (student)
for data in individual_lec_activities:
phone_perct_list.append(int(data['phone_perct']))
listen_perct_list.append(int(data['listening_perct']))
note_perct_list.append(int(data['writing_perct']))
corr_data = {'phone checking': phone_perct_list, 'listening': listen_perct_list, 'note taking': note_perct_list,
'seated': sitting_perct_list, 'standing': standing_perct_list, 'walking': walking_perct_list}
# create the dataframe
df = pd.DataFrame(corr_data, index=data_index)
# calculate the correlation
pd_series = ut.get_top_abs_correlations(df, limit)
print('====correlated variables=====')
print(pd_series)
for i in range(limit):
# this dictionary will get the pandas.Series object's indices and values separately
corr_dict = {}
index = pd_series.index[i]
# check whether the first index is a student activity
isStudentAct = index[0] in student_activity_labels
# check whether the second index is a lecturer activity
isLecturerAct = index[1] in lecturer_activity_labels
# if both are student and lecturer activities, add to the dictionary
if isStudentAct & isLecturerAct:
corr_dict['index'] = index
corr_dict['value'] = pd_series.values[i]
# append the dictionary to the 'correlations' list
correlations.append(corr_dict)
# return the list
return correlations
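# A minimal usage sketch of get_activity_correlations() with assumed records; in the real
# flow these dictionaries come from the database queries elsewhere in the project.
_example_individual_lec_activities = [
    {'phone_perct': 20, 'listening_perct': 60, 'writing_perct': 20},
    {'phone_perct': 35, 'listening_perct': 40, 'writing_perct': 25},
    {'phone_perct': 10, 'listening_perct': 75, 'writing_perct': 15},
]
_example_lec_recorded_activity_data = [
    {'seated_count': 50, 'standing_count': 30, 'walking_count': 20},
    {'seated_count': 70, 'standing_count': 20, 'walking_count': 10},
    {'seated_count': 40, 'standing_count': 35, 'walking_count': 25},
]
# each returned item pairs a (student activity, lecturer posture) index tuple with its
# absolute correlation value, e.g. {'index': ('listening', 'seated'), 'value': 0.87}
# _example_correlations = get_activity_correlations(_example_individual_lec_activities,
#                                                   _example_lec_recorded_activity_data)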
class PoseResponse:
# directory = ''
# image_name = ''
# label = ''
def __init__(self, directory, image_name, label):
self.directory = directory
self.image_name = image_name
self.labels = label
from imutils import face_utils
import os
import cv2
import dlib
import numpy as np
import imutils
def get2DPoints(image):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers")
detector_path = os.path.join(CLASSIFIER_DIR, "shape_predictor_68_face_landmarks.dat")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(detector_path)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# detect faces in the grayscale image
rects = detector(gray, 1)
left_corner_arr = None
right_corner_arr = None
nose_tip_arr = None
right_mouth_arr = None
left_mouth_arr = None
chin_arr = None
face_center_arr = None
face_center_top_arr = None
face_center_bottom_arr = None
count = 0
print('no of faces: ', len(rects))
if (len(rects)):
left_corner_arr = np.zeros((len(rects), 2))
right_corner_arr = np.zeros((len(rects), 2))
nose_tip_arr = np.zeros((len(rects), 2))
right_mouth_arr = np.zeros((len(rects), 2))
left_mouth_arr = np.zeros((len(rects), 2))
chin_arr = np.zeros((len(rects), 2))
face_center_top_arr = np.zeros((len(rects), 2))
face_center_bottom_arr = np.zeros((len(rects), 2))
for (i, rect) in enumerate(rects):
left_corner = None
right_corner = None
nose_tip = None
right_mouth = None
left_mouth = None
chin = None
(fx, fy, fw, fh) = face_utils.rect_to_bb(rect)
cv2.rectangle(image, (fx, fy), (fx+fw, fy+fh), (0, 255, 0), 2)
face_center_top = [int(fx + fw/2), int(fy)]
face_center_bottom = [int(fx + fw/2), int(fy + fh)]
cv2.line(image, (int(fx + fw/2), int(fy)), (int(fx + fw/2), int(fy + fh)), (0, 255, 0), 2)
shape = predictor(gray, rect)
shape = face_utils.shape_to_np(shape)
# looping through each facial landmark category
for (name, (i, j)) in face_utils.FACIAL_LANDMARKS_IDXS.items():
# clone the original image so we can draw on it, then
# display the name of the face part on the image
clone = image
# loop over the subset of facial landmarks, drawing the
# specific face part
for (x, y) in shape[i:j]:
if (name == 'left_eye'):
# maxArr = np.amax(shape[i:j], axis=0)
# minArr = np.amin(shape[i:j], axis=0)
left_corner = np.amax(shape[i:j], axis=0)
elif (name == 'right_eye'):
# maxArr = np.amax(shape[i:j], axis=0)
# minArr = np.amin(shape[i:j], axis=0)
right_corner = np.amin(shape[i:j], axis=0)
elif (name == 'jaw'):
minArr = np.array(shape[i:j][8], dtype=int)
chin = np.array(shape[i:j][8], dtype=int)
# cv2.putText(clone, "Chin", (int(minArr[0]), int(minArr[1])), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
cv2.circle(image, (int(minArr[0]), int(minArr[1])), 3, (255, 0, 255), -1)
# cv2.circle(clone, (int(minArr[0]), int(minArr[1])), 3, (0, 255, 255), -1)
elif (name == 'nose'):
# nose_tip = np.array(shape[i:j][3], dtype=int)
nose_tip = np.array(shape[i:j][3], dtype=int)
# cv2.putText(clone, "Nose tip", (int(nose_tip[0]), int(nose_tip[1])), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255),
# 2)
# cv2.circle(clone, (int(nose_tip[0]), int(nose_tip[1])), 3, (255, 0, 255), -1)
elif (name == 'inner_mouth'):
# maxArr = np.amax(shape[i:j], axis=0)
# minArr = np.amin(shape[i:j], axis=0)
right_mouth = np.amin(shape[i:j], axis=0)
left_mouth = np.amax(shape[i:j], axis=0)
# cv2.circle(clone, (maxArr[0], maxArr[1]), 3, (127, 0, 255), -1)
# cv2.circle(clone, (minArr[0], minArr[1]), 3, (127, 0, 255), -1)
# else:
# cv2.circle(image, (x, y), 3, (255, 0, 255), -1)
left_corner_arr[count] = left_corner
right_corner_arr[count] = right_corner
nose_tip_arr[count] = nose_tip
right_mouth_arr[count] = right_mouth
left_mouth_arr[count] = left_mouth
chin_arr[count] = chin
face_center_top_arr[count] = face_center_top
face_center_bottom_arr[count] = face_center_bottom
count += 1
return left_corner_arr, right_corner_arr, nose_tip_arr, right_mouth_arr, left_mouth_arr, chin_arr, face_center_top_arr, face_center_bottom_arr, count
# extract the ROI of the face region as a separate image
# (x, y, w, h) = cv2.boundingRect(np.array([shape[i:j]]))
# roi = image[y:y + h, x:x + w]
# roi = imutils.resize(roi, width=250, inter=cv2.INTER_CUBIC)
#
# # show the particular face part
# cv2.imshow("ROI", roi)
# cv2.imshow("Image", clone)
# cv2.waitKey(0)
#
# # visualize all facial landmarks with a transparent overlay
# output = face_utils.visualize_facial_landmarks(image, shape)
# cv2.imshow("Image", output)
# cv2.waitKey(0)
\ No newline at end of file
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 31 03:00:36 2020
@author: hp
This file contains the relevant methods to implement the student gaze estimation logic.
The main methods include:
* the execution of gaze estimation model and saving the results into the database,
* retrieving the gaze estimation details for lectures within a given time period
* calculating the gaze estimation details for each frame, for a given lecture
* calculating the gaze estimation details for frame groups, for a given lecture
* calculating the gaze estimation correlations with the lecturer posture activities
"""
from decimal import Decimal
from . custom_sorter import *
import cv2
import numpy as np
import math
from . face_detector import get_face_detector, find_faces
from . face_landmarks import get_landmark_model, detect_marks
import os
import shutil
import pandas as pd
from ..MongoModels import *
from ..serializers import *
from . import id_generator as ig
from . import utilities as ut
def get_2d_points(img, rotation_vector, translation_vector, camera_matrix, val):
......@@ -137,6 +142,12 @@ def head_pose_points(img, rotation_vector, translation_vector, camera_matrix):
# this method will perform gaze estimation for a lecture
# this method accepts:
# video_path: the lecture video name
# returns:
# percentages: the gaze estimation percentages for the lecture video
def process_gaze_estimation(video_path):
# get the base directory
......@@ -151,12 +162,15 @@ def process_gaze_estimation(video_path):
# load the face detection model
face_model = get_face_detector()
# load the facial landmark model
landmark_model = get_landmark_model()
# capture the video
cap = cv2.VideoCapture(VIDEO_PATH)
ret, img = cap.read()
size = img.shape
font = cv2.FONT_HERSHEY_SIMPLEX
# 3D model points.
model_points = np.array([
(0.0, 0.0, 0.0), # Nose tip
......@@ -200,12 +214,10 @@ def process_gaze_estimation(video_path):
# iterate the video frames
while True:
ret, img = cap.read()
if ret == True:
faces = find_faces(img, face_model)
# print('no of faces found: ', len(faces))
student_count = 0
# iterate through each detected face
for face in faces:
......@@ -216,8 +228,6 @@ def process_gaze_estimation(video_path):
isLookingLeft = False
isLookingFront = False
# deriving the student name to display in the image
student_name = 'student-{}'.format(student_count)
# retrieving the facial landmarks and face bounding box coordinates
marks, facebox = detect_marks(img, landmark_model, face)
......@@ -230,6 +240,7 @@ def process_gaze_estimation(video_path):
marks[48], # Left Mouth corner
marks[54] # Right mouth corner
], dtype="double")
dist_coeffs = np.zeros((4, 1)) # Assuming no lens distortion
(success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix,
dist_coeffs, flags=cv2.SOLVEPNP_UPNP)
......@@ -258,57 +269,36 @@ def process_gaze_estimation(video_path):
except:
ang2 = 90
# print('angle 1: {}, angle 2: {}'.format(ang1, ang2))
# checking for angle 1
if ang1 >= THRESHOLD:
# cv2.putText(img, 'looking down', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
isLookingDown = True
elif ang1 <= -THRESHOLD:
# cv2.putText(img, 'looking up', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
isLookingUp = True
else:
# cv2.putText(img, 'looking front', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
isLookingFront = True
# checking for angle 2
if ang2 >= THRESHOLD:
# cv2.putText(img, 'looking right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
isLookingRight = True
elif ang2 <= -THRESHOLD:
# cv2.putText(img, 'looking left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
isLookingLeft = True
# checking for vertical and horizontal directions
if isLookingDown & isLookingRight:
# cv2.putText(img, 'looking down and right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_down_right_count += 1
elif isLookingDown & isLookingLeft:
# cv2.putText(img, 'looking down and left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_down_left_count += 1
elif isLookingUp & isLookingRight:
# cv2.putText(img, 'looking up and right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_up_right_count += 1
elif isLookingUp & isLookingLeft:
# cv2.putText(img, 'looking up and left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_up_left_count += 1
elif isLookingFront:
# cv2.putText(img, 'Head front', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_front_count += 1
# indicate the student name
# cv2.putText(img, student_name, (facebox[2], facebox[3]), font, 2, (255, 255, 128), 3)
# increment the face count
face_count += 1
# naming the new image
# image_name = "frame-{}.png".format(frame_count)
#
# # new image path
# image_path = os.path.join(VIDEO_DIR, image_name)
# save the new image
# cv2.imwrite(image_path, img)
# for testing purposes
......@@ -321,10 +311,6 @@ def process_gaze_estimation(video_path):
break
# after extracting the frames, save the changes to static content
# p = os.popen("python manage.py collectstatic", "w")
# p.write("yes")
# calculate percentages
head_up_right_perct = (Decimal(head_up_right_count) / Decimal(face_count)) * 100
head_up_left_perct = (Decimal(head_up_left_count) / Decimal(face_count)) * 100
......@@ -351,27 +337,17 @@ def process_gaze_estimation(video_path):
# return the dictionary
return percentages
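# A hedged sketch (assumed numbers) of the percentages dictionary returned above; the key
# names follow the gaze estimation labels used later in
# get_student_gaze_estimation_summary_for_period().
_example_gaze_percentages = {
    'looking_up_and_right_perct': 10.0,
    'looking_up_and_left_perct': 5.0,
    'looking_down_and_right_perct': 20.0,
    'looking_down_and_left_perct': 15.0,
    'looking_front_perct': 50.0,
}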
# this method will retrieve extracted frames
def getExtractedFrames(lecture_video_name):
image_list = []
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\gaze\\{}".format(lecture_video_name))
# listing all the images in the directory
for image_path in os.listdir(EXTRACTED_DIR):
image_list.append(image_path)
# checking for the number of frames
if (len(image_list) > 0):
image_list = custom_sort(image_list)
return image_list
else:
return "No extracted frames were found"
# this method will retrieve lecture gaze estimation for each frame
# this method accepts the following parameter
# video_name: the lecture video name that needs to be processed
# returns:
# frame_detections: the list of gaze estimation detections for each frame
# frame_rate: the frame rate of the video
def get_lecture_gaze_estimation_for_frames(video_name):
# get the base directory
......@@ -381,18 +357,22 @@ def get_lecture_gaze_estimation_for_frames(video_name):
# play the video
video = cv2.VideoCapture(VIDEO_PATH)
# get the frame rate
frame_rate = video.get(cv2.CAP_PROP_FPS)
# this list will contain the frame detections
frame_detections = []
# load the face model
face_model = get_face_detector()
# load the face landmark model
landmark_model = get_landmark_model()
# capture the video
cap = cv2.VideoCapture(VIDEO_PATH)
ret, img = cap.read()
size = img.shape
font = cv2.FONT_HERSHEY_SIMPLEX
# 3D model points.
model_points = np.array([
(0.0, 0.0, 0.0), # Nose tip
......@@ -444,7 +424,6 @@ def get_lecture_gaze_estimation_for_frames(video_name):
# find the number of faces
faces = find_faces(img, face_model)
student_count = 0
# iterate through each detected face
for face in faces:
......@@ -456,8 +435,6 @@ def get_lecture_gaze_estimation_for_frames(video_name):
isLookingLeft = False
isLookingFront = False
# deriving the student name to display in the image
student_name = 'student-{}'.format(student_count)
# retrieving the facial landmarks and face bounding box coordinates
marks, facebox = detect_marks(img, landmark_model, face)
......@@ -497,24 +474,18 @@ def get_lecture_gaze_estimation_for_frames(video_name):
except:
ang2 = 90
# print('angle 1: {}, angle 2: {}'.format(ang1, ang2))
# checking for angle 1
if ang1 >= THRESHOLD:
# cv2.putText(img, 'looking down', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
isLookingDown = True
elif ang1 <= -THRESHOLD:
# cv2.putText(img, 'looking up', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
isLookingUp = True
else:
# cv2.putText(img, 'looking front', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
isLookingFront = True
# checking for angle 2
if ang2 >= THRESHOLD:
# cv2.putText(img, 'looking right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
isLookingRight = True
elif ang2 <= -THRESHOLD:
# cv2.putText(img, 'looking left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
isLookingLeft = True
# checking for vertical and horizontal directions
......@@ -575,6 +546,14 @@ def get_lecture_gaze_estimation_for_frames(video_name):
# this method will get the student gaze estimation summary for period
# this method accepts the following parameter
# gaze_estimation_data: the database records retrieved within the given time period
# returns:
# percentages: average percentages for each gaze estimation label
# individual_lec_gaze_estimations: contain the lecture gaze estimation details for each individual lecture
# gaze_estimation_labels: the gaze estimation labels
def get_student_gaze_estimation_summary_for_period(gaze_estimation_data):
# declare variables to add percentage values
......@@ -587,8 +566,10 @@ def get_student_gaze_estimation_summary_for_period(gaze_estimation_data):
# get the number of gaze estimation records to calculate the average
no_of_gaze_estimations = len(gaze_estimation_data)
# this list will contain the lecture gaze estimation details for each individual lecture
individual_lec_gaze_estimations = []
# define the gaze estimation labels
gaze_estimation_labels = ["looking_up_and_right_perct", "looking_up_and_left_perct", "looking_down_and_right_perct", "looking_down_and_left_perct", "looking_front_perct"]
# iterate through the activities
......@@ -623,21 +604,31 @@ def get_student_gaze_estimation_summary_for_period(gaze_estimation_data):
percentages["looking_down_and_left_perct"] = looking_down_left_average_perct
percentages["looking_front_perct"] = looking_front_average_perct
# return the values
return percentages, individual_lec_gaze_estimations, gaze_estimation_labels
# this method will get the lecture gaze estimation frame groupings
# this method accepts:
# video_name: the lecture video name
# frame_landmarks: the specific frames in the extracted set of frames from the lecture video
# frame_group_dict: the dictionary which contains the frame groups and the relevant gaze estimation labels for each frame group
# returns:
# frame_group_dict: the modified frame group dictionary
# labels: gaze estimation labels
def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dict):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\gaze\\{}".format(video_name))
VIDEO_PATH = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
print('video path: ', VIDEO_PATH)
# load the face detection model
face_model = get_face_detector()
# load the facial landmark model
landmark_model = get_landmark_model()
# capture the video
cap = cv2.VideoCapture(VIDEO_PATH)
ret, img = cap.read()
size = img.shape
......@@ -702,7 +693,6 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic
head_up_left_count = 0
head_down_right_count = 0
head_down_left_count = 0
face_count = 0
detection_count = 0
......@@ -760,7 +750,6 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic
except:
ang2 = 90
# print('angle 1: {}, angle 2: {}'.format(ang1, ang2))
# checking for angle 1
if ang1 >= THRESHOLD:
isLookingDown = True
......@@ -833,7 +822,6 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic
frame_group_downleft_count = frame_group_details['downleft_count']
frame_group_front_count = frame_group_details['front_count']
print('detection count: ', frame_group_details['detection_count'])
group_detection_count = 1 if frame_group_details['detection_count'] == 0 else frame_group_details['detection_count']
......@@ -871,7 +859,15 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic
return frame_group_dict, labels
##### THIS SECTION WILL HANDLE SOME DATABASE OPERATIONS #####
# this method will save frame detections to the database
# this method will accept
# video_name: lecture video name to be processed
# returns
# frame_detections: the gaze estimation frame detections
def save_frame_detections(video_name):
# for testing purposes
......@@ -922,7 +918,13 @@ def save_frame_detections(video_name):
return frame_detections
# this method will save gaze frame groupings to the database
# this method accepts:
# video_name: the lecture video name
# frame_landmarks: the specific frames in the extracted set of frames from the lecture video
# frame_group_dict: the dictionary which contains the frame groups and the relevant gaze estimation labels for each frame group
def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# for testing purposes
......@@ -964,3 +966,82 @@ def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# save
new_lec_gaze_frame_groupings.save()
# this method will get gaze estimation correlations
# this method accepts:
# individual_lec_gaze: the gaze estimation details for each individual lecture
# lec_recorded_activity_data: the lecturer posture recognition details
# returns:
# correlations: the lecture gaze estimation and lecturer posture recognition correlations
def get_gaze_correlations(individual_lec_gaze, lec_recorded_activity_data):
# this variable will be used to store the correlations
correlations = []
limit = 10
data_index = ['lecture-{}'.format(i + 1) for i in range(len(individual_lec_gaze))]
# student gaze labels
student_gaze_labels = ['Up and Right', 'Up and Left', 'Down and Right', 'Down and Left', 'Front']
lecturer_activity_labels = ['seated', 'standing', 'walking']
# lecturer recorded data list (lecturer)
sitting_perct_list = []
standing_perct_list = []
walking_perct_list = []
# lecture gaze estimation data list (student)
upright_perct_list = []
upleft_perct_list = []
downright_perct_list = []
downleft_perct_list = []
front_perct_list = []
# loop through the lecturer recorded data (lecturer)
for data in lec_recorded_activity_data:
sitting_perct_list.append(int(data['seated_count']))
standing_perct_list.append(int(data['standing_count']))
walking_perct_list.append(int(data['walking_count']))
# loop through the lecture gaze estimation data (student)
for data in individual_lec_gaze:
upright_perct_list.append(int(data['looking_up_and_right_perct']))
upleft_perct_list.append(int(data['looking_up_and_left_perct']))
downright_perct_list.append(int(data['looking_down_and_right_perct']))
downleft_perct_list.append(int(data['looking_down_and_left_perct']))
front_perct_list.append(int(data['looking_front_perct']))
corr_data = {'Up and Right': upright_perct_list, 'Up and Left': upleft_perct_list, 'Down and Right': downright_perct_list,
'Down and Left': downleft_perct_list, 'Front': front_perct_list,
'seated': sitting_perct_list, 'standing': standing_perct_list, 'walking': walking_perct_list}
# create the dataframe
df = pd.DataFrame(corr_data, index=data_index)
# calculate the correlation
pd_series = ut.get_top_abs_correlations(df, limit)
for i in range(limit):
# this dictionary will get the pandas.Series object's indices and values separately
corr_dict = {}
index = pd_series.index[i]
# check whether the first index is a student gaze label
isStudentGaze = index[0] in student_gaze_labels
# check whether the second index is a lecturer activity
isLecturerAct = index[1] in lecturer_activity_labels
# if both are student and lecturer activities, add to the dictionary
if isStudentGaze & isLecturerAct:
corr_dict['index'] = index
corr_dict['value'] = pd_series.values[i]
# append the dictionary to the 'correlations' list
correlations.append(corr_dict)
# return the list
return correlations
import os
import cv2
import numpy as np
import shutil
from .facial_landmarks import get2DPoints
from .classes import pose
# Read Image
def estimatePose(request):
directory = request['directory']
images = request['images']
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
IMAGE_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\images")
SPEC_DIR = os.path.join(IMAGE_DIR, "{}".format(directory))
# new directory will be created to store pose estimations
new_dir_name = "static\\FirstApp\\poses\\{}".format(directory)
new_dir = os.path.join(BASE_DIR, new_dir_name)
face_count_response = 0
pose_response_list = []
if (os.path.isdir(new_dir)):
# delete the previous directory
shutil.rmtree(new_dir)
# create the new directory
os.mkdir(new_dir)
for im in images:
IMAGE_PATH = os.path.join(SPEC_DIR, "{}".format(im))
image = cv2.imread(IMAGE_PATH)
size = image.shape
left_corner, right_corner, nose_tip, right_mouth, left_mouth, chin, face_center_top, face_center_bottom, face_count = get2DPoints(image)
# if faces are found
if left_corner is not None:
# 3D model points.
model_points = np.array([
(0.0, 0.0, 0.0), # Nose tip
(0.0, -330.0, -65.0), # Chin
(-225.0, 170.0, -135.0), # Left eye left corner
(225.0, 170.0, -135.0), # Right eye right corner
(-150.0, -150.0, -125.0), # Left Mouth corner
(150.0, -150.0, -125.0) # Right mouth corner
])
# Camera internals
focal_length = size[1]
center = (size[1] / 2, size[0] / 2)
camera_matrix = np.array(
[[focal_length, 0, center[0]],
[0, focal_length, center[1]],
[0, 0, 1]], dtype="double"
)
# print("Camera Matrix :\n {0}".format(camera_matrix))
for i in range (face_count):
text = ''
# 2D image points. If you change the image, you need to change vector
image_points = np.array([
nose_tip[i],
chin[i],
left_corner[i],
right_corner[i],
left_mouth[i],
right_mouth[i]
], dtype="double")
dist_coeffs = np.zeros((4, 1)) # Assuming no lens distortion
(success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs,
flags=cv2.SOLVEPNP_ITERATIVE)
# Project a 3D point (0, 0, 1000.0) onto the image plane.
# We use this to draw a line sticking out of the nose
(nose_end_point2D, jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector, translation_vector,
camera_matrix, dist_coeffs)
# for p in image_points:
# cv2.circle(im, (int(p[0]), int(p[1])), 3, (0, 0, 255), -1)
p1 = (int(image_points[0][0]), int(image_points[0][1]))
p2 = (int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
if (p2[0] < face_center_top[i][0]):
text = 'RIGHT'
else:
text = 'LEFT'
cv2.putText(image, text, p2, cv2.FONT_HERSHEY_COMPLEX, 0.8, (0, 0, 255), 2)
cv2.line(image, p1, p2, (255, 0, 0), 2)
# saving the image
new_file = os.path.join(new_dir, im)
cv2.imwrite(new_file, image)
face_count_response += 1
# create a response object for the image
pose_response = {}
pose_response["directory"] = directory
pose_response["image"] = im
pose_response["label"] = text
pose_response_list.append(pose_response)
else:
print('No faces found')
# respond 'yes' to the command line prompt
p = os.popen('python manage.py collectstatic', "w")
p.write("yes")
# returning the static path
STATIC_POSE = os.path.join(BASE_DIR, "assets\\FirstApp\\pose")
STATIC_SPEC = os.path.join(STATIC_POSE, "{}".format(directory))
# if no images were created
if (face_count_response < 1):
shutil.rmtree(new_dir)
return "No faces were found"
return pose_response_list
\ No newline at end of file
import tensorflow as tf
import tensorflow.keras
from PIL import Image, ImageOps
import numpy as np
import cv2
import os
import math
import shutil
from . import custom_sorter as cs
def get_pose_estimations(video_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# face_classifier = cv2.CascadeClassifier(
# os.path.join(BASE_DIR, 'FirstApp\classifiers\haarcascade_frontalface_default.xml'))
# classifier_path = os.path.join(BASE_DIR, 'FirstApp\classifiers\Emotion_little_vgg.h5')
# classifier = load_model(classifier_path)
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
detections = []
frames = []
for frame_folder in os.listdir(EXTRACTED_DIR):
FRAME_DIR = os.path.join(EXTRACTED_DIR, frame_folder)
frame_details = {}
frame_details['frame'] = frame_folder
# for each detection in the frame directory
detected_images = []
for detection in os.listdir(FRAME_DIR):
if "frame" not in detection:
DETECTION_PATH = os.path.join(FRAME_DIR, detection)
image = cv2.imread(DETECTION_PATH)
# label = emotion_recognition(classifier, face_classifier, image)
detected_images.append(detection)
detections.append(detection)
frame_details['detections'] = detected_images
frames.append(frame_details)
sorted_frames = cs.custom_object_sorter(frames)
set_detections = set(detections)
list_set_detections = list(set_detections)
sorted_list_set_detections = cs.custom_sort(list_set_detections)
return sorted_frames, sorted_list_set_detections
# calculate pose estimations for a student
def calculate_pose_estimation_for_student(video_name, student, poses):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
POSE_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\poses")
POSE_VIDEO_DIR = os.path.join(POSE_DIR, video_name)
pose_count = 0
# checking whether the pose directory exists
if not os.path.isdir(POSE_VIDEO_DIR):
# create the pose directory
os.mkdir(POSE_VIDEO_DIR)
# loop through each frame of the directory
for frame in os.listdir(VIDEO_DIR):
FRAME_FOLDER = os.path.join(VIDEO_DIR, frame)
for detection in os.listdir(FRAME_FOLDER):
DETECTION_PATH = os.path.join(FRAME_FOLDER, detection)
# detection image
detection_img = cv2.imread(DETECTION_PATH)
# checking for the given student
if detection == student:
# select the correct pose detection
pose = poses[pose_count]
# extract the coordinates
x1 = int(pose['keypoints'][5]['position']['x'])
y1 = int(pose['keypoints'][5]['position']['y'])
x2 = int(pose['keypoints'][6]['position']['x'])
y2 = int(pose['keypoints'][6]['position']['y'])
# extract the head positions
x_diff = x1 - x2
y_diff = y1 - y2
x_pow = math.pow(x_diff, 2)
y_pow = math.pow(y_diff, 2)
summation = x_pow + y_pow
distance = int(math.sqrt(summation))
# defining the hyperparameter
param = 0.6
fraction = int(math.floor(distance * param)) if int(math.floor(distance * param)) > 0 else 1
middle_x = x2 + fraction
# middle_y = y2 - 20
middle_y = y2
head_x = middle_x
head_y = 0 if (middle_y - fraction) < 0 else (middle_y - fraction)
left_upper_x = 0 if (middle_x - fraction) < 0 else (middle_x - fraction)
# extract the new image
new_img = detection_img[head_y:head_y+fraction, left_upper_x:left_upper_x+distance]
# new directory name
# new_img_dir = os.path.join(POSE_VIDEO_DIR, frame)
new_img_dir = os.path.join(POSE_VIDEO_DIR, detection)
# check if the directory exists
if not os.path.isdir(new_img_dir):
# create the new directory
os.mkdir(new_img_dir)
# create new image name
frame_name = frame + ".png"
new_img_path = os.path.join(new_img_dir, frame_name)
# saving the new image
cv2.imwrite(new_img_path, new_img)
# increment the count
pose_count += 1
print('saving the image')
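# A hedged worked example (assumed keypoint positions) of the head-crop arithmetic above:
# with the two shoulder keypoints at (200, 300) and (140, 300), the shoulder distance is
# 60 px, the fraction is floor(60 * 0.6) = 36, and the crop taken from the detection image
# is 36 px tall (starting 36 px above the mid-shoulder point) and 60 px wide.
import math as _math
_x1, _y1, _x2, _y2 = 200, 300, 140, 300
_distance = int(_math.sqrt((_x1 - _x2) ** 2 + (_y1 - _y2) ** 2))               # 60
_fraction = int(_math.floor(_distance * 0.6)) or 1                             # 36
_middle_x, _middle_y = _x2 + _fraction, _y2                                    # (176, 300)
_head_y = 0 if (_middle_y - _fraction) < 0 else (_middle_y - _fraction)        # 264
_left_upper_x = 0 if (_middle_x - _fraction) < 0 else (_middle_x - _fraction)  # 140
# crop: detection_img[_head_y:_head_y + _fraction, _left_upper_x:_left_upper_x + _distance]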
def get_redundant_pairs(df):
'''Get diagonal and lower triangular pairs of correlation matrix'''
pairs_to_drop = set()
cols = df.columns
for i in range(0, df.shape[1]):
for j in range(0, i+1):
pairs_to_drop.add((cols[i], cols[j]))
return pairs_to_drop
def get_top_abs_correlations(df, n):
au_corr = df.corr().abs().unstack()
labels_to_drop = get_redundant_pairs(df)
au_corr = au_corr.drop(labels=labels_to_drop).sort_values(ascending=False)
return au_corr[0:n]
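# A minimal usage sketch of the two helpers above with a small assumed DataFrame; the
# highest absolute pairwise correlations come back as a pandas Series indexed by
# (column, column) tuples, which is what get_activity_correlations() and
# get_gaze_correlations() unpack.
import pandas as pd

_example_df = pd.DataFrame({
    'phone checking': [20, 35, 10],
    'listening': [60, 40, 80],
    'seated': [50, 70, 30],
})
_example_top_pairs = get_top_abs_correlations(_example_df, 2)
# e.g. ('listening', 'seated') -> 1.0, followed by the ~0.99 pairs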
from rest_framework import serializers
from .models import Teachers, RegisterUser
from .MongoModels import *
from .logic import classes
from . models import VideoMeta
"""
class TeachersSerializer(serializers.ModelSerializer):
This file is responsible for implementing the serializer classes for each model classes
class Meta:
model = Teachers
fields = ('firstName', 'lastName')
# fields = __all__
each serializer class extended by the djangorestframework's serializers class
class RegisterUserSerializer(serializers.ModelSerializer):
there should be an inner class named "Meta" that needs to implemented inside each serializer class
class Meta:
model = RegisterUser
fields = ('firstName', 'lastName', 'email', 'password')
there are two fields inside "Meta" class, as follows.
model: the relevant model class that needs to be serialized
fields: fields that need to be displayed when serializing the model class
('__all__' indicates that all the fields are required to be displayed)
"""
# image serializer
class ImageSerializer(serializers.Serializer):
metaData = serializers.CharField()
# pose serializer
class PoseSerializer(serializers.Serializer):
directory = serializers.CharField()
image_name = serializers.CharField()
text = serializers.CharField()
from rest_framework import serializers
from .MongoModels import *
from . models import VideoMeta
# lecture serializer
......
......@@ -234,9 +234,9 @@
//fetch the video time landmark details
fetch('http://127.0.0.1:8000/get-lecture-video-summary-time-landmarks/?video_name=' + global_video_name)
.then((res) => res.json())
.then((out) => assignTimeLandmarks(out.response))
.catch((err) => alert('error: ' + err));
//display the progress bar area
......@@ -251,7 +251,6 @@
}
//this function will handle the activity 'summary' button
$('#activity_summary_btn').click(function (e) {
......@@ -260,9 +259,9 @@
//fetch the activity summary details
fetch('http://127.0.0.1:8000/get-lecture-activity-summary/?video_name=' + global_video_name)
.then((res) => res.json())
.then((out) => activityFrameGroupPercentages(out, e))
.catch((err) => alert('error: ' + err));
});
......@@ -275,9 +274,9 @@
//fetch the activity summary details
fetch('http://127.0.0.1:8000/get-lecture-emotion-summary/?video_name=' + global_video_name)
.then((res) => res.json())
.then((out) => emotionFrameGroupPercentages(out, e))
.catch((err) => alert('error: ' + err));
});
......@@ -288,13 +287,12 @@
//fetch the activity summary details
fetch('http://127.0.0.1:8000/get-lecture-gaze-summary/?video_name=' + global_video_name)
.then((res) => res.json())
.then((out) => gazeFrameGroupPercentages(out, e))
.catch((err) => alert('error: ' + err));
});
//this function will handle the retrieved activity frame group percentages
function activityFrameGroupPercentages(response, e) {
......@@ -357,7 +355,6 @@
}
//this function will call the activity chart function
function renderActivityChart(activity_labels) {
......@@ -486,7 +483,6 @@
}
var chart = new CanvasJS.Chart("EmotionChartContainer", {
animationEnabled: true,
theme: "light2",
......@@ -570,7 +566,6 @@
}
var chart = new CanvasJS.Chart("GazeChartContainer", {
animationEnabled: true,
theme: "light2",
......@@ -609,7 +604,6 @@
}
//this function will render the chart for Activity statistics
function renderActivityStatistics() {
......@@ -626,7 +620,6 @@
];
for (let i = 0; i < label_length; i++) {
let label = activity_labels[i];
......@@ -634,7 +627,7 @@
for (let j = 0; j < activity_length; j++) {
let activity = individual_activities[j];
datapoints.push({label: "lecture " + (j+1), y: activity[label]});
datapoints.push({label: "lecture " + (j + 1), y: activity[label]});
}
......@@ -644,7 +637,7 @@
name: label,
markerType: "square",
{#xValueFormatString: "DD MMM, YYYY",#}
xValueFormatString: "lec " + (i+1),
xValueFormatString: "lec " + (i + 1),
color: getRandomColor(),
dataPoints: datapoints
};
......@@ -714,7 +707,7 @@
for (let j = 0; j < emotion_length; j++) {
let emotion = individual_emotions[j];
datapoints.push({label: "lecture " + (j+1), y: emotion[label]});
datapoints.push({label: "lecture " + (j + 1), y: emotion[label]});
}
let obj = {
......@@ -723,7 +716,7 @@
name: label,
markerType: "square",
{#xValueFormatString: "DD MMM, YYYY",#}
xValueFormatString: "Lec " + (i+1),
xValueFormatString: "Lec " + (i + 1),
color: colors[i - 1],
dataPoints: datapoints
};
......@@ -740,7 +733,7 @@
axisX: {
title: "Lecture",
{#valueFormatString: "DD MMM",#}
valueFormatString: "lec" ,
valueFormatString: "lec",
crosshair: {
enabled: true,
snapToDataPoint: true
......@@ -792,7 +785,7 @@
for (let j = 0; j < gaze_estimation_length; j++) {
let gaze_estimation = individual_gaze_estimations[j];
datapoints.push({label: "lecture " + (j+1), y: gaze_estimation[label]});
datapoints.push({label: "lecture " + (j + 1), y: gaze_estimation[label]});
}
let obj = {
......@@ -801,7 +794,7 @@
name: label,
markerType: "square",
{#xValueFormatString: "DD MMM, YYYY",#}
xValueFormatString: "Lec " + (i+1),
xValueFormatString: "Lec " + (i + 1),
color: colors[i - 1],
dataPoints: datapoints
};
......@@ -818,7 +811,7 @@
axisX: {
title: "Lecture",
{#valueFormatString: "DD MMM",#}
valueFormatString: "lec" ,
valueFormatString: "lec",
crosshair: {
enabled: true,
snapToDataPoint: true
......@@ -862,7 +855,7 @@
$('#student_behavior_view_summary_modal').modal();
});
});
//this function will handle the view summary option form
$('#view_summary_option_form').submit(function (e) {
......@@ -965,6 +958,235 @@
}
//this function will handle the advanced analysis for activity
$('#activity_advanced_btn').click(function () {
$('#activity_advanced_modal').modal();
//enable the loader
$('#activity_corr_loader').attr('hidden', false);
let lecturer = "{{ lecturer }}";
let option = $("input[name='option']:checked").val();
//fetch the correlation data
fetch('http://127.0.0.1:8000/get-activity-correlations/?lecturer=' + lecturer + '&option=' + option)
.then((res) => res.json())
.then((out) => displayActivityCorrelations(out.correlations))
.catch((err) => alert('error: ' + err));
});
//this function will handle the advanced analysis for emotion
$('#emotion_advanced_btn').click(function () {
$('#emotion_advanced_modal').modal();
//enable the loader
$('#emotion_corr_loader').attr('hidden', false);
let lecturer = "{{ lecturer }}";
let option = $("input[name='option']:checked").val();
//fetch the correlation data
fetch('http://127.0.0.1:8000/get-emotion-correlations/?lecturer=' + lecturer + "&option=" + option)
.then((res) => res.json())
.then((out) => displayEmotionCorrelations(out.correlations))
.catch((err) => alert('err: ' + err));
});
//this function will handle the advanced analysis for gaze
$('#gaze_advanced_btn').click(function () {
$('#gaze_advanced_modal').modal();
//enable the loader
$('#gaze_corr_loader').attr('hidden', false);
let lecturer = "{{ lecturer }}";
let option = $("input[name='option']:checked").val();
//fetch the correlation data
fetch('http://127.0.0.1:8000/get-gaze-correlations/?lecturer=' + lecturer + "&option=" + option)
.then((res) => res.json())
.then((out) => displayGazeCorrelations(out.correlations))
.catch((err) => alert('err: ' + err));
});
//this method will display the activity correlations in a table
function displayActivityCorrelations(correlations) {
let htmlString = "";
//create the html content for the activity correlation table
for (let i = 0; i < correlations.length; i++) {
let corr = correlations[i];
let indices = corr.index;
let value = corr.value;
value = Math.round(value * 100);
if (value <= 100 && value > 80) {
htmlString += "<tr class='bg-success text-white'>";
}
else if (value <= 80 && value > 60) {
htmlString += "<tr class='bg-primary text-white'>";
}
else if (value <= 60 && value > 40) {
htmlString += "<tr class='bg-warning text-white'>";
}
else if (value <= 40 && value > 20) {
htmlString += "<tr class='bg-danger text-white'>";
}
else if (value <= 20 && value > 0) {
htmlString += "<tr class='bg-dark text-white'>";
}
//create a <tr> to be inserted
htmlString += "<td>";
htmlString += indices[0];
htmlString += "</td>";
htmlString += "<td>";
htmlString += indices[1];
htmlString += "</td>";
htmlString += "<td>";
htmlString += value;
htmlString += "</td>";
htmlString += "</tr>";
}
//append to the <tbody>
$('#activity_corr_tbody').append(htmlString);
//hide the loader
$('#activity_corr_loader').hide();
//show the table
$('#activity_corr_table').attr('hidden', false);
}
//this method will display the emotion correlations in a table
function displayEmotionCorrelations(correlations) {
let htmlString = "";
//create the html content for the emotion correlation table
for (let i = 0; i < correlations.length; i++) {
let corr = correlations[i];
let indices = corr.index;
let value = corr.value;
value = Math.round(value * 100);
if (value <= 100 && value > 80) {
htmlString += "<tr class='bg-success text-white'>";
}
else if (value <= 80 && value > 60) {
htmlString += "<tr class='bg-primary text-white'>";
}
else if (value <= 60 && value > 40) {
htmlString += "<tr class='bg-warning text-white'>";
}
else if (value <= 40 && value > 20) {
htmlString += "<tr class='bg-danger text-white'>";
}
else if (value <= 20 && value > 0) {
htmlString += "<tr class='bg-dark text-white'>";
}
//create a <tr> to be inserted
htmlString += "<td>";
htmlString += indices[0];
htmlString += "</td>";
htmlString += "<td>";
htmlString += indices[1];
htmlString += "</td>";
htmlString += "<td>";
htmlString += value;
htmlString += "</td>";
htmlString += "</tr>";
}
//append to the <tbody>
$('#emotion_corr_tbody').append(htmlString);
//hide the loader
$('#emotion_corr_loader').hide();
//show the table
$('#emotion_corr_table').attr('hidden', false);
}
//this method will display the gaze correlations in a table
function displayGazeCorrelations(correlations) {
let htmlString = "";
//create the html content for the gaze correlation table
for (let i = 0; i < correlations.length; i++) {
let corr = correlations[i];
let indices = corr.index;
let value = corr.value;
value = Math.round(value * 100);
if (value <= 100 && value > 80) {
htmlString += "<tr class='bg-success text-white'>";
}
else if (value <= 80 && value > 60) {
htmlString += "<tr class='bg-primary text-white'>";
}
else if (value <= 60 && value > 40) {
htmlString += "<tr class='bg-warning text-white'>";
}
else if (value <= 40 && value > 20) {
htmlString += "<tr class='bg-danger text-white'>";
}
else if (value <= 20 && value > 0) {
htmlString += "<tr class='bg-dark text-white'>";
}
//create a <tr> to be inserted
htmlString += "<td>";
htmlString += indices[0];
htmlString += "</td>";
htmlString += "<td>";
htmlString += indices[1];
htmlString += "</td>";
htmlString += "<td>";
htmlString += value;
htmlString += "</td>";
htmlString += "</tr>";
}
//append to the <tbody>
$('#gaze_corr_tbody').append(htmlString);
//hide the loader
$('#gaze_corr_loader').hide();
//show the table
$('#gaze_corr_table').attr('hidden', false);
}
});
</script>
......@@ -1189,6 +1411,13 @@
</button>
</div>
<!-- end of stats button -->
<!-- button to view advanced analysis -->
<button type="button" class="btn btn-danger float-right mr-2"
id="activity_advanced_btn">
Advanced Analysis
</button>
<!-- end of button to view advanced analysis -->
</div>
</div>
<!-- end of Activity card -->
......@@ -1264,6 +1493,14 @@
</button>
</div>
<!-- end of stats button -->
<!-- button to view advanced analysis -->
<button type="button" class="btn btn-danger float-right mr-2"
id="emotion_advanced_btn">
Advanced Analysis
</button>
<!-- end of button to view advanced analysis -->
</div>
</div>
......@@ -1333,6 +1570,14 @@
</button>
</div>
<!-- end of stats button -->
<!-- button to view advanced analysis -->
<button type="button" class="btn btn-danger float-right mr-2"
id="gaze_advanced_btn">
Advanced Analysis
</button>
<!-- end of button to view advanced analysis -->
</div>
</div>
......@@ -1400,10 +1645,13 @@
<hr>
<!-- button to view activity summary -->
<button type="button" class="btn btn-primary float-right" id="activity_summary_btn">
<button type="button" class="btn btn-primary float-right"
id="activity_summary_btn">
Summary
</button>
<!-- end of button to view activity summary -->
</li>
<!-- end of the activity list item -->
......@@ -1469,10 +1717,13 @@
<hr>
<!-- button to view emotion summary -->
<button type="button" class="btn btn-primary float-right" id="emotion_summary_btn">
<button type="button" class="btn btn-primary float-right"
id="emotion_summary_btn">
Summary
</button>
<!-- end of button to view emotion summary -->
</li>
<!-- end of the emotion list item -->
......@@ -1513,7 +1764,8 @@
<span class="float-right" id="looking_down_right_perct">50%</span>
<div class="progress mb-4">
<div class="progress-bar bg-success" role="progressbar" id="looking_down_right_width"
<div class="progress-bar bg-success" role="progressbar"
id="looking_down_right_width"
style="width: 60%"
aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div>
</div>
......@@ -1544,7 +1796,8 @@
<!-- button to view gaze summary -->
<button type="button" class="btn btn-primary float-right" id="gaze_summary_btn">
<button type="button" class="btn btn-primary float-right"
id="gaze_summary_btn">
Summary
</button>
<!-- end of button to view gaze summary -->
......@@ -1554,7 +1807,6 @@
<!-- end of the gaze list item -->
</ul>
......@@ -1884,7 +2136,8 @@
</div>
<div class="custom-control custom-radio mt-2">
<input type="radio" class="custom-control-input" id="customRadio3" name="option" value="10000">
<input type="radio" class="custom-control-input" id="customRadio3" name="option"
value="10000">
<label class="custom-control-label" for="customRadio3">All</label>
</div>
......@@ -1955,7 +2208,8 @@
<!-- gaze estimation Modal-->
<div class="modal fade" id="gaze_estimation_stats_modal" tabindex="-1" role="dialog" aria-labelledby="exampleModalLabel"
<div class="modal fade" id="gaze_estimation_stats_modal" tabindex="-1" role="dialog"
aria-labelledby="exampleModalLabel"
aria-hidden="true">
<div class="modal-dialog" role="document" style="max-width: 1400px">
<div class="modal-content">
......@@ -1977,6 +2231,139 @@
<!-- end of activity statistics modal -->
<!-- activity advanced analysis modal -->
<div class="modal fade" id="activity_advanced_modal" tabindex="-1" role="dialog" aria-labelledby="exampleModalLabel"
aria-hidden="true">
<div class="modal-dialog" role="document" style="max-width: 700px">
<div class="modal-content">
<div class="modal-header">
<h5 class="modal-title" id="exampleModalLabel">Activity Advanced Analysis</h5>
<button class="close" type="button" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body text-center">
<h3 class="font-weight-bold">Student Activity VS. Lecturer Activity</h3>
<!-- ajax loader -->
<div class="text-center" id="activity_corr_loader" hidden>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}" alt="Loader">
</div>
<!-- correlation table -->
<table class="table table-striped" id="activity_corr_table" hidden>
<thead>
<tr>
<th>Student Activity</th>
<th>Lecturer Activity</th>
<th>Correlation Score</th>
</tr>
</thead>
<tbody id="activity_corr_tbody">
</tbody>
</table>
<!-- end of correlation table -->
</div>
<div class="modal-footer">
<button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
</div>
</div>
</div>
</div>
<!-- end of activity advanced analysis modal -->
<!-- emotion advanced analysis modal -->
<div class="modal fade" id="emotion_advanced_modal" tabindex="-1" role="dialog" aria-labelledby="exampleModalLabel"
aria-hidden="true">
<div class="modal-dialog" role="document" style="max-width: 700px">
<div class="modal-content">
<div class="modal-header">
<h5 class="modal-title" id="exampleModalLabel">Emotion Advanced Analysis</h5>
<button class="close" type="button" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body text-center">
<h3 class="font-weight-bold">Student Emotions VS. Lecturer Activity</h3>
<!-- ajax loader -->
<div class="text-center" id="emotion_corr_loader" hidden>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}" alt="Loader">
</div>
<!-- correlation table -->
<table class="table table-striped" id="emotion_corr_table" hidden>
<thead>
<tr>
<th>Student Emotion</th>
<th>Lecturer Activity</th>
<th>Correlation Score</th>
</tr>
</thead>
<tbody id="emotion_corr_tbody">
</tbody>
</table>
<!-- end of correlation table -->
</div>
<div class="modal-footer">
<button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
</div>
</div>
</div>
</div>
<!-- end of emotion advanced analysis modal -->
<!-- gaze advanced analysis modal -->
<div class="modal fade" id="gaze_advanced_modal" tabindex="-1" role="dialog" aria-labelledby="exampleModalLabel"
aria-hidden="true">
<div class="modal-dialog" role="document" style="max-width: 700px">
<div class="modal-content">
<div class="modal-header">
<h5 class="modal-title" id="exampleModalLabel">Gaze Advanced Analysis</h5>
<button class="close" type="button" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body text-center">
<h3 class="font-weight-bold">Student Gaze estimation VS. Lecturer Activity</h3>
<!-- ajax loader -->
<div class="text-center" id="gaze_corr_loader" hidden>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}" alt="Loader">
</div>
<!-- correlation table -->
<table class="table table-striped" id="gaze_corr_table" hidden>
<thead>
<tr>
<th>Student Gaze estimation</th>
<th>Lecturer Activity</th>
<th>Correlation Score</th>
</tr>
</thead>
<tbody id="gaze_corr_tbody">
</tbody>
</table>
<!-- end of correlation table -->
</div>
<div class="modal-footer">
<button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
</div>
</div>
</div>
</div>
<!-- end of gaze advanced analysis modal -->
{% endblock %}
<!--scripts-->
{% block 'scripts' %}
......
......@@ -30,7 +30,11 @@
var global_video_name = '';
var global_lecturer_subject_index = 0;
var global_lecture_date = '';
var global_lecturer_video_name = '';
var lecturer_fps = 0;
//jquery
$(document).ready(function () {
......@@ -242,7 +246,16 @@
//assign the video src
$('#student_video').attr('src', video_src);
$('#integrate_modal').modal();
//fetch the lecture recorded video name
fetch('http://127.0.0.1:8000/get-lecture-recorded-video-name/?lecturer=' + global_lecturer + '&subject=' + global_subject + '&date=' + global_lecture_date)
.then((res) => res.json())
.then((out) => assignLecturerRecordedVideoName(out))
.catch((err) => alert('error: ' + err));
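//the endpoint is expected to return a JSON object with a 'video_name' field, which assignLecturerRecordedVideoName() reads below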
{#global_lecturer_video_name = "Test_1.mp4";#}
{#global_lecturer_video_name = "Test_2.mp4";#}
{#global_lecturer_video_name = "Test_3.mp4";#}
//fetch data from the API
......@@ -253,6 +266,21 @@
});
//assign the lecturer recorded video name
function assignLecturerRecordedVideoName(res) {
global_lecturer_video_name = res.video_name;
//define the lecturer video src
let lecturer_video_src = "{% static '' %}FirstApp/lecturer_videos/" + global_lecturer_video_name;
//assign the video src
$('#lecturer_video').attr('src', lecturer_video_src);
$('#integrate_modal').modal();
}
//this function will load the activity recognition for frames
function displayActivityRecognitionForFrame(response) {
//hide the loader
......@@ -302,25 +330,91 @@
//append the html
$('#student_video_column').append(htmlString);
//start retrieving lecturer activity frame recognition
fetch('http://127.0.0.1:8000/lecturer/get-lecturer-video-frame-recognitions/?video_name=' + global_lecturer_video_name)
.then((res) => res.json())
.then((out) => displayLecturerActivityRecognitionForFrame(out))
.catch((err) => alert('error: ' + err))
}
//this function will load the activity recognition for frames
function displayLecturerActivityRecognitionForFrame(response) {
//hide the loader
$('#lecturer_video_progress_loader').attr('hidden', true);
//show the progress bars
$('#lecturer_video_progress').attr('hidden', false);
//creating the html string
let htmlString = "";
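//note: despite its name, 'lecturer_fps' ends up holding the per-frame interval in milliseconds (1000 / fps); it is later used as the lecturer setInterval delay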
let duration = 1000 / response.fps;
lecturer_fps = Math.round(duration, 0);
console.log('lecturer fps: ', lecturer_fps);
//creating the html string, iteratively
response.frame_recognitions.map((frame) => {
let frame_name = frame.frame_name;
let sitting_perct = Math.round(frame.sitting_perct, 0);
let standing_perct = Math.round(frame.standing_perct, 0);
{#let listen_perct = Math.round(frame.listening_perct, 0);#}
let walking_perct = Math.round(frame.walking_perct, 0);
//append to the html string
//sitting
htmlString += "<div class='progress_area' id='progress_lecturer_" + frame_name + "' hidden>";
htmlString += "<h4 class='small font-weight-bold'>Sitting</h4>";
htmlString += "<span class='float-right' id='sitting_instant_" + frame_name + "'>" + sitting_perct + "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-warning' role='progressbar' id='sitting_instant_value_" + frame_name + "' style='width: " + sitting_perct + "%' aria-valuenow='40' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>";
//standing
htmlString += "<h4 class='small font-weight-bold'>Standing</h4>";
htmlString += "<span class='float-right' id='standing_instant_" + frame_name + "'>" + standing_perct + "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar' role='progressbar' id='standing_instant_value_" + frame_name + "' style='width: " + standing_perct + "%' aria-valuenow='0' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>";
//walking
htmlString += "<h4 class='small font-weight-bold'>Walking</h4>";
htmlString += "<span class='float-right' id='walking_instant_" + frame_name + "'>" + walking_perct + "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-info' role='progressbar' id='walking_instant_value_" + frame_name + "' style='width: " + walking_perct + "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>";
//ending the progress area
htmlString += "</div>";
});
//append the html
$('#lecturer_video_column').append(htmlString);
}
//to handle the 'integrate' play button
$('#play_integrate_button').click(function () {
let video = $('video')[0];
let video1 = $('video')[1];
let test_video = document.getElementsByTagName('video')[0];
let play_class = 'btn btn-outline-danger play';
let pause_class = 'btn btn-outline-danger pause';
let count = 0;
let count_lecturer = 0;
let classes = $(this).attr('class');
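//step through the student frame progress areas roughly every 33 ms (~30 fps)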
let video_interval = setInterval(() => {
let talking_number = Math.round(Math.random() * 100, 0);
let phone_number = Math.round(Math.random() * 100, 0);
let note_number = Math.round(Math.random() * 100, 0);
let listening_number = Math.round(Math.random() * 100, 0);
//=====STUDENTS COLUMN=====
//get the relevant progress area
let progress_area = "progress_frame-" + count;
......@@ -335,35 +429,53 @@
//replace the current progress area with the selected one
$('#student_video_progress').html(progress_area_html);
//increment the count
count++;
//setting the values
/*
$('#talking_instant').text(talking_number + '%');
$('#phone_checking_instant').text(phone_number + '%');
$('#note_taking_instant').text(note_number + '%');
$('#listening_instant').text(listening_number + '%');
console.log('current frame (student): ', count);
//setting the width
$('#talking_instant_value').width(talking_number + '%');
$('#phone_checking_instant_value').width(phone_number + '%');
$('#note_taking_instant_value').width(note_number + '%');
$('#listening_instant_value').width(listening_number + '%');
*/
}, 33);
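//step through the lecturer frame progress areas using the per-frame delay derived from the lecturer video fps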
let video_interval_lecturer = setInterval(() => {
//=====LECTURER COLUMN=====
//get the relevant progress area
let progress_area_lecturer = "progress_lecturer_frame-" + count_lecturer;
let progress_area_id_lecturer = "#" + progress_area_lecturer;
//find the corresponding progress area
let progress_area_html_lecturer = document.getElementById(progress_area_lecturer);
//display the retrieved progress area
$(progress_area_id_lecturer).attr('hidden', false);
//replace the current progress area with the selected one
$('#lecturer_video_progress').html(progress_area_html_lecturer);
//increment the count
count_lecturer++;
console.log('current frame (lecturer): ', count_lecturer);
}, lecturer_fps);
//check for the current class
if (classes === play_class) {
$(this).text('Pause');
$(this).attr('class', pause_class);
video.play();
video1.play();
} else if (classes === pause_class) {
$(this).text('Play');
$(this).attr('class', play_class);
video.pause();
video1.pause();
}
//function to do when the video is paused
......@@ -373,7 +485,13 @@
video.onended = function (e) {
//stop changing the activity values
clearInterval(video_interval);
}
};
//function to do when the lecturer video is ended
video1.onended = function (e) {
//stop changing the activity values
clearInterval(video_interval_lecturer);
};
});
......@@ -422,11 +540,11 @@
{% load static %}
<!-- Page Heading -->
{# <div class="d-sm-flex align-items-center justify-content-between mb-4">#}
{# <h1 class="h3 mb-0 text-gray-800">Student Activity Recognition</h1>#}
{# <button type="button" data-target="#generateReportModal" data-toggle="modal" class="d-none d-sm-inline-block btn btn-sm btn-primary shadow-sm" id="generate_report_before" disabled><i#}
{# class="fas fa-download fa-sm text-white-50"></i> Generate Report</button>#}
{# </div>#}
<div class="d-sm-flex align-items-center justify-content-between mb-4">
<h1 class="h3 mb-0 text-gray-800">Student Activity Recognition</h1>
{# <button type="button" data-target="#generateReportModal" data-toggle="modal" class="d-none d-sm-inline-block btn btn-sm btn-primary shadow-sm" id="generate_report_before" disabled><i#}
{# class="fas fa-download fa-sm text-white-50"></i> Generate Report</button>#}
</div>
<!--first row -->
......@@ -633,7 +751,6 @@
</div>
</div>
......@@ -1020,24 +1137,74 @@
<!--end of 1st column -->
<!--2nd column -->
<div class="col-md-6">
<div class="col-md-6" id="lecturer_video_column">
<div class="text-center">
<span class="h3 font-italic font-weight-bold">Lecturer Performance</span>
</div>
<!--display lecture video -->
<div class="text-center m-3" id="lecturer_video_section">
<!--temporary text -->
<div class="text-center" id="temp_lecturer_text">
<span class="font-italic">No video was found</span>
{# <!--temporary text -->#}
{# <div class="text-center" id="temp_lecturer_text">#}
{# <span class="font-italic">No video was found</span>#}
{# </div>#}
<!--display lecturer video -->
<div class="text-center m-3" id="lecturer_video_section">
<video width="500" height="300" id="lecturer_video" controls>
<source src="#"
type="video/mp4">
Your browser does not support the video tag.
</video>
</div>
<!--end of lecturer video section -->
<!-- ajax loader section -->
<div class="text-center mt-3" id="lecturer_video_progress_loader">
<img src="{% static 'FirstApp/images/ajax-loader-1.gif' %}" alt="loader">
</div>
<!--progress bar section -->
<div class="progress_area" id="lecturer_video_progress" hidden>
<!--sitting -->
<h4 class="small font-weight-bold">Sitting</h4>
<span class="float-right" id="sitting_instant">0%</span>
<div class="progress mb-4">
<div class="progress-bar bg-warning" role="progressbar"
id="sitting_instant_value"
{# style="width: 0%"#}
aria-valuenow="40" aria-valuemin="0" aria-valuemax="100"></div>
</div>
<!--standing -->
<h4 class="small font-weight-bold">Standing</h4>
<span class="float-right" id="standing_instant">0%</span>
<div class="progress mb-4">
<div class="progress-bar" role="progressbar"
id="standing_instant_value"
{# style="width: 0%"#}
aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div>
</div>
<!--walking-->
<h4 class="small font-weight-bold">Walking</h4>
<span class="float-right" id="walking_instant">0%</span>
<div class="progress mb-4">
<div class="progress-bar bg-info" role="progressbar"
id="walking_instant_value"
{# style="width: 80%"#}
aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div>
</div>
</div>
<!--end of progress bar section -->
<!-- video -->
{# <video width="500" height="300" id="lecturer_video" controls>#}
{# <source src="#"#}
{# type="video/mp4">#}
{# Your browser does not support the video tag.#}
{# </video>#}
</div>
<!--end of lecture video section -->
......
<!DOCTYPE html>
<html lang="en">
<head>
<link rel="stylesheet" href="style.css">
<title>{% block title %}My amazing site{% endblock %}</title>
</head>
<body>
<div id="sidebar">
{% block sidebar %}
<ul>
<li><a href="/">Home</a></li>
<li><a href="/blog/">Blog</a></li>
</ul>
{% endblock %}
</div>
<div id="content">
{% block content %}{% endblock %}
</div>
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<meta name="description" content="">
<meta name="author" content="">
<title>SB Admin 2 - Blank</title>
<!-- Custom fonts for this template-->
<link href="vendor/fontawesome-free/css/all.min.css" rel="stylesheet" type="text/css">
<link href="https://fonts.googleapis.com/css?family=Nunito:200,200i,300,300i,400,400i,600,600i,700,700i,800,800i,900,900i" rel="stylesheet">
<!-- Custom styles for this template-->
<link href="css/sb-admin-2.min.css" rel="stylesheet">
</head>
<body id="page-top">
<!-- Page Wrapper -->
<div id="wrapper">
<!-- Sidebar -->
<ul class="navbar-nav bg-gradient-primary sidebar sidebar-dark accordion" id="accordionSidebar">
<!-- Sidebar - Brand -->
<a class="sidebar-brand d-flex align-items-center justify-content-center" href="index.html">
<div class="sidebar-brand-icon rotate-n-15">
<i class="fas fa-laugh-wink"></i>
</div>
<div class="sidebar-brand-text mx-3">SB Admin <sup>2</sup></div>
</a>
<!-- Divider -->
<hr class="sidebar-divider my-0">
<!-- Nav Item - Dashboard -->
<li class="nav-item">
<a class="nav-link" href="index.html">
<i class="fas fa-fw fa-tachometer-alt"></i>
<span>Dashboard</span></a>
</li>
<!-- Divider -->
<hr class="sidebar-divider">
<!-- Heading -->
<div class="sidebar-heading">
Interface
</div>
<!-- Nav Item - Pages Collapse Menu -->
<li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseTwo" aria-expanded="true" aria-controls="collapseTwo">
<i class="fas fa-fw fa-cog"></i>
<span>Components</span>
</a>
<div id="collapseTwo" class="collapse" aria-labelledby="headingTwo" data-parent="#accordionSidebar">
<div class="bg-white py-2 collapse-inner rounded">
<h6 class="collapse-header">Custom Components:</h6>
<a class="collapse-item" href="gaze.html">Buttons</a>
<a class="collapse-item" href="pose.html">Cards</a>
</div>
</div>
</li>
<!-- Nav Item - Utilities Collapse Menu -->
<li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseUtilities" aria-expanded="true" aria-controls="collapseUtilities">
<i class="fas fa-fw fa-wrench"></i>
<span>Utilities</span>
</a>
<div id="collapseUtilities" class="collapse" aria-labelledby="headingUtilities" data-parent="#accordionSidebar">
<div class="bg-white py-2 collapse-inner rounded">
<h6 class="collapse-header">Custom Utilities:</h6>
<a class="collapse-item" href="utilities-color.html">Colors</a>
<a class="collapse-item" href="utilities-border.html">Borders</a>
<a class="collapse-item" href="utilities-animation.html">Animations</a>
<a class="collapse-item" href="utilities-other.html">Other</a>
</div>
</div>
</li>
<!-- Divider -->
<hr class="sidebar-divider">
<!-- Heading -->
<div class="sidebar-heading">
Addons
</div>
<!-- Nav Item - Pages Collapse Menu -->
<li class="nav-item active">
<a class="nav-link" href="#" data-toggle="collapse" data-target="#collapsePages" aria-expanded="true" aria-controls="collapsePages">
<i class="fas fa-fw fa-folder"></i>
<span>Pages</span>
</a>
<div id="collapsePages" class="collapse show" aria-labelledby="headingPages" data-parent="#accordionSidebar">
<div class="bg-white py-2 collapse-inner rounded">
<h6 class="collapse-header">Login Screens:</h6>
<a class="collapse-item" href="login.html">Login</a>
<a class="collapse-item" href="register.html">Register</a>
<a class="collapse-item" href="forgot-password.html">Forgot Password</a>
<div class="collapse-divider"></div>
<h6 class="collapse-header">Other Pages:</h6>
<a class="collapse-item" href="404.html">404 Page</a>
<a class="collapse-item active" href="blank.html">Blank Page</a>
</div>
</div>
</li>
<!-- Nav Item - Charts -->
<li class="nav-item">
<a class="nav-link" href="charts.html">
<i class="fas fa-fw fa-chart-area"></i>
<span>Charts</span></a>
</li>
<!-- Nav Item - Tables -->
<li class="nav-item">
<a class="nav-link" href="tables.html">
<i class="fas fa-fw fa-table"></i>
<span>Tables</span></a>
</li>
<!-- Divider -->
<hr class="sidebar-divider d-none d-md-block">
<!-- Sidebar Toggler (Sidebar) -->
<div class="text-center d-none d-md-inline">
<button class="rounded-circle border-0" id="sidebarToggle"></button>
</div>
</ul>
<!-- End of Sidebar -->
<!-- Content Wrapper -->
<div id="content-wrapper" class="d-flex flex-column">
<!-- Main Content -->
<div id="content">
<!-- Topbar -->
<nav class="navbar navbar-expand navbar-light bg-white topbar mb-4 static-top shadow">
<!-- Sidebar Toggle (Topbar) -->
<button id="sidebarToggleTop" class="btn btn-link d-md-none rounded-circle mr-3">
<i class="fa fa-bars"></i>
</button>
<!-- Topbar Search -->
<form class="d-none d-sm-inline-block form-inline mr-auto ml-md-3 my-2 my-md-0 mw-100 navbar-search">
<div class="input-group">
<input type="text" class="form-control bg-light border-0 small" placeholder="Search for..." aria-label="Search" aria-describedby="basic-addon2">
<div class="input-group-append">
<button class="btn btn-primary" type="button">
<i class="fas fa-search fa-sm"></i>
</button>
</div>
</div>
</form>
<!-- Topbar Navbar -->
<ul class="navbar-nav ml-auto">
<!-- Nav Item - Search Dropdown (Visible Only XS) -->
<li class="nav-item dropdown no-arrow d-sm-none">
<a class="nav-link dropdown-toggle" href="#" id="searchDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
<i class="fas fa-search fa-fw"></i>
</a>
<!-- Dropdown - Messages -->
<div class="dropdown-menu dropdown-menu-right p-3 shadow animated--grow-in" aria-labelledby="searchDropdown">
<form class="form-inline mr-auto w-100 navbar-search">
<div class="input-group">
<input type="text" class="form-control bg-light border-0 small" placeholder="Search for..." aria-label="Search" aria-describedby="basic-addon2">
<div class="input-group-append">
<button class="btn btn-primary" type="button">
<i class="fas fa-search fa-sm"></i>
</button>
</div>
</div>
</form>
</div>
</li>
<!-- Nav Item - Alerts -->
<li class="nav-item dropdown no-arrow mx-1">
<a class="nav-link dropdown-toggle" href="#" id="alertsDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
<i class="fas fa-bell fa-fw"></i>
<!-- Counter - Alerts -->
<span class="badge badge-danger badge-counter">3+</span>
</a>
<!-- Dropdown - Alerts -->
<div class="dropdown-list dropdown-menu dropdown-menu-right shadow animated--grow-in" aria-labelledby="alertsDropdown">
<h6 class="dropdown-header">
Alerts Center
</h6>
<a class="dropdown-item d-flex align-items-center" href="#">
<div class="mr-3">
<div class="icon-circle bg-primary">
<i class="fas fa-file-alt text-white"></i>
</div>
</div>
<div>
<div class="small text-gray-500">December 12, 2019</div>
<span class="font-weight-bold">A new monthly report is ready to download!</span>
</div>
</a>
<a class="dropdown-item d-flex align-items-center" href="#">
<div class="mr-3">
<div class="icon-circle bg-success">
<i class="fas fa-donate text-white"></i>
</div>
</div>
<div>
<div class="small text-gray-500">December 7, 2019</div>
$290.29 has been deposited into your account!
</div>
</a>
<a class="dropdown-item d-flex align-items-center" href="#">
<div class="mr-3">
<div class="icon-circle bg-warning">
<i class="fas fa-exclamation-triangle text-white"></i>
</div>
</div>
<div>
<div class="small text-gray-500">December 2, 2019</div>
Spending Alert: We've noticed unusually high spending for your account.
</div>
</a>
<a class="dropdown-item text-center small text-gray-500" href="#">Show All Alerts</a>
</div>
</li>
<!-- Nav Item - Messages -->
<li class="nav-item dropdown no-arrow mx-1">
<a class="nav-link dropdown-toggle" href="#" id="messagesDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
<i class="fas fa-envelope fa-fw"></i>
<!-- Counter - Messages -->
<span class="badge badge-danger badge-counter">7</span>
</a>
<!-- Dropdown - Messages -->
<div class="dropdown-list dropdown-menu dropdown-menu-right shadow animated--grow-in" aria-labelledby="messagesDropdown">
<h6 class="dropdown-header">
Message Center
</h6>
<a class="dropdown-item d-flex align-items-center" href="#">
<div class="dropdown-list-image mr-3">
<img class="rounded-circle" src="https://source.unsplash.com/fn_BT9fwg_E/60x60" alt="">
<div class="status-indicator bg-success"></div>
</div>
<div class="font-weight-bold">
<div class="text-truncate">Hi there! I am wondering if you can help me with a problem I've been having.</div>
<div class="small text-gray-500">Emily Fowler · 58m</div>
</div>
</a>
<a class="dropdown-item d-flex align-items-center" href="#">
<div class="dropdown-list-image mr-3">
<img class="rounded-circle" src="https://source.unsplash.com/AU4VPcFN4LE/60x60" alt="">
<div class="status-indicator"></div>
</div>
<div>
<div class="text-truncate">I have the photos that you ordered last month, how would you like them sent to you?</div>
<div class="small text-gray-500">Jae Chun · 1d</div>
</div>
</a>
<a class="dropdown-item d-flex align-items-center" href="#">
<div class="dropdown-list-image mr-3">
<img class="rounded-circle" src="https://source.unsplash.com/CS2uCrpNzJY/60x60" alt="">
<div class="status-indicator bg-warning"></div>
</div>
<div>
<div class="text-truncate">Last month's report looks great, I am very happy with the progress so far, keep up the good work!</div>
<div class="small text-gray-500">Morgan Alvarez · 2d</div>
</div>
</a>
<a class="dropdown-item d-flex align-items-center" href="#">
<div class="dropdown-list-image mr-3">
<img class="rounded-circle" src="https://source.unsplash.com/Mv9hjnEUHR4/60x60" alt="">
<div class="status-indicator bg-success"></div>
</div>
<div>
<div class="text-truncate">Am I a good boy? The reason I ask is because someone told me that people say this to all dogs, even if they aren't good...</div>
<div class="small text-gray-500">Chicken the Dog · 2w</div>
</div>
</a>
<a class="dropdown-item text-center small text-gray-500" href="#">Read More Messages</a>
</div>
</li>
<div class="topbar-divider d-none d-sm-block"></div>
<!-- Nav Item - User Information -->
<li class="nav-item dropdown no-arrow">
<a class="nav-link dropdown-toggle" href="#" id="userDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
<span class="mr-2 d-none d-lg-inline text-gray-600 small">Valerie Luna</span>
<img class="img-profile rounded-circle" src="https://source.unsplash.com/QAB-WJcbgJk/60x60">
</a>
<!-- Dropdown - User Information -->
<div class="dropdown-menu dropdown-menu-right shadow animated--grow-in" aria-labelledby="userDropdown">
<a class="dropdown-item" href="#">
<i class="fas fa-user fa-sm fa-fw mr-2 text-gray-400"></i>
Profile
</a>
<a class="dropdown-item" href="#">
<i class="fas fa-cogs fa-sm fa-fw mr-2 text-gray-400"></i>
Settings
</a>
<a class="dropdown-item" href="#">
<i class="fas fa-list fa-sm fa-fw mr-2 text-gray-400"></i>
Activity Log
</a>
<div class="dropdown-divider"></div>
<a class="dropdown-item" href="#" data-toggle="modal" data-target="#logoutModal">
<i class="fas fa-sign-out-alt fa-sm fa-fw mr-2 text-gray-400"></i>
Logout
</a>
</div>
</li>
</ul>
</nav>
<!-- End of Topbar -->
<!-- Begin Page Content -->
<div class="container-fluid">
<!-- Page Heading -->
<h1 class="h3 mb-4 text-gray-800">Blank Page</h1>
</div>
<!-- /.container-fluid -->
</div>
<!-- End of Main Content -->
<!-- Footer -->
<footer class="sticky-footer bg-white">
<div class="container my-auto">
<div class="copyright text-center my-auto">
<span>Copyright &copy; Your Website 2019</span>
</div>
</div>
</footer>
<!-- End of Footer -->
</div>
<!-- End of Content Wrapper -->
</div>
<!-- End of Page Wrapper -->
<!-- Scroll to Top Button-->
<a class="scroll-to-top rounded" href="#page-top">
<i class="fas fa-angle-up"></i>
</a>
<!-- Logout Modal-->
<div class="modal fade" id="logoutModal" tabindex="-1" role="dialog" aria-labelledby="exampleModalLabel" aria-hidden="true">
<div class="modal-dialog" role="document">
<div class="modal-content">
<div class="modal-header">
<h5 class="modal-title" id="exampleModalLabel">Ready to Leave?</h5>
<button class="close" type="button" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body">Select "Logout" below if you are ready to end your current session.</div>
<div class="modal-footer">
<button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
<a class="btn btn-primary" href="login.html">Logout</a>
</div>
</div>
</div>
</div>
<!-- Bootstrap core JavaScript-->
<script src="vendor/jquery/jquery.min.js"></script>
<script src="vendor/bootstrap/js/bootstrap.bundle.min.js"></script>
<!-- Core plugin JavaScript-->
<script src="vendor/jquery-easing/jquery.easing.min.js"></script>
<!-- Custom scripts for all pages-->
<script src="js/sb-admin-2.min.js"></script>
</body>
</html>
<!DOCTYPE html>
<html lang="en">
<head>
{% load static %}
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<meta name="description" content="">
<meta name="author" content="">
<title>SB Admin 2 - Charts</title>
<!-- Custom fonts for this template-->
<link href="{% static 'FirstApp/vendor/fontawesome-free/css/all.min.css' %}" rel="stylesheet" type="text/css">
<link href="https://fonts.googleapis.com/css?family=Nunito:200,200i,300,300i,400,400i,600,600i,700,700i,800,800i,900,900i" rel="stylesheet">
<!-- Custom styles for this template-->
<link href="{% static 'FirstApp/css/sb-admin-2.min.css' %}" rel="stylesheet">
</head>
<body id="page-top">
<!-- Page Wrapper -->
<div id="wrapper">
<!-- Sidebar -->
<ul class="navbar-nav bg-gradient-primary sidebar sidebar-dark accordion" id="accordionSidebar">
<!-- Sidebar - Brand -->
<a class="sidebar-brand d-flex align-items-center justify-content-center" href="index.html">
<div class="sidebar-brand-icon rotate-n-15">
<i class="fas fa-laugh-wink"></i>
</div>
<div class="sidebar-brand-text mx-3">SB Admin <sup>2</sup></div>
</a>
<!-- Divider -->
<hr class="sidebar-divider my-0">
<!-- Nav Item - Dashboard -->
<li class="nav-item">
<a class="nav-link" href="index.html">
<i class="fas fa-fw fa-tachometer-alt"></i>
<span>Dashboard</span></a>
</li>
<!-- Divider -->
<hr class="sidebar-divider">
<!-- Heading -->
<div class="sidebar-heading">
Interface
</div>
<!-- Nav Item - Pages Collapse Menu -->
<li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseTwo" aria-expanded="true" aria-controls="collapseTwo">
<i class="fas fa-fw fa-cog"></i>
<span>Components</span>
</a>
<div id="collapseTwo" class="collapse" aria-labelledby="headingTwo" data-parent="#accordionSidebar">
<div class="bg-white py-2 collapse-inner rounded">
<h6 class="collapse-header">Custom Components:</h6>
<a class="collapse-item" href="gaze.html">Buttons</a>
<a class="collapse-item" href="pose.html">Cards</a>
</div>
</div>
</li>
<!-- Nav Item - Utilities Collapse Menu -->
<li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapseUtilities" aria-expanded="true" aria-controls="collapseUtilities">
<i class="fas fa-fw fa-wrench"></i>
<span>Utilities</span>
</a>
<div id="collapseUtilities" class="collapse" aria-labelledby="headingUtilities" data-parent="#accordionSidebar">
<div class="bg-white py-2 collapse-inner rounded">
<h6 class="collapse-header">Custom Utilities:</h6>
<a class="collapse-item" href="utilities-color.html">Colors</a>
<a class="collapse-item" href="utilities-border.html">Borders</a>
<a class="collapse-item" href="utilities-animation.html">Animations</a>
<a class="collapse-item" href="utilities-other.html">Other</a>
</div>
</div>
</li>
<!-- Divider -->
<hr class="sidebar-divider">
<!-- Heading -->
<div class="sidebar-heading">
Addons
</div>
<!-- Nav Item - Pages Collapse Menu -->
<li class="nav-item">
<a class="nav-link collapsed" href="#" data-toggle="collapse" data-target="#collapsePages" aria-expanded="true" aria-controls="collapsePages">
<i class="fas fa-fw fa-folder"></i>
<span>Pages</span>
</a>
<div id="collapsePages" class="collapse" aria-labelledby="headingPages" data-parent="#accordionSidebar">
<div class="bg-white py-2 collapse-inner rounded">
<h6 class="collapse-header">Login Screens:</h6>
<a class="collapse-item" href="login.html">Login</a>
<a class="collapse-item" href="register.html">Register</a>
<a class="collapse-item" href="forgot-password.html">Forgot Password</a>
<div class="collapse-divider"></div>
<h6 class="collapse-header">Other Pages:</h6>
<a class="collapse-item" href="404.html">404 Page</a>
<a class="collapse-item" href="blank.html">Blank Page</a>
</div>
</div>
</li>
<!-- Nav Item - Charts -->
<li class="nav-item active">
<a class="nav-link" href="charts.html">
<i class="fas fa-fw fa-chart-area"></i>
<span>Charts</span></a>
</li>
<!-- Nav Item - Tables -->
<li class="nav-item">
<a class="nav-link" href="tables.html">
<i class="fas fa-fw fa-table"></i>
<span>Tables</span></a>
</li>
<!-- Divider -->
<hr class="sidebar-divider d-none d-md-block">
<!-- Sidebar Toggler (Sidebar) -->
<div class="text-center d-none d-md-inline">
<button class="rounded-circle border-0" id="sidebarToggle"></button>
</div>
</ul>
<!-- End of Sidebar -->
<!-- Content Wrapper -->
<div id="content-wrapper" class="d-flex flex-column">
<!-- Main Content -->
<div id="content">
<!-- Topbar -->
<nav class="navbar navbar-expand navbar-light bg-white topbar mb-4 static-top shadow">
<!-- Sidebar Toggle (Topbar) -->
<button id="sidebarToggleTop" class="btn btn-link d-md-none rounded-circle mr-3">
<i class="fa fa-bars"></i>
</button>
<!-- Topbar Search -->
<form class="d-none d-sm-inline-block form-inline mr-auto ml-md-3 my-2 my-md-0 mw-100 navbar-search">
<div class="input-group">
<input type="text" class="form-control bg-light border-0 small" placeholder="Search for..." aria-label="Search" aria-describedby="basic-addon2">
<div class="input-group-append">
<button class="btn btn-primary" type="button">
<i class="fas fa-search fa-sm"></i>
</button>
</div>
</div>
</form>
<!-- Topbar Navbar -->
<ul class="navbar-nav ml-auto">
<!-- Nav Item - Search Dropdown (Visible Only XS) -->
<li class="nav-item dropdown no-arrow d-sm-none">
<a class="nav-link dropdown-toggle" href="#" id="searchDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
<i class="fas fa-search fa-fw"></i>
</a>
<!-- Dropdown - Messages -->
<div class="dropdown-menu dropdown-menu-right p-3 shadow animated--grow-in" aria-labelledby="searchDropdown">
<form class="form-inline mr-auto w-100 navbar-search">
<div class="input-group">
<input type="text" class="form-control bg-light border-0 small" placeholder="Search for..." aria-label="Search" aria-describedby="basic-addon2">
<div class="input-group-append">
<button class="btn btn-primary" type="button">
<i class="fas fa-search fa-sm"></i>
</button>
</div>
</div>
</form>
</div>
</li>
<!-- Nav Item - Alerts -->
<li class="nav-item dropdown no-arrow mx-1">
<a class="nav-link dropdown-toggle" href="#" id="alertsDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
<i class="fas fa-bell fa-fw"></i>
<!-- Counter - Alerts -->
<span class="badge badge-danger badge-counter">3+</span>
</a>
<!-- Dropdown - Alerts -->
<div class="dropdown-list dropdown-menu dropdown-menu-right shadow animated--grow-in" aria-labelledby="alertsDropdown">
<h6 class="dropdown-header">
Alerts Center
</h6>
<a class="dropdown-item d-flex align-items-center" href="#">
<div class="mr-3">
<div class="icon-circle bg-primary">
<i class="fas fa-file-alt text-white"></i>
</div>
</div>
<div>
<div class="small text-gray-500">December 12, 2019</div>
<span class="font-weight-bold">A new monthly report is ready to download!</span>
</div>
</a>
<a class="dropdown-item d-flex align-items-center" href="#">
<div class="mr-3">
<div class="icon-circle bg-success">
<i class="fas fa-donate text-white"></i>
</div>
</div>
<div>
<div class="small text-gray-500">December 7, 2019</div>
$290.29 has been deposited into your account!
</div>
</a>
<a class="dropdown-item d-flex align-items-center" href="#">
<div class="mr-3">
<div class="icon-circle bg-warning">
<i class="fas fa-exclamation-triangle text-white"></i>
</div>
</div>
<div>
<div class="small text-gray-500">December 2, 2019</div>
Spending Alert: We've noticed unusually high spending for your account.
</div>
</a>
<a class="dropdown-item text-center small text-gray-500" href="#">Show All Alerts</a>
</div>
</li>
<!-- Nav Item - Messages -->
<li class="nav-item dropdown no-arrow mx-1">
<a class="nav-link dropdown-toggle" href="#" id="messagesDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
<i class="fas fa-envelope fa-fw"></i>
<!-- Counter - Messages -->
<span class="badge badge-danger badge-counter">7</span>
</a>
<!-- Dropdown - Messages -->
<div class="dropdown-list dropdown-menu dropdown-menu-right shadow animated--grow-in" aria-labelledby="messagesDropdown">
<h6 class="dropdown-header">
Message Center
</h6>
<a class="dropdown-item d-flex align-items-center" href="#">
<div class="dropdown-list-image mr-3">
<img class="rounded-circle" src="https://source.unsplash.com/fn_BT9fwg_E/60x60" alt="">
<div class="status-indicator bg-success"></div>
</div>
<div class="font-weight-bold">
<div class="text-truncate">Hi there! I am wondering if you can help me with a problem I've been having.</div>
<div class="small text-gray-500">Emily Fowler · 58m</div>
</div>
</a>
<a class="dropdown-item d-flex align-items-center" href="#">
<div class="dropdown-list-image mr-3">
<img class="rounded-circle" src="https://source.unsplash.com/AU4VPcFN4LE/60x60" alt="">
<div class="status-indicator"></div>
</div>
<div>
<div class="text-truncate">I have the photos that you ordered last month, how would you like them sent to you?</div>
<div class="small text-gray-500">Jae Chun · 1d</div>
</div>
</a>
<a class="dropdown-item d-flex align-items-center" href="#">
<div class="dropdown-list-image mr-3">
<img class="rounded-circle" src="https://source.unsplash.com/CS2uCrpNzJY/60x60" alt="">
<div class="status-indicator bg-warning"></div>
</div>
<div>
<div class="text-truncate">Last month's report looks great, I am very happy with the progress so far, keep up the good work!</div>
<div class="small text-gray-500">Morgan Alvarez · 2d</div>
</div>
</a>
<a class="dropdown-item d-flex align-items-center" href="#">
<div class="dropdown-list-image mr-3">
<img class="rounded-circle" src="https://source.unsplash.com/Mv9hjnEUHR4/60x60" alt="">
<div class="status-indicator bg-success"></div>
</div>
<div>
<div class="text-truncate">Am I a good boy? The reason I ask is because someone told me that people say this to all dogs, even if they aren't good...</div>
<div class="small text-gray-500">Chicken the Dog · 2w</div>
</div>
</a>
<a class="dropdown-item text-center small text-gray-500" href="#">Read More Messages</a>
</div>
</li>
<div class="topbar-divider d-none d-sm-block"></div>
<!-- Nav Item - User Information -->
<li class="nav-item dropdown no-arrow">
<a class="nav-link dropdown-toggle" href="#" id="userDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
<span class="mr-2 d-none d-lg-inline text-gray-600 small">Valerie Luna</span>
<img class="img-profile rounded-circle" src="https://source.unsplash.com/QAB-WJcbgJk/60x60">
</a>
<!-- Dropdown - User Information -->
<div class="dropdown-menu dropdown-menu-right shadow animated--grow-in" aria-labelledby="userDropdown">
<a class="dropdown-item" href="#">
<i class="fas fa-user fa-sm fa-fw mr-2 text-gray-400"></i>
Profile
</a>
<a class="dropdown-item" href="#">
<i class="fas fa-cogs fa-sm fa-fw mr-2 text-gray-400"></i>
Settings
</a>
<a class="dropdown-item" href="#">
<i class="fas fa-list fa-sm fa-fw mr-2 text-gray-400"></i>
Activity Log
</a>
<div class="dropdown-divider"></div>
<a class="dropdown-item" href="#" data-toggle="modal" data-target="#logoutModal">
<i class="fas fa-sign-out-alt fa-sm fa-fw mr-2 text-gray-400"></i>
Logout
</a>
</div>
</li>
</ul>
</nav>
<!-- End of Topbar -->
<!-- Begin Page Content -->
<div class="container-fluid">
<!-- Page Heading -->
<h1 class="h3 mb-2 text-gray-800">Charts</h1>
<p class="mb-4">Chart.js is a third party plugin that is used to generate the charts in this theme. The charts below have been customized - for further customization options, please visit the <a target="_blank" href="https://www.chartjs.org/docs/latest/">official Chart.js documentation</a>.</p>
<!-- Content Row -->
<div class="row">
<div class="col-xl-8 col-lg-7">
<!-- Area Chart -->
<div class="card shadow mb-4">
<div class="card-header py-3">
<h6 class="m-0 font-weight-bold text-primary">Area Chart</h6>
</div>
<div class="card-body">
<div class="chart-area">
<canvas id="myAreaChart"></canvas>
</div>
<hr>
Styling for the area chart can be found in the <code>/js/demo/chart-area-demo.js</code> file.
</div>
</div>
<!-- Bar Chart -->
<div class="card shadow mb-4">
<div class="card-header py-3">
<h6 class="m-0 font-weight-bold text-primary">Bar Chart</h6>
</div>
<div class="card-body">
<div class="chart-bar">
<canvas id="myBarChart"></canvas>
</div>
<hr>
Styling for the bar chart can be found in the <code>/js/demo/chart-bar-demo.js</code> file.
</div>
</div>
</div>
<!-- Donut Chart -->
<div class="col-xl-4 col-lg-5">
<div class="card shadow mb-4">
<!-- Card Header - Dropdown -->
<div class="card-header py-3">
<h6 class="m-0 font-weight-bold text-primary">Donut Chart</h6>
</div>
<!-- Card Body -->
<div class="card-body">
<div class="chart-pie pt-4">
<canvas id="myPieChart"></canvas>
</div>
<hr>
Styling for the donut chart can be found in the <code>/js/demo/chart-pie-demo.js</code> file.
</div>
</div>
</div>
</div>
</div>
<!-- /.container-fluid -->
</div>
<!-- End of Main Content -->
<!-- Footer -->
<footer class="sticky-footer bg-white">
<div class="container my-auto">
<div class="copyright text-center my-auto">
<span>Copyright &copy; Your Website 2019</span>
</div>
</div>
</footer>
<!-- End of Footer -->
</div>
<!-- End of Content Wrapper -->
</div>
<!-- End of Page Wrapper -->
<!-- Scroll to Top Button-->
<a class="scroll-to-top rounded" href="#page-top">
<i class="fas fa-angle-up"></i>
</a>
<!-- Logout Modal-->
<div class="modal fade" id="logoutModal" tabindex="-1" role="dialog" aria-labelledby="exampleModalLabel" aria-hidden="true">
<div class="modal-dialog" role="document">
<div class="modal-content">
<div class="modal-header">
<h5 class="modal-title" id="exampleModalLabel">Ready to Leave?</h5>
<button class="close" type="button" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body">Select "Logout" below if you are ready to end your current session.</div>
<div class="modal-footer">
<button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
<a class="btn btn-primary" href="login.html">Logout</a>
</div>
</div>
</div>
</div>
<!-- Bootstrap core JavaScript-->
<script src="vendor/jquery/jquery.min.js"></script>
<script src="vendor/bootstrap/js/bootstrap.bundle.min.js"></script>
<!-- Core plugin JavaScript-->
<script src="vendor/jquery-easing/jquery.easing.min.js"></script>
<!-- Custom scripts for all pages-->
<script src="js/sb-admin-2.min.js"></script>
<!-- Page level plugins -->
<script src="vendor/chart.js/Chart.min.js"></script>
<!-- Page level custom scripts -->
<script src="js/demo/chart-area-demo.js"></script>
<script src="js/demo/chart-pie-demo.js"></script>
<script src="js/demo/chart-bar-demo.js"></script>
</body>
</html>
{% extends "FirstApp/base.html" %}
{% block title %}My amazing blog{% endblock %}
{% block content %}
{% for entry in blog_entries %}
<h2>{{ entry.title }}</h2>
<p>{{ entry.body }}</p>
{% endfor %}
{% endblock %}
\ No newline at end of file
......@@ -29,6 +29,8 @@
var global_lecture_video_id = '';
var global_video_name = '';
var global_lecturer_subject_index = 0;
var global_lecturer_video_name = '';
var lecturer_fps = 0;
//jquery
$(document).ready(function () {
......@@ -239,9 +241,20 @@
//define the student video src
let video_src = "{% static '' %}FirstApp/videos/" + global_video_name;
{#global_lecturer_video_name = "Test_1.mp4";#}
{#global_lecturer_video_name = "Test_2.mp4";#}
global_lecturer_video_name = "Test_3.mp4";
//define the lecturer video src
let lecturer_video_src = "{% static '' %}FirstApp/lecturer_videos/" + global_lecturer_video_name;
//assign the video src
$('#student_video').attr('src', video_src);
//assign the video src
$('#lecturer_video').attr('src', lecturer_video_src);
$('#integrate_modal').modal();
......@@ -320,25 +333,92 @@
//append the html
$('#student_video_column').append(htmlString);
//start retrieving lecturer activity frame recognition
fetch('http://127.0.0.1:8000/lecturer/get-lecturer-video-frame-recognitions/?video_name=' + global_lecturer_video_name)
.then((res) => res.json())
.then((out) => displayLecturerEmotionRecognitionForFrame(out))
.catch((err) => alert('error: ' + err))
}
//this function will load the activity recognition for frames
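//note: despite its name, displayLecturerEmotionRecognitionForFrame() renders the lecturer's activity (sitting/standing/walking) progress bars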
function displayLecturerEmotionRecognitionForFrame(response) {
//hide the loader
$('#lecturer_video_progress_loader').attr('hidden', true);
//show the progress bars
$('#lecturer_video_progress').attr('hidden', false);
//creating the html string
let htmlString = "";
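//note: despite its name, 'lecturer_fps' ends up holding the per-frame interval in milliseconds (1000 / fps); it is later used as the lecturer setInterval delay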
let duration = 1000 / response.fps;
lecturer_fps = Math.round(duration, 0);
console.log('lecturer fps: ', lecturer_fps);
//creating the html string, iteratively
response.frame_recognitions.map((frame) => {
let frame_name = frame.frame_name;
let sitting_perct = Math.round(frame.sitting_perct, 0);
let standing_perct = Math.round(frame.standing_perct, 0);
{#let listen_perct = Math.round(frame.listening_perct, 0);#}
let walking_perct = Math.round(frame.walking_perct, 0);
//append to the html string
//sitting
htmlString += "<div class='progress_area' id='progress_lecturer_" + frame_name + "' hidden>";
htmlString += "<h4 class='small font-weight-bold'>Sitting</h4>";
htmlString += "<span class='float-right' id='sitting_instant_" + frame_name + "'>" + sitting_perct + "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-warning' role='progressbar' id='sitting_instant_value_" + frame_name + "' style='width: " + sitting_perct + "%' aria-valuenow='40' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>";
//standing
htmlString += "<h4 class='small font-weight-bold'>Standing</h4>";
htmlString += "<span class='float-right' id='standing_instant_" + frame_name + "'>" + standing_perct + "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar' role='progressbar' id='standing_instant_value_" + frame_name + "' style='width: " + standing_perct + "%' aria-valuenow='0' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>";
//walking
htmlString += "<h4 class='small font-weight-bold'>Walking</h4>";
htmlString += "<span class='float-right' id='walking_instant_" + frame_name + "'>" + walking_perct + "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-info' role='progressbar' id='walking_instant_value_" + frame_name + "' style='width: " + walking_perct + "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>";
//ending the progress area
htmlString += "</div>";
});
//append the html
$('#lecturer_video_column').append(htmlString);
}
//to handle the 'integrate' play button
$('#play_integrate_button').click(function () {
let video = $('video')[0];
let video1 = $('video')[1];
let test_video = document.getElementsByTagName('video')[0];
let play_class = 'btn btn-outline-danger play';
let pause_class = 'btn btn-outline-danger pause';
let count = 0;
let count_lecturer = 0;
let classes = $(this).attr('class');
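//periodically step through the student frame progress areas while the video plays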
let video_interval = setInterval(() => {
let talking_number = Math.round(Math.random() * 100, 0);
let phone_number = Math.round(Math.random() * 100, 0);
let note_number = Math.round(Math.random() * 100, 0);
let listening_number = Math.round(Math.random() * 100, 0);
//=====STUDENTS COLUMN=====
//get the relevant progress area
let progress_area = "progress_frame-" + count;
......@@ -356,32 +436,49 @@
//increment the count
count++;
//setting the values
/*
$('#talking_instant').text(talking_number + '%');
$('#phone_checking_instant').text(phone_number + '%');
$('#note_taking_instant').text(note_number + '%');
$('#listening_instant').text(listening_number + '%');
//setting the width
$('#talking_instant_value').width(talking_number + '%');
$('#phone_checking_instant_value').width(phone_number + '%');
$('#note_taking_instant_value').width(note_number + '%');
$('#listening_instant_value').width(listening_number + '%');
}, 33);
let video_interval_lecturer = setInterval(() => {
//=====LECTURER COLUMN=====
//get the relevant progress area
let progress_area_lecturer = "progress_lecturer_frame-" + count_lecturer;
let progress_area_id_lecturer = "#" + progress_area_lecturer;
//find the corresponding progress area
let progress_area_html_lecturer = document.getElementById(progress_area_lecturer);
//display the retrieved progress area
$(progress_area_id_lecturer).attr('hidden', false);
//replace the current progress area with the selected one
$('#lecturer_video_progress').html(progress_area_html_lecturer);
//increment the count
count_lecturer++;
console.log('current frame (lecturer): ', count_lecturer);
}, lecturer_fps);
*/
}, 1000);
//check for the current class
if (classes === play_class) {
$(this).text('Pause');
$(this).attr('class', pause_class);
video.play();
video1.play();
} else if (classes === pause_class) {
$(this).text('Play');
$(this).attr('class', play_class);
video.pause();
video1.pause();
}
//function to do when the video is paused
......@@ -391,6 +488,12 @@
video.onended = function (e) {
//stop changing the activity values
clearInterval(video_interval);
};
//function to do when the video is ended
video1.onended = function (e) {
//stop changing the activity values
clearInterval(video_interval_lecturer);
}
});
......@@ -642,7 +745,6 @@
</div>
</div>
......@@ -1072,25 +1174,71 @@
</div>
<!--end of 1st column -->
<!--2nd column -->
<div class="col-md-6">
<!-- 2nd column -->
<div class="col-md-6" id="lecturer_video_column">
<div class="text-center">
<span class="h3 font-italic font-weight-bold">Lecturer Performance</span>
</div>
<!--display lecture video -->
<div class="text-center m-3" id="lecturer_video_section">
<!--temporary text -->
<div class="text-center" id="temp_lecturer_text">
<span class="font-italic">No video was found</span>
<!--display lecturer video -->
<div class="text-center m-3" id="lecturer_video_section">
<video width="500" height="300" id="lecturer_video" controls>
<source src="#"
type="video/mp4">
Your browser does not support the video tag.
</video>
</div>
<!--end of lecturer video section -->
<video width="500" height="300" id="lecturer_video" controls>
<source src="#"
type="video/mp4">
Your browser does not support the video tag.
</video>
<!-- ajax loader section -->
<div class="text-center mt-3" id="lecturer_video_progress_loader">
<img src="{% static 'FirstApp/images/ajax-loader-1.gif' %}" alt="loader">
</div>
<!--progress bar section -->
<div class="progress_area" id="lecturer_video_progress" hidden>
<!--sitting -->
<h4 class="small font-weight-bold">Sitting</h4>
<span class="float-right" id="sitting_instant">0%</span>
<div class="progress mb-4">
<div class="progress-bar bg-warning" role="progressbar"
id="sitting_instant_value"
{# style="width: 0%"#}
aria-valuenow="40" aria-valuemin="0" aria-valuemax="100"></div>
</div>
<!--standing -->
<h4 class="small font-weight-bold">Standing</h4>
<span class="float-right" id="standing_instant">0%</span>
<div class="progress mb-4">
<div class="progress-bar" role="progressbar"
id="standing_instant_value"
{# style="width: 0%"#}
aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div>
</div>
<!--walking-->
<h4 class="small font-weight-bold">Walking</h4>
<span class="float-right" id="walking_instant">0%</span>
<div class="progress mb-4">
<div class="progress-bar bg-info" role="progressbar"
id="walking_instant_value"
{# style="width: 80%"#}
aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div>
</div>
</div>
<!--end of progress bar section -->
</div>
<!--end of lecture video section -->
......
{% extends 'FirstApp/template.html' %}
<!DOCTYPE html>
<html lang="en">
<body id="page-top">
<!-- Page Wrapper -->
{% block javascript %}
{% load static %}
<!-- Bootstrap core JavaScript-->
<script src="{% static 'FirstApp/vendor/jquery/jquery.min.js' %}"></script>
<script src="{% static 'FirstApp/vendor/bootstrap/js/bootstrap.bundle.min.js' %}"></script>
<!-- Core plugin JavaScript-->
<script src="{% static 'FirstApp/vendor/jquery-easing/jquery.easing.min.js' %}"></script>
<script type="text/javascript">
const cur_date = new Date().toDateString();
let video_name = '';
$(document).ready(function () {
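//clicking a lecture row loads that video into the main player and fills in its name, duration, and current date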
$('.video_row').click(function () {
video_name = $(this).attr('id');
let video_duration = $(this).attr('data-duration');
let src = "{% static '' %}FirstApp/videos/" + video_name;
//assigning the src
$("video").attr('src', src);
//setting the video details
$('#video_name').text(video_name);
$('#video_duration').text(video_duration);
$('#video_date').text(cur_date);
});
//to handle video extraction button
$('#extractBtn').click(function () {
//run the api
fetch('http://127.0.0.1:8000/videoExtract', {
method: 'POST',
headers: {
"Content-type": "application/json"
},
body: JSON.stringify({
"video_name": video_name
})
})
.then((res) => res.json())
.then((out) => alert(out.response))
.catch((error) => alert('error: ' + error));
});
});
</script>
{% endblock %}
<div id="wrapper">
<!-- Sidebar -->
<!-- Content Wrapper -->
<div id="content-wrapper" class="d-flex flex-column">
<!-- Main Content -->
<div id="content">
<!-- Begin Page Content -->
{% block 'container-fluid' %}
<div class="container-fluid">
{% load static %}
<!-- Page Heading -->
<div class="d-sm-flex align-items-center justify-content-between mb-4">
<h1 class="h3 mb-0 text-gray-800">Video Extractor</h1>
</div>
<!--1st row -->
<div class="row p-2">
<!--first column -->
<div class="col-lg-6">
<!-- Main Video Context -->
<div class="card shadow mb-4">
<div class="card-header py-3">
<h6 class="m-0 font-weight-bold text-primary">Main Video</h6>
</div>
<div class="card-body">
<video width="500" height="300" id="first_video" controls>
<source src="{% static '' %}FirstApp/videos/{{ firstVideo.name }}"
type="video/mp4">
Your browser does not support the video tag.
</video>
<table class="table table-borderless table-striped m-1">
<tbody>
<tr>
<td class="font-weight-bold">Name</td>
<td id="video_name">{{ firstVideo.name }}</td>
</tr>
<tr>
<td class="font-weight-bold">Duration</td>
<td id="video_duration">{{ firstVideo.duration }}</td>
</tr>
<tr>
<td class="font-weight-bold">Date</td>
<td id="video_date"></td>
</tr>
</tbody>
</table>
<div class="col-lg-3 p-3">
<button type="button" class="btn btn-outline-primary" id="extractBtn">Extract</button>
</div>
</div>
</div>
</div>
<!--second column -->
<div class="col-lg-6">
<!-- Main Video Context -->
<div class="card shadow mb-4">
<div class="card-header py-3">
<h6 class="m-0 font-weight-bold text-primary">List of Lectures</h6>
</div>
<div class="card-body">
<table class="table table-bordered">
<thead>
<tr>
<th>Video Name</th>
<th>Length</th>
</tr>
</thead>
<tbody>
{% for video in Videos %}
<tr class="video_row" id="{{ video.name }}" data-duration="{{ video.duration }}">
<td>
{{ video.name }}
</td>
<td>{{ video.duration }}</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
</div>
</div>
</div>
</div>
{% endblock %}
<!-- End of container-fluid -->
</div>
<!-- End of Main Content -->
</div>
<!-- End of Content Wrapper -->
</div>
{% block 'modal' %}
<div class="modal fade" id="gif-body" role="dialog" aria-labelledby="gif-body">
<div class="modal-dialog modal-lg" style="max-width: 1600px; max-height: 800px">
<div class="modal-content">
<div class="modal-header">
<h2 class="modal-title">Processing....</h2>
<button class="close" type="button" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body text-center">
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}" width="200" height="500"
alt="This is a GIF">
<h5>This might take a few seconds...</h5>
</div>
</div>
</div>
</div>
{% endblock %}
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<meta name="description" content="">
<meta name="author" content="">
<title>SB Admin 2 - Forgot Password</title>
<!-- Custom fonts for this template-->
<link href="vendor/fontawesome-free/css/all.min.css" rel="stylesheet" type="text/css">
<link href="https://fonts.googleapis.com/css?family=Nunito:200,200i,300,300i,400,400i,600,600i,700,700i,800,800i,900,900i" rel="stylesheet">
<!-- Custom styles for this template-->
<link href="css/sb-admin-2.min.css" rel="stylesheet">
</head>
<body class="bg-gradient-primary">
<div class="container">
<!-- Outer Row -->
<div class="row justify-content-center">
<div class="col-xl-10 col-lg-12 col-md-9">
<div class="card o-hidden border-0 shadow-lg my-5">
<div class="card-body p-0">
<!-- Nested Row within Card Body -->
<div class="row">
<div class="col-lg-6 d-none d-lg-block bg-password-image"></div>
<div class="col-lg-6">
<div class="p-5">
<div class="text-center">
<h1 class="h4 text-gray-900 mb-2">Forgot Your Password?</h1>
<p class="mb-4">We get it, stuff happens. Just enter your email address below and we'll send you a link to reset your password!</p>
</div>
<form class="user">
<div class="form-group">
<input type="email" class="form-control form-control-user" id="exampleInputEmail" aria-describedby="emailHelp" placeholder="Enter Email Address...">
</div>
<a href="login.html" class="btn btn-primary btn-user btn-block">
Reset Password
</a>
</form>
<hr>
<div class="text-center">
<a class="small" href="register.html">Create an Account!</a>
</div>
<div class="text-center">
<a class="small" href="login.html">Already have an account? Login!</a>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
<!-- Bootstrap core JavaScript-->
<script src="vendor/jquery/jquery.min.js"></script>
<script src="vendor/bootstrap/js/bootstrap.bundle.min.js"></script>
<!-- Core plugin JavaScript-->
<script src="vendor/jquery-easing/jquery.easing.min.js"></script>
<!-- Custom scripts for all pages-->
<script src="js/sb-admin-2.min.js"></script>
</body>
</html>
......@@ -29,6 +29,8 @@
var global_lecture_video_id = '';
var global_video_name = '';
var global_lecturer_subject_index = 0;
var global_lecturer_video_name = '';
var lecturer_fps = 0;
//jquery
$(document).ready(function () {
......@@ -237,11 +239,21 @@
//define the student video src
let video_src = "{% static '' %}FirstApp/videos/" + global_video_name;
{#global_lecturer_video_name = "Test_1.mp4";#}
{#global_lecturer_video_name = "Test_2.mp4";#}
global_lecturer_video_name = "Test_3.mp4";
//define the lecturer video src
let lecturer_video_src = "{% static '' %}FirstApp/lecturer_videos/" + global_lecturer_video_name;
//assign the video src
$('#student_video').attr('src', video_src);
$('#integrate_modal').modal();
//assign the video src
$('#lecturer_video').attr('src', lecturer_video_src);
$('#integrate_modal').modal();
//fetch data from the API
fetch('http://127.0.0.1:8000/get-lecture-gaze-estimation-for-frame/?video_name=' + global_video_name)
......@@ -318,25 +330,89 @@
//append the html
$('#student_video_column').append(htmlString);
//start retrieving lecturer activity frame recognition
fetch('http://127.0.0.1:8000/lecturer/get-lecturer-video-frame-recognitions/?video_name=' + global_lecturer_video_name)
.then((res) => res.json())
.then((out) => displayLecturerEmotionRecognitionForFrame(out))
.catch((err) => alert('error: ' + err))
}
//this function will load the activity recognition for frames
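//note: despite its name, displayLecturerEmotionRecognitionForFrame() renders the lecturer's activity (sitting/standing/walking) progress bars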
function displayLecturerEmotionRecognitionForFrame(response) {
//hide the loader
$('#lecturer_video_progress_loader').attr('hidden', true);
//show the progress bars
$('#lecturer_video_progress').attr('hidden', false);
//creating the html string
let htmlString = "";
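//note: despite its name, 'lecturer_fps' ends up holding the per-frame interval in milliseconds (1000 / fps); it is later used as the lecturer setInterval delay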
let duration = 1000 / response.fps;
lecturer_fps = Math.round(duration, 0);
console.log('lecturer fps: ', lecturer_fps);
//creating the html string, iteratively
response.frame_recognitions.map((frame) => {
let frame_name = frame.frame_name;
let sitting_perct = Math.round(frame.sitting_perct, 0);
let standing_perct = Math.round(frame.standing_perct, 0);
{#let listen_perct = Math.round(frame.listening_perct, 0);#}
let walking_perct = Math.round(frame.walking_perct, 0);
//append to the html string
//sitting
htmlString += "<div class='progress_area' id='progress_lecturer_" + frame_name + "' hidden>";
htmlString += "<h4 class='small font-weight-bold'>Sitting</h4>";
htmlString += "<span class='float-right' id='sitting_instant_" + frame_name + "'>" + sitting_perct + "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-warning' role='progressbar' id='sitting_instant_value_" + frame_name + "' style='width: " + sitting_perct + "%' aria-valuenow='40' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>";
//standing
htmlString += "<h4 class='small font-weight-bold'>Standing</h4>";
htmlString += "<span class='float-right' id='standing_instant_" + frame_name + "'>" + standing_perct + "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar' role='progressbar' id='standing_instant_value_" + frame_name + "' style='width: " + standing_perct + "%' aria-valuenow='0' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>";
//walking
htmlString += "<h4 class='small font-weight-bold'>Walking</h4>";
htmlString += "<span class='float-right' id='walking_instant_" + frame_name + "'>" + walking_perct + "%</span>";
htmlString += "<div class='progress mb-4'>";
htmlString += "<div class='progress-bar bg-info' role='progressbar' id='walking_instant_value_" + frame_name + "' style='width: " + walking_perct + "%' aria-valuenow='80' aria-valuemin='0' aria-valuemax='100'></div>";
htmlString += "</div>";
//ending the progress area
htmlString += "</div>";
});
//append the html
$('#lecturer_video_column').append(htmlString);
}
//to handle the 'integrate' play button
$('#play_integrate_button').click(function () {
let video = $('video')[0];
let video1 = $('video')[1];
let test_video = document.getElementsByTagName('video')[0];
let play_class = 'btn btn-outline-danger play';
let pause_class = 'btn btn-outline-danger pause';
let count = 0;
let count_lecturer = 0;
let classes = $(this).attr('class');
let video_interval = setInterval(() => {
{#let talking_number = Math.round(Math.random() * 100, 0);#}
{#let phone_number = Math.round(Math.random() * 100, 0);#}
{#let note_number = Math.round(Math.random() * 100, 0);#}
{#let listening_number = Math.round(Math.random() * 100, 0);#}
//=====STUDENTS COLUMN=====
//get the relevant progress area
let progress_area = "progress_frame-" + count;
......@@ -354,33 +430,49 @@
//increment the count
count++;
//setting the values
{#$('#looking_up_right_instant_perct').text(talking_number + '%');#}
{#$('#looking_up_left_instant_perct').text(phone_number + '%');#}
{#$('#looking_down_right_instant_perct').text(note_number + '%');#}
{#$('#looking_down_left_instant_perct').text(listening_number + '%');#}
{#$('#looking_front_instant_perct').text(listening_number + '%');#}
{##}
{#//setting the width#}
{#$('#talking_instant_value').width(talking_number + '%');#}
{#$('#phone_checking_instant_value').width(phone_number + '%');#}
{#$('#note_taking_instant_value').width(note_number + '%');#}
{#$('#listening_instant_value').width(listening_number + '%');#}
}, 33);
}, 33);
let video_interval_lecturer = setInterval(() => {
//=====LECTURER COLUMN=====
//get the relevant progress area
let progress_area_lecturer = "progress_lecturer_frame-" + count_lecturer;
let progress_area_id_lecturer = "#" + progress_area_lecturer;
//find the corresponding progress area
let progress_area_html_lecturer = document.getElementById(progress_area_lecturer);
//display the retrieved progress area
$(progress_area_id_lecturer).attr('hidden', false);
//replace the current progress area with the selected one
$('#lecturer_video_progress').html(progress_area_html_lecturer);
//increment the count
count_lecturer++;
console.log('current frame (lecturer): ', count_lecturer);
}, lecturer_fps);
//check for the current class
if (classes === play_class) {
$(this).text('Pause');
$(this).attr('class', pause_class);
video.play();
video1.play();
} else if (classes === pause_class) {
$(this).text('Play');
$(this).attr('class', play_class);
video.pause();
video1.pause();
}
//function to do when the video is paused
......@@ -390,6 +482,12 @@
video.onended = function (e) {
//stop changing the activity values
clearInterval(video_interval);
};
//function to do when the video is ended
video1.onended = function (e) {
//stop changing the activity values
clearInterval(video_interval_lecturer);
}
});
......@@ -903,30 +1001,76 @@
</div>
<!--end of 1st column -->
<!--2nd column -->
<div class="col-md-6">
<!-- 2nd column -->
<div class="col-md-6" id="lecturer_video_column">
<div class="text-center">
<span class="h3 font-italic font-weight-bold">Lecturer Performance</span>
</div>
<!--display lecture video -->
<div class="text-center m-3" id="lecturer_video_section">
<!--temporary text -->
<div class="text-center" id="temp_lecturer_text">
<span class="font-italic">No video was found</span>
<!--display lecturer video -->
<div class="text-center m-3" id="lecturer_video_section">
<video width="500" height="300" id="lecturer_video" controls>
<source src="#"
type="video/mp4">
Your browser does not support the video tag.
</video>
</div>
<!--end of lecturer video section -->
<!-- ajax loader section -->
<div class="text-center mt-3" id="lecturer_video_progress_loader">
<img src="{% static 'FirstApp/images/ajax-loader-1.gif' %}" alt="loader">
</div>
{# <video width="500" height="300" id="lecturer_video" controls>#}
{# <source src="#"#}
{# type="video/mp4">#}
{# Your browser does not support the video tag.#}
{# </video>#}
<!--progress bar section -->
<div class="progress_area" id="lecturer_video_progress" hidden>
<!--sitting -->
<h4 class="small font-weight-bold">Sitting</h4>
<span class="float-right" id="sitting_instant">0%</span>
<div class="progress mb-4">
<div class="progress-bar bg-warning" role="progressbar"
id="sitting_instant_value"
{# style="width: 0%"#}
aria-valuenow="40" aria-valuemin="0" aria-valuemax="100"></div>
</div>
<!--standing -->
<h4 class="small font-weight-bold">Standing</h4>
<span class="float-right" id="standing_instant">0%</span>
<div class="progress mb-4">
<div class="progress-bar" role="progressbar"
id="standing_instant_value"
{# style="width: 0%"#}
aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div>
</div>
<!--walking-->
<h4 class="small font-weight-bold">Walking</h4>
<span class="float-right" id="walking_instant">0%</span>
<div class="progress mb-4">
<div class="progress-bar bg-info" role="progressbar"
id="walking_instant_value"
{# style="width: 80%"#}
aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div>
</div>
</div>
<!--end of progress bar section -->
</div>
<!--end of lecture video section -->
</div>
<!--end of 2nd column -->
</div>
<!--end of 1st row -->
......
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Title</title>
</head>
<body>
<p>This is my username: {{ username }}</p>
</body>
</html>
\ No newline at end of file
{% extends 'FirstApp/template.html' %}
<!DOCTYPE html>
<html lang="en">
<body id="page-top">
<!-- Page Wrapper -->
{% block javascript %}
{% load static %}
<!-- Bootstrap core JavaScript-->
<script src="{% static 'FirstApp/vendor/jquery/jquery.min.js' %}"></script>
<script src="{% static 'FirstApp/vendor/bootstrap/js/bootstrap.bundle.min.js' %}"></script>
<!-- Page level plugins -->
<script src="{% static 'FirstApp/vendor/datatables/jquery.dataTables.min.js' %}"></script>
<script src="{% static 'FirstApp/vendor/datatables/dataTables.bootstrap4.min.js' %}"></script>
<!-- Page level custom scripts -->
<script src="{% static 'FirstApp/js/demo/datatables-demo.js' %}"></script>
<!-- Core plugin JavaScript-->
<script src="{% static 'FirstApp/vendor/jquery-easing/jquery.easing.min.js' %}"></script>
<!-- Load TensorFlow.js -->
<script src="https://unpkg.com/@tensorflow/tfjs"></script>
<!-- Load Posenet -->
<script src="https://unpkg.com/@tensorflow-models/posenet">
</script>
<script type="text/javascript">
var global_subject = '';
var global_lecturer = '';
var global_lecture_video_id = '';
var global_video_name = '';
var global_lecturer_subject_index = 0;
//jquery
$(document).ready(function () {
//select a particular subject
$('input[type=radio]').click(function () {
let subject_id = $(this).attr('id');
global_subject = subject_id;
let lecturer = $(this).attr('data-lecturer');
global_lecturer = lecturer;
let subject_name = $(this).attr('data-name');
$('#timetable').attr('hidden', true);
$('#no_timetable_content').attr('hidden', true);
$('.student-detection-rows').remove();
$('#timetable_body').children().map(function () {
$(this).remove();
});
$('#no_subject_selected').attr('hidden', true);
$('#timetable_caption').text('subject: ' + subject_name);
$('#loader').attr('hidden', false);
//fetching the timetable from the db
fetch('http://127.0.0.1:8000/timetables')
.then((res) => res.json())
.then((out) => createTimeTable(out, subject_id, lecturer))
.catch((error) => alert('this is the error: ' + error))
});
$(document).on('click', '.btn-info', function (e) {
let clicked_class = e.target.className;
let object = e;
let real_class = clicked_class.split(' ')[1];
real_class = '.' + real_class;
let date = e.target.parentNode.parentNode.firstChild.innerHTML;
fetch('http://127.0.0.1:8000/get-lecture-video-for-pose/?lecturer=' + global_lecturer + '&date=' + date + '&index=' + global_lecturer_subject_index)
.then((res) => res.json())
.then((out) => displayLectureVideoDetails(out, object))
.catch((error) => alert('an error occurred: ' + error));
});
function createTimeTable(timetable, subject, lecturer) {
$('#loader').attr('hidden', true);
$('#timetable').attr('hidden', false);
let isTimetableSubject = false;
timetable.map((item, i) => {
item.timetable.map((table, index) => {
let lecturer_subject_index_arr = [];
//to get the number of subjects taught by the lecturer in a day
table.time_slots.forEach((slot1, ind) => {
let isLecturer = slot1.lecturer.id === Number(lecturer);
if (isLecturer) {
lecturer_subject_index_arr.push(ind);
}
});
//iterating each slot (for a given day)
table.time_slots.forEach((slot, in1) => {
let isLecturer = slot.lecturer.id === Number(lecturer);
let isLecSubject = slot.subject.subject_code === subject;
if (isLecturer && isLecSubject) {
let html = '';
global_lecturer_subject_index = lecturer_subject_index_arr.findIndex((inner) => inner === in1);
isTimetableSubject = true;
html += "<tr class='lecture-details'><td class='slot_date'>" + table.date + "</td>"
+ "<td>" + slot.location + "</td>"
+ "<td>" + slot.start_time + "</td>"
+ "<td>" + slot.end_time + "</td>"
+ "<td><button type='button' class='btn btn-info'>Video</button></td>"
+ "<td></td>"
+ "</tr>";
$('#timetable_body').append(html);
}
});
});
if (!isTimetableSubject) {
$('#timetable').attr('hidden', true);
$('#no_timetable_content').attr('hidden', false);
}
});
}
//function to display lecture video details
function displayLectureVideoDetails(lectureVideo, e) {
//get the lecture video response
let video = lectureVideo.response;
$('#video_name').text(video.video_name);
$('#video_duration').text(video.video_length);
$('#video_date').text(video.date);
global_lecture_video_id = video.lecture_video_id;
global_video_name = video.video_name;
//display the modal
$('#video_modal').modal();
//showing the video frames
//removing the previous frames (if there are any)
$('#main_frames').remove();
//displaying the loader (for the frames)
$('#frame_loader').attr('hidden', false);
//hiding the temporary text
$('#temporary_text').attr('hidden', true);
fetch('http://127.0.0.1:8000/get-lecture-video-extracted-frames/?lecture_video_id=' + global_lecture_video_id + '&lecture_video_name=' + global_video_name)
.then((res) => res.json())
.then((out) => {
let frames = createFrames(out);
return frames
})
.then((obj) => {
$('#video_frames').prepend(obj);
$('#frame_loader').attr('hidden', true);
$('#slidecontainer').attr('hidden', false);
})
.catch((error) => alert('this is the error: ' + error));
}
//to handle the 'btn-success' (process) button
$(document).on('click', '.btn-success', function (e) {
//sending the POST request to process the lecture activities
fetch('http://127.0.0.1:8000/process-lecture-activity/?lecture_video_name=' + global_video_name + '&lecture_video_id=' + global_lecture_video_id)
.then((res) => res.json())
.then((out) => handleResponse(out.response, e))
.catch((error) => alert('error: ' + error));
});
//this is to change the button from 'process' to 'results'
function handleResponse(response, e) {
//change the button, if the response is positive
if (response) {
e.target.parentNode.parentNode.lastChild.innerHTML = '<button type="button" class="btn btn-primary">Results</button>';
}
}
//this section is responsible for displaying the frames as video
//creating the frame content
function createFrames(res) {
let main_frame_content = "<div class='row' id='main_frames'>";
main_frame_content += "<ul class='list-group list-group-horizontal'>";
let count = 0;
//loop through the frames
res.extracted.map((image) => {
let img_src = "";
let len = image.detections.length;
if (count === 0) {
main_frame_content += "<li class='list-group-item text-center' id='image_0'>";
img_src = "<img src='{% static '' %}FirstApp/activity/" + global_video_name + "/" + res.extracted[0].frame + "/" + res.extracted[0].detections[0] + "' width='400' height='400'>";
} else {
main_frame_content += "<li class='list-group-item other-frames' id='image_" + count + "' hidden>";
img_src = "<img src='{% static '' %}FirstApp/activity/" + global_video_name + "/" + image.frame + "/" + image.detections[len - 1] + "' class='img-link' width='400' height='400'>";
}
main_frame_content += img_src;
main_frame_content += "</li>";
count++;
});
main_frame_content += "</ul>";
main_frame_content += "</div>";
//setting the min, max values of the slider
$('#myActivityRange').attr({'min': 0, 'max': count});
//display the 'process' button
$('#process_btn').attr('hidden', false);
return main_frame_content;
}
//declaring the variable for setInterval function
let timeVar = null;
//handling the play button
$('#play_pause_icon_activity').click(function () {
//defining the two possible classes
let play_class = "fas fa-play";
let pause_class = "fas fa-pause";
//retrieving the current icon class
let current_class = $(this).attr('class');
//assigning the correct class based on the icon clicked
let new_class = (current_class === play_class) ? pause_class : play_class;
//setting the new class
$(this).attr('class', new_class);
//handling the slider
let slider = document.getElementById("myActivityRange");
let output = document.getElementById("demo");
//when the button is playing
if (current_class === play_class) {
timeVar = setInterval(() => {
let value = slider.value;
let new_slider_value = Number(value) + 1;
slider.value = new_slider_value;
output.innerHTML = new_slider_value.toString();
let selectedImage = '#image_' + Number(value);
//displaying the relevant image
$('#image_0').html($(selectedImage).html());
}, 100);
}
//when the button is paused
else if (current_class === pause_class) {
clearInterval(timeVar);
}
});
//handling the slider
let slider = document.getElementById("myActivityRange");
let output = document.getElementById("demo");
output.innerHTML = slider.value;
slider.oninput = function () {
output.innerHTML = this.value;
let selectedImage = '#image_' + Number(this.value);
//hide
{#$('#image_0').attr('hidden', true);#}
$('#image_0').html($(selectedImage).html());
//setting the selected image
{#$(selectedImage).attr('hidden', false);#}
};
$(document).on('click', '.img-link', function (e) {
//removing previously displayed detections
$('.detections').remove();
//removing the no-content message
$('#no_detection_message_content').hide();
//showing the loader
$('#detection_loader').attr('hidden', false);
let img_src_arr = e.target.src.split('/');
let len = img_src_arr.length;
let src = img_src_arr[len - 1];
let frame_name_arr = src.split('.');
let frame_name = frame_name_arr[0];
//fetching the detection for the selected frame
fetch('http://127.0.0.1:8000/get-lecture-activity-frame-detection/?video_name=' + global_video_name + "&frame_name=" + frame_name)
.then((res) => res.json())
.then((out) => displayDetections(out.detections, frame_name))
.catch((error) => alert('this is an error: ' + error));
});
//the function to display detections
function displayDetections(detections, frame_name) {
let img_string = '';
let no_of_detections = detections.length;
//hiding the loader
$('#detection_loader').attr('hidden', true);
//showing the number-of-detections area
$('#detection_number_area').attr('hidden', false);
$('#no_of_detections').text(no_of_detections);
detections.map((detection) => {
img_string += "<img src='{% static '' %}FirstApp/activity/" + global_video_name + "/" + frame_name + "/" + detection + "' class='detections m-2' width='100' height='100' >"
});
$('#detection_frames').prepend(img_string);
}
//listening for click events in labels
$('.labels').click(function () {
let label = Number($(this).attr('data-number'));
//removing the previous student detection lists
$('.student_detection_lists').remove();
//showing the loader
$('#detection_student_loader').attr('hidden', false);
//hiding the no-content message
$('#no_detection_student_content').attr('hidden', true);
//fetching from the api
fetch('http://127.0.0.1:8000/get-lecture-activity-detection-for-label/?video_name=' + global_video_name + '&label=' + label)
.then((res) => res.json())
.then((out) => createDetectedStudentFrames(out))
.catch((error) => alert('this is the error: ' + error))
});
//creating the detected students frames
function createDetectedStudentFrames(detections) {
let htmlString = "";
//iterating through the student
detections.people.map((student) => {
let title = student.split('.')[0];
let images = "";
htmlString += "<div class='row p-3 student-detection-rows'>";
let student_count = 0;
//iterating through the frames
detections.response.map((frame) => {
let frame_detections = frame.detections;
if (frame_detections.includes(student)) {
if (student_count === 0) {
images += "<li class='list-group-item frame-0' id='image_0_" + title + "'>";
} else {
images += "<li class='list-group-item other-student-frames' id='image_" + student_count + "_" + title + "' hidden>";
}
images += "<img src='{% static '' %}FirstApp/Activity/" + global_video_name + "/" + frame.frame + "/" + student + "' width='200' height='200'>";
images += "</li>";
//increment the student count
student_count++;
}
});
htmlString += "<h6 class='font-italic'>" + title + "</h6>";
htmlString += "<ul class='list-group list-group-horizontal student_detection_lists' style='overflow-x: scroll'>";
htmlString += images;
htmlString += "</ul>";
htmlString += "<div class='slidecontainer'>";
htmlString += "<div class='row m-3'></div>";
htmlString += "<div class='row'>";
htmlString += "<span><i class='fas fa-play play-pause-icon-student-frames' id='icon_" + title + "'></i></span>";
htmlString += "</div>";
htmlString += "<input type='range' min='1' max='100' value='0' class='slider' id='slider_" + title + "'>";
htmlString += "<p>No of frames: <span id='demo_" + title + "'></span></p>";
htmlString += "</div>";
htmlString += "</div>";
});
//hiding the loader
$('#detection_student_loader').attr('hidden', true);
//append to the relevant html card content
$('#detection_students').append(htmlString);
}
let studentTimeVar = null;
//playing the frames for each student detection
$(document).on('click', '.play-pause-icon-student-frames', function (e) {
//defining the two possible classes
let play_class = "fas fa-play play-pause-icon-student-frames";
let pause_class = "fas fa-pause play-pause-icon-student-frames";
//retrieving the current icon class
let current_class = $(this).attr('class');
//assigning the correct class based on the icon clicked
let new_class = (current_class === play_class) ? pause_class : play_class;
//setting the new class
$(this).attr('class', new_class);
//extracting the title of the clicked icon
let title_part = $(this).attr('id');
let title = title_part.split("_")[1];
//handling the slider
let slider = document.getElementById("slider_" + title);
let output = document.getElementById("demo_" + title);
//when the button is playing
if (current_class === play_class) {
studentTimeVar = setInterval(() => {
let value = slider.value;
let new_slider_value = Number(value) + 1;
slider.value = new_slider_value;
output.innerHTML = new_slider_value.toString();
let selectedImage = '#image_' + Number(value) + '_' + title;
//displaying the relevant image
$('#image_0_' + title).html($(selectedImage).html());
}, 100);
}
//when the button is paused
else if (current_class === pause_class) {
clearInterval(studentTimeVar);
}
});
//processing the video frames for pose estimation
$('#process_btn').click(function () {
//hide the message
$('#no_individual_student_frames').attr('hidden', true);
//show the loader
$('#individual_student_frames_loader').attr('hidden', false);
//using the fetch api
fetch('http://127.0.0.1:8000/get-lecture-video-individual-student-frames/?video_name=' + global_video_name)
.then((res) => res.json())
.then((out) => displayStudentIndividualFrames(out))
.catch((error) => alert('this is the error: ' + error))
});
//this function will display each student individual frames
function displayStudentIndividualFrames(response) {
let htmlString = "";
//iterating through the student
response.people.map((student) => {
let title = student.split('.')[0];
let images = "";
htmlString += "<div class='row p-3 student-individual-rows'>";
let student_count = 0;
//iterating through the frames
response.response.map((frame) => {
let frame_detections = frame.detections;
let frame_detection_length = frame_detections.length;
if (frame_detections.includes(student)) {
if (student_count === 0) {
images += "<li class='list-group-item frame-0' id='image_0_individual_" + title + "'>";
} else {
images += "<li class='list-group-item other-student-frames' id='list_item_individual_" + student_count + "_" + title + "' hidden>";
}
images += "<img src='{% static '' %}FirstApp/Activity/" + global_video_name + "/" + frame.frame + "/" + student + "' id='image_individual_" + student_count + "_" + title + "' width='200' height='200'>";
images += "</li>";
if (student_count === (frame_detection_length - 1)) {
images += "<li class='list-group-item'>";
images += "<button type='button' class='btn btn-dark individual-student-frame' id='individual_student_" + title + "'>calculate</button>";
images += "</li>";
}
//increment the student count
student_count++;
}
});
htmlString += "<ul class='list-group'>";
htmlString += "<li class='list-group-item'>";
htmlString += "<div class='row m-3'>";
htmlString += "<h4 class='font-weight-bold'>Student ID: <span>" + title + "</span></h4>";
htmlString += "</div>";
htmlString += "</li>";
htmlString += "<li class='list-group-item'>";
htmlString += "<div class='row'>";
htmlString += "<ul class='list-group list-group-horizontal student_individual_lists' id='student_individual_" + title + "' style='overflow-x: scroll'>";
htmlString += images;
htmlString += "</ul>";
htmlString += "</div>";
htmlString += "</li>";
htmlString += "<li class='list-group-item'>";
htmlString += "<div class='slidecontainer'>";
htmlString += "<div class='row m-3'></div>";
htmlString += "<div class='row'>";
htmlString += "<span><i class='fas fa-play play-pause-icon-student-evaluations' id='icon_" + title + "'></i></span>";
htmlString += "</div>";
htmlString += "<input type='range' min='1' max='100' value='0' class='slider' id='slider_evaluation" + title + "'>";
htmlString += "<p>No of frames: <span id='demo_evaluation" + title + "'></span></p>";
htmlString += "</div>";
htmlString += "</div>";
htmlString += "</li>";
htmlString += "</ul>";
});
//hiding the loader
$('#individual_student_frames_loader').attr('hidden', true);
//append to the relevant html card content
$('#individual_student_frames').append(htmlString);
}
//to listen to click event from individual students
$(document).on('click', '.individual-student-frame', function (e) {
let student_id_parts = $(this).attr('id');
let student_id = student_id_parts.split('_')[2];
let frame_list_id = '#student_individual_' + student_id;
//traverse through the children of the selected list
let image_list = $(frame_list_id).children().map((i, child) => {
//traversing the <li>
return $(child).find('img').attr('id');
});
student_id += ".png";
e.target.innerHTML = "<span class='font-italic'>Processing</span>";
//running the posenet model
loadModel(image_list, student_id, e);
});
//POSENET model
//this function will load the posenet model
async function loadModel(imageList, student, e) {
const imageScaleFactor = 1.0;
const flipHorizontal = false;
const outputStride = 16;
// maximum number of poses to detect
const maxPoseDetections = 20;
// minimum confidence of the root part of a pose
const scoreThreshold = 0.2;
// minimum distance in pixels between the root parts of poses
const nmsRadius = 20;
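//these constants document the usual PoseNet tuning values; the estimateSinglePose() call further
//down supplies its own literal options, so changing the values here alone has no effect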
//load the model
const model = await posenet.load();
//this array will keep all the detected poses
let keypoints_arr = [];
//loop through the selected image IDs
for (let i = 0; i < imageList.length; i++) {
let image_id = imageList[i];
let imageElement = document.getElementById(image_id);
//estimate the poses
const poses = await model.estimateSinglePose(imageElement, {
imageScaleFactor: 1.0,
flipHorizontal: false,
outputStride: 8,
maxDetections: 10,
scoreThreshold: 0.5,
nmsRadius: 20
});
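//'poses' is the single-pose result object, roughly of the form
//  {score, keypoints: [{part, score, position: {x, y}}, ...]},
//with one keypoint entry per PoseNet landmark (nose, eyes, ears, shoulders, ...)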
//push to the created keypoints array
keypoints_arr.push(poses)
}
//using the fetch API to POST the data
await fetch('http://127.0.0.1:8000/process-lecture-video-individual-pose-estimation', {
method: 'POST',
headers: {
"Content-type": "application/json"
},
body: JSON.stringify({
"video_name": global_video_name,
"student": student,
"poses": keypoints_arr
})
})
.then((res) => res.json())
.then((out) => {
e.target.innerHTML = "<span>Completed</span>";
})
.catch((err) => alert('this is the error: ' + err));
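//for illustration, the JSON body POSTed above has the shape
//  {"video_name": "<lecture video>", "student": "<student id>.png", "poses": [<pose per frame>, ...]}
//where each entry in 'poses' is the object returned by estimateSinglePose() for one frame of that student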
/*
const imageElement = document.getElementById(id);
let isImageDisplayed = true;
if (isImageDisplayed) {
$('#message').hide();
$('#croppedMessage').hide();
}
let splitted = imageElement.src.split('/');
//assigning the image name
imageName = splitted[7].split('.')[0];
//show the loading screen
$('#loading').attr('hidden', false);
$('#cropped_loading').attr('hidden', false);
const model = await posenet.load();
const poses = await model.estimateMultiplePoses(imageElement, {
imageScaleFactor: 1.0,
flipHorizontal: false,
outputStride: 8,
maxDetections: 10,
scoreThreshold: 0.5,
nmsRadius: 20
});
let keypoints_arr = [];
//hide the loading screen
$('#loading').hide();
$('#cropped_loading').hide();
//removing the previous canvas
$('#showFace').remove();
//removing the previous canvas row
$('#canvas-frame').remove();
//creating the same canvas
let newCanvasContent = "<canvas id='showFace' width='500' height='300'></canvas>";
$(newCanvasContent).prependTo('#canvas-body');
let canvas = document.getElementById('showFace');
let context = canvas.getContext('2d');
context.drawImage(imageElement, 0, 0, 500, 300);
let htmlContent = "<div id='canvas-frame'><div class='row'>";
{#drawing rectangle for each person face#}
for (let count = 0; count < poses.length; count++) {
let person = poses[count];
let new_canvas_id = 'canvas-' + (count + 1);
let new_canvas = document.getElementById(new_canvas_id);
{#let new_context = new_canvas.getContext('2d');#}
let x1 = Math.round(Number(person.keypoints[5].position.x));
let y1 = Math.round(Number(person.keypoints[5].position.y));
let x2 = Math.round(Number(person.keypoints[6].position.x));
let y2 = Math.round(Number(person.keypoints[6].position.y));
let x_diff = x1 - x2;
let y_diff = y1 - y2;
let x_pow = Math.pow(x_diff, 2);
let y_pow = Math.pow(y_diff, 2);
let summation = x_pow + y_pow;
let distance = Math.sqrt(summation);
let fraction = Math.round(distance * 0.6);
let middle_x = x2 + fraction;
let middle_y = y2 - 20;
let head_x = middle_x;
let head_y = middle_y - fraction;
let left_upper_x = middle_x - fraction;
context.rect(left_upper_x, head_y, distance, fraction);
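//geometry of the heuristic above (keypoints 5 and 6 are PoseNet's left and right shoulders):
//  distance = sqrt((x1 - x2)^2 + (y1 - y2)^2)   -- shoulder-to-shoulder width in canvas pixels
//  the head box is 'distance' wide and 0.6 * distance tall, drawn just above the shoulder line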
{#context.fillStyle = 'red';#}
context.stroke();
let canvasHtml = "<canvas id='" + new_canvas_id + "' height='200' width='200'></canvas>";
htmlContent += "<div class='flex-column' style='width: 30%'>" +
canvasHtml
+ "</div>";
if (((count + 1)) % 3 === 0) {
htmlContent += "</div><div class='row'>";
}
//appending the keypoints to the array
keypoints_arr[keypoints_arr.length] = {
'left_x': left_upper_x,
'left_y': head_y,
'width': distance,
'height': fraction
};
//appending the keypoints to the global array
points[points.length] = {
'left_x': left_upper_x,
'left_y': head_y,
'width': distance,
'height': fraction
};
}
htmlContent += "</div>";
htmlContent += "</div>";
//append the html content
$('#test-canvas').prepend(htmlContent);
{#getting the natural dimensions of the image#}
let originalWidth = imageElement.naturalWidth;
let originalHeight = imageElement.naturalHeight;
//drawing extracted faces on the canvas
for (let i = 0; i < poses.length; i++) {
let canvas_id = 'canvas-' + (i + 1);
let canvasDoc = document.getElementById(canvas_id);
let canvasCont = canvasDoc.getContext('2d');
let left_x = keypoints_arr[i].left_x;
let left_y = keypoints_arr[i].left_y;
let width = keypoints_arr[i].width;
let height = keypoints_arr[i].height;
let x_scale = originalWidth / imageElement.offsetWidth;
let y_scale = originalHeight / imageElement.offsetHeight;
let x_corner = left_x * x_scale;
let y_corner = left_y * y_scale;
let modified_width = width * x_scale;
let modified_height = height * y_scale;
{#canvasCont.drawImage(imageElement, left_x * 6, left_y * 6, width * 10, height * 10, 0, 0, 100, 150);#}
{#canvasCont.drawImage(imageElement, 0, 800, 800, 500, 0, 0, 100, 150);#}
{#canvasCont.drawImage(imageElement, 230.2, 700, 400, 400, 0, 0, 150, 150);#}
{#canvasCont.drawImage(imageElement, left_x, left_y, width * 10, height * 10, 0, 0, 100, 150);#}
{#canvasCont.drawImage(imageElement, left_x, left_y, width, height, 0, 0, 100, 150);#}
{#canvasCont.drawImage(imageElement, left_x * 10, left_y * 10, width * 10, height * 10, 0, 0, 100, 150);#}
canvasCont.drawImage(imageElement, x_corner, y_corner, modified_width, modified_height, 0, 0, 199, 199);
}
//displaying the download button
{#$('#downloadBtn').attr('hidden', false);#}
//then save the images
//saveImages();
*/
}
});
</script>
{% endblock %}
<div id="wrapper">
<!-- Sidebar -->
<!-- Content Wrapper -->
<div id="content-wrapper" class="d-flex flex-column">
<!-- Main Content -->
<div id="content">
<!-- Begin Page Content -->
{% block 'container-fluid' %}
<div class="container-fluid">
{% load static %}
<!-- Page Heading -->
<div class="d-sm-flex align-items-center justify-content-between mb-4">
<h1 class="h3 mb-0 text-gray-800">Student Pose Estimation</h1>
</div>
<!--first row -->
<div class="row p-2">
<!--first column -->
<div class="col-lg-6" style="overflow-x: scroll">
<div class="card shadow mb-4">
<!--card header -->
<div class="card-header py-3">
<h5 class="m-0 font-weight-bold text-primary">Lecturer Subjects</h5>
</div>
<!--card body -->
<div class="card-body">
{% if lecturer_subjects.count == 0 %}
<div class="text-center">
<span class="font-italic">No subjects</span>
</div>
{% else %}
<div class="table-responsive">
<table class="table table-bordered" id="datatable">
<thead>
<tr>
<th></th>
<th>Subject Name</th>
<th>Code</th>
<th>Year</th>
</tr>
</thead>
<tbody>
{% for subject in subjects %}
<tr class="subjects not_clicked" id="{{ subject.0.subject_code }}">
<td>
<div class="radio">
<label><input type="radio"
id="{{ subject.0.subject_code }}"
name="subject_radio"
data-name="{{ subject.0.name }}"
data-lecturer="{{ lecturer }}"></label>
</div>
</td>
<td>{{ subject.0.name }}</td>
<td>{{ subject.0.subject_code }}</td>
<td>{{ subject.0.year }}</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% endif %}
</div>
</div>
</div>
<!--end of first column -->
<!--second column (timetable column) -->
<div class="col-lg-6" style="overflow-x: scroll">
<div class="card shadow mb-4">
<!--card header -->
<div class="card-header py-3">
<h5 class="m-0 font-weight-bold text-primary">View timetable</h5>
</div>
<!--card body -->
<div class="card-body">
<!--loading gif -->
<div class="text-center" id="no_subject_selected">
<span class="font-italic">No lecture is selected</span>
</div>
<!--no lecture selected message -->
<div class="text-center" id="loader" hidden>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}" alt="Loader">
</div>
<!--no lecture selected message -->
<div class="text-center" id="no_timetable_content" hidden>
<span class="font-italic">Not included in the timetable</span>
</div>
<!--displaying the timetable -->
<table class="table table-striped" id="timetable" hidden>
{# <caption id="timetable_caption"></caption>#}
<thead>
<tr>
<th>Date</th>
<th>Hall No.</th>
<th>Start Time</th>
<th>End Time</th>
</tr>
</thead>
<tbody id="timetable_body">
</tbody>
</table>
</div>
</div>
</div>
<!--end of second column -->
</div>
<!--end of 1st row -->
<!--2nd row -->
<div class="row p-2">
<!--1st column -->
<div class="col-lg-6">
<!--card content -->
<div class="card shadow mb-4">
<!--card header -->
<div class="card-header py-3">
<h5 class="m-0 font-weight-bold text-primary">Pose Estimation</h5>
</div>
<!--card body -->
<div class="card-body">
<!--nav tabs-->
<ul class="nav nav-tabs nav-fill" id="nav_bar" role="tablist">
<li class="nav-item">
<a class="nav-link active" id="frame-tab" data-toggle="tab" href="#frame"
role="tab" aria-controls="frame" aria-selected="true">Frame</a>
</li>
<li class="nav-item">
<a class="nav-link" id="graph-tab" data-toggle="tab" href="#graph"
role="tab" aria-controls="graph" aria-selected="false">Graphs</a>
</li>
</ul>
<!--tab content -->
<div class="tab-content" id="tabContentDetails">
<!--frame tab -->
<div class="tab-pane fade show active" id="frame" role="tabpanel"
aria-labelledby="home-tab">
<!--this area will display the frame -->
<div class="text-center" id="frame_area" style="height: 600px">
<!--temporary text -->
<span class="font-italic" id="temporary_text">Frame will be displayed here</span>
<!--loading buffer area-->
<div class="text-center" id="frame_loader" hidden>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}"
alt="Loader">
</div>
<!--frames -->
<div class="text-center p-4" id="video_frames">
<!-- slide container -->
<div id="slidecontainer" hidden>
<div class="row m-3"></div>
<!-- play/pause icon -->
<div class="row">
<span><i class="fas fa-play"
id="play_pause_icon_activity"></i></span>
</div>
<input type="range" min="1" max="100" value="0" class="slider"
id="myActivityRange">
<p>No of frames: <span id="demo"></span></p>
</div>
</div>
<!--end of video frames -->
<button type="button" class="btn btn-danger float-right"
id="process_btn" hidden>Process
</button>
</div>
</div>
<!--end of tab content (frames) -->
<!--graph tab -->
<div class="tab-pane fade" id="graph" role="tabpanel"
aria-labelledby="profile-tab">
<!--card content -->
<div class="card shadow mb-4 p-3">
<!-- Card Header - Dropdown -->
<div class="card-header py-3 d-flex flex-row align-items-center justify-content-between">
<h6 class="m-0 font-weight-bold text-primary">Student
Activities</h6>
<div class="dropdown no-arrow">
<a class="dropdown-toggle" href="#" role="button"
id="dropdownMenuLink" data-toggle="dropdown"
aria-haspopup="true" aria-expanded="false">
<i class="fas fa-ellipsis-v fa-sm fa-fw text-gray-400"></i>
</a>
<div class="dropdown-menu dropdown-menu-right shadow animated--fade-in"
aria-labelledby="dropdownMenuLink">
<div class="dropdown-header">Dropdown Header:</div>
<a class="dropdown-item" href="#">Action</a>
<a class="dropdown-item" href="#">Another action</a>
<div class="dropdown-divider"></div>
<a class="dropdown-item" href="#">Something else here</a>
</div>
</div>
</div>
<!-- Card Body -->
<div class="card-body">
<div class="chart-pie pt-4 pb-2">
<canvas id="myPieChart"></canvas>
</div>
<div class="mt-4 text-center small">
<span class="mr-2">
<i class="fas fa-circle text-primary"></i> Direct
</span>
<span class="mr-2">
<i class="fas fa-circle text-success"></i> Social
</span>
<span class="mr-2">
<i class="fas fa-circle text-info"></i> Referral
</span>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
<!--2nd column -->
<div class="col-lg-6">
<!--card content -->
<div class="card shadow mb-4">
<!--card header -->
<div class="card-header py-3">
<h5 class="m-0 font-weight-bold text-primary">Frame Detections</h5>
</div>
<!--card body -->
<div class="text-center p-4" id="detection_frames">
<!--no content message-->
<div class="text-center p-2" id="no_detection_message_content">
<span class="font-italic">No frame is selected</span>
</div>
<div class="text-left m-3" id="detection_number_area" hidden>
<p>No of detections: <span id="no_of_detections"></span></p>
</div>
<!--the detection loader -->
<div class="text-center p-2" id="detection_loader" hidden>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}"
alt="Loader">
</div>
</div>
</div>
<!--detection person card -->
<div class="card shadow mb-4">
<!--card header -->
<div class="card-header py-3">
<h5 class="m-0 font-weight-bold text-primary">Detected Students</h5>
</div>
<!--card body -->
<div class="text-center p-4" id="detection_students">
<!--no content message-->
<div class="text-center p-2" id="no_detection_student_content">
<span class="font-italic">No activity type is selected</span>
</div>
<!--the detection student loader -->
<div class="text-center p-2" id="detection_student_loader" hidden>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}"
alt="Loader">
</div>
</div>
</div>
</div>
</div>
<!-- end of 2nd row -->
<!--3rd row -->
<div class="row p-2">
<!--this column will display individual student frames -->
<!--1st column -->
<div class="col-lg-6">
<!--card -->
<div class="card shadow mb-4">
<!--card header -->
<div class="card-header">
<h5 class="m-0 font-weight-bold text-primary">Individual Student Frames</h5>
</div>
<!--card body -->
<div class="card-body" id="individual_student_frames">
<!--no content message-->
<div class="text-center p-2" id="no_individual_student_frames">
<span class="font-italic">Press 'Process' button to display Individual Student Frames</span>
</div>
<!--the detection student loader -->
<div class="text-center p-2" id="individual_student_frames_loader" hidden>
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}"
alt="Loader">
</div>
</div>
</div>
</div>
</div>
<!--end of 3rd row -->
</div>
{% endblock %}
<!-- End of container-fluid -->
</div>
<!-- End of Main Content -->
</div>
<!-- End of Content Wrapper -->
</div>
{% block 'modal' %}
<div class="modal fade" id="video_modal" role="dialog" aria-labelledby="gif-body">
<div class="modal-dialog">
<div class="modal-content">
<div class="modal-header">
<h2 class="modal-title">Video details</h2>
<button class="close" type="button" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body">
<table class="table table-borderless">
<tr>
<td class="font-weight-bold">Video Name</td>
<td id="video_name"></td>
</tr>
<tr>
<td class="font-weight-bold">Duration</td>
<td id="video_duration"></td>
</tr>
<tr>
<td class="font-weight-bold">Date of Creation</td>
<td id="video_date"></td>
</tr>
</table>
</div>
<!-- modal footer -->
<div class="modal-footer">
<button type="button" data-dismiss="modal" class="btn btn-danger text-white">Close</button>
</div>
</div>
</div>
</div>
<!-- Logout Modal-->
<div class="modal fade" id="logoutModal" tabindex="-1" role="dialog" aria-labelledby="exampleModalLabel"
aria-hidden="true">
<div class="modal-dialog" role="document">
<div class="modal-content">
<div class="modal-header">
<h5 class="modal-title" id="exampleModalLabel">Ready to Leave?</h5>
<button class="close" type="button" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body">Select "Logout" below if you are ready to end your current session.</div>
<div class="modal-footer">
<button class="btn btn-secondary" type="button" data-dismiss="modal">Cancel</button>
<a class="btn btn-primary" href="/logout">Logout</a>
</div>
</div>
</div>
</div>
{% endblock %}
</body>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<meta name="description" content="">
<meta name="author" content="">
<title>SB Admin 2 - Register</title>
{% load static %}
<link href="{% static 'FirstApp/vendor/fontawesome-free/css/all.min.css' %}" rel="stylesheet" type="text/css">
<link href="https://fonts.googleapis.com/css?family=Nunito:200,200i,300,300i,400,400i,600,600i,700,700i,800,800i,900,900i" rel="stylesheet">
<link href="{% static 'FirstApp/css/sb-admin-2.min.css' %}" rel="stylesheet">
</head>
<body class="bg-gradient-primary">
<script type="text/javascript">
function insert() {
fetch('http://127.0.0.1:8000/register/', {
method: 'POST',
headers: {
"Content-type": "application/json"
},
body: JSON.stringify({
"firstName": document.getElementById('exampleFirstName').value,
"lastName": document.getElementById('exampleLastName').value,
"email": document.getElementById('exampleInputEmail').value,
"password": document.getElementById('exampleInputPassword').value
})
})
.then(response => response.json())
.then(json => {
if (json) {
alert('successfully inserted');
}
}
)
}
</script>
<div class="container">
<div class="card o-hidden border-0 shadow-lg my-5">
<div class="card-body p-0">
<!-- Nested Row within Card Body -->
<div class="row">
<div class="col-lg-5 d-none d-lg-block bg-register-image"></div>
<div class="col-lg-7">
<div class="p-5">
<div class="text-center">
<h1 class="h4 text-gray-900 mb-4">Create an Account!</h1>
</div>
<form action="#" class="user">
<div class="form-group row">
<div class="col-sm-6 mb-3 mb-sm-0">
<input type="text" class="form-control form-control-user" id="exampleFirstName" placeholder="First Name">
</div>
<div class="col-sm-6">
<input type="text" class="form-control form-control-user" id="exampleLastName" placeholder="Last Name">
</div>
</div>
<div class="form-group">
<input type="email" class="form-control form-control-user" id="exampleInputEmail" placeholder="Email Address">
</div>
<div class="form-group row">
<div class="col-sm-6 mb-3 mb-sm-0">
<input type="password" class="form-control form-control-user" id="exampleInputPassword" placeholder="Password">
</div>
<div class="col-sm-6">
<input type="password" class="form-control form-control-user" id="exampleRepeatPassword" placeholder="Repeat Password">
</div>
</div>
<button type="submit" class="btn btn-primary btn-user btn-block" onclick="insert()">
Register Account
</button>
<hr>
<a href="index.html" class="btn btn-google btn-user btn-block">
<i class="fab fa-google fa-fw"></i> Register with Google
</a>
<a href="index.html" class="btn btn-facebook btn-user btn-block">
<i class="fab fa-facebook-f fa-fw"></i> Register with Facebook
</a>
</form>
<hr>
<div class="text-center">
<a class="small" href="forgot-password.html">Forgot Password?</a>
</div>
<div class="text-center">
<a class="small" href="login.html">Already have an account? Login!</a>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
<!-- Bootstrap core JavaScript-->
<script src="{% static 'FirstApp/vendor/jquery/jquery.js' %}"></script>
<script src="{% static 'FirstApp/vendor/bootstrap/js/bootstrap.bundle.min.js' %}"></script>
<!-- Core plugin JavaScript-->
<script src="{% static 'FirstApp/vendor/jquery-easing/jquery.easing.min.js' %}"></script>
<!-- Custom scripts for all pages-->
<script src="{% static 'FirstApp/js/sb-admin-2.min.js' %}"></script>
</body>
</html>
{% extends 'FirstApp/template.html' %}
<!DOCTYPE html>
<html lang="en">
<body id="page-top">
<!-- Page Wrapper -->
<div id="wrapper">
<!-- Sidebar -->
<!-- Content Wrapper -->
<div id="content-wrapper" class="d-flex flex-column">
<!-- Main Content -->
<div id="content">
<!-- Begin Page Content -->
{% block 'container-fluid' %}
<div class="container-fluid">
{% load static %}
<!-- Page Heading -->
<div class="d-sm-flex align-items-center justify-content-between mb-4">
<h1 class="h3 mb-0 text-gray-800">{{video_name}}</h1>
<a href="#" class="d-none d-sm-inline-block btn btn-sm btn-primary shadow-sm"><i class="fas fa-download fa-sm text-white-50"></i> Generate Report</a>
</div>
<!-- Content Row -->
<div class="row">
<!-- Earnings (Monthly) Card Example -->
<div class="col-xl-12 col-md-6 mb-4">
<div class="card border-left-primary shadow h-100 py-2">
<div class="card-body">
<div class="row">
<div class="col-3">
<video width="500" height="300" controls>
<source src="{% static '' %}FirstApp/videos/{{video_name}}" type="video/mp4">
Your browser does not support the video tag.
</video>
</div>
<div class="col-3"></div>
<!--progress bars-->
<!-- showing the percentage for each emotion-->
<div class="col-6">
<h2 class="big font-weight-bold">Emotion Detection</h2>
<h4 class="small font-weight-bold">Anger
<span class="float-right">{{meta.angry_perct}}%</span></h4>
<div class="progress mb-4">
<div class="progress-bar bg-danger" role="progressbar" style="width: 20%" aria-valuenow="20" aria-valuemin="0" aria-valuemax="100"></div>
</div>
<h4 class="small font-weight-bold">Happy
<span class="float-right">{{meta.happy_perct}}%</span></h4>
<div class="progress mb-4">
<div class="progress-bar bg-warning" role="progressbar" style="width: 40%" aria-valuenow="40" aria-valuemin="0" aria-valuemax="100"></div>
</div>
<h4 class="small font-weight-bold">Sadness
<span class="float-right">{{meta.sad_perct}}%</span></h4>
<div class="progress mb-4">
<div class="progress-bar" role="progressbar" style="width: 60%" aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"></div>
</div>
<h4 class="small font-weight-bold">Surprise
<span class="float-right">{{meta.surprise_perct}}%</span></h4>
<div class="progress mb-4">
<div class="progress-bar bg-info" role="progressbar" style="width: 80%" aria-valuenow="80" aria-valuemin="0" aria-valuemax="100"></div>
</div>
<h4 class="small font-weight-bold">Neutral
<span class="float-right">{{meta.neutral_perct}}%</span></h4>
<div class="progress">
<div class="progress-bar bg-success" role="progressbar" style="width: 100%" aria-valuenow="100" aria-valuemin="0" aria-valuemax="100"></div>
</div>
<div class="form-control">
<button type="button" id="test" data-target="#gif-body" data-toggle="modal">Test</button>
</div>
</div>
</div>
<div class="row">
<h2 class="big font-weight-bold">Eye Gaze Estimation</h2>
</div>
<div class="row">
<div class="col-6">
<div class="progress">
<div class="progress-bar bg-success" role="progressbar" style="width: 100%" aria-valuenow="100" aria-valuemin="0" aria-valuemax="100"></div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
{% endblock %}
<!-- End of container-fluid -->
</div>
<!-- End of Main Content -->
</div>
<!-- End of Content Wrapper -->
</div>
{% block 'modal' %}
<div class="modal fade" id="gif-body" role="dialog" aria-labelledby="gif-body">
<div class="modal-dialog modal-lg" style="max-width: 1600px; max-height: 800px">
<div class="modal-content">
<div class="modal-header">
<h2 class="modal-title">Processing....</h2>
<button class="close" type="button" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body text-center">
<img src="{% static 'FirstApp/images/ajax-loader.gif' %}" width="200" height="500" alt="This is a GIF">
<h5>This might take a few seconds...</h5>
</div>
</div>
</div>
</div>
{% endblock %}
</body>
</html>
\ No newline at end of file
"""
- this file handles the urlpatterns for the 'FirstApp' application
- the 'urlpatterns' variable contains the list of url patterns that exist for this application
- inside this list, each 'path' entry accepts a url mapping that is redirected
to an HTML page (view)
- each 'url' entry accepts a url mapping that is redirected to a RESTful endpoint
"""
from django.urls import path, re_path, include
from django.conf.urls import url
from rest_framework import routers
......@@ -5,29 +19,18 @@ from . import views
from . import api
router = routers.DefaultRouter()
router.register(r'^register', views.RegisterViewSet)
# router.register(r'^createImage', views.ImageViewSet)
urlpatterns = [
path('', views.hello),
path('login', views.loginForm),
path('logout', views.logoutView),
path('register-user', views.register),
path('404', views.view404),
path('401', views.view401),
path('500', views.view500),
path('blank', views.blank),
path('gaze', views.gaze),
path('gaze-process', views.processGaze),
path('pose', views.pose),
path('charts', views.charts),
path('forgot-password', views.forget_password),
path('webcam', views.webcam),
path('template', views.template),
path('base', views.base),
path('child', views.child),
# extractor path
path('extract', views.extractor),
# emotion path
path('emotion', views.emotion_view),
# video results
......@@ -49,9 +52,6 @@ urlpatterns = [
# this is used for activity
path('activity', views.activity),
# tables view
path('tables', views.tables),
# test view (delete later)
path('test', views.test),
......@@ -59,21 +59,6 @@ urlpatterns = [
# user direct view
path('user-direct', views.userDirect),
url(r'^register', views.RegisterViewSet),
# re_path('video/?video_name<str:video_name>', views.video),
url(r'^teachers/', views.teachersList.as_view()),
url(r'^video/', views.video, name='video'),
url(r'^createImage', api.ImageViewSet.as_view()),
# for gaze estimation
url(r'^estimateGaze', api.GazeEstimationViewSet.as_view()),
# for video extraction (POST)
url(r'^videoExtract', api.VideoExtractionViewSet.as_view()),
# for video extraction (GET)
url(r'^videoExtract/(?P<folder_name>\D+)', api.VideoExtractionViewSet.as_view()),
# testing the lecture emotions in the API
url(r'^lecture_emotions', api.LectureEmotionViewSet.as_view()),
# testing the lecture in the API
url(r'^lectures', api.LectureViewSet.as_view()),
......@@ -101,9 +86,10 @@ urlpatterns = [
# lecture video API (to retrieve a lecture)
url(r'^get-lecture-video/$', api.GetLectureVideoViewSet.as_view()),
# lecture video API (to retrieve a lecture)
# lecture video API (to retrieve a lecture video in the lecturer home page)
url(r'^get-lecture-video-for-home/$', api.GetLectureVideoViewSetForHome.as_view()),
##### ACTIVITIES API #####
# lecture activity API (to retrieve lecture activities)
......@@ -115,26 +101,14 @@ urlpatterns = [
# lecture activity API (to retrieve a lecture activity)
url(r'^process-lecture-activity/$', api.LectureActivityProcess.as_view()),
# lecture activity detection API (to retrieve detections for a given lecture activity frame)
url(r'^get-lecture-activity-frame-detection/$', api.GetLectureActivityDetections.as_view()),
# lecture activity detection for label API (to retrieve detections for a certain label)
url(r'^get-lecture-activity-detection-for-label/$', api.GetLectureActvityDetectionsForLabel.as_view()),
# lecture activity detection for label API (to retrieve detections for a certain label)
url(r'^get-lecture-activity-student-evaluation/$', api.GetLectureActivityStudentEvaluation.as_view()),
# lecture activity detection for frames API (to retrieve detections for each frame in lecture video)
url(r'^get-lecture-activity-for-frame/$', api.GetLectureActivityRecognitionsForFrames.as_view()),
# lecture activity evaluation for individual students
url(r'^get-lecture-activity-individual-student-evaluation/$',
api.GetLectureActivityIndividualStudentEvaluation.as_view()),
# lecture activity report generation
url(r'^lecture-activity-report-generation/$',
api.GenerateActivityReport.as_view()),
###### EMOTION Section #####
# getting lecture emotion record availability
url(r'^get-lecture-emotion-availability/$', api.GetLectureEmotionAvailability.as_view()),
......@@ -145,30 +119,10 @@ urlpatterns = [
# process a lecture emotion record
url(r'^process-lecture-emotion/$', api.LectureEmotionProcess.as_view()),
# lecture emotion evaluation for students
url(r'^get-lecture-emotion-student-evaluation/$', api.GetLectureEmotionStudentEvaluations.as_view()),
# lecture emotion evaluation for students
url(r'^get-lecture-emotion-individual-student-evaluation/$',
api.GetLectureEmotionIndividualStudentEvaluation.as_view()),
# lecture emotion detection for frames API (to retrieve detections for each frame in lecture video)
url(r'^get-lecture-emotion-for-frame/$', api.GetLectureEmotionRecognitionsForFrames.as_view()),
###### POSE Section #####
# lecture video API (for Pose estimation)
url(r'^get-lecture-video-for-pose/$', api.GetLectureVideoForPose.as_view()),
# lecture video extracted frames API (for Pose estimation)
url(r'^get-lecture-video-extracted-frames/$', api.GetLectureVideoExtractedFrames.as_view()),
# lecture video individual student extracted frames API (for Pose estimation)
url(r'^get-lecture-video-individual-student-frames/$', api.GetLectureVideoIndividualStudentFrames.as_view()),
# lecture video individual student process pose estimation API (for Pose estimation)
url(r'^process-lecture-video-individual-pose-estimation', api.ProcessIndividualStudentPoseEstimation.as_view()),
##### GAZE Section #####
# lecture video Gaze estimation
......@@ -203,6 +157,21 @@ urlpatterns = [
# retrieves lecture gaze summary
url(r'^get-lecture-gaze-summary/$', api.GetLectureGazeSummary.as_view()),
# retrieves lecture activity correlations
url(r'^get-activity-correlations/$', api.GetLectureActivityCorrelations.as_view()),
# retrieves lecture emotion correlations
url(r'^get-emotion-correlations/$', api.GetLectureEmotionCorrelations.as_view()),
# retrieves lecture gaze correlations
url(r'^get-gaze-correlations/$', api.GetLectureGazeCorrelations.as_view()),
##### OTHERS #####
# retrieves lecture recorded video name
url(r'^get-lecture-recorded-video-name/$', api.GetLecturerRecordedVideo.as_view()),
# routers
# path('', include(router.urls)),
......
"""
this file contains the function-based views, where each function renders its
corresponding view (in other words, the HTML file)
each function accepts one parameter
params:
request - the HTTP request sent from the frontend
returns:
render
------
the render() call accepts:
request: the HTTP request received
template_name: path of the HTML template, relative to the location of this file
context: the context that needs to be passed into the template
"""
from django.shortcuts import render, redirect
from django.contrib.auth import (
authenticate,
......@@ -5,108 +29,16 @@ from django.contrib.auth import (
logout,
)
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import viewsets
from . models import Teachers, Video, VideoMeta, RegisterUser
from . MongoModels import *
from . serializers import *
from . emotion_detector import detect_emotion
from . ImageOperations import saveImage
from . logic import head_pose_estimation
from . logic import video_extraction
from . forms import *
import cv2
import os
from datetime import datetime
# hashing
from django.contrib.auth.hashers import make_password
# Create your views here.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
class teachersList(APIView):
def get(self, request):
teachers = Teachers.objects.all()
serializer = TeachersSerializer(teachers, many=True)
return Response(serializer.data)
def post(self):
pass
class RegisterViewSet(viewsets.ModelViewSet):
queryset = RegisterUser.objects.all().order_by('firstName')
serializer_class = RegisterUserSerializer
# to create images
class ImageViewSet(APIView):
def post(self, request):
saveImage(request.data)
return Response({"response": "successful"})
# to perform pose estimation on images
class GazeEstimationViewSet(APIView):
def post(self, request):
response = head_pose_estimation.estimatePose(request.data)
return Response({"response": response})
# to perform video extraction
class VideoExtractionViewSet(APIView):
def get(self, request):
response = video_extraction.getExtractedFrames(request.query_params)
return Response({"response": response})
def post(self, request):
response = video_extraction.VideoExtractor(request.data)
return Response({"response": response})
# lecture emotions view set
class LectureEmotionViewSet(APIView):
def get(self, request):
emotions = LectureEmotionReport.objects.all().order_by('lecture_id')
serializer = LectureEmotionSerializer(emotions, many=True)
return Response({"response": serializer.data})
def post(self, request):
LectureEmotionReport(
lecture_id=request.data["lecture_id"],
happy_perct=request.data["happy_perct"],
sad_perct=request.data["sad_perct"],
angry_perct=request.data["angry_perct"],
surprise_perct=request.data["surprise_perct"],
disgust_perct=request.data["disgust_perct"],
neutral_perct=request.data["neutral_perct"]
).save()
return Response({"response": request.data})
class LectureViewSet(APIView):
def get(self, request):
lectures = Lecture.objects.all().order_by('date')
serializer = LectureSerializer(lectures, many=True)
return Response(serializer.data)
def post(self, request):
Lecture(
lecture_id=request.data['lecture_id']
).save()
return Response({"response": request})
####### VIEWS ######
@login_required(login_url='/user-direct')
......@@ -181,21 +113,7 @@ def hello(request):
videoPaths = [os.path.join(folder, file) for file in os.listdir(folder)]
videos = []
durations = []
#
# for videoPath in videoPaths:
# video = Video()
# cap = cv2.VideoCapture(videoPath)
# fps = cap.get(cv2.CAP_PROP_FPS) # OpenCV2 version 2 used "CV_CAP_PROP_FPS"
# frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# duration = int(frame_count / fps)
# durations.append(duration)
# videoName = os.path.basename(videoPath)
# # videoName = videos.append(os.path.basename(videoPath))
# durationObj = datetime.timedelta(seconds=duration)
# video.path = videoPath
# video.name = videoName
# video.duration = str(durationObj)
# videos.append(video)
context = {'object': obj, 'Videos': videos, 'durations': durations, 'template_name': 'FirstApp/template.html', 'lecturer_details': lecturer_details, "lecturer": lecturer}
return render(request, 'FirstApp/Home.html', context)
......@@ -214,10 +132,6 @@ def view404(request):
def view401(request):
return render(request, 'FirstApp/401.html')
# querying the database
def blank(request):
emotions = LectureEmotionReport.objects.all().order_by('lecture_id')
return render(request, 'FirstApp/blank.html', {'details': emotions})
@login_required(login_url='/user-direct')
def gaze(request):
......@@ -250,134 +164,15 @@ def gaze(request):
{"lecturer_subjects": lecturer_subjects, "subjects": subject_list, "lecturer": lecturer})
# to redirect to the gaze interface
def processGaze(request):
print('My name is Ishan')
images = request.session.get('images', default='')
imageList = images.split(',')
for i in imageList:
print(i)
return redirect('/')
# the corresponding view for pose estimation
@login_required(login_url='/user-direct')
def pose(request):
try:
# retrieving data from the db
lecturer = request.session['lecturer']
lecturer_subjects = LecturerSubject.objects.filter(lecturer_id_id=lecturer)
lec_sub_serializer = LecturerSubjectSerializer(lecturer_subjects, many=True)
subject_list = []
subjects = lec_sub_serializer.data[0]['subjects']
for sub in subjects:
subject = Subject.objects.filter(id=sub)
subject_serialized = SubjectSerializer(subject, many=True)
subject_list.append(subject_serialized.data)
except Exception as exc:
return redirect('/500')
return render(request, "FirstApp/pose.html",
{"lecturer_subjects": lecturer_subjects, "subjects": subject_list, "lecturer": lecturer})
def charts(request):
return render(request, 'FirstApp/charts.html')
def forget_password(request):
return render(request, 'FirstApp/forgot-password.html')
def loginForm(request):
return render(request, 'FirstApp/login.html')
def register(request):
return render(request, 'FirstApp/register.html')
def webcam(request):
video = cv2.VideoCapture(os.path.join(BASE_DIR, 'static//FirstApp//videos//Classroom_video.mp4'))
while True:
    ret, frame = video.read()
    # stop when the video ends or 'q' is pressed
    if not ret:
        break
    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video.release()
cv2.destroyAllWindows()
# video = cv2.imread('D://SLIIT/Year 4//Sample Projects//django_project//DemoProject//static//FirstApp/videos/Classroom_video.mp4')
return redirect('/')
# to process video for emotion detection
@login_required(login_url='/user-direct')
def video(request):
title = 'Student and Lecturer Performance Enhancement System'
video_name = request.GET.get('video_name')
video_url = os.path.join(BASE_DIR, os.path.join('static\\FirstApp\\videos\\{0}'.format(video_name)))
meta_data = detect_emotion(video_url)
# meta_data = VideoMeta()
# calculating the respective percentages
meta_data.calcPercentages()
context = {'title': title, 'video_name': video_name, 'url': video_url, 'meta': meta_data}
return render(request, 'FirstApp/video.html', context)
# extractor view
@login_required(login_url='/user-direct')
def extractor(request):
folder = os.path.join(BASE_DIR, os.path.join('static\\FirstApp\\videos'))
videoPaths = [os.path.join(folder, file) for file in os.listdir(folder)]
videos = []
durations = []
# setting up the first video details
first_video_path = videoPaths[0]
first_video = Video()
cap = cv2.VideoCapture(first_video_path)
fps = cap.get(cv2.CAP_PROP_FPS) # OpenCV2 version 2 used "CV_CAP_PROP_FPS"
frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
duration = int(frame_count / fps)
videoName = os.path.basename(first_video_path)
durationObj = datetime.timedelta(seconds=duration)
first_video.path = first_video_path
first_video.name = videoName
first_video.duration = str(durationObj)
for videoPath in videoPaths:
video = Video()
cap = cv2.VideoCapture(videoPath)
fps = cap.get(cv2.CAP_PROP_FPS) # OpenCV2 version 2 used "CV_CAP_PROP_FPS"
frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
duration = int(frame_count / fps)
durations.append(duration)
videoName = os.path.basename(videoPath)
durationObj = datetime.timedelta(seconds=duration)
video.path = videoPath
video.name = videoName
video.duration = str(durationObj)
videos.append(video)
context = {'Videos': videos, 'firstVideo': first_video, 'durations': durations, 'template_name': 'FirstApp/template.html'}
return render(request, 'FirstApp/extractor.html', context)
def template(request):
obj = {'Message': 'Student and Lecturer Performance Enhancement System'}
return render(request, 'FirstApp/template.html', {'template_name': 'FirstApp/template.html', 'object': obj})
def base(request):
return render(request, 'FirstApp/base.html')
def child(request):
return render(request, 'FirstApp/child.html', {'template_name': 'FirstApp/base.html'})
# displaying video results
@login_required(login_url='/user-direct')
......@@ -546,10 +341,6 @@ def view500(request):
return render(request, "FirstApp/500.html")
# tables page
def tables(request):
return render(request, "FirstApp/tables.html")
@login_required(login_url='/user-direct')
def activity(request):
......
import spacy
from spacy.lang.pt.stop_words import STOP_WORDS
from sklearn.feature_extraction.text import CountVectorizer
import pt_core_news_sm
# Reading the file
nlp = pt_core_news_sm.load()
with open("audioToText01.txt", "r", encoding="utf-8") as f:
text = " ".join(f.readlines())
doc = nlp(text)
#calculating the word frequency
corpus = [sent.text.lower() for sent in doc.sents ]
cv = CountVectorizer(stop_words=list(STOP_WORDS))
cv_fit=cv.fit_transform(corpus)
......@@ -18,6 +18,7 @@ word_list = cv.get_feature_names()
count_list = cv_fit.toarray().sum(axis=0)
word_frequency = dict(zip(word_list,count_list))
val=sorted(word_frequency.values())
higher_word_frequencies = [word for word,freq in word_frequency.items() if freq in val[-3:]]
print("\nWords with higher frequencies: ", higher_word_frequencies)
......@@ -26,6 +27,7 @@ higher_frequency = val[-1]
for word in word_frequency.keys():
word_frequency[word] = (word_frequency[word]/higher_frequency)
#calculating sentence rank and taking top ranked sentences for the summary
sentence_rank={}
for sent in doc.sents:
for word in sent :
......
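# Editorial sketch (not part of the original file, which continues beyond this
# excerpt): one common way to finish the frequency-based ranking started above is
# to sum the normalised word frequencies per sentence and keep the top-ranked
# sentences as the summary. Names other than word_frequency, sentence_rank and
# doc are assumptions.
#
# for sent in doc.sents:
#     for word in sent:
#         if word.text.lower() in word_frequency:
#             sentence_rank[sent] = sentence_rank.get(sent, 0) + word_frequency[word.text.lower()]
# top_sentences = sorted(sentence_rank, key=sentence_rank.get, reverse=True)[:3]
# summary = " ".join(sent.text for sent in top_sentences)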
What is OOP? OBJECT ORIENTED PROGRAMMING is a programming concept that works on the principles of abstraction, encapsulation, inheritance, and polymorphism. The basic concept of OOP is to create objects, re-use them throughout the program, and manipulate these objects to get results. Object Oriented Programming, popularly known as OOP, is used in modern programming languages like Java. The core OOP concepts are as follows. A class can be illustrated with cars: the methods that may be performed with these cars are driving, reversing, braking, etc. Next is the object: an object can be defined as an instance of a class, and there can be multiple instances of a class in a program.
\ No newline at end of file
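Editorial sketch (illustrative only, not part of the repository): the car example mentioned in the text above, written as a minimal Python class with two instances; the class defines the behaviour, and each object is one instance of it.
class Car:
    def drive(self):
        return "driving"

    def brake(self):
        return "braking"

car_one = Car()  # one instance (object) of the Car class
car_two = Car()  # a second, independent instance of the same class
print(car_one.drive(), car_two.brake())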
......@@ -9,7 +9,7 @@ from LectureSummarizingApp.serializer import LectureAudioSerializer, LectureAudi
from . import speech_to_text as stt
# this API will retrieve lecture audio details
# APIs used in Lecture Summarizing Component
class LectureAudioAPI(APIView):
def get(self, request):
......
# from django.db import models
from djongo import models
# Create your models here.
# Models used in Lecture Summarization Component
from FirstApp.MongoModels import Lecturer, Subject
......
import librosa
from pysndfx import AudioEffectsChain
import numpy as np
import math
import python_speech_features
import scipy as sp
from scipy import signal
import soundfile
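# Editorial note: pysndfx's AudioEffectsChain shells out to the external SoX
# command-line tool, so SoX must be installed for the filter chains below to run.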
def read_file(file_name):
......@@ -14,31 +10,18 @@ def read_file(file_name):
sample_path = sample_directory + sample_file
# generating audio time series and a sampling rate (int)
y, sr = librosa.load(sample_path)
return y, sr
# '''CENTROID'''
#
# def reduce_noise_centroid_s(y, sr):
#
# cent = librosa.feature.spectral_centroid(y=y, sr=sr)
# threshold_h = np.max(cent)
# threshold_l = np.min(cent)
# less_noise = AudioEffectsChain().lowshelf(gain=-12.0, frequency=threshold_l, slope=0.5).highshelf(gain=-12.0, frequency=threshold_h, slope=0.5).limiter(gain=6.0)
# y_cleaned = less_noise(y)
# return y_cleaned
a, sr = librosa.load(sample_path)
return a, sr
'''MFCC'''
def mffc_highshelf(y, sr):
def mffc_highshelf(a, sr):
mfcc = python_speech_features.base.mfcc(y)
mfcc = python_speech_features.base.logfbank(y)
mfcc = python_speech_features.base.mfcc(a)
mfcc = python_speech_features.base.logfbank(a)
mfcc = python_speech_features.base.lifter(mfcc)
sum_of_squares = []
......@@ -56,22 +39,22 @@ def mffc_highshelf(y, sr):
min_hz = min(hz)
speech_booster = AudioEffectsChain().highshelf(frequency=min_hz*(-1)*1.2, gain=-12.0, slope=0.6).limiter(gain=8.0)
y_speach_boosted = speech_booster(y)
a_speach_boosted = speech_booster(a)
return (y_speach_boosted)
return (a_speach_boosted)
def mfcc_lowshelf(y, sr):
def mfcc_lowshelf(a, sr):
mfcc = python_speech_features.base.mfcc(y)
mfcc = python_speech_features.base.logfbank(y)
mfcc = python_speech_features.base.mfcc(a)
mfcc = python_speech_features.base.logfbank(a)
mfcc = python_speech_features.base.lifter(mfcc)
sum_of_squares = []
index = -1
for r in mfcc:
for x in mfcc:
sum_of_squares.append(0)
index = index + 1
for n in r:
for n in x:
sum_of_squares[index] = sum_of_squares[index] + n**2
strongest_frame = sum_of_squares.index(max(sum_of_squares))
......@@ -81,49 +64,49 @@ def mfcc_lowshelf(y, sr):
min_hz = min(hz)
speech_booster = AudioEffectsChain().lowshelf(frequency=min_hz*(-1), gain=12.0, slope=0.5)
y_speach_boosted = speech_booster(y)
a_speach_boosted = speech_booster(a)
return (y_speach_boosted)
return (a_speach_boosted)
def trim_silence(y):
y_trimmed, index = librosa.effects.trim(y, top_db=20, frame_length=2, hop_length=500)
trimmed_length = librosa.get_duration(y) - librosa.get_duration(y_trimmed)
a_trimmed, index = librosa.effects.trim(y, top_db=20, frame_length=2, hop_length=500)
trimmed_length = librosa.get_duration(y) - librosa.get_duration(a_trimmed)
return y_trimmed, trimmed_length
return a_trimmed, trimmed_length
def enhance(y):
apply_audio_effects = AudioEffectsChain().lowshelf(gain=10.0, frequency=260, slope=0.1).reverb(reverberance=25, hf_damping=5, room_scale=5, stereo_depth=50, pre_delay=20, wet_gain=0, wet_only=False)#.normalize()
y_enhanced = apply_audio_effects(y)
a_enhanced = apply_audio_effects(y)
return y_enhanced
return a_enhanced
def output_file(destination ,filename, y, sr, ext=""):
def output_file(destination ,filename, a, sr, ext=""):
destination = destination + filename[:-4] + ext + '.wav'
librosa.output.write_wav(destination, y, sr)
librosa.output.write_wav(destination, a, sr)
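# Editorial note: librosa.output.write_wav was removed in librosa 0.8; with newer
# versions the soundfile package imported above can be used instead, e.g.
# soundfile.write(destination, a, sr).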
lectures = ['Lecture01.wav']
for s in lectures:
filename = s
y, sr = read_file(filename)
a, sr = read_file(filename)
# y_reduced_centroid_s = reduce_noise_centroid_s(y, sr)
y_reduced_mfcc_lowshelf = mfcc_lowshelf(y, sr)
y_reduced_mfcc_highshelf = mffc_highshelf(y, sr)
# a_reduced_centroid_s = reduce_noise_centroid_s(a, sr)
a_reduced_mfcc_lowshelf = mfcc_lowshelf(a, sr)
a_reduced_mfcc_highshelf = mffc_highshelf(a, sr)
# trimming silences
# y_reduced_centroid_s, time_trimmed = trim_silence(y_reduced_centroid_s)
y_reduced_mfcc_up, time_trimmed = trim_silence(mfcc_lowshelf)
y_reduced_mfcc_down, time_trimmed = trim_silence(mffc_highshelf)
# a_reduced_centroid_s, time_trimmed = trim_silence(a_reduced_centroid_s)
a_reduced_mfcc_up, time_trimmed = trim_silence(a_reduced_mfcc_lowshelf)
a_reduced_mfcc_down, time_trimmed = trim_silence(a_reduced_mfcc_highshelf)
# output_file('lectures_trimmed_noise_reduced/' ,filename, y_reduced_centroid_s, sr, '_ctr_s')
output_file('lectures_trimmed_noise_reduced/' ,filename, y_reduced_mfcc_up, sr, '_mfcc_up')
# output_file('lectures_trimmed_noise_reduced/' ,filename, y_reduced_mfcc_down, sr, '_mfcc_down')
# output_file('lectures_trimmed_noise_reduced/' ,filename, y, sr, '_org')
output_file('lectures_trimmed_noise_reduced/' ,filename, a_reduced_mfcc_up, sr, '_mfcc_up')
# output_file('lectures_trimmed_noise_reduced/' ,filename, a_reduced_mfcc_down, sr, '_mfcc_down')
# output_file('lectures_trimmed_noise_reduced/' ,filename, a, sr, '_org')
import numpy as nump
import scipy as sip
from scipy.io.wavfile import read
from scipy.io.wavfile import write
from scipy import signal
import matplotlib.pyplot as mplt
#get_ipython().magic('matplotlib inline')
(Frequency, array) = read('lectures/Lecture01.wav')
len(array)
mplt.plot(array)
mplt.title('Original Signal Spectrum')
mplt.xlabel('Frequency(Hz)')
mplt.ylabel('Amplitude')
fourierTransformation = nump.fft.fft(array)  # numpy FFT (scipy.fft is a module in modern SciPy)
scale = nump.linspace(0, Frequency, len(array))
mplt.stem(scale[0:5000], nump.abs(fourierTransformation[0:5000]), 'r')
mplt.title('Signal spectrum after FFT')
mplt.xlabel('Frequency(Hz)')
mplt.ylabel('Amplitude')
gaussianNoise = nump.random.rand(len(fourierTransformation))
NewSound = gaussianNoise + array
write("New-Sound-Added-With-Guassian-Noise.wav", Frequency, NewSound)
u,v = signal.butter(5, 1000/(Frequency/2), btype='highpass')
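# Editorial note: signal.butter designs a 5th-order Butterworth filter here; the
# 1000 Hz cutoff is normalised by the Nyquist frequency (Frequency/2), which is
# what scipy.signal.butter expects when no sampling rate is passed.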
filteredSignal = signal.lfilter(u,v,NewSound)
# plotting the signal.
mplt.plot(filteredSignal)
mplt.title('Highpass Filter')
mplt.xlabel('Frequency(Hz)')
mplt.ylabel('Amplitude')
# ButterWorth low-filter
x,y = signal.butter(5, 380/(Frequency/2), btype='lowpass')
# Applying the filter to the signal
newFilteredSignal = signal.lfilter(x,y,filteredSignal)
# plotting the signal.
mplt.plot(newFilteredSignal)
mplt.title('Lowpass Filter')
mplt.xlabel('Frequency(Hz)')
mplt.ylabel('Amplitude')
write("removed.wav", Frequency, nump.int16(newFilteredSignal/nump.max(nump.abs(newFilteredSignal)) * 32767))
\ No newline at end of file
......@@ -4,6 +4,7 @@ from FirstApp.serializers import LecturerSerializer, SubjectSerializer
from . models import *
#serializers used in Lecture Summarizing Component
class LectureAudioSerializer(serializers.ModelSerializer):
lecturer = LecturerSerializer()
......@@ -23,7 +24,6 @@ class LectureAudioNoiseRemovedSerializer(serializers.ModelSerializer):
class LectureSpeechToTextSerializer(serializers.ModelSerializer):
# lecture_speech_to_text_id = LectureAudioNoiseRemovedSerializer()
lecture_audio_id = LectureAudioSerializer()
class Meta:
......@@ -32,7 +32,6 @@ class LectureSpeechToTextSerializer(serializers.ModelSerializer):
class LectureAudioSummarySerializer(serializers.ModelSerializer):
# lecture_audio_noise_removed_id = LectureSpeechToTextSerializer()
lecture_audio_id = LectureAudioSerializer()
class Meta:
......@@ -41,10 +40,8 @@ class LectureAudioSummarySerializer(serializers.ModelSerializer):
class LectureNoticesSerializer(serializers.ModelSerializer):
# lecture_audio_noise_removed_id = LectureSpeechToTextSerializer()
lecture_audio_id = LectureAudioSerializer()
class Meta:
# model = LectureAudioSummary
model = LectureNotices
fields = '__all__'
\ No newline at end of file
......@@ -4,17 +4,17 @@ import os
def speech_to_text(video_name):
#calling the Recognizer()
r = sr.Recognizer()
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
VIDEO_PATH = os.path.join(BASE_DIR, "lectures\\{}".format(video_name))
with sr.AudioFile(VIDEO_PATH) as source:
audio = r.listen(source)
file = open('audioToText01.txt', 'w')
file = open('audioToText01.txt', 'w') #open file
try:
text = r.recognize_google(audio)
text = r.recognize_google(audio) #Convert using google recognizer
file.write(text)
except:
file.write('error')
......
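# Editorial sketch (illustrative only): the function above would be called with the
# name of a WAV file stored in the lectures directory, e.g.
#   speech_to_text('Lecture01.wav')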
......@@ -5,27 +5,12 @@ from django.conf.urls import url
from . import api
router = routers.DefaultRouter()
# router.register(r'^register', views.register)
urlpatterns = [
path('lecture', views.summarization),
# path('', views.hello),
# path('login', views.login),
# path('register', views.register),
# path('404', views.view404),
# path('blank', views.blank),
# path('buttons', views.buttons),
# path('cards', views.cards),
# path('charts', views.charts),
# path('forgot-password', views.forget_password),
# # path('webcam', views.webcam),
# path('template', views.template),
# path('base', views.base),
# path('child', views.child),
# path('lecture-video', views.lecVideo),
# # path('Video', views.hello)
# API to retrieve activity recognition
# API to retrieve lecture summarizing details
url(r'^lecture-audio/$', api.LectureAudioAPI.as_view()),
url(r'^lecture-audio-noise-removed/$', api.audioNoiseRemovedList.as_view()),
......@@ -36,15 +21,6 @@ urlpatterns = [
url(r'^lecture-notices/$', api.lectureNoticeList.as_view()),
# # API to retrieve audio analysis
# url(r'^get-audio-analysis', api.GetLectureAudioAnalysis.as_view()),
#
# # API to retrieve lecture audio text
# url(r'^get-lecture-audio-text', api.LectureAudioTextAPI.as_view()),
#
# # test API
# url(r'^test-api', api.TestAPI.as_view()),
path('api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, render
from rest_framework.views import APIView
from rest_framework.response import Response
......@@ -9,7 +7,7 @@ from .serializer import LectureAudioSerializer, LectureAudioNoiseRemovedSerializ
LectureSpeechToTextSerializer, LectureNoticesSerializer
# Create your views here.
# Views used in Lecture Summarization
def summarization(request):
......
input: "data"
input_shape {
dim: 1
dim: 3
dim: 300
dim: 300
}
layer {
name: "data_bn"
type: "BatchNorm"
bottom: "data"
top: "data_bn"
param {
lr_mult: 0.0
}
param {
lr_mult: 0.0
}
param {
lr_mult: 0.0
}
}
layer {
name: "data_scale"
type: "Scale"
bottom: "data_bn"
top: "data_bn"
param {
lr_mult: 1.0
decay_mult: 1.0
}
param {
lr_mult: 2.0
decay_mult: 1.0
}
scale_param {
bias_term: true
}
}
layer {
name: "conv1_h"
type: "Convolution"
bottom: "data_bn"
top: "conv1_h"
param {
lr_mult: 1.0
decay_mult: 1.0
}
param {
lr_mult: 2.0
decay_mult: 1.0
}
convolution_param {
num_output: 32
pad: 3
kernel_size: 7
stride: 2
weight_filler {
type: "msra"
variance_norm: FAN_OUT
}
bias_filler {
type: "constant"
value: 0.0
}
}
}
layer {
name: "conv1_bn_h"
type: "BatchNorm"
bottom: "conv1_h"
top: "conv1_h"
param {
lr_mult: 0.0
}
param {
lr_mult: 0.0
}
param {
lr_mult: 0.0
}
}
layer {
name: "conv1_scale_h"
type: "Scale"
bottom: "conv1_h"
top: "conv1_h"
param {
lr_mult: 1.0
decay_mult: 1.0
}
param {
lr_mult: 2.0
decay_mult: 1.0
}
scale_param {
bias_term: true
}
}
layer {
name: "conv1_relu"
type: "ReLU"
bottom: "conv1_h"
top: "conv1_h"
}
layer {
name: "conv1_pool"
type: "Pooling"
bottom: "conv1_h"
top: "conv1_pool"
pooling_param {
kernel_size: 3
stride: 2
}
}
layer {
name: "layer_64_1_conv1_h"
type: "Convolution"
bottom: "conv1_pool"
top: "layer_64_1_conv1_h"
param {
lr_mult: 1.0
decay_mult: 1.0
}
convolution_param {
num_output: 32
bias_term: false
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
value: 0.0
}
}
}
layer {
name: "layer_64_1_bn2_h"
type: "BatchNorm"
bottom: "layer_64_1_conv1_h"
top: "layer_64_1_conv1_h"
param {
lr_mult: 0.0
}
param {
lr_mult: 0.0
}
param {
lr_mult: 0.0
}
}
layer {
name: "layer_64_1_scale2_h"
type: "Scale"
bottom: "layer_64_1_conv1_h"
top: "layer_64_1_conv1_h"
param {
lr_mult: 1.0
decay_mult: 1.0
}
param {
lr_mult: 2.0
decay_mult: 1.0
}
scale_param {
bias_term: true
}
}
layer {
name: "layer_64_1_relu2"
type: "ReLU"
bottom: "layer_64_1_conv1_h"
top: "layer_64_1_conv1_h"
}
layer {
name: "layer_64_1_conv2_h"
type: "Convolution"
bottom: "layer_64_1_conv1_h"
top: "layer_64_1_conv2_h"
param {
lr_mult: 1.0
decay_mult: 1.0
}
convolution_param {
num_output: 32
bias_term: false
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
value: 0.0
}
}
}
layer {
name: "layer_64_1_sum"
type: "Eltwise"
bottom: "layer_64_1_conv2_h"
bottom: "conv1_pool"
top: "layer_64_1_sum"
}
layer {
name: "layer_128_1_bn1_h"
type: "BatchNorm"
bottom: "layer_64_1_sum"
top: "layer_128_1_bn1_h"
param {
lr_mult: 0.0
}
param {
lr_mult: 0.0
}
param {
lr_mult: 0.0
}
}
layer {
name: "layer_128_1_scale1_h"
type: "Scale"
bottom: "layer_128_1_bn1_h"
top: "layer_128_1_bn1_h"
param {
lr_mult: 1.0
decay_mult: 1.0
}
param {
lr_mult: 2.0
decay_mult: 1.0
}
scale_param {
bias_term: true
}
}
layer {
name: "layer_128_1_relu1"
type: "ReLU"
bottom: "layer_128_1_bn1_h"
top: "layer_128_1_bn1_h"
}
layer {
name: "layer_128_1_conv1_h"
type: "Convolution"
bottom: "layer_128_1_bn1_h"
top: "layer_128_1_conv1_h"
param {
lr_mult: 1.0
decay_mult: 1.0
}
convolution_param {
num_output: 128
bias_term: false
pad: 1
kernel_size: 3
stride: 2
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
value: 0.0
}
}
}
layer {
name: "layer_128_1_bn2"
type: "BatchNorm"
bottom: "layer_128_1_conv1_h"
top: "layer_128_1_conv1_h"
param {
lr_mult: 0.0
}
param {
lr_mult: 0.0
}
param {
lr_mult: 0.0
}
}
layer {
name: "layer_128_1_scale2"
type: "Scale"
bottom: "layer_128_1_conv1_h"
top: "layer_128_1_conv1_h"
param {
lr_mult: 1.0
decay_mult: 1.0
}
param {
lr_mult: 2.0
decay_mult: 1.0
}
scale_param {
bias_term: true
}
}
layer {
name: "layer_128_1_relu2"
type: "ReLU"
bottom: "layer_128_1_conv1_h"
top: "layer_128_1_conv1_h"
}
layer {
name: "layer_128_1_conv2"
type: "Convolution"
bottom: "layer_128_1_conv1_h"
top: "layer_128_1_conv2"
param {
lr_mult: 1.0
decay_mult: 1.0
}
convolution_param {
num_output: 128
bias_term: false
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
value: 0.0
}
}
}
layer {
name: "layer_128_1_conv_expand_h"
type: "Convolution"
bottom: "layer_128_1_bn1_h"
top: "layer_128_1_conv_expand_h"
param {
lr_mult: 1.0
decay_mult: 1.0
}
convolution_param {
num_output: 128
bias_term: false
pad: 0
kernel_size: 1
stride: 2
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
value: 0.0
}
}
}
layer {
name: "layer_128_1_sum"
type: "Eltwise"
bottom: "layer_128_1_conv2"
bottom: "layer_128_1_conv_expand_h"
top: "layer_128_1_sum"
}
layer {
name: "layer_256_1_bn1"
type: "BatchNorm"
bottom: "layer_128_1_sum"
top: "layer_256_1_bn1"
param {
lr_mult: 0.0
}
param {
lr_mult: 0.0
}
param {
lr_mult: 0.0
}
}
layer {
name: "layer_256_1_scale1"
type: "Scale"
bottom: "layer_256_1_bn1"
top: "layer_256_1_bn1"
param {
lr_mult: 1.0
decay_mult: 1.0
}
param {
lr_mult: 2.0
decay_mult: 1.0
}
scale_param {
bias_term: true
}
}
layer {
name: "layer_256_1_relu1"
type: "ReLU"
bottom: "layer_256_1_bn1"
top: "layer_256_1_bn1"
}
layer {
name: "layer_256_1_conv1"
type: "Convolution"
bottom: "layer_256_1_bn1"
top: "layer_256_1_conv1"
param {
lr_mult: 1.0
decay_mult: 1.0
}
convolution_param {
num_output: 256
bias_term: false
pad: 1
kernel_size: 3
stride: 2
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
value: 0.0
}
}
}
layer {
name: "layer_256_1_bn2"
type: "BatchNorm"
bottom: "layer_256_1_conv1"
top: "layer_256_1_conv1"
param {
lr_mult: 0.0
}
param {
lr_mult: 0.0
}
param {
lr_mult: 0.0
}
}
layer {
name: "layer_256_1_scale2"
type: "Scale"
bottom: "layer_256_1_conv1"
top: "layer_256_1_conv1"
param {
lr_mult: 1.0
decay_mult: 1.0
}
param {
lr_mult: 2.0
decay_mult: 1.0
}
scale_param {
bias_term: true
}
}
layer {
name: "layer_256_1_relu2"
type: "ReLU"
bottom: "layer_256_1_conv1"
top: "layer_256_1_conv1"
}
layer {
name: "layer_256_1_conv2"
type: "Convolution"
bottom: "layer_256_1_conv1"
top: "layer_256_1_conv2"
param {
lr_mult: 1.0
decay_mult: 1.0
}
convolution_param {
num_output: 256
bias_term: false
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
value: 0.0
}
}
}
layer {
name: "layer_256_1_conv_expand"
type: "Convolution"
bottom: "layer_256_1_bn1"
top: "layer_256_1_conv_expand"
param {
lr_mult: 1.0
decay_mult: 1.0
}
convolution_param {
num_output: 256
bias_term: false
pad: 0
kernel_size: 1
stride: 2
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
value: 0.0
}
}
}
layer {
name: "layer_256_1_sum"
type: "Eltwise"
bottom: "layer_256_1_conv2"
bottom: "layer_256_1_conv_expand"
top: "layer_256_1_sum"
}
layer {
name: "layer_512_1_bn1"
type: "BatchNorm"
bottom: "layer_256_1_sum"
top: "layer_512_1_bn1"
param {
lr_mult: 0.0
}
param {
lr_mult: 0.0
}
param {
lr_mult: 0.0
}
}
layer {
name: "layer_512_1_scale1"
type: "Scale"
bottom: "layer_512_1_bn1"
top: "layer_512_1_bn1"
param {
lr_mult: 1.0
decay_mult: 1.0
}
param {
lr_mult: 2.0
decay_mult: 1.0
}
scale_param {
bias_term: true
}
}
layer {
name: "layer_512_1_relu1"
type: "ReLU"
bottom: "layer_512_1_bn1"
top: "layer_512_1_bn1"
}
layer {
name: "layer_512_1_conv1_h"
type: "Convolution"
bottom: "layer_512_1_bn1"
top: "layer_512_1_conv1_h"
param {
lr_mult: 1.0
decay_mult: 1.0
}
convolution_param {
num_output: 128
bias_term: false
pad: 1
kernel_size: 3
stride: 1 # 2
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
value: 0.0
}
}
}
layer {
name: "layer_512_1_bn2_h"
type: "BatchNorm"
bottom: "layer_512_1_conv1_h"
top: "layer_512_1_conv1_h"
param {
lr_mult: 0.0
}
param {
lr_mult: 0.0
}
param {
lr_mult: 0.0
}
}
layer {
name: "layer_512_1_scale2_h"
type: "Scale"
bottom: "layer_512_1_conv1_h"
top: "layer_512_1_conv1_h"
param {
lr_mult: 1.0
decay_mult: 1.0
}
param {
lr_mult: 2.0
decay_mult: 1.0
}
scale_param {
bias_term: true
}
}
layer {
name: "layer_512_1_relu2"
type: "ReLU"
bottom: "layer_512_1_conv1_h"
top: "layer_512_1_conv1_h"
}
layer {
name: "layer_512_1_conv2_h"
type: "Convolution"
bottom: "layer_512_1_conv1_h"
top: "layer_512_1_conv2_h"
param {
lr_mult: 1.0
decay_mult: 1.0
}
convolution_param {
num_output: 256
bias_term: false
pad: 2 # 1
kernel_size: 3
stride: 1
dilation: 2
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
value: 0.0
}
}
}
layer {
name: "layer_512_1_conv_expand_h"
type: "Convolution"
bottom: "layer_512_1_bn1"
top: "layer_512_1_conv_expand_h"
param {
lr_mult: 1.0
decay_mult: 1.0
}
convolution_param {
num_output: 256
bias_term: false
pad: 0
kernel_size: 1
stride: 1 # 2
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
value: 0.0
}
}
}
layer {
name: "layer_512_1_sum"
type: "Eltwise"
bottom: "layer_512_1_conv2_h"
bottom: "layer_512_1_conv_expand_h"
top: "layer_512_1_sum"
}
layer {
name: "last_bn_h"
type: "BatchNorm"
bottom: "layer_512_1_sum"
top: "layer_512_1_sum"
param {
lr_mult: 0.0
}
param {
lr_mult: 0.0
}
param {
lr_mult: 0.0
}
}
layer {
name: "last_scale_h"
type: "Scale"
bottom: "layer_512_1_sum"
top: "layer_512_1_sum"
param {
lr_mult: 1.0
decay_mult: 1.0
}
param {
lr_mult: 2.0
decay_mult: 1.0
}
scale_param {
bias_term: true
}
}
layer {
name: "last_relu"
type: "ReLU"
bottom: "layer_512_1_sum"
top: "fc7"
}
layer {
name: "conv6_1_h"
type: "Convolution"
bottom: "fc7"
top: "conv6_1_h"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 128
pad: 0
kernel_size: 1
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "conv6_1_relu"
type: "ReLU"
bottom: "conv6_1_h"
top: "conv6_1_h"
}
layer {
name: "conv6_2_h"
type: "Convolution"
bottom: "conv6_1_h"
top: "conv6_2_h"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
stride: 2
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "conv6_2_relu"
type: "ReLU"
bottom: "conv6_2_h"
top: "conv6_2_h"
}
layer {
name: "conv7_1_h"
type: "Convolution"
bottom: "conv6_2_h"
top: "conv7_1_h"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 64
pad: 0
kernel_size: 1
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "conv7_1_relu"
type: "ReLU"
bottom: "conv7_1_h"
top: "conv7_1_h"
}
layer {
name: "conv7_2_h"
type: "Convolution"
bottom: "conv7_1_h"
top: "conv7_2_h"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
stride: 2
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "conv7_2_relu"
type: "ReLU"
bottom: "conv7_2_h"
top: "conv7_2_h"
}
layer {
name: "conv8_1_h"
type: "Convolution"
bottom: "conv7_2_h"
top: "conv8_1_h"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 64
pad: 0
kernel_size: 1
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "conv8_1_relu"
type: "ReLU"
bottom: "conv8_1_h"
top: "conv8_1_h"
}
layer {
name: "conv8_2_h"
type: "Convolution"
bottom: "conv8_1_h"
top: "conv8_2_h"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "conv8_2_relu"
type: "ReLU"
bottom: "conv8_2_h"
top: "conv8_2_h"
}
layer {
name: "conv9_1_h"
type: "Convolution"
bottom: "conv8_2_h"
top: "conv9_1_h"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 64
pad: 0
kernel_size: 1
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "conv9_1_relu"
type: "ReLU"
bottom: "conv9_1_h"
top: "conv9_1_h"
}
layer {
name: "conv9_2_h"
type: "Convolution"
bottom: "conv9_1_h"
top: "conv9_2_h"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "conv9_2_relu"
type: "ReLU"
bottom: "conv9_2_h"
top: "conv9_2_h"
}
layer {
name: "conv4_3_norm"
type: "Normalize"
bottom: "layer_256_1_bn1"
top: "conv4_3_norm"
norm_param {
across_spatial: false
scale_filler {
type: "constant"
value: 20
}
channel_shared: false
}
}
layer {
name: "conv4_3_norm_mbox_loc"
type: "Convolution"
bottom: "conv4_3_norm"
top: "conv4_3_norm_mbox_loc"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 16
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "conv4_3_norm_mbox_loc_perm"
type: "Permute"
bottom: "conv4_3_norm_mbox_loc"
top: "conv4_3_norm_mbox_loc_perm"
permute_param {
order: 0
order: 2
order: 3
order: 1
}
}
layer {
name: "conv4_3_norm_mbox_loc_flat"
type: "Flatten"
bottom: "conv4_3_norm_mbox_loc_perm"
top: "conv4_3_norm_mbox_loc_flat"
flatten_param {
axis: 1
}
}
layer {
name: "conv4_3_norm_mbox_conf"
type: "Convolution"
bottom: "conv4_3_norm"
top: "conv4_3_norm_mbox_conf"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 8 # 84
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "conv4_3_norm_mbox_conf_perm"
type: "Permute"
bottom: "conv4_3_norm_mbox_conf"
top: "conv4_3_norm_mbox_conf_perm"
permute_param {
order: 0
order: 2
order: 3
order: 1
}
}
layer {
name: "conv4_3_norm_mbox_conf_flat"
type: "Flatten"
bottom: "conv4_3_norm_mbox_conf_perm"
top: "conv4_3_norm_mbox_conf_flat"
flatten_param {
axis: 1
}
}
layer {
name: "conv4_3_norm_mbox_priorbox"
type: "PriorBox"
bottom: "conv4_3_norm"
bottom: "data"
top: "conv4_3_norm_mbox_priorbox"
prior_box_param {
min_size: 30.0
max_size: 60.0
aspect_ratio: 2
flip: true
clip: false
variance: 0.1
variance: 0.1
variance: 0.2
variance: 0.2
step: 8
offset: 0.5
}
}
layer {
name: "fc7_mbox_loc"
type: "Convolution"
bottom: "fc7"
top: "fc7_mbox_loc"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 24
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "fc7_mbox_loc_perm"
type: "Permute"
bottom: "fc7_mbox_loc"
top: "fc7_mbox_loc_perm"
permute_param {
order: 0
order: 2
order: 3
order: 1
}
}
layer {
name: "fc7_mbox_loc_flat"
type: "Flatten"
bottom: "fc7_mbox_loc_perm"
top: "fc7_mbox_loc_flat"
flatten_param {
axis: 1
}
}
layer {
name: "fc7_mbox_conf"
type: "Convolution"
bottom: "fc7"
top: "fc7_mbox_conf"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 12 # 126
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "fc7_mbox_conf_perm"
type: "Permute"
bottom: "fc7_mbox_conf"
top: "fc7_mbox_conf_perm"
permute_param {
order: 0
order: 2
order: 3
order: 1
}
}
layer {
name: "fc7_mbox_conf_flat"
type: "Flatten"
bottom: "fc7_mbox_conf_perm"
top: "fc7_mbox_conf_flat"
flatten_param {
axis: 1
}
}
layer {
name: "fc7_mbox_priorbox"
type: "PriorBox"
bottom: "fc7"
bottom: "data"
top: "fc7_mbox_priorbox"
prior_box_param {
min_size: 60.0
max_size: 111.0
aspect_ratio: 2
aspect_ratio: 3
flip: true
clip: false
variance: 0.1
variance: 0.1
variance: 0.2
variance: 0.2
step: 16
offset: 0.5
}
}
layer {
name: "conv6_2_mbox_loc"
type: "Convolution"
bottom: "conv6_2_h"
top: "conv6_2_mbox_loc"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 24
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "conv6_2_mbox_loc_perm"
type: "Permute"
bottom: "conv6_2_mbox_loc"
top: "conv6_2_mbox_loc_perm"
permute_param {
order: 0
order: 2
order: 3
order: 1
}
}
layer {
name: "conv6_2_mbox_loc_flat"
type: "Flatten"
bottom: "conv6_2_mbox_loc_perm"
top: "conv6_2_mbox_loc_flat"
flatten_param {
axis: 1
}
}
layer {
name: "conv6_2_mbox_conf"
type: "Convolution"
bottom: "conv6_2_h"
top: "conv6_2_mbox_conf"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 12 # 126
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "conv6_2_mbox_conf_perm"
type: "Permute"
bottom: "conv6_2_mbox_conf"
top: "conv6_2_mbox_conf_perm"
permute_param {
order: 0
order: 2
order: 3
order: 1
}
}
layer {
name: "conv6_2_mbox_conf_flat"
type: "Flatten"
bottom: "conv6_2_mbox_conf_perm"
top: "conv6_2_mbox_conf_flat"
flatten_param {
axis: 1
}
}
layer {
name: "conv6_2_mbox_priorbox"
type: "PriorBox"
bottom: "conv6_2_h"
bottom: "data"
top: "conv6_2_mbox_priorbox"
prior_box_param {
min_size: 111.0
max_size: 162.0
aspect_ratio: 2
aspect_ratio: 3
flip: true
clip: false
variance: 0.1
variance: 0.1
variance: 0.2
variance: 0.2
step: 32
offset: 0.5
}
}
layer {
name: "conv7_2_mbox_loc"
type: "Convolution"
bottom: "conv7_2_h"
top: "conv7_2_mbox_loc"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 24
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "conv7_2_mbox_loc_perm"
type: "Permute"
bottom: "conv7_2_mbox_loc"
top: "conv7_2_mbox_loc_perm"
permute_param {
order: 0
order: 2
order: 3
order: 1
}
}
layer {
name: "conv7_2_mbox_loc_flat"
type: "Flatten"
bottom: "conv7_2_mbox_loc_perm"
top: "conv7_2_mbox_loc_flat"
flatten_param {
axis: 1
}
}
layer {
name: "conv7_2_mbox_conf"
type: "Convolution"
bottom: "conv7_2_h"
top: "conv7_2_mbox_conf"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 12 # 126
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "conv7_2_mbox_conf_perm"
type: "Permute"
bottom: "conv7_2_mbox_conf"
top: "conv7_2_mbox_conf_perm"
permute_param {
order: 0
order: 2
order: 3
order: 1
}
}
layer {
name: "conv7_2_mbox_conf_flat"
type: "Flatten"
bottom: "conv7_2_mbox_conf_perm"
top: "conv7_2_mbox_conf_flat"
flatten_param {
axis: 1
}
}
layer {
name: "conv7_2_mbox_priorbox"
type: "PriorBox"
bottom: "conv7_2_h"
bottom: "data"
top: "conv7_2_mbox_priorbox"
prior_box_param {
min_size: 162.0
max_size: 213.0
aspect_ratio: 2
aspect_ratio: 3
flip: true
clip: false
variance: 0.1
variance: 0.1
variance: 0.2
variance: 0.2
step: 64
offset: 0.5
}
}
layer {
name: "conv8_2_mbox_loc"
type: "Convolution"
bottom: "conv8_2_h"
top: "conv8_2_mbox_loc"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 16
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "conv8_2_mbox_loc_perm"
type: "Permute"
bottom: "conv8_2_mbox_loc"
top: "conv8_2_mbox_loc_perm"
permute_param {
order: 0
order: 2
order: 3
order: 1
}
}
layer {
name: "conv8_2_mbox_loc_flat"
type: "Flatten"
bottom: "conv8_2_mbox_loc_perm"
top: "conv8_2_mbox_loc_flat"
flatten_param {
axis: 1
}
}
layer {
name: "conv8_2_mbox_conf"
type: "Convolution"
bottom: "conv8_2_h"
top: "conv8_2_mbox_conf"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 8 # 84
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "conv8_2_mbox_conf_perm"
type: "Permute"
bottom: "conv8_2_mbox_conf"
top: "conv8_2_mbox_conf_perm"
permute_param {
order: 0
order: 2
order: 3
order: 1
}
}
layer {
name: "conv8_2_mbox_conf_flat"
type: "Flatten"
bottom: "conv8_2_mbox_conf_perm"
top: "conv8_2_mbox_conf_flat"
flatten_param {
axis: 1
}
}
layer {
name: "conv8_2_mbox_priorbox"
type: "PriorBox"
bottom: "conv8_2_h"
bottom: "data"
top: "conv8_2_mbox_priorbox"
prior_box_param {
min_size: 213.0
max_size: 264.0
aspect_ratio: 2
flip: true
clip: false
variance: 0.1
variance: 0.1
variance: 0.2
variance: 0.2
step: 100
offset: 0.5
}
}
layer {
name: "conv9_2_mbox_loc"
type: "Convolution"
bottom: "conv9_2_h"
top: "conv9_2_mbox_loc"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 16
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "conv9_2_mbox_loc_perm"
type: "Permute"
bottom: "conv9_2_mbox_loc"
top: "conv9_2_mbox_loc_perm"
permute_param {
order: 0
order: 2
order: 3
order: 1
}
}
layer {
name: "conv9_2_mbox_loc_flat"
type: "Flatten"
bottom: "conv9_2_mbox_loc_perm"
top: "conv9_2_mbox_loc_flat"
flatten_param {
axis: 1
}
}
layer {
name: "conv9_2_mbox_conf"
type: "Convolution"
bottom: "conv9_2_h"
top: "conv9_2_mbox_conf"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 8 # 84
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "conv9_2_mbox_conf_perm"
type: "Permute"
bottom: "conv9_2_mbox_conf"
top: "conv9_2_mbox_conf_perm"
permute_param {
order: 0
order: 2
order: 3
order: 1
}
}
layer {
name: "conv9_2_mbox_conf_flat"
type: "Flatten"
bottom: "conv9_2_mbox_conf_perm"
top: "conv9_2_mbox_conf_flat"
flatten_param {
axis: 1
}
}
layer {
name: "conv9_2_mbox_priorbox"
type: "PriorBox"
bottom: "conv9_2_h"
bottom: "data"
top: "conv9_2_mbox_priorbox"
prior_box_param {
min_size: 264.0
max_size: 315.0
aspect_ratio: 2
flip: true
clip: false
variance: 0.1
variance: 0.1
variance: 0.2
variance: 0.2
step: 300
offset: 0.5
}
}
layer {
name: "mbox_loc"
type: "Concat"
bottom: "conv4_3_norm_mbox_loc_flat"
bottom: "fc7_mbox_loc_flat"
bottom: "conv6_2_mbox_loc_flat"
bottom: "conv7_2_mbox_loc_flat"
bottom: "conv8_2_mbox_loc_flat"
bottom: "conv9_2_mbox_loc_flat"
top: "mbox_loc"
concat_param {
axis: 1
}
}
layer {
name: "mbox_conf"
type: "Concat"
bottom: "conv4_3_norm_mbox_conf_flat"
bottom: "fc7_mbox_conf_flat"
bottom: "conv6_2_mbox_conf_flat"
bottom: "conv7_2_mbox_conf_flat"
bottom: "conv8_2_mbox_conf_flat"
bottom: "conv9_2_mbox_conf_flat"
top: "mbox_conf"
concat_param {
axis: 1
}
}
layer {
name: "mbox_priorbox"
type: "Concat"
bottom: "conv4_3_norm_mbox_priorbox"
bottom: "fc7_mbox_priorbox"
bottom: "conv6_2_mbox_priorbox"
bottom: "conv7_2_mbox_priorbox"
bottom: "conv8_2_mbox_priorbox"
bottom: "conv9_2_mbox_priorbox"
top: "mbox_priorbox"
concat_param {
axis: 2
}
}
layer {
name: "mbox_conf_reshape"
type: "Reshape"
bottom: "mbox_conf"
top: "mbox_conf_reshape"
reshape_param {
shape {
dim: 0
dim: -1
dim: 2
}
}
}
layer {
name: "mbox_conf_softmax"
type: "Softmax"
bottom: "mbox_conf_reshape"
top: "mbox_conf_softmax"
softmax_param {
axis: 2
}
}
layer {
name: "mbox_conf_flatten"
type: "Flatten"
bottom: "mbox_conf_softmax"
top: "mbox_conf_flatten"
flatten_param {
axis: 1
}
}
layer {
name: "detection_out"
type: "DetectionOutput"
bottom: "mbox_loc"
bottom: "mbox_conf_flatten"
bottom: "mbox_priorbox"
top: "detection_out"
include {
phase: TEST
}
detection_output_param {
num_classes: 2
share_location: true
background_label_id: 0
nms_param {
nms_threshold: 0.45
top_k: 400
}
code_type: CENTER_SIZE
keep_top_k: 200
confidence_threshold: 0.01
}
}
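# Editorial note: the DetectionOutput layer above emits a 1x1xNx7 blob in which each
# row is [image_id, label, confidence, xmin, ymin, xmax, ymax], with box coordinates
# normalised to [0, 1]; confidence_threshold discards rows scoring below 0.01.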