Commit 5c72998c authored by I.K Seneviratne

Merge branch 'monitoring_student_behavior_IT17138000' into 'QA_RELEASE'

Monitoring student behavior it17138000

See merge request !21
parents bb4b62f7 91f1c100
import cv2
import os
import re
import base64
import shutil


def saveImage(response):
    dataUrlPattern = re.compile('data:image/(png|jpeg);base64,(.*)$')
    base_path = os.path.join(os.path.abspath(__file__))
    root_dir = os.path.dirname(os.path.dirname(base_path))
    new_dir_name = "static\\FirstApp\\images\\{}".format(response["imageName"])
    new_dir = os.path.join(root_dir, new_dir_name)

    if os.path.isdir(new_dir):
        # delete the previous directory
        shutil.rmtree(new_dir)

    # create the new directory
    os.mkdir(new_dir)

    count = 0

    for url in response["ImageURLS"]:
        url = dataUrlPattern.match(url).group(2)
        encoded = url.encode()
        image = base64.b64decode(encoded)
        imageName = response["imageName"] + '_img_' + format(count) + '.png'
        new_file = os.path.join(new_dir, imageName)
        count += 1

        # saving the images (method 1)
        with open(new_file, "wb") as f:
            f.write(image)

    # respond 'yes' to the command line prompt
    p = os.popen('python manage.py collectstatic', "w")
    p.write("yes")
@@ -13,22 +13,16 @@ arbitrary media types.
"""
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from MonitorLecturerApp.models import LectureRecordedVideo, LecturerVideoMetaData
from MonitorLecturerApp.serializers import LectureRecordedVideoSerializer, LecturerVideoMetaDataSerializer
from .MongoModels import *
from rest_framework.views import *
from .logic import activity_recognition as ar
from .logic import posenet_calculation as pc
from . import emotion_detector as ed
from .logic import id_generator as ig
from .logic import pdf_file_generator as pdf
from .logic import head_gaze_estimation as hge
from .logic import video_extraction as ve
from .models import Teachers, Video, VideoMeta, RegisterUser
from .MongoModels import *
from .serializers import *
import datetime
@@ -240,16 +234,11 @@ class GetLectureActivityViewSet(APIView):
def get(self, request):
lecture_video_id = request.query_params.get('lecture_video_id')
lecture_video_name = request.query_params.get('lecture_video_name')
# retrieve the extracted frames
extracted = ar.getExtractedFrames(lecture_video_name)
lecture_activities = LectureActivity.objects.filter(lecture_video_id__lecture_video_id=lecture_video_id)
serializer = LectureActivitySerializer(lecture_activities, many=True)
return Response({
"response": serializer.data,
"extracted": extracted
})
@@ -296,60 +285,6 @@ class LectureActivityProcess(APIView):
ar.save_frame_groupings(video_name, frame_landmarks, frame_group_dict)
class GetLectureActivityDetections(APIView):
def get(self, request):
video_name = request.query_params.get('video_name')
frame_name = request.query_params.get('frame_name')
detections = ar.get_detections(video_name, frame_name)
return Response({
"detections": detections
})
# the API class for getting student detections for a label
class GetLectureActvityDetectionsForLabel(APIView):
def get(self, request):
video_name = request.query_params.get('video_name')
label = request.query_params.get('label')
labelled_detections, detected_people = ar.get_detections_for_label(video_name, label)
return Response({
"response": labelled_detections,
"people": detected_people
})
# the API class for getting students activity evaluations
class GetLectureActivityStudentEvaluation(APIView):
def get(self, request):
video_name = request.query_params.get('video_name')
labelled_detections, detected_people = ar.get_student_activity_evaluation(video_name)
return Response({
"response": labelled_detections,
"people": detected_people
})
# the API class to retrieve individual student evaluation (activity)
class GetLectureActivityIndividualStudentEvaluation(APIView):
def get(self, request):
video_name = request.query_params.get('video_name')
student_name = request.query_params.get('student_name')
meta_data = ar.get_individual_student_evaluation(video_name, student_name)
return Response({
"response": meta_data
})
# API to retrieve activity detections for frames
class GetLectureActivityRecognitionsForFrames(APIView):
@@ -488,45 +423,16 @@ class GetLectureEmotionReportViewSet(APIView):
def get(self, request):
lecture_video_id = request.query_params.get('lecture_video_id')
lecture_video_name = request.query_params.get('lecture_video_name')
lecture_emotions = LectureEmotionReport.objects.filter(lecture_video_id__lecture_video_id=lecture_video_id)
serializer = LectureEmotionSerializer(lecture_emotions, many=True)
print(len(serializer.data))
return Response({
"response": serializer.data,
})
# the API class for getting students activity evaluations (emotions)
class GetLectureEmotionStudentEvaluations(APIView):
def get(self, request):
video_name = request.query_params.get('video_name')
labelled_detections, detected_people = ed.get_student_emotion_evaluations(video_name)
return Response({
"response": labelled_detections,
"people": detected_people
})
# the API class to retrieve individual student evaluation (emotion)
class GetLectureEmotionIndividualStudentEvaluation(APIView):
def get(self, request):
video_name = request.query_params.get('video_name')
student_name = request.query_params.get('student_name')
meta_data = ed.get_individual_student_evaluation(video_name, student_name)
serialized = VideoMetaSerializer(meta_data)
return Response({
"response": serialized.data
})
# API to retrieve emotion detections for frames
class GetLectureEmotionRecognitionsForFrames(APIView):
@@ -559,73 +465,6 @@ class GetLectureEmotionRecognitionsForFrames(APIView):
})
##### POSE #####
class GetLectureVideoForPose(APIView):
def get(self, request):
lecturer = request.query_params.get('lecturer')
date = request.query_params.get('date')
index = int(request.query_params.get('index'))
lecturer_video = LectureVideo.objects.filter(lecturer_id=lecturer, date=date)
serializer = LectureVideoSerializer(lecturer_video, many=True)
return Response({
"response": serializer.data[index]
})
# API to retrieve one lecture activity
class GetLectureVideoExtractedFrames(APIView):
def get(self, request):
lecture_video_id = request.query_params.get('lecture_video_id')
lecture_video_name = request.query_params.get('lecture_video_name')
# retrieve the extracted frames
extracted = ar.getExtractedFrames(lecture_video_name)
# lecture_activities = LectureActivity.objects.filter(lecture_video_id__lecture_video_id=lecture_video_id)
# serializer = LectureActivitySerializer(lecture_activities, many=True)
return Response({
# "response": serializer.data,
"extracted": extracted
})
# API to retrieve individual student detections
class GetLectureVideoIndividualStudentFrames(APIView):
def get(self, request):
video_name = request.query_params.get('video_name')
labelled_detections, detected_people = pc.get_pose_estimations(video_name)
return Response({
"response": labelled_detections,
"people": detected_people
})
# API to process pose estimation for an individual student
class ProcessIndividualStudentPoseEstimation(APIView):
authentication_classes = [BasicAuthentication]
permission_classes = [IsAuthenticated, IsAdminUser]
def get(self):
pass
# POST method
def post(self, request):
video_name = request.data['video_name']
student = request.data['student']
poses = request.data['poses']
pc.calculate_pose_estimation_for_student(video_name, student, poses)
return Response({
"response": video_name
})
##### GAZE ESTIMATION SECTION #####
class GetLectureGazeEstimationAvailaibility(APIView):
@@ -702,8 +541,6 @@ class GetLectureGazeEstimationViewSet(APIView):
def get(self, request):
lecture_video_id = request.query_params.get('lecture_video_id')
lecture_video_name = request.query_params.get('lecture_video_name')
lecture_gaze_estimations = LectureGazeEstimation.objects.filter(
lecture_video_id__lecture_video_id=lecture_video_id)
serializer = LectureGazeEstimationSerializer(lecture_gaze_estimations, many=True)
@@ -1251,6 +1088,7 @@ class GetLectureGazeSummary(APIView):
# =====OTHERS=====
# this API will retrieve the respective lecturer video name
class GetLecturerRecordedVideo(APIView):
def get(self, request):
@@ -1263,10 +1101,12 @@ class GetLecturerRecordedVideo(APIView):
lec_recorded_video_ser = LectureRecordedVideoSerializer(lec_recorded_video, many=True)
lec_recorded_video_data = lec_recorded_video_ser.data[0]
# extract the lecturer video name
video_name = lec_recorded_video_data['lecture_video_name']
print('lecturer recorded video name: ', video_name)
# return the response
return Response({
"video_name": video_name
})
@@ -1276,16 +1116,22 @@ class GetLecturerRecordedVideo(APIView):
class GetLectureActivityCorrelations(APIView):
def get(self, request):
# this variable defines the number of dates to be considered for activity correlations
option = request.query_params.get('option')
# the lecturer id
lecturer = request.query_params.get('lecturer')
int_option = int(option)
# get the current date
current_date = datetime.datetime.now().date()
option_date = datetime.timedelta(days=int_option)
# subtract the current date by the time period given
previous_date = current_date - option_date
# this list contains the student activities for each lecture
individual_lec_activities = []
# this list will contain the student activity-lecturer posture activity correlations
activity_correlations = []
# retrieving lecture activities
...
@@ -33,11 +33,21 @@ from .serializers import LectureEmotionSerializer
import pandas as pd
# emotion recognition method
# this method accepts:
# classifier: emotion recognition classifier (VGG model)
# face_classifier: face detection classifier (Haar-Cascade)
# image: image to be processed
# returns:
# label: the emotion recognition label
def emotion_recognition(classifier, face_classifier, image):
# this label will contain the recognized emotion label
label = "" label = ""
class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise'] class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# the detected faces in the image
faces = face_classifier.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
@@ -58,6 +68,13 @@ def emotion_recognition(classifier, face_classifier, image):
return label
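For illustration, a minimal usage sketch of emotion_recognition, assuming the Haar-cascade and VGG classifier files referenced elsewhere in this module and Keras' load_model (the module's own import is not shown in this hunk); the input image path is hypothetical:

import os
import cv2
from keras.models import load_model

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
face_classifier = cv2.CascadeClassifier(
    os.path.join(BASE_DIR, 'FirstApp\\classifiers\\haarcascade_frontalface_default.xml'))
classifier = load_model(os.path.join(BASE_DIR, 'FirstApp\\classifiers\\Emotion_little_vgg.h5'))

image = cv2.imread('student_detection.png')  # hypothetical cropped student image
label = emotion_recognition(classifier, face_classifier, image)
print(label)  # '' when no face is found, otherwise e.g. 'Happy' or 'Neutral'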
# this method will perform emotion recognition for a lecture
# this method accepts:
# video: the lecture video name
# returns:
# meta_data: the student emotion evaluation for the lecture video
def detect_emotion(video):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video))
@@ -136,135 +153,18 @@ def detect_emotion(video):
# for testing purposes
print('ending the emotion recognition process')
# return the data
return meta_data
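A brief, hedged usage sketch of detect_emotion; the video name is hypothetical, the file is expected under assets\FirstApp\videos, and it assumes the returned object is the same VideoMeta type whose counter fields appear elsewhere in this module:

meta_data = detect_emotion("lecture_01.mp4")  # hypothetical video name
print(meta_data.frame_count, meta_data.happy_count, meta_data.sad_count,
      meta_data.angry_count, meta_data.neutral_count, meta_data.surprise_count)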
# to retrieve student evaluation for emotions
def get_student_emotion_evaluations(video_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# face_classifier = cv2.CascadeClassifier(
# os.path.join(BASE_DIR, 'FirstApp\classifiers\haarcascade_frontalface_default.xml'))
# classifier_path = os.path.join(BASE_DIR, 'FirstApp\classifiers\Emotion_little_vgg.h5')
# classifier = load_model(classifier_path)
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
detections = []
frames = []
for frame_folder in os.listdir(EXTRACTED_DIR):
FRAME_DIR = os.path.join(EXTRACTED_DIR, frame_folder)
frame_details = {}
frame_details['frame'] = frame_folder
# for each detection in the frame directory
detected_images = []
for detection in os.listdir(FRAME_DIR):
if "frame" not in detection:
DETECTION_PATH = os.path.join(FRAME_DIR, detection)
image = cv2.imread(DETECTION_PATH)
# label = emotion_recognition(classifier, face_classifier, image)
detected_images.append(detection)
detections.append(detection)
frame_details['detections'] = detected_images
frames.append(frame_details)
sorted_frames = cs.custom_object_sorter(frames)
set_detections = set(detections)
list_set_detections = list(set_detections)
sorted_list_set_detections = cs.custom_sort(list_set_detections)
return sorted_frames, sorted_list_set_detections
# this method will retrieve individual student evaluations
def get_individual_student_evaluation(video_name, student_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
face_classifier = cv2.CascadeClassifier(
os.path.join(BASE_DIR, 'FirstApp\classifiers\haarcascade_frontalface_default.xml'))
classifier_path = os.path.join(BASE_DIR, 'FirstApp\classifiers\Emotion_little_vgg.h5')
classifier = load_model(classifier_path)
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
# the object of type 'VideoMeta'
meta_data = VideoMeta()
# the class labels
class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
# taking a count on each label
count_frames = 0
count_angry = 0
count_happy = 0
count_sad = 0
count_neutral = 0
count_surprise = 0
for frame in os.listdir(EXTRACTED_DIR):
# getting the frame folder
FRAME_FOLDER = os.path.join(EXTRACTED_DIR, frame)
for detections in os.listdir(FRAME_FOLDER):

# only take the images with the student name
if detections == student_name:
# get the label for this image
IMAGE_PATH = os.path.join(FRAME_FOLDER, detections)
image = cv2.imread(IMAGE_PATH)
label = emotion_recognition(classifier, face_classifier, image)
# check for the label of the image
if (label == 'Anger'):
count_angry += 1
# path = os.path.join(BASE_DIR, 'static\\images\\Anger')
# cv2.imwrite(os.path.join(path, 'Anger-{0}.jpg'.format(count)), frame)
elif (label == 'Happy'):
count_happy += 1
# path = os.path.join(BASE_DIR, 'static\\images\\Happy')
# cv2.imwrite(os.path.join(path, 'Happy-{0}.jpg'.format(count)), frame)
elif (label == 'Neutral'):
count_neutral += 1
# path = os.path.join(BASE_DIR, 'static\\images\\Neutral')
# cv2.imwrite(os.path.join(path, 'Neutral-{0}.jpg'.format(count)), frame)
elif (label == 'Sad'):
count_sad += 1
# path = os.path.join(BASE_DIR, 'static\\images\\Sad')
# cv2.imwrite(os.path.join(path, 'Sad-{0}.jpg'.format(count)), frame)
elif (label == 'Surprise'):
count_surprise += 1
# path = os.path.join(BASE_DIR, 'static\\images\\Surprise')
# cv2.imwrite(os.path.join(path, 'Surprise-{0}.jpg'.format(count)), frame)
# incrementing the frame_count
count_frames += 1
# setting up the counted values
meta_data.frame_count = count_frames
meta_data.happy_count = count_happy
meta_data.sad_count = count_sad
meta_data.angry_count = count_angry
meta_data.neutral_count = count_neutral
meta_data.surprise_count = count_surprise
# calculating the percentages
meta_data.calcPercentages()
return meta_data
# this method will

# this method will recognize the student emotions for each frame
# this method will accept:
# video_name: the lecture video name
# returns:
# sorted_emotion_frame_recognitions: the list of sorted student emotion recognitions for each frame
def get_frame_emotion_recognition(video_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
@@ -300,6 +200,7 @@ def get_frame_emotion_recognition(video_name):
# for testing purposes
print('starting the emotion frame recognition process')
# looping through the frames
while (frame_count < no_of_frames):
ret, image = cap.read()
@@ -375,17 +276,25 @@ def get_frame_emotion_recognition(video_name):
frame_count += 1
# sort the recognitions based on the frame number
sorted_activity_frame_recognitions = cs.custom_object_sorter(frame_emotion_recognitions)
sorted_emotion_frame_recognitions = cs.custom_object_sorter(frame_emotion_recognitions)
# for testing purposes
print('ending the emotion frame recognition process')
# return the detected frame percentages
return sorted_activity_frame_recognitions
return sorted_emotion_frame_recognitions
# this method will get the student emotion recognition summary for period
# this method accepts the following parameter
# emotions: the database records retrieved within the given time period
# returns:
# percentages: average percentages for each student emotion label
# individual_lec_emotions: contain the lecture emotion recognition details for each individual lecture
# emotion_labels: the emotion labels
# this method will retrieve student activity summary for given time period
def get_student_emotion_summary_for_period(emotions):
# declare variables to add percentage values
@@ -399,8 +308,10 @@ def get_student_emotion_summary_for_period(emotions):
# get the number of activities to calculate average
no_of_emotions = len(emotions)
# this list will contain the emotion recognition details for each lecture
individual_lec_emotions = []
# emotion labels
emotion_labels = ["happy_perct", "sad_perct", "angry_perct", "disgust_perct", "surprise_perct", "neutral_perct"]
# iterate through the activities
@@ -433,6 +344,7 @@ def get_student_emotion_summary_for_period(emotions):
surprise_average_perct = round((surprise_perct_combined / no_of_emotions), 1)
neutral_average_perct = round((neutral_perct_combined / no_of_emotions), 1)
# this dictionary will contain the student emotion average percentage values
percentages = {}
percentages["happy_perct"] = happy_average_perct
percentages["sad_perct"] = sad_average_perct
@@ -441,12 +353,21 @@ def get_student_emotion_summary_for_period(emotions):
percentages["surprise_perct"] = surprise_average_perct
percentages["neutral_perct"] = neutral_average_perct
# return the values
return percentages, individual_lec_emotions, emotion_labels
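For illustration, a hedged sketch of how this summary is consumed; period_emotions stands in for the LectureEmotionReport records mentioned in the comments above, and the numbers are made up:

percentages, individual_lec_emotions, emotion_labels = get_student_emotion_summary_for_period(period_emotions)
# percentages is a dict of label averages, e.g.
# {"happy_perct": 20.5, "sad_perct": 10.0, "angry_perct": 5.5,
#  "disgust_perct": 1.0, "surprise_perct": 3.0, "neutral_perct": 60.0}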
# this method will retrieve activity frame groupings for a lecture
# this method will get the lecture student emotion frame groupings
# this method accepts:
# video_name: the lecture video name
# frame_landmarks: the specific frames in the extracted set of frames from the lecture video
# frame_group_dict: the dictionary which contains the frame groups and the relevant student emotion labels for each frame group
# returns:
# frame_group_dict: the modified frame group dictionary
# emotion_labels: student emotion labels
def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
@@ -463,13 +384,11 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(config_file, model_file)
# capture the video
cap = cv2.VideoCapture(VIDEO_DIR)
no_of_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# initializing the count variables
frame_count = 0
@@ -574,15 +493,7 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
frame_group_neutral_count = frame_group_details['neutral_count']
group_detection_count = frame_group_details['detection_count']
# print('frame group phone count: ', frame_group_phone_count)
# print('frame group listen count: ', frame_group_listen_count)
# print('frame group note count: ', frame_group_note_count)
# print('frame group detection count: ', group_detection_count)
frame_diff = int(frame_group_diff[key])
# print('frame difference: ', frame_diff)
# calculate the frame group emotion percentages
frame_group_happy_perct = float(frame_group_happy_count / group_detection_count) * 100
frame_group_sad_perct = float(frame_group_sad_count / group_detection_count) * 100
frame_group_angry_perct = float(frame_group_angry_count / group_detection_count) * 100
@@ -612,7 +523,15 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
return frame_group_dict, emotion_labels
# this section will handle some database operations
# this method will save frame detections to the database
# this method will accept
# video_name: lecture video name to be processed
# returns
# frame_detections: the student emotion frame detections
def save_frame_recognitions(video_name):
# for testing purposes
@@ -662,7 +581,12 @@ def save_frame_recognitions(video_name):
return frame_detections
# this method will save the emotion frame groupings to the database
# this method accepts:
# video_name: the lecture video name
# frame_landmarks: the specific frames in the extracted set of frames from the lecture video
# frame_group_dict: the dictionary which contains the frame groups and the relevant student emotion labels for each frame group
def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# for testing purposes
@@ -704,7 +628,15 @@ def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
new_lec_emotion_frame_groupings.save()
# this method will get emotion correlations
# this method will get student emotion correlations
# this method accepts:
# individual_lec_emotions: the student emotion details for each individual lecture
# lec_recorded_activity_data: the lecturer posture recognition details
# returns:
# correlations: the lecture student emotions and lecturer posture recognition correlations
def get_emotion_correlations(individual_lec_emotions, lec_recorded_activity_data):
# this variable will be used to store the correlations
correlations = []
...
@@ -13,11 +13,6 @@ main methods include
"""
import tensorflow as tf
import tensorflow.keras
from PIL import Image, ImageOps
@@ -34,6 +29,13 @@ from . import utilities as ut
import pandas as pd
# this method will perform activity recognition for a lecture
# this method accepts:
# video_path: the lecture video name
# returns:
# percentages: the student activity percentages for the lecture video
def activity_recognition(video_path):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_path))
@@ -55,11 +57,12 @@ def activity_recognition(video_path):
np.set_printoptions(suppress=True)
# class_labels = ['Phone checking', 'Talking with friends', 'note taking']
# class labels
# define the student activity labels
class_labels = ['Phone checking', 'Listening', 'Note taking']
# load the model
model = tensorflow.keras.models.load_model(CLASSIFIER_DIR)
# compile the model
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
@@ -70,6 +73,8 @@ def activity_recognition(video_path):
# iteration
video = cv2.VideoCapture(VIDEO_DIR)
no_of_frames = video.get(cv2.CAP_PROP_FRAME_COUNT)
# initialize the frame count and student activity count variables
frame_count = 0
total_detections = 0
phone_checking_count = 0
@@ -79,10 +84,12 @@ def activity_recognition(video_path):
# for testing purposes
print('starting the activity recognition process')
# looping through the frames
while (frame_count < no_of_frames):
ret, image = video.read()
image = cv2.resize(image, size)
# perform person detection on the extracted image
detections = person_detection(image, net)
# this is for testing purposes
@@ -92,8 +99,10 @@ def activity_recognition(video_path):
# if there are any person detections
if (len(detections) > 0):
# increment the total detections in the entire video
total_detections += len(detections)
# initialize the detection count
detection_count = 0
# looping through the person detections of the frame
@@ -120,33 +129,41 @@ def activity_recognition(video_path):
elif (label == class_labels[2]):
note_taking_count += 1
# increment the detection count
detection_count += 1
# increment the frame count
frame_count += 1
# calculating the percentages for each label
phone_perct = float(phone_checking_count / total_detections) * 100 if total_detections > 0 else 0
# talking_perct = float(talking_count / total_detections) * 100 if total_detections > 0 else 0
note_perct = float(note_taking_count / total_detections) * 100 if total_detections > 0 else 0
listening_perct = float(listening_count / total_detections) * 100 if total_detections > 0 else 0
# assigning the percentages to the dictionary
percentages["phone_perct"] = phone_perct
# percentages["talking_perct"] = talking_perct
percentages["writing_perct"] = note_perct percentages["writing_perct"] = note_perct
percentages["listening_perct"] = listening_perct percentages["listening_perct"] = listening_perct
# for testing purposes # for testing purposes
print('activity recognition process is over') print('activity recognition process is over')
# return the percentages
return percentages
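A minimal usage sketch; the video name is hypothetical, the file is expected under assets\FirstApp\videos, and the returned keys match the assignments above:

percentages = activity_recognition("lecture_01.mp4")  # hypothetical video name
print(percentages["phone_perct"], percentages["writing_perct"], percentages["listening_perct"])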
# this method will perform the person detection for a given image
# this method accepts:
# image: image that needs to be processed
# net: the person detection model, which is a caffe implemented deep learning model
# returns:
# detected_person: this list contains the bounding box coordinates of the person detections in the input image
def person_detection(image, net):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# set the threshold value
threshold = 0.2
detected_person = []
@@ -156,8 +173,8 @@ def person_detection(image, net):
"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
"sofa", "train", "tvmonitor"]
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))
# initialize the person count
person_count = 0
# load the input image and construct an input blob for the image
@@ -192,224 +209,52 @@ def person_detection(image, net):
# display the prediction
label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
# print("[INFO] {}".format(label)) # if the detected object belongs to the 'person' class
if (format(label).__contains__("person")):
startX = 0 if startX < 0 else startX
startY = 0 if startY < 0 else startY
# extract the person
person = image[startY:startY + endY, startX:startX + endX]
detected_person.append(person)
person_count += 1
# return the detection person list
return detected_person
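For illustration, a hedged sketch of wiring person_detection up with the same MobileNet-SSD files loaded elsewhere in this module; the frame path is hypothetical:

import os
import cv2

BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
model_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.caffemodel")
net = cv2.dnn.readNetFromCaffe(config_file, model_file)

frame = cv2.imread("frame-0.png")  # hypothetical extracted frame
people = person_detection(frame, net)
print(len(people), "person detections")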
# retrieving the extracted frames and detections for a given video
def getExtractedFrames(folder_name):
image_list = []
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(folder_name))
# listing all the images in the directory
for frame_folders in os.listdir(EXTRACTED_DIR):
FRAME_FOLDER = os.path.join(EXTRACTED_DIR, frame_folders)
frame_details = {}
frame_details['frame'] = frame_folders
detection_details = []
for detections in os.listdir(FRAME_FOLDER):
detection_details.append(detections)
frame_details['detections'] = detection_details
image_list.append(frame_details)
# checking for the number of frames
if (len(image_list) > 0):
image_list = custom_object_sorter(image_list)
return image_list
else:
return "No extracted frames were found"
# get detections for a given frame name
def get_detections(video_name, frame_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
FRAME_DIR = os.path.join(EXTRACTED_DIR, frame_name)
detections = []
for detection in os.listdir(FRAME_DIR):
if 'frame' not in detection:
detections.append(detection)
return detections
# get detections for a given class name
def get_detections_for_label(video_name, label_index):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
np.set_printoptions(suppress=True)
model = tensorflow.keras.models.load_model(CLASSIFIER_DIR)
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
size = (224, 224)
class_labels = ['Phone checking', 'Talking with friends', 'note taking']
label_index = int(label_index)
given_label = class_labels[label_index]
detections = []
frames = []
for frame_folder in os.listdir(EXTRACTED_DIR):
FRAME_DIR = os.path.join(EXTRACTED_DIR, frame_folder)
frame_details = {}
frame_details['frame'] = frame_folder
# for each detection in the frame directory
detected_images = []
for detection in os.listdir(FRAME_DIR):
if "frame" not in detection:
DETECTION_PATH = os.path.join(FRAME_DIR, detection)
image = cv2.imread(DETECTION_PATH)
image = cv2.resize(image, size)
image_array = np.asarray(image)
normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
# Load the image into the array
data[0] = normalized_image_array
# run the inference
prediction = model.predict(data)
label = class_labels[prediction.argmax()]
# checking for equality in selected label and given label
if (label == given_label):
detected_images.append(detection)
detections.append(detection)
frame_details['detections'] = detected_images
frames.append(frame_details)
sorted_frames = custom_object_sorter(frames)
set_detections = set(detections)
list_set_detections = list(set_detections)
sorted_list_set_detections = custom_sort(list_set_detections)
return sorted_frames, sorted_list_set_detections
# to get the student evaluations
def get_student_activity_evaluation(video_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_03.h5")
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_04.h5")
np.set_printoptions(suppress=True)
model = tensorflow.keras.models.load_model(CLASSIFIER_DIR)
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
size = (224, 224)
class_labels = ['Phone checking', 'Talking with friends', 'note taking']
detections = []
frames = []
for frame_folder in os.listdir(EXTRACTED_DIR):
FRAME_DIR = os.path.join(EXTRACTED_DIR, frame_folder)
frame_details = {}
frame_details['frame'] = frame_folder
# for each detection in the frame directory
detected_images = []
for detection in os.listdir(FRAME_DIR):
if "frame" not in detection:
DETECTION_PATH = os.path.join(FRAME_DIR, detection)
image = cv2.imread(DETECTION_PATH)
image = cv2.resize(image, size)
image_array = np.asarray(image)
normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
# Load the image into the array
data[0] = normalized_image_array
# run the inference
prediction = model.predict(data)
label = class_labels[prediction.argmax()]
detected_images.append(detection)
detections.append(detection)
frame_details['detections'] = detected_images
frames.append(frame_details)
sorted_frames = custom_object_sorter(frames)
set_detections = set(detections)
list_set_detections = list(set_detections)
sorted_list_set_detections = custom_sort(list_set_detections)
return sorted_frames, sorted_list_set_detections
# recognize the activity for each frame
# this method will recognize the activity for each frame
# this method will accept:
# video_name: the lecture video name
# returns:
# sorted_activity_frame_recognitions: the list of sorted student activity recognitions for each frame
def get_frame_activity_recognition(video_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_03.h5")
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_04.h5")
ACTIVITY_DIR = os.path.join(BASE_DIR, "static\\FirstApp\\activity")
# files required for person detection
config_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.prototxt.txt")
model_file = os.path.join(BASE_DIR, "FirstApp\\classifiers\\MobileNetSSD_deploy.caffemodel")
# load our serialized person detection model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(config_file, model_file)
np.set_printoptions(suppress=True)
# class_labels = ['Phone checking', 'Talking with friends', 'note taking']
# class labels
class_labels = ['Phone checking', 'Listening', 'Note taking']
# load the activity recognition model
model = tensorflow.keras.models.load_model(CLASSIFIER_DIR)
# compile the model
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
@@ -519,87 +364,16 @@ def get_frame_activity_recognition(video_name):
return sorted_activity_frame_recognitions
# this method will retrieve individual student evaluation
def get_individual_student_evaluation(video_name, student_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_03.h5")
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_02.h5")
# CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_04.h5")
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers\\student_activity_version_05.h5")
np.set_printoptions(suppress=True)
# load the model
model = tensorflow.keras.models.load_model(CLASSIFIER_DIR)
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
size = (224, 224)
# initializing the count variables
frame_count = 0
phone_count = 0
note_count = 0
listen_count = 0
# class labels
class_labels = ['Phone checking', 'Listening', 'Note taking']
for frame in os.listdir(EXTRACTED_DIR):
# getting the frame folder
FRAME_FOLDER = os.path.join(EXTRACTED_DIR, frame)
for detections in os.listdir(FRAME_FOLDER):
# only take the images with the student name
if detections == student_name:
# get the label for this image
IMAGE_PATH = os.path.join(FRAME_FOLDER, detections)
image = cv2.imread(IMAGE_PATH)
image = cv2.resize(image, size)
image_array = np.asarray(image)
normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
# Load the image into the array
data[0] = normalized_image_array
# run the inference
prediction = model.predict(data)
label = class_labels[prediction.argmax()]
# checking for the label
if label == class_labels[0]:
phone_count += 1
elif label == class_labels[1]:
listen_count += 1
elif label == class_labels[2]:
note_count += 1
# increment the frame count
frame_count += 1
# calculating the percentages
phone_perct = float(phone_count / frame_count) * 100
writing_perct = float(note_count / frame_count) * 100
listening_perct = float(listen_count / frame_count) * 100
# this dictionary will be returned
percentages = {}
percentages['phone_perct'] = phone_perct
percentages['writing_perct'] = writing_perct
percentages['listening_perct'] = listening_perct
return percentages
# this method will retrieve student activity summary for given time period
# this method will get the student activity recognition summary for period
# this method accepts the following parameter
# activities: the database records retrieved within the given time period
# returns:
# percentages: average percentages for each student activity recognition label
# individual_lec_activities: contain the lecture activity recognition details for each individual lecture
# activity_labels: the activity labels
def get_student_activity_summary_for_period(activities):
# declare variables to add percentage values
phone_checking_perct_combined = 0.0
@@ -609,8 +383,10 @@ def get_student_activity_summary_for_period(activities):
# get the number of activities to calculate average
no_of_activities = len(activities)
# this list will contain the student activity details for each lecture
individual_lec_activities = []
# activity labels
activity_labels = ["phone_perct", "listening_perct", "writing_perct"] activity_labels = ["phone_perct", "listening_perct", "writing_perct"]
# iterate through the activities # iterate through the activities
...@@ -637,10 +413,20 @@ def get_student_activity_summary_for_period(activities): ...@@ -637,10 +413,20 @@ def get_student_activity_summary_for_period(activities):
percentages["listening_perct"] = listening_average_perct percentages["listening_perct"] = listening_average_perct
percentages["writing_perct"] = note_taking_average_perct percentages["writing_perct"] = note_taking_average_perct
# return the values
return percentages, individual_lec_activities, activity_labels
# this method will retrieve activity frame groupings for a lecture
# this method will get the lecture student activity frame groupings
# this method accepts:
# video_name: the lecture video name
# frame_landmarks: the specific frames in the extracted set of frames from the lecture video
# frame_group_dict: the dictionary which contains the frame groups and the relevant student activity labels for each frame group
# returns:
# frame_group_dict: the modified frame group dictionary
# activity_labels: student activity labels
def activity_frame_groupings(video_name, frame_landmarks, frame_group_dict):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
@@ -753,14 +539,6 @@ def activity_frame_groupings(video_name, frame_landmarks, frame_group_dict):
frame_group_note_count = frame_group_details['note_count']
group_detection_count = frame_group_details['detection_count']
# print('frame group phone count: ', frame_group_phone_count)
# print('frame group listen count: ', frame_group_listen_count)
# print('frame group note count: ', frame_group_note_count)
# print('frame group detection count: ', group_detection_count)
frame_diff = int(frame_group_diff[key])
# print('frame difference: ', frame_diff)
frame_group_phone_perct = float(frame_group_phone_count / group_detection_count) * 100
frame_group_listen_perct = float(frame_group_listen_count / group_detection_count) * 100
@@ -777,14 +555,20 @@ def activity_frame_groupings(video_name, frame_landmarks, frame_group_dict):
frame_group_dict[key].pop('note_count')
frame_group_dict[key].pop('detection_count')
# print('frame group dict: ', frame_group_dict)
activity_labels = ['phone_perct', 'listen_perct', 'note_perct']
# return the dictionary
return frame_group_dict, activity_labels
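For illustration, a hedged sketch of the frame group dictionary this method transforms; the group key and the numbers are made up, and only the count/percentage key names are taken from the code above:

# before: raw counts per frame group (illustrative values)
frame_group_dict = {
    "group-0": {"phone_count": 4, "listen_count": 20, "note_count": 6, "detection_count": 30}
}

# after activity_frame_groupings(...): the count keys are popped and replaced by percentages, e.g.
# {"group-0": {"phone_perct": 13.3, "listen_perct": 66.7, "note_perct": 20.0}}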
# this section will handle saving activity entities to the database
# this method will save frame detections to the database
# this method will accept
# video_name: lecture video name to be processed
# returns
# frame_detections: the student activity frame detections
def save_frame_recognition(video_name):
# for testing purposes
@@ -831,7 +615,12 @@ def save_frame_recognition(video_name):
return frame_detections
# this method will save the activity frame groupings to the database
# this method accepts:
# video_name: the lecture video name
# frame_landmarks: the specific frames in the extracted set of frames from the lecture video
# frame_group_dict: the dictionary which contains the frame groups and the relevant student activity labels for each frame group
def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# for testing purposes
@@ -874,7 +663,15 @@ def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
new_lec_activity_frame_groupings.save()
# this method will get activity correlations
# this method will get student activity correlations
# this method accepts:
# individual_lec_activities: the student activity details for each individual lecture
# lec_recorded_activity_data: the lecturer posture recognition details
# returns:
# correlations: the lecture student activities and lecturer posture recognition correlations
def get_activity_correlations(individual_lec_activities, lec_recorded_activity_data):
# this variable will be used to store the correlations
...
@@ -147,6 +147,12 @@ def head_pose_points(img, rotation_vector, translation_vector, camera_matrix):
# this method will perform gaze estimation for a lecture
# this method accepts:
# video_path: the lecture video name
# returns:
# percentages: the gaze estimation percentages for the lecture video
def process_gaze_estimation(video_path):
# get the base directory
@@ -161,12 +167,15 @@ def process_gaze_estimation(video_path):
# load the face detection model
face_model = get_face_detector()
# load the facial landmark model
landmark_model = get_landmark_model()
# capture the video
cap = cv2.VideoCapture(VIDEO_PATH)
ret, img = cap.read()
size = img.shape
font = cv2.FONT_HERSHEY_SIMPLEX
# 3D model points.
model_points = np.array([
(0.0, 0.0, 0.0), # Nose tip
@@ -210,12 +219,10 @@ def process_gaze_estimation(video_path):
# iterate the video frames
while True:
ret, img = cap.read()
if ret == True:
faces = find_faces(img, face_model)
# print('no of faces found: ', len(faces))
student_count = 0
# iterate through each detected face
for face in faces:
@@ -226,8 +233,6 @@ def process_gaze_estimation(video_path):
isLookingLeft = False
isLookingFront = False
# deriving the student name to display in the image
student_name = 'student-{}'.format(student_count)
# retrieving the facial landmarks and face bounding box coordinates
marks, facebox = detect_marks(img, landmark_model, face)
@@ -240,6 +245,7 @@ def process_gaze_estimation(video_path):
marks[48], # Left Mouth corner
marks[54] # Right mouth corner
], dtype="double")
dist_coeffs = np.zeros((4, 1)) # Assuming no lens distortion
(success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix,
dist_coeffs, flags=cv2.SOLVEPNP_UPNP)
@@ -268,57 +274,36 @@ def process_gaze_estimation(video_path):
except:
ang2 = 90
# print('angle 1: {}, angle 2: {}'.format(ang1, ang2))
# checking for angle 1 # checking for angle 1
if ang1 >= THRESHOLD: if ang1 >= THRESHOLD:
# cv2.putText(img, 'looking down', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
isLookingDown = True isLookingDown = True
elif ang1 <= -THRESHOLD: elif ang1 <= -THRESHOLD:
# cv2.putText(img, 'looking up', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
isLookingUp = True isLookingUp = True
else: else:
# cv2.putText(img, 'looking front', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
isLookingFront = True isLookingFront = True
# checking for angle 2 # checking for angle 2
if ang2 >= THRESHOLD: if ang2 >= THRESHOLD:
# cv2.putText(img, 'looking right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
isLookingRight = True isLookingRight = True
elif ang2 <= -THRESHOLD: elif ang2 <= -THRESHOLD:
# cv2.putText(img, 'looking left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
isLookingLeft = True isLookingLeft = True
# checking for vertical and horizontal directions # checking for vertical and horizontal directions
if isLookingDown & isLookingRight: if isLookingDown & isLookingRight:
# cv2.putText(img, 'looking down and right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_down_right_count += 1 head_down_right_count += 1
elif isLookingDown & isLookingLeft: elif isLookingDown & isLookingLeft:
# cv2.putText(img, 'looking down and left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_down_left_count += 1 head_down_left_count += 1
elif isLookingUp & isLookingRight: elif isLookingUp & isLookingRight:
# cv2.putText(img, 'looking up and right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_up_right_count += 1 head_up_right_count += 1
elif isLookingUp & isLookingLeft: elif isLookingUp & isLookingLeft:
# cv2.putText(img, 'looking up and left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_up_left_count += 1 head_up_left_count += 1
elif isLookingFront: elif isLookingFront:
# cv2.putText(img, 'Head front', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_front_count += 1 head_front_count += 1
# indicate the student name
# cv2.putText(img, student_name, (facebox[2], facebox[3]), font, 2, (255, 255, 128), 3)
# increment the face count # increment the face count
face_count += 1 face_count += 1
# naming the new image
# image_name = "frame-{}.png".format(frame_count)
#
# # new image path
# image_path = os.path.join(VIDEO_DIR, image_name)
# save the new image
# cv2.imwrite(image_path, img)
# for testing purposes # for testing purposes
...@@ -331,10 +316,6 @@ def process_gaze_estimation(video_path): ...@@ -331,10 +316,6 @@ def process_gaze_estimation(video_path):
break break
# after extracting the frames, save the changes to static content
# p = os.popen("python manage.py collectstatic", "w")
# p.write("yes")
# calculate percentages # calculate percentages
head_up_right_perct = (Decimal(head_up_right_count) / Decimal(face_count)) * 100 head_up_right_perct = (Decimal(head_up_right_count) / Decimal(face_count)) * 100
head_up_left_perct = (Decimal(head_up_left_count) / Decimal(face_count)) * 100 head_up_left_perct = (Decimal(head_up_left_count) / Decimal(face_count)) * 100
...@@ -361,27 +342,17 @@ def process_gaze_estimation(video_path): ...@@ -361,27 +342,17 @@ def process_gaze_estimation(video_path):
# return the dictionary # return the dictionary
return percentages return percentages
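The elided tail of process_gaze_estimation converts the per-direction counts into the returned percentages dictionary using Decimal division by face_count, as the two lines above show. A condensed sketch of that step follows, with a guard for videos in which no faces were detected; the counts dictionary is an illustrative stand-in for the separate head_*_count variables used in the actual code.

from decimal import Decimal

def sketch_gaze_percentages(counts, face_count):
    # counts: e.g. {"up_right": 12, "up_left": 9, "down_right": 30, "down_left": 25, "front": 40}
    total = Decimal(face_count) if face_count > 0 else Decimal(1)  # avoid division by zero
    return {
        "looking_up_and_right_perct": (Decimal(counts["up_right"]) / total) * 100,
        "looking_up_and_left_perct": (Decimal(counts["up_left"]) / total) * 100,
        "looking_down_and_right_perct": (Decimal(counts["down_right"]) / total) * 100,
        "looking_down_and_left_perct": (Decimal(counts["down_left"]) / total) * 100,
        "looking_front_perct": (Decimal(counts["front"]) / total) * 100,
    }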
# this method will retrieve extracted frames
def getExtractedFrames(lecture_video_name):
image_list = []
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\gaze\\{}".format(lecture_video_name))
# listing all the images in the directory
for image_path in os.listdir(EXTRACTED_DIR):
image_list.append(image_path)
# checking for the number of frames
if (len(image_list) > 0):
image_list = custom_sort(image_list)
return image_list
else:
return "No extracted frames were found"

# this method will retrieve lecture gaze estimation for each frame
# this method accepts the following parameter
# video_name: the lecture video name that needs to be processed
# returns:
# frame_detections: the list of detections containing each frame
# frame_rate: frame rate of the video
def get_lecture_gaze_estimation_for_frames(video_name): def get_lecture_gaze_estimation_for_frames(video_name):
# get the base directory # get the base directory
...@@ -391,18 +362,22 @@ def get_lecture_gaze_estimation_for_frames(video_name): ...@@ -391,18 +362,22 @@ def get_lecture_gaze_estimation_for_frames(video_name):
# play the video # play the video
video = cv2.VideoCapture(VIDEO_PATH) video = cv2.VideoCapture(VIDEO_PATH)
# get the frame rate
frame_rate = video.get(cv2.CAP_PROP_FPS) frame_rate = video.get(cv2.CAP_PROP_FPS)
# this list will contain the frame detections
frame_detections = [] frame_detections = []
# load the face model
face_model = get_face_detector() face_model = get_face_detector()
# load the face landmark model
landmark_model = get_landmark_model() landmark_model = get_landmark_model()
# capture the video
cap = cv2.VideoCapture(VIDEO_PATH) cap = cv2.VideoCapture(VIDEO_PATH)
ret, img = cap.read() ret, img = cap.read()
size = img.shape size = img.shape
font = cv2.FONT_HERSHEY_SIMPLEX
# 3D model points. # 3D model points.
model_points = np.array([ model_points = np.array([
(0.0, 0.0, 0.0), # Nose tip (0.0, 0.0, 0.0), # Nose tip
...@@ -454,7 +429,6 @@ def get_lecture_gaze_estimation_for_frames(video_name): ...@@ -454,7 +429,6 @@ def get_lecture_gaze_estimation_for_frames(video_name):
# find the number of faces # find the number of faces
faces = find_faces(img, face_model) faces = find_faces(img, face_model)
student_count = 0
# iterate through each detected face # iterate through each detected face
for face in faces: for face in faces:
...@@ -466,8 +440,6 @@ def get_lecture_gaze_estimation_for_frames(video_name): ...@@ -466,8 +440,6 @@ def get_lecture_gaze_estimation_for_frames(video_name):
isLookingLeft = False isLookingLeft = False
isLookingFront = False isLookingFront = False
# deriving the student name to display in the image
student_name = 'student-{}'.format(student_count)
# retrieving the facial landmarks and face bounding box coordinates # retrieving the facial landmarks and face bounding box coordinates
marks, facebox = detect_marks(img, landmark_model, face) marks, facebox = detect_marks(img, landmark_model, face)
...@@ -507,24 +479,18 @@ def get_lecture_gaze_estimation_for_frames(video_name): ...@@ -507,24 +479,18 @@ def get_lecture_gaze_estimation_for_frames(video_name):
except: except:
ang2 = 90 ang2 = 90
# print('angle 1: {}, angle 2: {}'.format(ang1, ang2))
# checking for angle 1 # checking for angle 1
if ang1 >= THRESHOLD: if ang1 >= THRESHOLD:
# cv2.putText(img, 'looking down', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
isLookingDown = True isLookingDown = True
elif ang1 <= -THRESHOLD: elif ang1 <= -THRESHOLD:
# cv2.putText(img, 'looking up', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
isLookingUp = True isLookingUp = True
else: else:
# cv2.putText(img, 'looking front', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
isLookingFront = True isLookingFront = True
# checking for angle 2 # checking for angle 2
if ang2 >= THRESHOLD: if ang2 >= THRESHOLD:
# cv2.putText(img, 'looking right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
isLookingRight = True isLookingRight = True
elif ang2 <= -THRESHOLD: elif ang2 <= -THRESHOLD:
# cv2.putText(img, 'looking left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
isLookingLeft = True isLookingLeft = True
# checking for vertical and horizontal directions # checking for vertical and horizontal directions
...@@ -585,6 +551,14 @@ def get_lecture_gaze_estimation_for_frames(video_name): ...@@ -585,6 +551,14 @@ def get_lecture_gaze_estimation_for_frames(video_name):
# this method will get the student gaze estimation summary for period # this method will get the student gaze estimation summary for period
# this method accepts the following parameter
# gaze_estimation_data: the database records retrieved within the given time period
# returns:
# percentages: average percentages for each gaze estimation label
# individual_lec_gaze_estimations: contain the lecture gaze estimation details for each individual lecture
# gaze_estimation_labels: the gaze estimation labels
def get_student_gaze_estimation_summary_for_period(gaze_estimation_data): def get_student_gaze_estimation_summary_for_period(gaze_estimation_data):
# declare variables to add percentage values # declare variables to add percentage values
...@@ -597,8 +571,10 @@ def get_student_gaze_estimation_summary_for_period(gaze_estimation_data): ...@@ -597,8 +571,10 @@ def get_student_gaze_estimation_summary_for_period(gaze_estimation_data):
# get the number of activities to calculate the average
no_of_gaze_estimations = len(gaze_estimation_data) no_of_gaze_estimations = len(gaze_estimation_data)
# this list will contain the lecture gaze estimation details for each individual lecture
individual_lec_gaze_estimations = [] individual_lec_gaze_estimations = []
# define the gaze estimation labels
gaze_estimation_labels = ["looking_up_and_right_perct", "looking_up_and_left_perct", "looking_down_and_right_perct", "looking_down_and_left_perct", "looking_front_perct"] gaze_estimation_labels = ["looking_up_and_right_perct", "looking_up_and_left_perct", "looking_down_and_right_perct", "looking_down_and_left_perct", "looking_front_perct"]
# iterate through the activities # iterate through the activities
...@@ -633,21 +609,31 @@ def get_student_gaze_estimation_summary_for_period(gaze_estimation_data): ...@@ -633,21 +609,31 @@ def get_student_gaze_estimation_summary_for_period(gaze_estimation_data):
percentages["looking_down_and_left_perct"] = looking_down_left_average_perct percentages["looking_down_and_left_perct"] = looking_down_left_average_perct
percentages["looking_front_perct"] = looking_front_average_perct percentages["looking_front_perct"] = looking_front_average_perct
# return the values
return percentages, individual_lec_gaze_estimations, gaze_estimation_labels return percentages, individual_lec_gaze_estimations, gaze_estimation_labels
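Because most of the loop body is elided, here is a condensed sketch of the averaging that get_student_gaze_estimation_summary_for_period performs, assuming each record is dict-like and keyed by the five percentage fields; the real implementation keeps one running counter per label rather than a totals dictionary.

def sketch_period_summary(gaze_estimation_data):
    gaze_estimation_labels = [
        "looking_up_and_right_perct", "looking_up_and_left_perct",
        "looking_down_and_right_perct", "looking_down_and_left_perct",
        "looking_front_perct",
    ]
    # guard against an empty period so the division below cannot fail
    no_of_gaze_estimations = len(gaze_estimation_data) or 1

    totals = {label: 0 for label in gaze_estimation_labels}
    individual_lec_gaze_estimations = []

    for record in gaze_estimation_data:
        individual_lec_gaze_estimations.append(record)
        for label in gaze_estimation_labels:
            totals[label] += record[label]

    percentages = {label: totals[label] / no_of_gaze_estimations for label in gaze_estimation_labels}
    return percentages, individual_lec_gaze_estimations, gaze_estimation_labels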
# this method will get the lecture gaze estimation frame groupings # this method will get the lecture gaze estimation frame groupings
# this method accepts:
# video_name: the lecture video name
# frame_landmarks: the specific frames in the extracted set of frames from the lecture video
# frame_group_dict: the dictionary which contains the frame groups and the relevant gaze estimation labels for each frame group
# returns:
# frame_group_dict: the modified frame group dictionary
# labels: gaze estimation labels
def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dict): def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dict):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\gaze\\{}".format(video_name))
VIDEO_PATH = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name)) VIDEO_PATH = os.path.join(BASE_DIR, "assets\\FirstApp\\videos\\{}".format(video_name))
print('video path: ', VIDEO_PATH)
# load the face detection model # load the face detection model
face_model = get_face_detector() face_model = get_face_detector()
# load the facial landmark model
landmark_model = get_landmark_model() landmark_model = get_landmark_model()
# capture the video
cap = cv2.VideoCapture(VIDEO_PATH) cap = cv2.VideoCapture(VIDEO_PATH)
ret, img = cap.read() ret, img = cap.read()
size = img.shape size = img.shape
...@@ -712,7 +698,6 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic ...@@ -712,7 +698,6 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic
head_up_left_count = 0 head_up_left_count = 0
head_down_right_count = 0 head_down_right_count = 0
head_down_left_count = 0 head_down_left_count = 0
face_count = 0
detection_count = 0 detection_count = 0
...@@ -770,7 +755,6 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic ...@@ -770,7 +755,6 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic
except: except:
ang2 = 90 ang2 = 90
# print('angle 1: {}, angle 2: {}'.format(ang1, ang2))
# checking for angle 1 # checking for angle 1
if ang1 >= THRESHOLD: if ang1 >= THRESHOLD:
isLookingDown = True isLookingDown = True
...@@ -843,7 +827,6 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic ...@@ -843,7 +827,6 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic
frame_group_downleft_count = frame_group_details['downleft_count'] frame_group_downleft_count = frame_group_details['downleft_count']
frame_group_front_count = frame_group_details['front_count'] frame_group_front_count = frame_group_details['front_count']
print('detection count: ', frame_group_details['detection_count'])
group_detection_count = 1 if frame_group_details['detection_count'] == 0 else frame_group_details['detection_count'] group_detection_count = 1 if frame_group_details['detection_count'] == 0 else frame_group_details['detection_count']
...@@ -881,7 +864,15 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic ...@@ -881,7 +864,15 @@ def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dic
return frame_group_dict, labels return frame_group_dict, labels
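The zero-count guard above hints at how the elided tail of gaze_estimation_frame_groupings turns per-group counts into percentages. A hedged sketch follows; only 'downleft_count', 'front_count' and 'detection_count' appear in this diff, so the remaining key names are assumed by analogy.

def sketch_group_percentages(frame_group_dict):
    count_keys = ("upright_count", "upleft_count", "downright_count", "downleft_count", "front_count")
    for group_name, details in frame_group_dict.items():
        # fall back to 1 when nothing was detected in this group, as the guard above does
        detections = details["detection_count"] if details["detection_count"] > 0 else 1
        for key in count_keys:
            perct_key = key.replace("_count", "_perct")
            details[perct_key] = round((details[key] / detections) * 100, 1)
    return frame_group_dict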
# this section will handle some database operations ##### THIS SECTION WILL HANDLE SOME DATABASE OPERATIONS #####
# this method will save frame detections to the database
# this method will accept
# video_name: lecture video name to be processed
# returns
# frame_detections: the gaze estimation frame detections
def save_frame_detections(video_name): def save_frame_detections(video_name):
# for testing purposes # for testing purposes
...@@ -932,7 +923,13 @@ def save_frame_detections(video_name): ...@@ -932,7 +923,13 @@ def save_frame_detections(video_name):
return frame_detections return frame_detections
# this method will save gaze frame groupings to the database # this method will save gaze frame groupings to the database
# this method accepts:
# video_name: the lecture video name
# frame_landmarks: the specific frames in the extracted set of frames from the lecture video
# frame_group_dict: the dictionary which contains the frame groups and the relevant gaze estimation labels for each frame group
def save_frame_groupings(video_name, frame_landmarks, frame_group_dict): def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# for testing purposes # for testing purposes
...@@ -976,6 +973,13 @@ def save_frame_groupings(video_name, frame_landmarks, frame_group_dict): ...@@ -976,6 +973,13 @@ def save_frame_groupings(video_name, frame_landmarks, frame_group_dict):
# this method will get gaze estimation correlations # this method will get gaze estimation correlations
# this method accepts:
# individual_lec_gaze: the gaze estimation details for each individual lecture
# lec_recorded_activity_data: the lecturer posture recognition details
# returns:
# correlations: the lecture gaze estimation and lecturer posture recognition correlations
def get_gaze_correlations(individual_lec_gaze, lec_recorded_activity_data): def get_gaze_correlations(individual_lec_gaze, lec_recorded_activity_data):
# this variable will be used to store the correlations # this variable will be used to store the correlations
correlations = [] correlations = []
...@@ -1024,8 +1028,6 @@ def get_gaze_correlations(individual_lec_gaze, lec_recorded_activity_data): ...@@ -1024,8 +1028,6 @@ def get_gaze_correlations(individual_lec_gaze, lec_recorded_activity_data):
# calculate the correlation # calculate the correlation
pd_series = ut.get_top_abs_correlations(df, limit) pd_series = ut.get_top_abs_correlations(df, limit)
print('====correlated variables=====')
print(pd_series)
for i in range(limit): for i in range(limit):
# this dictionary will get the pandas.Series object's indices and values separately # this dictionary will get the pandas.Series object's indices and values separately
......
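The loop shown just before the elided marker builds one correlation entry per pair. A minimal sketch of what it likely produces is given below, assuming ut.get_top_abs_correlations returns a pandas Series whose MultiIndex holds the correlated column pair and whose value is the coefficient.

import pandas as pd

def sketch_correlation_entries(pd_series: pd.Series, limit: int):
    # pd_series: output of a get_top_abs_correlations-style helper, indexed by column pairs
    correlations = []
    for i in range(min(limit, len(pd_series))):
        correlations.append({
            "index": pd_series.index[i],                   # e.g. ("phone_perct", "seated_perct")
            "value": round(float(pd_series.values[i]), 2),
        })
    return correlations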
import os
import cv2
import numpy as np
import shutil
from .facial_landmarks import get2DPoints
from .classes import pose
# Read Image
def estimatePose(request):
directory = request['directory']
images = request['images']
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
IMAGE_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\images")
SPEC_DIR = os.path.join(IMAGE_DIR, "{}".format(directory))
# new directory will be created to store pose estimations
new_dir_name = "static\\FirstApp\\poses\\{}".format(directory)
new_dir = os.path.join(BASE_DIR, new_dir_name)
face_count_response = 0
pose_response_list = []
if (os.path.isdir(new_dir)):
# delete the previous directory
shutil.rmtree(new_dir)
# create the new directory
os.mkdir(new_dir)
for im in images:
IMAGE_PATH = os.path.join(SPEC_DIR, "{}".format(im))
image = cv2.imread(IMAGE_PATH)
size = image.shape
left_corner, right_corner, nose_tip, right_mouth, left_mouth, chin, face_center_top, face_center_bottom, face_count = get2DPoints(image)
# if faces are found
if left_corner is not None:
# 3D model points.
model_points = np.array([
(0.0, 0.0, 0.0), # Nose tip
(0.0, -330.0, -65.0), # Chin
(-225.0, 170.0, -135.0), # Left eye left corner
(225.0, 170.0, -135.0), # Right eye right corner
(-150.0, -150.0, -125.0), # Left Mouth corner
(150.0, -150.0, -125.0) # Right mouth corner
])
# Camera internals
focal_length = size[1]
center = (size[1] / 2, size[0] / 2)
camera_matrix = np.array(
[[focal_length, 0, center[0]],
[0, focal_length, center[1]],
[0, 0, 1]], dtype="double"
)
# print("Camera Matrix :\n {0}".format(camera_matrix))
for i in range (face_count):
text = ''
# 2D image points. If you change the image, you need to change vector
image_points = np.array([
nose_tip[i],
chin[i],
left_corner[i],
right_corner[i],
left_mouth[i],
right_mouth[i]
], dtype="double")
dist_coeffs = np.zeros((4, 1)) # Assuming no lens distortion
(success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs,
flags=cv2.SOLVEPNP_ITERATIVE)
# Project a 3D point (0, 0, 1000.0) onto the image plane.
# We use this to draw a line sticking out of the nose
(nose_end_point2D, jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector, translation_vector,
camera_matrix, dist_coeffs)
# for p in image_points:
# cv2.circle(im, (int(p[0]), int(p[1])), 3, (0, 0, 255), -1)
p1 = (int(image_points[0][0]), int(image_points[0][1]))
p2 = (int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
if (p2[0] < face_center_top[i][0]):
text = 'RIGHT'
else:
text = 'LEFT'
cv2.putText(image, text, p2, cv2.FONT_HERSHEY_COMPLEX, 0.8, (0, 0, 255), 2)
cv2.line(image, p1, p2, (255, 0, 0), 2)
# saving the image
new_file = os.path.join(new_dir, im)
cv2.imwrite(new_file, image)
face_count_response += 1
# create a response object for the image
pose_response = {}
pose_response["directory"] = directory
pose_response["image"] = im
pose_response["label"] = text
pose_response_list.append(pose_response)
else:
print('No faces found')
# respond 'yes' to the command line prompt
p = os.popen('python manage.py collectstatic', "w")
p.write("yes")
# returning the static path
STATIC_POSE = os.path.join(BASE_DIR, "assets\\FirstApp\\pose")
STATIC_SPEC = os.path.join(STATIC_POSE, "{}".format(directory))
# if no images were created
if (face_count_response < 1):
shutil.rmtree(new_dir)
return "No faces were found"
return pose_response_list
\ No newline at end of file
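estimatePose takes a plain dictionary rather than a Django request object. A hypothetical call could look like the following; the directory and image names are invented, and the frames are assumed to already exist under assets\FirstApp\images\<directory>.

# hypothetical usage of estimatePose
response = estimatePose({
    "directory": "lecture_01",
    "images": ["frame-0.png", "frame-1.png", "frame-2.png"],
})
# on success, each entry looks like {"directory": "lecture_01", "image": "frame-0.png", "label": "LEFT"}
# when no face is found in any image, the string "No faces were found" is returned instead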
import tensorflow as tf
import tensorflow.keras
from PIL import Image, ImageOps
import numpy as np
import cv2
import os
import math
import shutil
from . import custom_sorter as cs
def get_pose_estimations(video_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# face_classifier = cv2.CascadeClassifier(
# os.path.join(BASE_DIR, 'FirstApp\classifiers\haarcascade_frontalface_default.xml'))
# classifier_path = os.path.join(BASE_DIR, 'FirstApp\classifiers\Emotion_little_vgg.h5')
# classifier = load_model(classifier_path)
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
detections = []
frames = []
for frame_folder in os.listdir(EXTRACTED_DIR):
FRAME_DIR = os.path.join(EXTRACTED_DIR, frame_folder)
frame_details = {}
frame_details['frame'] = frame_folder
# for each detection in the frame directory
detected_images = []
for detection in os.listdir(FRAME_DIR):
if "frame" not in detection:
DETECTION_PATH = os.path.join(FRAME_DIR, detection)
image = cv2.imread(DETECTION_PATH)
# label = emotion_recognition(classifier, face_classifier, image)
detected_images.append(detection)
detections.append(detection)
frame_details['detections'] = detected_images
frames.append(frame_details)
sorted_frames = cs.custom_object_sorter(frames)
set_detections = set(detections)
list_set_detections = list(set_detections)
sorted_list_set_detections = cs.custom_sort(list_set_detections)
return sorted_frames, sorted_list_set_detections
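A hypothetical call to get_pose_estimations, assuming the activity-extraction step has already populated assets\FirstApp\activity\<video_name> with one folder per frame (the video and detection names are invented):

frames, detections = get_pose_estimations("lecture_01.mp4")
# frames     -> [{"frame": "frame-0", "detections": ["student-0.png", ...]}, ...] in custom-sorted order
# detections -> de-duplicated, custom-sorted list of detection image names seen across all frames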
# calculate pose estimations for a student
def calculate_pose_estimation_for_student(video_name, student, poses):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
POSE_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\poses")
POSE_VIDEO_DIR = os.path.join(POSE_DIR, video_name)
pose_count = 0
# checking whether the pose directory exists
if os.path.isdir(POSE_VIDEO_DIR) == False:
# create the pose directory
os.mkdir(POSE_VIDEO_DIR)
# loop through each frame of the directory
for frame in os.listdir(VIDEO_DIR):
FRAME_FOLDER = os.path.join(VIDEO_DIR, frame)
for detection in os.listdir(FRAME_FOLDER):
DETECTION_PATH = os.path.join(FRAME_FOLDER, detection)
# detection image
detection_img = cv2.imread(DETECTION_PATH)
# checking for the given student
if detection == student:
# select the correct pose detection
pose = poses[pose_count]
# extract the coordinates
x1 = int(pose['keypoints'][5]['position']['x'])
y1 = int(pose['keypoints'][5]['position']['y'])
x2 = int(pose['keypoints'][6]['position']['x'])
y2 = int(pose['keypoints'][6]['position']['y'])
# extract the head positions
x_diff = x1 - x2
y_diff = y1 - y2
x_pow = math.pow(x_diff, 2)
y_pow = math.pow(y_diff, 2)
summation = x_pow + y_pow
distance = int(math.sqrt(summation))
# defining the hyperparameter
param = 0.6
fraction = int(math.floor(distance * param)) if int(math.floor(distance * param)) > 0 else 1
middle_x = x2 + fraction
# middle_y = y2 - 20
middle_y = y2
head_x = middle_x
head_y = 0 if (middle_y - fraction) < 0 else (middle_y - fraction)
left_upper_x = 0 if (middle_x - fraction) < 0 else (middle_x - fraction)
# extract the new image
new_img = detection_img[head_y:head_y+fraction, left_upper_x:left_upper_x+distance]
# new directory name
# new_img_dir = os.path.join(POSE_VIDEO_DIR, frame)
new_img_dir = os.path.join(POSE_VIDEO_DIR, detection)
# check if the directory exists
if os.path.isdir(new_img_dir) == False:
# create the new directory
os.mkdir(new_img_dir)
# create new image name
frame_name = frame + ".png"
new_img_path = os.path.join(new_img_dir, frame_name)
# saving the new image
cv2.imwrite(new_img_path, new_img)
# increment the count
pose_count += 1
print('saving the image')
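A small numeric walk-through of the crop geometry in calculate_pose_estimation_for_student, with invented keypoint values: keypoints 5 and 6 are the PoseNet shoulder keypoints, so distance approximates the shoulder width and fraction (0.6 x distance) sets the height of the head crop.

import math

x1, y1 = 220, 310                                            # keypoint 5 (one shoulder)
x2, y2 = 160, 306                                            # keypoint 6 (other shoulder)
distance = int(math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2))   # 60
fraction = int(math.floor(distance * 0.6)) or 1              # 36
middle_x = x2 + fraction                                     # 196
head_y = max(y2 - fraction, 0)                               # 270, top edge of the head crop
left_upper_x = max(middle_x - fraction, 0)                   # 160, i.e. back to x2
# the function then crops detection_img[270:306, 160:220] and saves it as <frame>.png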
...@@ -101,26 +101,14 @@ urlpatterns = [ ...@@ -101,26 +101,14 @@ urlpatterns = [
# lecture activity API (to retrieve a lecture activity) # lecture activity API (to retrieve a lecture activity)
url(r'^process-lecture-activity/$', api.LectureActivityProcess.as_view()), url(r'^process-lecture-activity/$', api.LectureActivityProcess.as_view()),
# lecture activity detection API (to retrieve detections for a given lecture activity frame)
url(r'^get-lecture-activity-frame-detection/$', api.GetLectureActivityDetections.as_view()),
# lecture activity detection for label API (to retrieve detections for a certain label)
url(r'^get-lecture-activity-detection-for-label/$', api.GetLectureActvityDetectionsForLabel.as_view()),
# lecture activity detection for label API (to retrieve detections for a certain label)
url(r'^get-lecture-activity-student-evaluation/$', api.GetLectureActivityStudentEvaluation.as_view()),
# lecture activity detection for frames API (to retrieve detections for each frame in lecture video) # lecture activity detection for frames API (to retrieve detections for each frame in lecture video)
url(r'^get-lecture-activity-for-frame/$', api.GetLectureActivityRecognitionsForFrames.as_view()), url(r'^get-lecture-activity-for-frame/$', api.GetLectureActivityRecognitionsForFrames.as_view()),
# lecture activity evaluation for individual students
url(r'^get-lecture-activity-individual-student-evaluation/$',
api.GetLectureActivityIndividualStudentEvaluation.as_view()),
# lecture activity report generation # lecture activity report generation
url(r'^lecture-activity-report-generation/$', url(r'^lecture-activity-report-generation/$',
api.GenerateActivityReport.as_view()), api.GenerateActivityReport.as_view()),
###### EMOTION Section ##### ###### EMOTION Section #####
# getting lecture emotion record availability # getting lecture emotion record availability
url(r'^get-lecture-emotion-availability/$', api.GetLectureEmotionAvailability.as_view()), url(r'^get-lecture-emotion-availability/$', api.GetLectureEmotionAvailability.as_view()),
...@@ -131,30 +119,10 @@ urlpatterns = [ ...@@ -131,30 +119,10 @@ urlpatterns = [
# process a lecture emotion record # process a lecture emotion record
url(r'^process-lecture-emotion/$', api.LectureEmotionProcess.as_view()), url(r'^process-lecture-emotion/$', api.LectureEmotionProcess.as_view()),
# lecture emotion evaluation for students
url(r'^get-lecture-emotion-student-evaluation/$', api.GetLectureEmotionStudentEvaluations.as_view()),
# lecture emotion evaluation for students
url(r'^get-lecture-emotion-individual-student-evaluation/$',
api.GetLectureEmotionIndividualStudentEvaluation.as_view()),
# lecture emotion detection for frames API (to retrieve detections for each frame in lecture video) # lecture emotion detection for frames API (to retrieve detections for each frame in lecture video)
url(r'^get-lecture-emotion-for-frame/$', api.GetLectureEmotionRecognitionsForFrames.as_view()), url(r'^get-lecture-emotion-for-frame/$', api.GetLectureEmotionRecognitionsForFrames.as_view()),
###### POSE Section #####
# lecture video API (for Pose estimation)
url(r'^get-lecture-video-for-pose/$', api.GetLectureVideoForPose.as_view()),
# lecture video extracted frames API (for Pose estimation)
url(r'^get-lecture-video-extracted-frames/$', api.GetLectureVideoExtractedFrames.as_view()),
# lecture video individual student extracted frames API (for Pose estimation)
url(r'^get-lecture-video-individual-student-frames/$', api.GetLectureVideoIndividualStudentFrames.as_view()),
# lecture video individual student process pose estimation API (for Pose estimation)
url(r'^process-lecture-video-individual-pose-estimation', api.ProcessIndividualStudentPoseEstimation.as_view()),
##### GAZE Section ##### ##### GAZE Section #####
# lecture video Gaze estimation # lecture video Gaze estimation
......
...@@ -29,20 +29,8 @@ from django.contrib.auth import ( ...@@ -29,20 +29,8 @@ from django.contrib.auth import (
logout, logout,
) )
from django.contrib.auth.decorators import login_required from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import viewsets
from . models import Teachers, Video, VideoMeta, RegisterUser
from . MongoModels import *
from . serializers import * from . serializers import *
from . emotion_detector import detect_emotion
from . ImageOperations import saveImage
from . logic import head_pose_estimation
from . logic import video_extraction
from . forms import * from . forms import *
import cv2
import os import os
from datetime import datetime from datetime import datetime
......