Commit 1653846c authored by I.K. Seneviratne

Committing the full implementation of the lecture emotion graphical representation and the partial implementation of lecture gaze estimation on the lecturer Home page.
parent 1cabfb25
@@ -253,11 +253,11 @@ class LectureGazeEstimation(models.Model):
# this abstract class will define the lecture gaze frame group percentages
class LectureGazeFrameGroupPercentages(models.Model):
-    looking_up_and_right_perct = models.DecimalField(default=0.0, max_digits=3, decimal_places=1)
-    looking_up_and_left_perct = models.DecimalField(default=0.0, max_digits=3, decimal_places=1)
-    looking_down_and_right_perct = models.DecimalField(default=0.0, max_digits=3, decimal_places=1)
-    looking_down_and_left_perct = models.DecimalField(default=0.0, max_digits=3, decimal_places=1)
-    looking_front_perct = models.DecimalField(default=0.0, max_digits=3, decimal_places=1)
+    upright_perct = models.DecimalField(default=0.0, max_digits=3, decimal_places=1)
+    upleft_perct = models.DecimalField(default=0.0, max_digits=3, decimal_places=1)
+    downright_perct = models.DecimalField(default=0.0, max_digits=3, decimal_places=1)
+    downleft_perct = models.DecimalField(default=0.0, max_digits=3, decimal_places=1)
+    front_perct = models.DecimalField(default=0.0, max_digits=3, decimal_places=1)
    class Meta:
        abstract = True
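Since Meta.abstract = True, these percentage fields only materialize on a concrete subclass. A minimal sketch of how a concrete model might inherit from this base (the LectureGazeFrameGroupings name and its fields are illustrative assumptions, not taken from this diff):

# hypothetical concrete model -- name and fields are illustrative assumptions
class LectureGazeFrameGroupings(LectureGazeFrameGroupPercentages):
    lecture_gaze_id = models.ForeignKey(LectureGazeEstimation, on_delete=models.CASCADE)
    frame_group = models.CharField(max_length=10)  # e.g. "0-30"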
@@ -398,4 +398,153 @@ def get_student_emotion_summary_for_period(emotions):
percentages["surprise_perct"] = surprise_average_perct
percentages["neutral_perct"] = neutral_average_perct
return percentages, individual_lec_emotions, emotion_labels
\ No newline at end of file
return percentages, individual_lec_emotions, emotion_labels
# this method will retrieve emotion frame groupings for a lecture
def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):

    BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    EXTRACTED_DIR = os.path.join(BASE_DIR, "assets", "FirstApp", "activity", video_name)

    # load the face detector and the emotion classifier
    face_classifier = cv2.CascadeClassifier(
        os.path.join(BASE_DIR, 'FirstApp', 'classifiers', 'haarcascade_frontalface_default.xml'))
    classifier_path = os.path.join(BASE_DIR, 'FirstApp', 'classifiers', 'Emotion_little_vgg.h5')
    classifier = load_model(classifier_path)

    # initializing the count variables
    frame_count = 0

    # class labels
    class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
    # get the frame differences for each frame group
    frame_group_diff = {}

    # loop through the frame group dictionary
    for key in frame_group_dict.keys():
        split_values = key.split("-")
        value1 = int(split_values[0])
        value2 = int(split_values[1])
        diff = value2 - value1

        # assign the difference (default to 1 to avoid division by zero later)
        frame_group_diff[key] = diff if diff > 0 else 1
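For instance, with frame-group keys of the form produced by getFrameLandmarks, this loop behaves as sketched below (the keys are illustrative):

# illustrative keys: "0-30" and "30-60" give a difference of 30 frames each,
# while a degenerate zero-length group such as "60-60" falls back to 1
# frame_group_diff -> {'0-30': 30, '30-60': 30, '60-60': 1}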
    # looping through the frames
    # NOTE: os.listdir() does not guarantee an ordering; if the frame folders
    # are numbered, they may need sorting for frame_count to line up with them
    for frame in os.listdir(EXTRACTED_DIR):

        # getting the frame folder
        FRAME_FOLDER = os.path.join(EXTRACTED_DIR, frame)

        # initializing the count variables
        happy_count = 0
        sad_count = 0
        angry_count = 0
        surprise_count = 0
        neutral_count = 0
        detection_count = 0

        # looping through the detections in each frame
        for detection in os.listdir(FRAME_FOLDER):

            # process only the cropped single-person detections,
            # skipping the full-frame image
            if "frame" not in detection:
                # get the label for this image
                IMAGE_PATH = os.path.join(FRAME_FOLDER, detection)
                image = cv2.imread(IMAGE_PATH)

                # run the model and get the emotion label
                label = emotion_recognition(classifier, face_classifier, image)

                # increment the count based on the label
                if label == class_labels[0]:
                    angry_count += 1
                elif label == class_labels[1]:
                    happy_count += 1
                elif label == class_labels[2]:
                    neutral_count += 1
                elif label == class_labels[3]:
                    sad_count += 1
                elif label == class_labels[4]:
                    surprise_count += 1

                # increment the detection count
                detection_count += 1

        # finding the time landmark that the current frame is in
        for index, landmark in enumerate(frame_landmarks):
            j = index + 1

            # checking whether the next index is within the range
            if j < len(frame_landmarks):
                next_value = frame_landmarks[j]

                # checking the correct time landmark range
                if landmark <= frame_count < next_value:
                    frame_name = "{}-{}".format(landmark, next_value)
                    frame_group_dict[frame_name]['happy_count'] += happy_count
                    frame_group_dict[frame_name]['sad_count'] += sad_count
                    frame_group_dict[frame_name]['angry_count'] += angry_count
                    frame_group_dict[frame_name]['surprise_count'] += surprise_count
                    frame_group_dict[frame_name]['neutral_count'] += neutral_count
                    frame_group_dict[frame_name]['detection_count'] += detection_count

        # increment the frame count
        frame_count += 1
    # calculate the percentage values
    for key in frame_group_dict.keys():
        frame_group_details = frame_group_dict[key]
        frame_group_happy_count = frame_group_details['happy_count']
        frame_group_sad_count = frame_group_details['sad_count']
        frame_group_angry_count = frame_group_details['angry_count']
        frame_group_surprise_count = frame_group_details['surprise_count']
        frame_group_neutral_count = frame_group_details['neutral_count']
        group_detection_count = frame_group_details['detection_count']

        # guard against division by zero when a group has no detections
        group_detection_count = group_detection_count if group_detection_count > 0 else 1

        frame_group_happy_perct = float(frame_group_happy_count / group_detection_count) * 100
        frame_group_sad_perct = float(frame_group_sad_count / group_detection_count) * 100
        frame_group_angry_perct = float(frame_group_angry_count / group_detection_count) * 100
        frame_group_surprise_perct = float(frame_group_surprise_count / group_detection_count) * 100
        frame_group_neutral_perct = float(frame_group_neutral_count / group_detection_count) * 100

        # assign the values to the same dictionary
        frame_group_dict[key]['happy_perct'] = round(frame_group_happy_perct, 1)
        frame_group_dict[key]['sad_perct'] = round(frame_group_sad_perct, 1)
        frame_group_dict[key]['angry_perct'] = round(frame_group_angry_perct, 1)
        frame_group_dict[key]['surprise_perct'] = round(frame_group_surprise_perct, 1)
        frame_group_dict[key]['neutral_perct'] = round(frame_group_neutral_perct, 1)

        # removing the intermediate count items from the dictionary
        frame_group_dict[key].pop('happy_count')
        frame_group_dict[key].pop('sad_count')
        frame_group_dict[key].pop('angry_count')
        frame_group_dict[key].pop('surprise_count')
        frame_group_dict[key].pop('neutral_count')
        frame_group_dict[key].pop('detection_count')

    emotion_labels = ['happy_perct', 'sad_perct', 'angry_perct', 'surprise_perct', 'neutral_perct']

    # return the dictionary along with the percentage labels
    return frame_group_dict, emotion_labels
\ No newline at end of file
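A hedged sketch of how this function would presumably be invoked from a view, pairing it with getFrameLandmarks and the "Emotion" category so that the grouping dictionary carries the emotion count keys this function increments (the video name is made up):

# illustrative wiring -- "Lecture01.mp4" is a made-up video name
frame_landmarks, frame_group_dict = getFrameLandmarks("Lecture01.mp4", "Emotion")
frame_group_percentages, emotion_labels = emotion_frame_groupings(
    "Lecture01.mp4", frame_landmarks, frame_group_dict)

# frame_group_percentages now maps "start-end" frame ranges to
# {'happy_perct': ..., 'sad_perct': ..., 'angry_perct': ..., ...},
# ready to be serialized for the Home page charts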
@@ -557,6 +557,7 @@ def get_lecture_gaze_esrimation_for_frames(video_name):
    return frame_detections, frame_rate


# this method will get the student gaze estimation summary for a period
def get_student_gaze_estimation_summary_for_period(gaze_estimation_data):

    # declare variables to add percentage values
    phone_checking_perct_combined = 0.0
@@ -608,4 +609,231 @@ def get_student_gaze_estimation_summary_for_period(gaze_estimation_data):
percentages["looking_down_and_left_perct"] = looking_down_left_perct_combined
percentages["looking_front_perct"] = looking_front_average_perct
return percentages, individual_lec_gaze_estimations, gaze_estimation_labels
\ No newline at end of file
return percentages, individual_lec_gaze_estimations, gaze_estimation_labels
# this method will get the lecture gaze estimation frame groupings
def gaze_estimation_frame_groupings(video_name, frame_landmarks, frame_group_dict):

    BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    EXTRACTED_DIR = os.path.join(BASE_DIR, "assets", "FirstApp", "gaze", video_name)
    VIDEO_PATH = os.path.join(BASE_DIR, "assets", "FirstApp", "videos", video_name)

    # load the face detection model
    face_model = get_face_detector()

    # load the facial landmark model
    landmark_model = get_landmark_model()

    cap = cv2.VideoCapture(VIDEO_PATH)

    # read the first frame to derive the frame size
    ret, img = cap.read()
    size = img.shape

    font = cv2.FONT_HERSHEY_SIMPLEX

    # 3D model points (generic head model)
    model_points = np.array([
        (0.0, 0.0, 0.0),           # Nose tip
        (0.0, -330.0, -65.0),      # Chin
        (-225.0, 170.0, -135.0),   # Left eye left corner
        (225.0, 170.0, -135.0),    # Right eye right corner
        (-150.0, -150.0, -125.0),  # Left mouth corner
        (150.0, -150.0, -125.0)    # Right mouth corner
    ])
    # set a threshold angle in degrees (values of 15, 30, 45 and 48 were also tried)
    THRESHOLD = 22

    # camera internals, approximated from the frame size
    focal_length = size[1]
    center = (size[1] / 2, size[0] / 2)
    camera_matrix = np.array(
        [[focal_length, 0, center[0]],
         [0, focal_length, center[1]],
         [0, 0, 1]], dtype="double"
    )

    # initializing the count variables
    frame_count = 0
    # get the frame differences for each frame group
    frame_group_diff = {}

    # loop through the frame group dictionary
    for key in frame_group_dict.keys():
        split_values = key.split("-")
        value1 = int(split_values[0])
        value2 = int(split_values[1])
        diff = value2 - value1

        # assign the difference (default to 1 to avoid division by zero later)
        frame_group_diff[key] = diff if diff > 0 else 1
    # looping through the frames
    while True:
        ret, image = cap.read()

        # stop when the video has no more frames
        if not ret:
            break

        # setting up the count variables
        head_front_count = 0
        head_up_right_count = 0
        head_up_left_count = 0
        head_down_right_count = 0
        head_down_left_count = 0
        face_count = 0
        detection_count = 0

        # find the faces in the current frame
        faces = find_faces(image, face_model)

        # iterate through each detected face
        for face in faces:

            # declaring boolean variables
            isLookingUp = False
            isLookingDown = False
            isLookingRight = False
            isLookingLeft = False
            isLookingFront = False

            # retrieving the facial landmarks and face bounding box coordinates
            marks, facebox = detect_marks(image, landmark_model, face)

            image_points = np.array([
                marks[30],  # Nose tip
                marks[8],   # Chin
                marks[36],  # Left eye left corner
                marks[45],  # Right eye right corner
                marks[48],  # Left mouth corner
                marks[54]   # Right mouth corner
            ], dtype="double")

            dist_coeffs = np.zeros((4, 1))  # assuming no lens distortion
            (success, rotation_vector, translation_vector) = cv2.solvePnP(
                model_points, image_points, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_UPNP)

            # project a 3D point (0, 0, 1000.0) onto the image plane;
            # this gives a line sticking out of the nose
            (nose_end_point2D, jacobian) = cv2.projectPoints(
                np.array([(0.0, 0.0, 1000.0)]), rotation_vector, translation_vector,
                camera_matrix, dist_coeffs)

            p1 = (int(image_points[0][0]), int(image_points[0][1]))
            p2 = (int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
            x1, x2 = head_pose_points(image, rotation_vector, translation_vector, camera_matrix)

            # measuring the angles
            try:
                m = (p2[1] - p1[1]) / (p2[0] - p1[0])
                ang1 = int(math.degrees(math.atan(m)))
            except ZeroDivisionError:
                ang1 = 90

            try:
                m = (x2[1] - x1[1]) / (x2[0] - x1[0])
                ang2 = int(math.degrees(math.atan(-1 / m)))
            except ZeroDivisionError:
                ang2 = 90

            # checking the vertical angle
            if ang1 >= THRESHOLD:
                isLookingDown = True
            elif ang1 <= -THRESHOLD:
                isLookingUp = True
            else:
                isLookingFront = True

            # checking the horizontal angle
            if ang2 >= THRESHOLD:
                isLookingRight = True
            elif ang2 <= -THRESHOLD:
                isLookingLeft = True

            # combining the vertical and horizontal directions
            if isLookingDown and isLookingRight:
                head_down_right_count += 1
            elif isLookingDown and isLookingLeft:
                head_down_left_count += 1
            elif isLookingUp and isLookingRight:
                head_up_right_count += 1
            elif isLookingUp and isLookingLeft:
                head_up_left_count += 1
            elif isLookingFront:
                head_front_count += 1

            # increment the face and detection counts
            face_count += 1
            detection_count += 1

        # finding the time landmark that the current frame is in
        for index, landmark in enumerate(frame_landmarks):
            j = index + 1

            # checking whether the next index is within the range
            if j < len(frame_landmarks):
                next_value = frame_landmarks[j]

                # checking the correct time landmark range
                if landmark <= frame_count < next_value:
                    frame_name = "{}-{}".format(landmark, next_value)
                    frame_group_dict[frame_name]['upright_count'] += head_up_right_count
                    frame_group_dict[frame_name]['upleft_count'] += head_up_left_count
                    frame_group_dict[frame_name]['downright_count'] += head_down_right_count
                    frame_group_dict[frame_name]['downleft_count'] += head_down_left_count
                    frame_group_dict[frame_name]['front_count'] += head_front_count
                    frame_group_dict[frame_name]['detection_count'] += detection_count

        # increment the frame count
        frame_count += 1
    # calculate the percentage values
    # (using the gaze count keys set up by getFrameLandmarks for the "Gaze"
    # category; the phone/listen/note keys here were an activity-module leftover)
    for key in frame_group_dict.keys():
        frame_group_details = frame_group_dict[key]
        frame_group_upright_count = frame_group_details['upright_count']
        frame_group_upleft_count = frame_group_details['upleft_count']
        frame_group_downright_count = frame_group_details['downright_count']
        frame_group_downleft_count = frame_group_details['downleft_count']
        frame_group_front_count = frame_group_details['front_count']
        group_detection_count = frame_group_details['detection_count']

        # guard against division by zero when a group has no detections
        group_detection_count = group_detection_count if group_detection_count > 0 else 1

        frame_group_upright_perct = float(frame_group_upright_count / group_detection_count) * 100
        frame_group_upleft_perct = float(frame_group_upleft_count / group_detection_count) * 100
        frame_group_downright_perct = float(frame_group_downright_count / group_detection_count) * 100
        frame_group_downleft_perct = float(frame_group_downleft_count / group_detection_count) * 100
        frame_group_front_perct = float(frame_group_front_count / group_detection_count) * 100

        # assign the values to the same dictionary
        frame_group_dict[key]['upright_perct'] = round(frame_group_upright_perct, 1)
        frame_group_dict[key]['upleft_perct'] = round(frame_group_upleft_perct, 1)
        frame_group_dict[key]['downright_perct'] = round(frame_group_downright_perct, 1)
        frame_group_dict[key]['downleft_perct'] = round(frame_group_downleft_perct, 1)
        frame_group_dict[key]['front_perct'] = round(frame_group_front_perct, 1)

        # removing the intermediate count items from the dictionary
        frame_group_dict[key].pop('upright_count')
        frame_group_dict[key].pop('upleft_count')
        frame_group_dict[key].pop('downright_count')
        frame_group_dict[key].pop('downleft_count')
        frame_group_dict[key].pop('front_count')
        frame_group_dict[key].pop('detection_count')

    # return the dictionary
    return frame_group_dict
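Both grouping functions rescan frame_landmarks once per frame to find the containing range. Since the landmarks are sorted, an equivalent lookup can be done with the standard bisect module; a sketch of that alternative (not what this commit does):

import bisect

def find_frame_group(frame_count, frame_landmarks):
    # locate the interval [frame_landmarks[j-1], frame_landmarks[j])
    # that contains frame_count; returns None outside every group
    j = bisect.bisect_right(frame_landmarks, frame_count)
    if 0 < j < len(frame_landmarks):
        return "{}-{}".format(frame_landmarks[j - 1], frame_landmarks[j])
    return None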
@@ -120,7 +120,7 @@ def getTimeLandmarks(video_name):
# this method will retrieve the frame landmarks for a lecture video
-def getFrameLandmarks(video_name):
+def getFrameLandmarks(video_name, category):
    BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    VIDEO_PATH = os.path.join(BASE_DIR, "assets", "FirstApp", "videos", video_name)
@@ -130,8 +130,8 @@ def getFrameLandmarks(video_name):
    int_no_of_frames = int(no_of_frames)
    fps = int(video.get(cv2.CAP_PROP_FPS))

    # calculating the duration in seconds
    duration = int(no_of_frames / fps)

+    # list of categories
+    categories = ["Activity", "Emotion", "Gaze"]

    # define the number of time gaps required
    THRESHOLD_GAP = 5
@@ -175,9 +175,21 @@ def getFrameLandmarks(video_name):
    # define a dictionary to hold the frame groups
    frame_group_dict = {}

-    # loop through the group names to create a dictionary
-    for name in frame_group_list:
-        frame_group_dict[name] = {'phone_count': 0, 'listen_count': 0, 'note_count': 0, 'detection_count': 0}
+    # checking for the category
+    if category == categories[0]:
+        # loop through the group names to create a dictionary
+        for name in frame_group_list:
+            frame_group_dict[name] = {'phone_count': 0, 'listen_count': 0, 'note_count': 0, 'detection_count': 0}
+    elif category == categories[1]:
+        # loop through the group names to create a dictionary
+        for name in frame_group_list:
+            frame_group_dict[name] = {'happy_count': 0, 'sad_count': 0, 'angry_count': 0, 'surprise_count': 0,
+                                      'neutral_count': 0, 'detection_count': 0}
+    elif category == categories[2]:
+        # loop through the group names to create a dictionary
+        for name in frame_group_list:
+            frame_group_dict[name] = {'upright_count': 0, 'upleft_count': 0, 'downright_count': 0,
+                                      'downleft_count': 0, 'front_count': 0, 'detection_count': 0}

    return frame_landmarks, frame_group_dict
\ No newline at end of file
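For illustration, with assumed frame landmarks of [0, 30, 60], the "Gaze" category would return a zero-initialized dictionary shaped like this:

# frame_landmarks  -> [0, 30, 60]
# frame_group_dict -> {'0-30':  {'upright_count': 0, 'upleft_count': 0, 'downright_count': 0,
#                                'downleft_count': 0, 'front_count': 0, 'detection_count': 0},
#                      '30-60': {...the same zero-initialized keys...}}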
@@ -407,7 +407,7 @@
            animationEnabled: true,
            theme: "light2",
            title: {
-                text: "Student Behavior"
+                text: "Student Activity Behavior"
            },
            axisX: {
                title: "Duration",
@@ -445,25 +445,25 @@
    function renderEmotionChart(emotion_labels) {

        //get the emotion label length
        {#let activity_label_length = activity_labels.length;#}
        let emotion_label_length = emotion_labels.length;

        let data = [];

        /*
        //loop through the emotion labels
-        for (let k = 0; k < activity_label_length; k++) {
-            let label = activity_labels[k];
+        for (let k = 0; k < emotion_label_length; k++) {
+            let label = emotion_labels[k];

            let datapoints = [];
            let count = 0;

            //loop through the emotion frame groups
-            for (let key in lecture_activity_frame_group_percentages) {
-                let frame_group_details = lecture_activity_frame_group_percentages[key];
+            for (let key in lecture_emotion_frame_group_percentages) {
+                let frame_group_details = lecture_emotion_frame_group_percentages[key];

-                let activity_perct = frame_group_details[label];
+                let emotion_perct = frame_group_details[label];

-                let point = {label: lecture_video_time_landmarks[count], y: activity_perct};
+                let point = {label: lecture_video_time_landmarks[count], y: emotion_perct};

                datapoints.push(point);
@@ -484,14 +484,14 @@
                data.push(obj);
            }
            */

        var chart = new CanvasJS.Chart("EmotionChartContainer", {
            animationEnabled: true,
            theme: "light2",
            title: {
-                text: "Student Behavior"
+                text: "Student Emotion Behavior"
            },
            axisX: {
                title: "Duration",
@@ -529,25 +529,25 @@
    function renderGazeChart(gaze_labels) {

        //get the gaze label length
        {#let activity_label_length = activity_labels.length;#}
        let gaze_label_length = gaze_labels.length;

        let data = [];

        /*
        //loop through the gaze labels
-        for (let k = 0; k < activity_label_length; k++) {
-            let label = activity_labels[k];
+        for (let k = 0; k < gaze_label_length; k++) {
+            let label = gaze_labels[k];

            let datapoints = [];
            let count = 0;

            //loop through the gaze frame groups
-            for (let key in lecture_activity_frame_group_percentages) {
-                let frame_group_details = lecture_activity_frame_group_percentages[key];
+            for (let key in lecture_gaze_frame_group_percentages) {
+                let frame_group_details = lecture_gaze_frame_group_percentages[key];

-                let activity_perct = frame_group_details[label];
+                let gaze_perct = frame_group_details[label];

-                let point = {label: lecture_video_time_landmarks[count], y: activity_perct};
+                let point = {label: lecture_video_time_landmarks[count], y: gaze_perct};

                datapoints.push(point);
@@ -568,14 +568,14 @@
                data.push(obj);
            }
            */

        var chart = new CanvasJS.Chart("GazeChartContainer", {
            animationEnabled: true,
            theme: "light2",
            title: {
-                text: "Student Behavior"
+                text: "Student Gaze Estimation Behavior"
            },
            axisX: {
                title: "Duration",