Commit 0e91389b authored by I.K Seneviratne

Initial commit to the monitoring_student_behavior branch

parent 2a3db16a
@@ -128,7 +128,7 @@ class LectureActivity(models.Model):
# Lecture emotion report
class LectureEmotionReport(models.Model):
lecture_emotion_id = models.CharField(max_length=10)
lecture_video_id = models.ForeignKey(LectureVideo, on_delete=models.CASCADE)
lecture_video_id = models.ForeignKey(LectureVideo, on_delete=models.CASCADE, default=0)
happy_perct = models.DecimalField(default=0.0, max_digits=3, decimal_places=1)
sad_perct = models.DecimalField(default=0.0, max_digits=3, decimal_places=1)
angry_perct = models.DecimalField(default=0.0, max_digits=3, decimal_places=1)
@@ -143,6 +143,14 @@ class LectureEmotionReport(models.Model):
# POSE section
# lecture pose estimation
class LecturePoseEstimation(models.Model):
lecture_pose_id = models.CharField(max_length=10)
class LectureGazeEstimation(models.Model):
lecture_gaze_id = models.CharField(max_length=10)
lecture_video_id = models.ForeignKey(LectureVideo, on_delete=models.CASCADE)
looking_up_and_right_perct = models.DecimalField(default=0.0, max_digits=3, decimal_places=1)
looking_up_and_left_perct = models.DecimalField(default=0.0, max_digits=3, decimal_places=1)
looking_down_and_right_perct = models.DecimalField(default=0.0, max_digits=3, decimal_places=1)
looking_down_and_left_perct = models.DecimalField(default=0.0, max_digits=3, decimal_places=1)
looking_front_perct = models.DecimalField(default=0.0, max_digits=3, decimal_places=1)
def __str__(self):
return self.lecture_gaze_id
\ No newline at end of file
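The added model mirrors the emotion report above; a minimal Django-shell sketch of creating a gaze record (assuming the models live in FirstApp.MongoModels, as the migrations below import them, and that a LectureVideo row already exists — the ids here are hypothetical):

from FirstApp.MongoModels import LectureVideo, LectureGazeEstimation

# look up the parent video and attach a gaze estimation record to it
video = LectureVideo.objects.get(lecture_video_id='LV001')   # hypothetical id
gaze = LectureGazeEstimation(
    lecture_gaze_id='LG001',                                 # hypothetical id
    lecture_video_id=video,
    looking_front_perct=62.5,
    looking_up_and_left_perct=12.5,
)
gaze.save()
print(gaze)   # __str__ returns the lecture_gaze_id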
This diff is collapsed.
@@ -66,7 +66,6 @@ def detect_emotion(video):
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_classifier.detectMultiScale(gray,1.3,5)
print('number of faces: ', len(faces))
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
@@ -104,13 +103,9 @@ def detect_emotion(video):
elif (label == 'Sad'):
count_sad += 1
# path = os.path.join(BASE_DIR, 'static\\images\\Sad')
# cv2.imwrite(os.path.join(path, 'Sad-{0}.jpg'.format(count)), frame)
elif (label == 'Surprise'):
count_surprise += 1
# path = os.path.join(BASE_DIR, 'static\\images\\Surprise')
# cv2.imwrite(os.path.join(path, 'Surprise-{0}.jpg'.format(count)), frame)
label_position = (x, y)
# cv2.putText(frame, label, label_position, cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 3)
@@ -118,13 +113,9 @@ def detect_emotion(video):
else:
cv2.putText(frame, 'No Face Found', (20, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 3)
# cv2.imshow('Emotion Detector',frame)
count_frames += 1
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
# setting up the counted values
meta_data.frame_count = count_frames
meta_data.happy_count = count_happy
meta_data.sad_count = count_sad
@@ -328,11 +319,11 @@ def get_frame_emotion_recognition(video_name):
# calculating the percentages for the frame
happy_perct = float(happy_count / detection_count) * 100
sad_perct = float(sad_count / detection_count) * 100
angry_perct = float(angry_count / detection_count) * 100
neutral_perct = float(neutral_count / detection_count) * 100
surprise_perct = float(surprise_count / detection_count) * 100
happy_perct = float(happy_count / detection_count) * 100 if detection_count > 0 else 0
sad_perct = float(sad_count / detection_count) * 100 if detection_count > 0 else 0
angry_perct = float(angry_count / detection_count) * 100 if detection_count > 0 else 0
neutral_perct = float(neutral_count / detection_count) * 100 if detection_count > 0 else 0
surprise_perct = float(surprise_count / detection_count) * 100 if detection_count > 0 else 0
# this dictionary will be returned
frame_details['happy_perct'] = happy_perct
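The guarded ternaries above prevent a ZeroDivisionError when no faces were detected in any frame; the same pattern reappears in activity_recognition further down. A hypothetical helper (a sketch, not part of this commit) would collapse the repetition:

def safe_percentage(count, total):
    # share of `count` in `total` as a percentage; 0 when there were no detections
    return float(count / total) * 100 if total > 0 else 0

happy_perct = safe_percentage(happy_count, detection_count)
sad_perct = safe_percentage(sad_count, detection_count)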
@@ -349,3 +340,62 @@ def get_frame_emotion_recognition(video_name):
# return the detected frame percentages
return sorted_activity_frame_recognitions
# this method will retrieve the student emotion summary for a given time period
def get_student_emotion_summary_for_period(emotions):
# declare variables to add percentage values
happy_perct_combined = 0.0
sad_perct_combined = 0.0
angry_perct_combined = 0.0
disgust_perct_combined = 0.0
surprise_perct_combined = 0.0
neutral_perct_combined = 0.0
# get the number of emotion records to calculate the average
no_of_emotions = len(emotions)
individual_lec_emotions = []
emotion_labels = ["happy_perct", "sad_perct", "angry_perct", "disgust_perct", "surprise_perct", "neutral_perct"]
# iterate through the emotion records
for emotion in emotions:
individual_emotion = {}
individual_emotion["happy_perct"] = float(emotion['happy_perct'])
individual_emotion["sad_perct"] = float(emotion['sad_perct'])
individual_emotion["angry_perct"] = float(emotion['angry_perct'])
individual_emotion["disgust_perct"] = float(emotion['disgust_perct'])
individual_emotion["surprise_perct"] = float(emotion['surprise_perct'])
individual_emotion["neutral_perct"] = float(emotion['neutral_perct'])
happy_perct_combined += float(emotion['happy_perct'])
sad_perct_combined += float(emotion['sad_perct'])
angry_perct_combined += float(emotion['angry_perct'])
disgust_perct_combined += float(emotion['disgust_perct'])
surprise_perct_combined += float(emotion['surprise_perct'])
neutral_perct_combined += float(emotion['neutral_perct'])
# append to the list
individual_lec_emotions.append(individual_emotion)
# calculate the average percentages
happy_average_perct = round((happy_perct_combined / no_of_emotions), 1)
sad_average_perct = round((sad_perct_combined / no_of_emotions), 1)
angry_average_perct = round((angry_perct_combined / no_of_emotions), 1)
disgust_average_perct = round((disgust_perct_combined / no_of_emotions), 1)
surprise_average_perct = round((surprise_perct_combined / no_of_emotions), 1)
neutral_average_perct = round((neutral_perct_combined / no_of_emotions), 1)
percentages = {}
percentages["happy_perct"] = happy_average_perct
percentages["sad_perct"] = sad_average_perct
percentages["angry_perct"] = angry_average_perct
percentages["disgust_perct"] = disgust_average_perct
percentages["surprise_perct"] = surprise_average_perct
percentages["neutral_perct"] = neutral_average_perct
return percentages, individual_lec_emotions, emotion_labels
\ No newline at end of file
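A quick sanity check of get_student_emotion_summary_for_period with hand-built records (hypothetical values; runnable outside Django, since the function only reads dict keys):

records = [
    {"happy_perct": 40.0, "sad_perct": 10.0, "angry_perct": 5.0,
     "disgust_perct": 0.0, "surprise_perct": 5.0, "neutral_perct": 40.0},
    {"happy_perct": 20.0, "sad_perct": 20.0, "angry_perct": 10.0,
     "disgust_perct": 0.0, "surprise_perct": 10.0, "neutral_perct": 40.0},
]
percentages, individual, labels = get_student_emotion_summary_for_period(records)
print(percentages["happy_perct"])   # 30.0 — the rounded average of 40.0 and 20.0

Note that an empty list would raise ZeroDivisionError at the averaging step, since no_of_emotions is never checked.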
@@ -51,7 +51,7 @@ def activity_recognition(video_path):
note_taking_count = 0
listening_count = 0
# video activity didrectory
# video activity directory
VIDEO_ACTIVITY_DIR = os.path.join(ACTIVITY_DIR, video_path)
# creating the directory for the video
@@ -75,7 +75,6 @@ def activity_recognition(video_path):
os.mkdir(FRAME_DIR)
image = cv2.resize(image, size)
# image = ImageOps.fit(image, size, Image.ANTIALIAS)
detections = person_detection(image, net)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
@@ -130,10 +129,10 @@ def activity_recognition(video_path):
p.write("yes")
# calculating the percentages for each label
phone_perct = float(phone_checking_count / total_detections) * 100
talking_perct = float(talking_count / total_detections) * 100
note_perct = float(note_taking_count / total_detections) * 100
listening_perct = float(listening_count / total_detections) * 100
phone_perct = float(phone_checking_count / total_detections) * 100 if total_detections > 0 else 0
talking_perct = float(talking_count / total_detections) * 100 if total_detections > 0 else 0
note_perct = float(note_taking_count / total_detections) * 100 if total_detections > 0 else 0
listening_perct = float(listening_count / total_detections) * 100 if total_detections > 0 else 0
# assigning the percentages to the dictionary
percentages["phone_perct"] = phone_perct
@@ -572,3 +571,47 @@ def get_individual_student_evaluation(video_name, student_name):
percentages['listening_perct'] = listening_perct
return percentages
# this method will retrieve the student activity summary for a given time period
def get_student_activity_summary_for_period(activities):
# declare variables to add percentage values
phone_checking_perct_combined = 0.0
listening_perct_combined = 0.0
note_taking_perct_combined = 0.0
# get the number of activities to calculate the average
no_of_activities = len(activities)
individual_lec_activities = []
activity_labels = ["phone_perct", "listening_perct", "writing_perct"]
# iterate through the activities
for activity in activities:
individual_activity = {}
individual_activity["phone_perct"] = float(activity['phone_perct'])
individual_activity["listening_perct"] = float(activity['listening_perct'])
individual_activity["writing_perct"] = float(activity['writing_perct'])
phone_checking_perct_combined += float(activity['phone_perct'])
listening_perct_combined += float(activity['listening_perct'])
note_taking_perct_combined += float(activity['writing_perct'])
# append to the list
individual_lec_activities.append(individual_activity)
# calculate the average percentages
phone_checking_average_perct = round((phone_checking_perct_combined / no_of_activities), 1)
listening_average_perct = round((listening_perct_combined / no_of_activities), 1)
note_taking_average_perct = round((note_taking_perct_combined / no_of_activities), 1)
percentages = {}
percentages["phone_perct"] = phone_checking_average_perct
percentages["listening_perct"] = listening_average_perct
percentages["writing_perct"] = note_taking_average_perct
return percentages, individual_lec_activities, activity_labels
\ No newline at end of file
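This function is structurally identical to get_student_emotion_summary_for_period above, differing only in its label list (and in mapping note-taking onto the stored writing_perct key). A hedged refactoring sketch — a hypothetical summarize_percentages both functions could delegate to, not code from this commit:

def summarize_percentages(records, labels):
    # average each labelled percentage across records; assumes a non-empty list
    combined = {label: 0.0 for label in labels}
    individual = []
    for record in records:
        entry = {label: float(record[label]) for label in labels}
        for label in labels:
            combined[label] += entry[label]
        individual.append(entry)
    averages = {label: round(total / len(records), 1) for label, total in combined.items()}
    return averages, individual, labels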
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 29 17:52:00 2020
@author: hp
"""
import cv2
import numpy as np
import os
def get_face_detector(modelFile = "models/res10_300x300_ssd_iter_140000.caffemodel",
configFile = "models/deploy.prototxt"):
"""
Get the face detection caffe model of OpenCV's DNN module
Parameters
----------
modelFile : string, optional
Path to model file. The default is "models/res10_300x300_ssd_iter_140000.caffemodel".
configFile : string, optional
Path to config file. The default is "models/deploy.prototxt".
Returns
-------
model : dnn_Net
"""
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers")
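# NOTE: the modelFile/configFile defaults above are overridden here with project-local absolute paths, so the arguments are effectively ignored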
modelFile = os.path.join(CLASSIFIER_DIR, "res10_300x300_ssd_iter_140000.caffemodel")
configFile = os.path.join(CLASSIFIER_DIR, "deploy.prototxt")
model = cv2.dnn.readNetFromCaffe(configFile, modelFile)
return model
def find_faces(img, model):
"""
Find the faces in an image
Parameters
----------
img : np.uint8
Image to find faces from
model : dnn_Net
Face detection model
Returns
-------
faces : list
List of coordinates of the faces detected in the image
"""
h, w = img.shape[:2]
blob = cv2.dnn.blobFromImage(cv2.resize(img, (300, 300)), 1.0,
(300, 300), (104.0, 177.0, 123.0))
model.setInput(blob)
res = model.forward()
faces = []
for i in range(res.shape[2]):
confidence = res[0, 0, i, 2]
if confidence > 0.5:
box = res[0, 0, i, 3:7] * np.array([w, h, w, h])
(x, y, x1, y1) = box.astype("int")
faces.append([x, y, x1, y1])
return faces
def draw_faces(img, faces):
"""
Draw faces on image
Parameters
----------
img : np.uint8
Image to draw faces on
faces : List of face coordinates
Coordinates of faces to draw
Returns
-------
None.
"""
for x, y, x1, y1 in faces:
cv2.rectangle(img, (x, y), (x1, y1), (0, 0, 255), 3)
\ No newline at end of file
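Chaining the three helpers gives the module's intended flow; a minimal sketch run from within this module, with a hypothetical test image:

model = get_face_detector()
img = cv2.imread("test.jpg")            # hypothetical input image
faces = find_faces(img, model)          # [[x, y, x1, y1], ...]
draw_faces(img, faces)                  # boxes drawn in red, in place
cv2.imwrite("test_faces.jpg", img)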
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 29 19:47:08 2020
@author: hp
"""
import cv2
import numpy as np
import tensorflow as tf
from tensorflow import keras
import os
def get_landmark_model():
"""
Get the facial landmark model.
Original repository: https://github.com/yinguobing/cnn-facial-landmark
Parameters
----------
saved_model : string, optional
Path to facial landmarks model. The default is 'models/pose_model'.
Returns
-------
model : Tensorflow model
Facial landmarks model
"""
# define the location of the pose model
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
saved_model = os.path.join(BASE_DIR, "FirstApp\\classifiers\\pose_model")
model = keras.models.load_model(saved_model)
return model
def get_square_box(box):
"""Get a square box out of the given box, by expanding it."""
left_x = box[0]
top_y = box[1]
right_x = box[2]
bottom_y = box[3]
box_width = right_x - left_x
box_height = bottom_y - top_y
# Check if box is already a square. If not, make it a square.
diff = box_height - box_width
delta = int(abs(diff) / 2)
if diff == 0: # Already a square.
return box
elif diff > 0: # Height > width, a slim box.
left_x -= delta
right_x += delta
if diff % 2 == 1:
right_x += 1
else: # Width > height, a short box.
top_y -= delta
bottom_y += delta
if diff % 2 == 1:
bottom_y += 1
# Make sure box is always square.
assert ((right_x - left_x) == (bottom_y - top_y)), 'Box is not square.'
return [left_x, top_y, right_x, bottom_y]
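For example, a slim box [10, 20, 50, 100] (width 40, height 80) gives diff = 40 and delta = 20, so the function returns [-10, 20, 70, 100], an 80 x 80 square. The left edge can go negative here; detect_marks below clamps the coordinates back to zero.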
def move_box(box, offset):
"""Move the box to direction specified by vector offset"""
left_x = box[0] + offset[0]
top_y = box[1] + offset[1]
right_x = box[2] + offset[0]
bottom_y = box[3] + offset[1]
return [left_x, top_y, right_x, bottom_y]
def detect_marks(img, model, face):
"""
Find the facial landmarks in an image from the faces
Parameters
----------
img : np.uint8
The image in which landmarks are to be found
model : Tensorflow model
Loaded facial landmark model
face : list
Face coordinates (x, y, x1, y1) in which the landmarks are to be found
Returns
-------
marks : numpy array
facial landmark points
"""
offset_y = int(abs((face[3] - face[1]) * 0.1))
box_moved = move_box(face, [0, offset_y])
facebox = get_square_box(box_moved)
# reassigning the facebox values
facebox[0] = facebox[0] if facebox[0] > 0 else 0
facebox[1] = facebox[1] if facebox[1] > 0 else 0
facebox[2] = facebox[2] if facebox[2] > 0 else 0
facebox[3] = facebox[3] if facebox[3] > 0 else 0
# draw a bounding box
cv2.rectangle(img, (facebox[0], facebox[1]), (facebox[2], facebox[3]), (0, 255, 0), 2)
face_img = img[facebox[1]: facebox[3],
facebox[0]: facebox[2]]
marks = np.zeros((68, 2))
# if the list length is more than 0
if len(face_img) > 0:
face_img = cv2.resize(face_img, (128, 128))
face_img = cv2.cvtColor(face_img, cv2.COLOR_BGR2RGB)
# # Actual detection.
predictions = model.signatures["predict"](
tf.constant([face_img], dtype=tf.uint8))
# Convert predictions to landmarks.
marks = np.array(predictions['output']).flatten()[:136]
marks = np.reshape(marks, (-1, 2))
marks *= (facebox[2] - facebox[0])
marks[:, 0] += facebox[0]
marks[:, 1] += facebox[1]
marks = marks.astype(np.uint)
# return marks
# return detected facial marks and face coordinates
return marks, facebox
def draw_marks(image, marks, color=(0, 255, 0)):
"""
Draw the facial landmarks on an image
Parameters
----------
image : np.uint8
Image on which landmarks are to be drawn.
marks : list or numpy array
Facial landmark points
color : tuple, optional
Color to which landmarks are to be drawn with. The default is (0, 255, 0).
Returns
-------
None.
"""
for mark in marks:
cv2.circle(image, (mark[0], mark[1]), 2, color, -1, cv2.LINE_AA)
\ No newline at end of file
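End to end, these helpers pair with the face detector module above; a sketch assuming both files are importable as modules and a test image exists:

from face_detector import get_face_detector, find_faces   # the module shown earlier

face_model = get_face_detector()
landmark_model = get_landmark_model()
img = cv2.imread("test.jpg")                               # hypothetical input image
for face in find_faces(img, face_model):
    marks, facebox = detect_marks(img, landmark_model, face)
    draw_marks(img, marks)                                 # 68 green points per face
cv2.imwrite("test_marks.jpg", img)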
This diff is collapsed.
import jinja2 as ji
import pdfkit
import os
# from DemoProject import jinja2
from integrated_slpes import jinja2
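# NOTE: "jinja2" here is the project's integrated_slpes/jinja2.py module (shown at the end of this commit), which wraps the real Jinja2 package imported above as "ji"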
def generate_pdf_file(object):
# templateLoader = jinja2.FileSystemLoader(searchpath="../")
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
TEMPLATE_DIR = os.path.join(BASE_DIR, "FirstApp\\templates\\FirstApp")
HTML_PATH = os.path.join(TEMPLATE_DIR, "pdf_template_1.html")
PDF_DIRECTORY = os.path.join(BASE_DIR, "FirstApp\\files")
templateLoader = ji.FileSystemLoader(TEMPLATE_DIR)
new_env = jinja2.environment()
templateEnv = jinja2.Environment(loader=templateLoader)
TEMPLATE_FILE = "pdf_template.html"
template = templateEnv.get_template(TEMPLATE_FILE)
print('variables: ', templateEnv.globals['dict'])
# render the template
outputText = template.render(lecturer_name=object['lecturer_name'], subject=object['subject_name'], date=object['date'], static=new_env.globals['static'])
html_file = open(HTML_PATH, "w")
html_file.write(outputText)
html_file.close()
# create a new pdf file path
NEW_PDF_PATH = os.path.join(PDF_DIRECTORY, "activity.pdf")
asset_path = os.path.join('D:/SLIIT/Year 4/CDAP/project/2020-101/assets/FirstApp/css/sb-admin-2.min.css')
network_path = "file:/" + asset_path
# options = {'enable-local-file-access': network_path}
options = {'enable-local-file-access': asset_path, 'load-error-handling': 'ignore'}
# create a new pdf file
pdfkit.from_file(HTML_PATH, NEW_PDF_PATH, options=options)
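A hedged invocation sketch, with hypothetical values for the keys the render call reads:

report_details = {
    'lecturer_name': 'J. Doe',              # hypothetical values
    'subject_name': 'Software Engineering',
    'date': '2020-08-25',
}
generate_pdf_file(report_details)           # writes FirstApp/files/activity.pdf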
@@ -117,10 +117,6 @@ def calculate_pose_estimation_for_student(video_name, student, poses):
left_upper_x = 0 if (middle_x - fraction) < 0 else (middle_x - fraction)
print('head_y: ', head_y)
print('fraction: ', fraction)
print('distance: ', distance)
print('left_upper_x: ', left_upper_x)
# extract the new image
new_img = detection_img[head_y:head_y+fraction, left_upper_x:left_upper_x+distance]
# Generated by Django 2.2.11 on 2020-08-25 11:28
import FirstApp.MongoModels
from django.db import migrations, models
import django.db.models.deletion
import djongo.models.fields
class Migration(migrations.Migration):
dependencies = [
('FirstApp', '0006_lecturercredentials'),
]
operations = [
migrations.RenameField(
model_name='lectureemotionreport',
old_name='lecture_id',
new_name='lecture_emotion_id',
),
migrations.AlterField(
model_name='lectureemotionreport',
name='angry_perct',
field=models.DecimalField(decimal_places=1, default=0.0, max_digits=3),
),
migrations.AlterField(
model_name='lectureemotionreport',
name='disgust_perct',
field=models.DecimalField(decimal_places=1, default=0.0, max_digits=3),
),
migrations.AlterField(
model_name='lectureemotionreport',
name='happy_perct',
field=models.DecimalField(decimal_places=1, default=0.0, max_digits=3),
),
migrations.AlterField(
model_name='lectureemotionreport',
name='neutral_perct',
field=models.DecimalField(decimal_places=1, default=0.0, max_digits=3),
),
migrations.AlterField(
model_name='lectureemotionreport',
name='sad_perct',
field=models.DecimalField(decimal_places=1, default=0.0, max_digits=3),
),
migrations.AlterField(
model_name='lectureemotionreport',
name='surprise_perct',
field=models.DecimalField(decimal_places=1, default=0.0, max_digits=3),
),
migrations.CreateModel(
name='LectureVideo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lecture_video_id', models.CharField(max_length=10)),
('date', models.DateField()),
('video_name', models.CharField(max_length=50)),
('video_length', models.DurationField()),
('lecturer', models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='FirstApp.Lecturer')),
('subject', models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='FirstApp.Subject')),
],
),
migrations.CreateModel(
name='LectureGazeEstimation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lecture_gaze_id', models.CharField(max_length=10)),
('lecture_video_id',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='FirstApp.LectureVideo')),
],
),
migrations.CreateModel(
name='LectureActivity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lecture_activity_id', models.CharField(max_length=10)),
('talking_perct', models.DecimalField(decimal_places=1, default=0.0, max_digits=3)),
('listening_perct', models.DecimalField(decimal_places=1, default=0.0, max_digits=3)),
('writing_perct', models.DecimalField(decimal_places=1, default=0.0, max_digits=3)),
('phone_perct', models.DecimalField(decimal_places=1, default=0.0, max_digits=3)),
('lecture_video_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='FirstApp.LectureVideo')),
],
),
migrations.CreateModel(
name='FacultyTimetable',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timetable_id', models.CharField(max_length=10)),
('timetable', djongo.models.fields.ArrayField(model_container=FirstApp.MongoModels.DateTimeTable)),
('faculty', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='FirstApp.Faculty')),
],
),
migrations.AddField(
model_name='lectureemotionreport',
name='lecture_video_id',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='FirstApp.LectureVideo'),
),
]
# Generated by Django 2.2.11 on 2020-08-25 12:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('FirstApp', '0007_auto_20200825_1658'),
]
operations = [
migrations.AddField(
model_name='lecturegazeestimation',
name='looking_down_and_left_perct',
field=models.DecimalField(decimal_places=1, default=0.0, max_digits=3),
),
migrations.AddField(
model_name='lecturegazeestimation',
name='looking_down_and_right_perct',
field=models.DecimalField(decimal_places=1, default=0.0, max_digits=3),
),
migrations.AddField(
model_name='lecturegazeestimation',
name='looking_front_perct',
field=models.DecimalField(decimal_places=1, default=0.0, max_digits=3),
),
migrations.AddField(
model_name='lecturegazeestimation',
name='looking_up_and_left_perct',
field=models.DecimalField(decimal_places=1, default=0.0, max_digits=3),
),
migrations.AddField(
model_name='lecturegazeestimation',
name='looking_up_and_right_perct',
field=models.DecimalField(decimal_places=1, default=0.0, max_digits=3),
),
]
@@ -226,3 +226,12 @@ class VideoMetaSerializer(serializers.ModelSerializer):
class Meta:
model = VideoMeta
fields = '__all__'
# lecture gaze serializer
class LectureGazeEstimationSerializer(serializers.ModelSerializer):
lecture_video_id = LectureVideoSerializer()
class Meta:
model = LectureGazeEstimation
fields = '__all__'
\ No newline at end of file
This diff is collapsed.
This diff is collapsed.
@@ -3,6 +3,8 @@
<head>
{% load static %}
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
@@ -12,11 +14,11 @@
<title>SB Admin 2 - Charts</title>
<!-- Custom fonts for this template-->
<link href="vendor/fontawesome-free/css/all.min.css" rel="stylesheet" type="text/css">
<link href="{% static 'FirstApp/vendor/fontawesome-free/css/all.min.css' %}" rel="stylesheet" type="text/css">
<link href="https://fonts.googleapis.com/css?family=Nunito:200,200i,300,300i,400,400i,600,600i,700,700i,800,800i,900,900i" rel="stylesheet">
<!-- Custom styles for this template-->
<link href="css/sb-admin-2.min.css" rel="stylesheet">
<link href="{% static 'FirstApp/css/sb-admin-2.min.css' %}" rel="stylesheet">
</head>
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
@@ -41,6 +41,9 @@ urlpatterns = [
# tables view
path('tables', views.tables),
# test view (delete later)
path('test', views.test),
url(r'^register', views.RegisterViewSet),
# re_path('video/?video_name<str:video_name>', views.video),
url(r'^teachers/', views.teachersList.as_view()),
@@ -75,7 +78,6 @@ urlpatterns = [
# timetable API
url(r'^timetable', api.FacultyTimetableViewSet.as_view()),
##### VIDEO Section #####
# lecture video API
@@ -84,7 +86,6 @@ urlpatterns = [
# lecture video API (to retrieve a lecture)
url(r'^get-lecture-video/$', api.GetLectureVideoViewSet.as_view()),
##### ACTIVITIES API #####
# lecture activity API (to retrieve lecture activities)
@@ -112,6 +113,9 @@ urlpatterns = [
url(r'^get-lecture-activity-individual-student-evaluation/$',
api.GetLectureActivityIndividualStudentEvaluation.as_view()),
# lecture activity report generation
url(r'^lecture-activity-report-generation/$',
api.GenerateActivityReport.as_view()),
###### EMOTION Section #####
# getting lecture emotion record availability
@@ -133,7 +137,6 @@ urlpatterns = [
# lecture emotion detection for frames API (to retrieve detections for each frame in lecture video)
url(r'^get-lecture-emotion-for-frame/$', api.GetLectureEmotionRecognitionsForFrames.as_view()),
###### POSE Section #####
# lecture video API (for Pose estimation)
url(r'^get-lecture-video-for-pose/$', api.GetLectureVideoForPose.as_view()),
@@ -147,6 +150,27 @@ urlpatterns = [
# lecture video individual student process pose estimation API (for Pose estimation)
url(r'^process-lecture-video-individual-pose-estimation', api.ProcessIndividualStudentPoseEstimation.as_view()),
##### GAZE Section #####
# lecture video Gaze estimation
url(r'^get-lecture-video-gaze-estimation-availability/$', api.GetLectureGazeEstimationAvailaibility.as_view()),
# process a lecture Gaze estimation
url(r'^process-lecture-gaze-estimation/$', api.ProcessLectureGazeEstimation.as_view()),
# retrieve a Lecture Gaze estimation
url(r'^get-lecture-gaze-estimation/$', api.GetLectureGazeEstimationViewSet.as_view()),
# lecture gaze estimation for frames API (to retrieve detections for each frame in lecture video)
url(r'^get-lecture-gaze-estimation-for-frame/$', api.GetLectureGazeEstimationForFrames.as_view()),
##### VIEW STUDENT BEHAVIOR SUMMARY SECTION #####
# retrieves student behavior summary for specified time period
url(r'^get-student-behavior-summary-for-period/$', api.GetStudentBehaviorSummaryForPeriod.as_view()),
# routers
# path('', include(router.urls)),
path('api-auth/', include('rest_framework.urls', namespace='rest_framework'))
This diff is collapsed.
from django.templatetags.static import static
from django.urls import reverse
from jinja2 import Environment
def environment(**options):
env = Environment(**options)
env.globals.update({
'static': static,
'url': reverse,
})
return env
\ No newline at end of file
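This environment callable is the hook Django's Jinja2 template backend expects; a settings sketch, assuming the module lives at integrated_slpes/jinja2.py as the report generator's import suggests:

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.jinja2.Jinja2',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'environment': 'integrated_slpes.jinja2.environment',
        },
    },
]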