Commit a577e3e7 authored by SohanDanushka

Merge branch 'QA_RELEASE' into db_and_monitoring_IT17097284

parents 71039e2f ea733587
@@ -10,6 +10,7 @@ from rest_framework.views import APIView
from rest_framework.parsers import MultiPartParser, FormParser
from . import record
from . import test as t
from rest_framework.views import *
@@ -171,3 +172,29 @@ class InitiateLecture(APIView):
return Response({
"response": "success"
})
class stopRecording(APIView):

    def get(self, request):
        t.isStop = 1

        return Response({
            "response": "stopped"
        })

    def post(self, request):
        pass


# test method (delete later)
class TestAPI(APIView):

    def get(self, request):
        t.isStop = 0
        param = request.query_params.get('param')
        # t.test()
        t.IPWebcamTest()

        return Response({
            "response": "started"
        })

    def post(self, request):
        pass
\ No newline at end of file
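A quick way to exercise these two endpoints during development is a short client script. This is only a sketch: it assumes the dev server is running locally on port 8000 and that the routes are mounted under the /attendance/ prefix used by the front-end fetches below. Note that TestAPI.get() only returns after t.IPWebcamTest() finishes, i.e. after the stop flag is raised, so the start request has to run on its own thread.

import threading
import time
import requests

BASE = 'http://127.0.0.1:8000/attendance'  # assumed prefix; adjust to the project's URL configuration

# start the recording loop on a background thread, since the view blocks until stopped
starter = threading.Thread(
    target=lambda: print('start:', requests.get(BASE + '/test-api/', params={'param': 'sachith'}).json())
)
starter.start()

time.sleep(10)  # record for roughly ten seconds

# stopRecording.get() sets t.isStop = 1, which breaks the capture loop in test.py
print('stop:', requests.get(BASE + '/stop-api/').json())
starter.join()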
@@ -23,15 +23,12 @@ maskNet = load_model(os.path.join(settings.BASE_DIR,'face_detector/mask_detector
class IPWebCam(object):
def __init__(self):
self.url = "http://192.168.8.100:8080/shot.jpg"
self.url = "http://192.168.8.103:8080/shot.jpg"
self._count = 0
def __del__(self):
cv2.destroyAllWindows()
def get_frame(self):
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))
imgResp = urllib.request.urlopen(self.url)
imgNp = np.array(bytearray(imgResp.read()),dtype=np.uint8)
img= cv2.imdecode(imgNp,-1)
@@ -46,9 +43,6 @@ class IPWebCam(object):
frame_flip = cv2.flip(resize,1)
ret, jpeg = cv2.imencode('.jpg', frame_flip)
# capture frame and save on a given time in order to run the face recognition
sleep(3); cv2.imwrite("%d.jpg" % self._count, img)
self._count =+1
return jpeg.tobytes()
......
@@ -43,6 +43,50 @@ function toggleLectureLive() {
y.style.display = "none";
}
}
var timer = false;
//this is a test function
function testAPI() {
timer = true
startTimer()
let param = 'sachith';
//call the API
fetch('http://127.0.0.1:8000/attendance/test-api/?param=' + param)
.then((res) => res.json())
.then((out) => {})
.catch((err) => alert('error: ' + err));
}
var time = 'time';
function f() {
let param = 'sachith';
//call the API
fetch('http://127.0.0.1:8000/attendance/stop-api/?param=' + param)
.then((res) => res.json())
.then((out) => {
timer = false
startTimer();
})
.catch((err) => alert('error: ' + err));
}
function startTimer() {
var min = 0;
var seconds = 0;
if (timer) {
var sec = 0;
function pad ( val ) { return val > 9 ? val : "0" + val; }
setInterval( function(){
sec++;
min = pad(parseInt(sec/60,10));
seconds = pad(sec%60);
document.getElementById("seconds").innerHTML=seconds;
document.getElementById("minutes").innerHTML=min;
}, 1000);
} else {
document.getElementById("secondsStop").innerHTML=seconds;
document.getElementById("minutesStop").innerHTML=min;
}
}
</script>
{% endblock %}
@@ -60,13 +104,21 @@ function toggleLectureLive() {
<div class="card-body">
<button type="button" class="btn btn-success" id="initiate_btn" onclick="toggleLectureLive()">Show Live Stream</button>
{# <button type="button" class="btn btn-success" id="test_btn" onclick="testAPI()">Test</button>#}
</div>
<span id="minutes"></span>:<span id="seconds"></span>
<span id="minutesStop"></span>:<span id="secondsStop"></span>
<div style="vertical-align: middle; border-style: none; background-color: #055270; height: 500px; width: 100%"> <div style="vertical-align: middle; border-style: none; background-color: #055270; height: 500px; width: 100%">
<div class="row justify-content-center"> <div class="row justify-content-center">
<img id="liveStreamLecture" style="display: none; height: inherit; margin-bottom: -25px;" src="{% url 'webcam_feed' %}"> <img id="liveStreamLecture" style="display: none; height: inherit; margin-bottom: -25px;" src="{% url 'webcam_feed' %}">
</div> </div>
<div class="row justify-content-center"> <div class="row justify-content-center">
<button style="display: none; width: 70px; height: 70px;" id="liveStreamLectureStartButton" class="btn btn-warning btn-circle"><i class="fas fa-video"></i></button> <div class="col">
<button style="display: none; width: 70px; height: 70px;" id="liveStreamLectureStartButton" class="btn btn-warning btn-circle" onclick="testAPI()"><i class="fas fa-video"></i></button>
</div>
<div class="col">
<button style="display: block; width: 70px; height: 70px;" id="liveStreamLectureStartButton" class="btn btn-warning btn-circle" onclick="f()"><i class="fas fa-square"></i></button>
</div>
</div>
</div>
</div>
......
import urllib3
import urllib.request as req
import cv2
import numpy as np
import time

isStop = 0


def IPWebcamTest():

    # Replace the URL with your own IP webcam shot.jpg IP:port
    # url = 'http://192.168.2.35:8080/shot.jpg'
    url = 'http://192.168.8.103:8080/shot.jpg'

    # url = 'http://192.168.1.11:8080/startvideo?force=1&tag=rec'
    # url = 'http://192.168.1.11:8080/stopvideo?force=1'

    size = (600, 600)
    vid_cod = cv2.VideoWriter_fourcc(*'XVID')
    # vid_cod = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
    # output = cv2.VideoWriter("cam_video.avi", vid_cod, 20.0, (640, 480))
    # output = cv2.VideoWriter("cam_video.mp4", vid_cod, 20.0, size)
    output = cv2.VideoWriter("cam_video.mp4", vid_cod, 10.0, size)

    no_of_frames = 0

    while True:
        # Use urllib to get the image from the IP camera
        imgResp = req.urlopen(url)
        # imgResp = urllib3.respon

        # Use numpy to convert the response bytes into an array
        imgNp = np.array(bytearray(imgResp.read()), dtype=np.uint8)

        # Finally decode the array to an OpenCV-usable format ;)
        img = cv2.imdecode(imgNp, -1)

        # resize the image
        img = cv2.resize(img, (600, 600))

        # put the image on screen
        # cv2.imshow('IPWebcam', img)

        # write to the output writer
        output.write(img)

        # To give the processor a little less stress
        # time.sleep(0.1)
        # time.sleep(1)

        no_of_frames += 1

        if isStop == 1:
            break

    # imgResp.release()
    # cv2.destroyAllWindows()

    print('no of frames: ', no_of_frames)
\ No newline at end of file
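Because IPWebcamTest() blocks until isStop is set, the view that triggers it ties up a request worker for the whole recording. One possible refactoring (a sketch only, not part of this commit; the helper name is hypothetical) is to run the capture loop on a daemon thread so the start endpoint can return immediately:

import threading
from . import test as t

def start_recording_in_background():
    # Hypothetical helper: run the capture loop without blocking the HTTP request.
    t.isStop = 0
    worker = threading.Thread(target=t.IPWebcamTest, daemon=True)
    worker.start()
    return worker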
@@ -2,7 +2,7 @@ from django.urls import path
from .api import student_list, student_detail, subject_list, subject_detail, attendance_list, StudentAPIView, \
StudentDetails
from django.conf.urls import url
from .api import FileView, InitiateLecture
from .api import *
from . import views
urlpatterns = [
@@ -19,5 +19,10 @@ urlpatterns = [
url(r'^upload/$', FileView.as_view(), name='file-upload'),
path('webcam_feed', views.webcam_feed, name='webcam_feed'),
# this url will initiate the lecture
url(r'^process-initiate-lecture/$', InitiateLecture.as_view()),
# this url will be used for testing
url(r'^test-api/$', TestAPI.as_view()),
url(r'^stop-api/$', stopRecording.as_view())
]
from django.shortcuts import render
from django.http.response import StreamingHttpResponse
from AttendanceApp.camera import IPWebCam
from FirstApp.MongoModels import LectureVideo
from FirstApp.serializers import LectureVideoSerializer
def initiate_lecture(request):
lecture_video = LectureVideo.objects.all()
lecture_video_ser = LectureVideoSerializer(lecture_video, many=True)
print('lecture video data: ', lecture_video_ser.data)
return render(request, "AttendanceApp/Initiate_lecture.html")
def gen(camera):
while True:
frame = camera.get_frame()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
def webcam_feed(request):
return StreamingHttpResponse(gen(IPWebCam()),
content_type='multipart/x-mixed-replace; boundary=frame')
\ No newline at end of file
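webcam_feed streams JPEG frames separated by the boundary declared in the content type. A minimal consumer (a sketch only; the URL assumes the default dev server address and the /attendance/ prefix) can scan the byte stream for the JPEG start/end markers:

import cv2
import numpy as np
import requests

# assumed URL; matches the 'webcam_feed' route registered in AttendanceApp/urls.py
stream = requests.get('http://127.0.0.1:8000/attendance/webcam_feed', stream=True)

buffer = b''
for chunk in stream.iter_content(chunk_size=1024):
    buffer += chunk
    start = buffer.find(b'\xff\xd8')   # JPEG start-of-image marker
    end = buffer.find(b'\xff\xd9')     # JPEG end-of-image marker
    if start != -1 and end != -1 and end > start:
        jpg = buffer[start:end + 2]
        buffer = buffer[end + 2:]
        frame = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
        cv2.imshow('webcam_feed', frame)
        if cv2.waitKey(1) == 27:       # Esc to quit
            break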
@@ -14,4 +14,10 @@ admin.site.register(LectureVideo)
admin.site.register(LectureActivity)
admin.site.register(LectureGazeEstimation)
admin.site.register(Admin)
admin.site.register(AdminCredentialDetails)
\ No newline at end of file
admin.site.register(LectureActivityFrameRecognitions)
admin.site.register(LectureActivityFrameGroupings)
admin.site.register(LectureEmotionFrameRecognitions)
admin.site.register(LectureEmotionFrameGroupings)
admin.site.register(LectureGazeFrameRecognitions)
admin.site.register(LectureGazeFrameGroupings)
\ No newline at end of file
This diff is collapsed.
@@ -52,6 +52,8 @@ def emotion_recognition(classifier, face_classifier, image):
roi_gray = gray[y:y + h, x:x + w]
roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
# rect,face,image = face_detector(frame)
# draw a rectangle
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
if np.sum([roi_gray]) != 0:
roi = roi_gray.astype('float') / 255.0
@@ -63,6 +65,9 @@ def emotion_recognition(classifier, face_classifier, image):
preds = classifier.predict(roi)[0]
label = class_labels[preds.argmax()]
# put the emotion label
cv2.putText(image, label, (x, y), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 255, 0), 3)
return label
@@ -79,6 +84,7 @@ def detect_emotion(video):
face_classifier = cv2.CascadeClassifier(os.path.join(BASE_DIR, 'FirstApp\classifiers\haarcascade_frontalface_default.xml'))
classifier_path = os.path.join(BASE_DIR, 'FirstApp\classifiers\Emotion_little_vgg.h5')
classifier = load_model(classifier_path)
EMOTION_DIR = os.path.join(BASE_DIR, "static\\FirstApp\\emotion")
meta_data = VideoMeta()
class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
@@ -99,6 +105,20 @@ def detect_emotion(video):
# for testing purposes
print('starting the emotion recognition process')
# get width and height of the video frames
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
# get the video frame size
size = (frame_width, frame_height)
# this is the annotated video path
ANNOTATED_VIDEO_PATH = os.path.join(EMOTION_DIR, video)
# initializing the video writer
vid_cod = cv2.VideoWriter_fourcc(*'XVID')
output = cv2.VideoWriter(ANNOTATED_VIDEO_PATH, vid_cod, 30.0, size)
while (count_frames < frame_count):
# Grab a single frame of video
ret, frame = cap.read()
@@ -135,6 +155,9 @@ def detect_emotion(video):
# for testing purposes
print('emotion frame count: ', count_frames)
# write the video frame to the video writer
output.write(frame)
count_frames += 1
# setting up the counted values
@@ -146,8 +169,13 @@ def detect_emotion(video):
meta_data.surprise_count = count_surprise
cap.release()
output.release()
cv2.destroyAllWindows()
# after saving the video, save the changes to static content
p = os.popen("python manage.py collectstatic", "w")
p.write("yes")
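Piping "yes" into manage.py collectstatic through os.popen works, but it spawns a shell for every processed video. A lighter alternative (only a sketch, not what this commit does) is to invoke the management command in-process:

from django.core.management import call_command

# equivalent to `python manage.py collectstatic --noinput`, run inside the current process
call_command('collectstatic', interactive=False, verbosity=0)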
# for testing purposes
print('ending the emotion recognition process')
@@ -198,6 +226,8 @@ def get_frame_emotion_recognition(video_name):
# for testing purposes
print('starting the emotion frame recognition process')
# looping through the frames
while (frame_count < no_of_frames):
@@ -216,18 +246,19 @@ def get_frame_emotion_recognition(video_name):
surprise_count = 0
# get the detections
detections = ar.person_detection(image, net)
detections, persons = ar.person_detection(image, net)
# to count the extracted detections for a frame
detection_count = 0
# if there are detections
if (len(detections) > 0):
# loop through the detections
for detection in detections:
label = emotion_recognition(classifier, face_classifier, detection)
for person in persons:
label = emotion_recognition(classifier, face_classifier, person)
# checking for the label
if label == class_labels[0]:
@@ -422,17 +453,17 @@ def emotion_frame_groupings(video_name, frame_landmarks, frame_group_dict):
neutral_count = 0
detection_count = 0
detections = ar.person_detection(image, net)
detections, persons = ar.person_detection(image, net)
# if there are detections
if (len(detections) > 0):
# looping through the detections in each frame
for detection in detections:
for person in persons:
# run the model and get the emotion label
label = emotion_recognition(classifier, face_classifier, detection)
label = emotion_recognition(classifier, face_classifier, person)
# increment the count based on the label
if label == class_labels[0]:
@@ -639,10 +670,14 @@ def get_emotion_correlations(individual_lec_emotions, lec_recorded_activity_data
# this variable will be used to store the correlations
correlations = []
limit = 10
# limit = 10
limit = len(individual_lec_emotions)
data_index = ['lecture-{}'.format(i + 1) for i in range(len(individual_lec_emotions))]
# declare the correlation data dictionary
corr_data = {}
# student activity labels
student_emotion_labels = ['Happy', 'Sad', 'Angry', 'Surprise', 'Neutral']
lecturer_activity_labels = ['seated', 'standing', 'walking']
@@ -662,31 +697,72 @@ def get_emotion_correlations(individual_lec_emotions, lec_recorded_activity_data
# loop through the lecturer recorded data (lecturer)
for data in lec_recorded_activity_data:
sitting_perct_list.append(int(data['seated_count']))
standing_perct_list.append(int(data['standing_count']))
walking_perct_list.append(int(data['walking_count']))
value = int(data['seated_count'])
value1 = int(data['standing_count'])
value2 = int(data['walking_count'])
if value != 0:
sitting_perct_list.append(int(data['seated_count']))
if value1 != 0:
standing_perct_list.append(int(data['standing_count']))
if value2 != 0:
walking_perct_list.append(int(data['walking_count']))
# loop through the lecturer recorded data (student)
for data in individual_lec_emotions:
happy_perct_list.append(int(data['happy_perct']))
sad_perct_list.append(int(data['sad_perct']))
angry_perct_list.append(int(data['angry_perct']))
surprise_perct_list.append(int(data['surprise_perct']))
neutral_perct_list.append(int(data['neutral_perct']))
corr_data = {'Happy': happy_perct_list, 'Sad': sad_perct_list, 'Angry': angry_perct_list, 'Surprise': surprise_perct_list, 'Neutral': neutral_perct_list,
'seated': sitting_perct_list, 'standing': standing_perct_list, 'walking': walking_perct_list}
value = int(data['happy_perct'])
value1 = int(data['sad_perct'])
value2 = int(data['angry_perct'])
value3 = int(data['surprise_perct'])
value4 = int(data['neutral_perct'])
if value != 0:
happy_perct_list.append(int(data['happy_perct']))
if value1 != 0:
sad_perct_list.append(int(data['sad_perct']))
if value2 != 0:
angry_perct_list.append(int(data['angry_perct']))
if value3 != 0:
surprise_perct_list.append(int(data['surprise_perct']))
if value4 != 0:
neutral_perct_list.append(int(data['neutral_perct']))
if len(happy_perct_list) == len(individual_lec_emotions):
corr_data[student_emotion_labels[0]] = happy_perct_list
if len(sad_perct_list) == len(individual_lec_emotions):
corr_data[student_emotion_labels[1]] = sad_perct_list
if len(angry_perct_list) == len(individual_lec_emotions):
corr_data[student_emotion_labels[2]] = angry_perct_list
if len(surprise_perct_list) == len(individual_lec_emotions):
corr_data[student_emotion_labels[3]] = surprise_perct_list
if len(neutral_perct_list) == len(individual_lec_emotions):
corr_data[student_emotion_labels[4]] = neutral_perct_list
if (len(sitting_perct_list)) == len(individual_lec_emotions):
corr_data[lecturer_activity_labels[0]] = sitting_perct_list
if (len(standing_perct_list)) == len(individual_lec_emotions):
corr_data[lecturer_activity_labels[1]] = standing_perct_list
if (len(walking_perct_list)) == len(individual_lec_emotions):
corr_data[lecturer_activity_labels[2]] = walking_perct_list
# corr_data = {'Happy': happy_perct_list, 'Sad': sad_perct_list, 'Angry': angry_perct_list, 'Surprise': surprise_perct_list, 'Neutral': neutral_perct_list,
# 'seated': sitting_perct_list, 'standing': standing_perct_list, 'walking': walking_perct_list}
# create the dataframe
df = pd.DataFrame(corr_data, index=data_index)
print(df)
# calculate the correlation
pd_series = ut.get_top_abs_correlations(df, limit)
print('====correlated variables=====')
print(pd_series)
# assign a new value to the 'limit' variable
limit = len(pd_series) if len(pd_series) < limit else limit
for i in range(limit):
# this dictionary will get the pandas.Series object's indices and values separately
corr_dict = {}
......
This diff is collapsed.
import requests


def batch_process(video_id, video_name):

    # call the activity process
    activity_resp = requests.get('http://127.0.0.1:8000/process-lecture-activity/?lecture_video_name=' + video_name + '&lecture_video_id=' + video_id)

    # call the emotion process
    emotion_resp = requests.get('http://127.0.0.1:8000/process-lecture-emotion/?lecture_video_name=' + video_name + '&lecture_video_id=' + video_id)

    # call the gaze process
    gaze_resp = requests.get('http://127.0.0.1:8000/process-lecture-gaze-estimation/?lecture_video_name=' + video_name + '&lecture_video_id=' + video_id)

    pass


# this method will save the lecture video
def save_student_lecture_video(student_video):

    # call the API
    student_video_save_resp = requests.post('http://127.0.0.1:8000/lecture-video', student_video)
\ No newline at end of file
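batch_process hits the three endpoints sequentially and discards their responses. A small wrapper can make failures visible instead of silent; this is only a sketch, and the status checks, timeout value, and function name are assumptions rather than part of this commit.

import requests

BASE = 'http://127.0.0.1:8000'

def batch_process_checked(video_id, video_name):
    params = {'lecture_video_name': video_name, 'lecture_video_id': video_id}
    results = {}
    for step, path in [('activity', '/process-lecture-activity/'),
                       ('emotion', '/process-lecture-emotion/'),
                       ('gaze', '/process-lecture-gaze-estimation/')]:
        resp = requests.get(BASE + path, params=params, timeout=600)
        results[step] = resp.status_code
        if resp.status_code != 200:
            print('step failed:', step, resp.status_code)
    return results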
@@ -171,6 +171,7 @@ def process_gaze_estimation(video_path):
ret, img = cap.read()
size = img.shape
font = cv2.FONT_HERSHEY_SIMPLEX
# 3D model points.
model_points = np.array([
(0.0, 0.0, 0.0),  # Nose tip
@@ -211,6 +212,18 @@ def process_gaze_estimation(video_path):
# for testing purposes
print('starting the gaze estimation process')
# get the frame sizes
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
frame_size = (frame_width, frame_height)
# this is the annotated video path
ANNOTATED_VIDEO_PATH = os.path.join(GAZE_DIR, video_path)
# initializing the video writer
vid_cod = cv2.VideoWriter_fourcc(*'XVID')
output = cv2.VideoWriter(ANNOTATED_VIDEO_PATH, vid_cod, 30.0, frame_size)
# iterate the video frames
while True:
ret, img = cap.read()
@@ -285,14 +298,19 @@ def process_gaze_estimation(video_path):
# checking for vertical and horizontal directions
if isLookingDown & isLookingRight:
cv2.putText(img, 'looking down and right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_down_right_count += 1
elif isLookingDown & isLookingLeft:
cv2.putText(img, 'looking down and left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_down_left_count += 1
elif isLookingUp & isLookingRight:
cv2.putText(img, 'looking up and right', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_up_right_count += 1
elif isLookingUp & isLookingLeft:
cv2.putText(img, 'looking up and left', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_up_left_count += 1
elif isLookingFront:
cv2.putText(img, 'looking front', (facebox[0], facebox[1]), font, 2, (255, 255, 128), 3)
head_front_count += 1
@@ -304,6 +322,9 @@ def process_gaze_estimation(video_path):
# for testing purposes
print('gaze estimation count: ', frame_count)
# write to the video writer
output.write(img)
# increment the frame count
frame_count += 1
@@ -330,6 +351,12 @@ def process_gaze_estimation(video_path):
cv2.destroyAllWindows()
cap.release()
output.release()
# after saving the video, save the changes to static content
p = os.popen("python manage.py collectstatic", "w")
p.write("yes")
# for testing purposes
print('ending the gaze estimation process')
@@ -538,6 +565,7 @@ def get_lecture_gaze_estimation_for_frames(video_name):
# for testing purposes
print('ending the gaze estimation for frames process')
@@ -979,10 +1007,15 @@ def get_gaze_correlations(individual_lec_gaze, lec_recorded_activity_data):
# this variable will be used to store the correlations
correlations = []
limit = 10
# limit = 10
limit = len(individual_lec_gaze)
data_index = ['lecture-{}'.format(i + 1) for i in range(len(individual_lec_gaze))]
# declare the correlation data dictionary
corr_data = {}
# student gaze labels
student_gaze_labels = ['Up and Right', 'Up and Left', 'Down and Right', 'Down and Left', 'Front']
lecturer_activity_labels = ['seated', 'standing', 'walking']
@@ -1001,28 +1034,72 @@ def get_gaze_correlations(individual_lec_gaze, lec_recorded_activity_data):
# loop through the lecturer recorded data (lecturer)
for data in lec_recorded_activity_data:
sitting_perct_list.append(int(data['seated_count']))
standing_perct_list.append(int(data['standing_count']))
walking_perct_list.append(int(data['walking_count']))
value = int(data['seated_count'])
value1 = int(data['standing_count'])
value2 = int(data['walking_count'])
if value != 0:
sitting_perct_list.append(int(data['seated_count']))
if value1 != 0:
standing_perct_list.append(int(data['standing_count']))
if value2 != 0:
walking_perct_list.append(int(data['walking_count']))
# loop through the lecturer recorded data (student)
for data in individual_lec_gaze:
upright_perct_list.append(int(data['looking_up_and_right_perct']))
upleft_perct_list.append(int(data['looking_up_and_left_perct']))
downright_perct_list.append(int(data['looking_down_and_right_perct']))
downleft_perct_list.append(int(data['looking_down_and_left_perct']))
front_perct_list.append(int(data['looking_front_perct']))
corr_data = {'Up and Right': upright_perct_list, 'Up and Left': upleft_perct_list, 'Down and Right': downright_perct_list,
'Down and Left': downleft_perct_list, 'Front': front_perct_list,
'seated': sitting_perct_list, 'standing': standing_perct_list, 'walking': walking_perct_list}
value = int(data['looking_up_and_right_perct'])
value1 = int(data['looking_up_and_left_perct'])
value2 = int(data['looking_down_and_right_perct'])
value3 = int(data['looking_down_and_left_perct'])
value4 = int(data['looking_front_perct'])
if value != 0:
upright_perct_list.append(int(data['looking_up_and_right_perct']))
if value1 != 0:
upleft_perct_list.append(int(data['looking_up_and_left_perct']))
if value2 != 0:
downright_perct_list.append(int(data['looking_down_and_right_perct']))
if value3 != 0:
downleft_perct_list.append(int(data['looking_down_and_left_perct']))
if value4 != 0:
front_perct_list.append(int(data['looking_front_perct']))
if (len(upright_perct_list)) == len(individual_lec_gaze):
corr_data[student_gaze_labels[0]] = upright_perct_list
if (len(upleft_perct_list)) == len(individual_lec_gaze):
corr_data[student_gaze_labels[1]] = upleft_perct_list
if (len(downright_perct_list)) == len(individual_lec_gaze):
corr_data[student_gaze_labels[2]] = downright_perct_list
if (len(downleft_perct_list)) == len(individual_lec_gaze):
corr_data[student_gaze_labels[3]] = downleft_perct_list
if (len(front_perct_list)) == len(individual_lec_gaze):
corr_data[student_gaze_labels[4]] = front_perct_list
if (len(sitting_perct_list)) == len(individual_lec_gaze):
corr_data[lecturer_activity_labels[0]] = sitting_perct_list
if (len(standing_perct_list)) == len(individual_lec_gaze):
corr_data[lecturer_activity_labels[1]] = standing_perct_list
if (len(walking_perct_list)) == len(individual_lec_gaze):
corr_data[lecturer_activity_labels[2]] = walking_perct_list
# corr_data = {'Up and Right': upright_perct_list, 'Up and Left': upleft_perct_list, 'Down and Right': downright_perct_list,
# 'Down and Left': downleft_perct_list, 'Front': front_perct_list,
# 'seated': sitting_perct_list, 'standing': standing_perct_list, 'walking': walking_perct_list}
# create the dataframe
df = pd.DataFrame(corr_data, index=data_index)
print(df)
# calculate the correlation
pd_series = ut.get_top_abs_correlations(df, limit)
print('====correlated variables=====')
print(pd_series)
# assign a new value to the 'limit' variable
limit = len(pd_series) if len(pd_series) < limit else limit
for i in range(limit):
# this dictionary will get the pandas.Series object's indices and values separately
......
This diff is collapsed.
# this method will remove the redundant pairs in pandas dataframe
def get_redundant_pairs(df):
'''Get diagonal and lower triangular pairs of correlation matrix'''
pairs_to_drop = set()
@@ -8,6 +9,7 @@ def get_redundant_pairs(df):
pairs_to_drop.add((cols[i], cols[j]))
return pairs_to_drop
# this method will return the top specified correlations
def get_top_abs_correlations(df, n):
au_corr = df.corr().abs().unstack()
labels_to_drop = get_redundant_pairs(df)
......
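For intuition, this is roughly how the correlation views above use the utility: build a small DataFrame of per-lecture percentages and ask for the strongest absolute pairwise correlations. A sketch with made-up numbers; the import path is an assumption, and since the body of get_top_abs_correlations is truncated here, the exact shape of the returned Series is assumed as well.

import pandas as pd
from FirstApp.logic import utilities as ut  # assumed import path for this module

# toy data: three lectures, two student emotions and one lecturer activity
corr_data = {
    'Happy':  [40, 55, 20],
    'Sad':    [10,  5, 30],
    'seated': [70, 80, 35],
}
df = pd.DataFrame(corr_data, index=['lecture-1', 'lecture-2', 'lecture-3'])

# prints the n largest absolute correlations with redundant/self pairs dropped
top = ut.get_top_abs_correlations(df, 3)
print(top)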
@@ -306,4 +306,9 @@ def get_frame_landmarks(video_name):
# now return the frame landmarks
return frame_landmarks
\ No newline at end of file
# this method will save the lecture video (student)
def save_lecture_student_video():
pass
\ No newline at end of file
@@ -18,8 +18,12 @@ there are two fields inside "Meta" class, as follows.
from rest_framework import serializers
from djongo import models
from .MongoModels import *
from . models import VideoMeta
from .logic import id_generator as ig
# from datetime import datetime as dt
import datetime
# lecture serializer
@@ -190,6 +194,110 @@ class LectureVideoSerializer(serializers.ModelSerializer):
model = LectureVideo
fields = '__all__'
# this method will validate the input data
def to_internal_value(self, data):
lecturer = None
subject = None
lecturer_data = data.get('lecturer')
subject_data = data.get('subject')
# serialize the lecturer data
lecturer = Lecturer.objects.filter(id=lecturer_data)
subject = Subject.objects.filter(id=subject_data)
lecturer_ser_data = LecturerSerializer(lecturer, many=True).data[0]
subject_ser_data = SubjectSerializer(subject, many=True).data[0]
# retrieve the last lecture video details
last_lec_video = LectureVideo.objects.order_by('lecture_video_id').last()
# create the next lecture video id
new_lecture_video_id = ig.generate_new_id(last_lec_video.lecture_video_id)
# if both subject and lecturer details are available
if len(lecturer) == 1 and len(subject) == 1:
str_video_length = data.get('video_length')
video_length_parts = str_video_length.split(':')
video_length = datetime.timedelta(minutes=int(video_length_parts[0]), seconds=int(video_length_parts[1]),
milliseconds=int(video_length_parts[2]))
# this data will be passed as validated data
validated_data = {
'lecture_video_id': new_lecture_video_id,
'lecturer': lecturer_ser_data,
'subject': subject_ser_data,
'date': data.get('date'),
'video_name': data.get('video_name'),
'video_length': video_length
}
return super(LectureVideoSerializer, self).to_internal_value(validated_data)
# this method will override the 'create' method
def create(self, validated_data):
lecturer = None
subject = None
lecturer_data = validated_data.pop('lecturer')
subject_data = validated_data.pop('subject')
# serialize the lecturer data
lecturer = Lecturer.objects.filter(id=lecturer_data)
subject = Subject.objects.filter(id=subject_data)
# retrieve the last lecture video details
last_lec_video = LectureVideo.objects.order_by('lecture_video_id').last()
# create the next lecture video id
new_lecture_video_id = ig.generate_new_id(last_lec_video.lecture_video_id)
# if both subject and lecturer details are available
if len(lecturer) == 1 and len(subject) == 1:
str_video_length = validated_data.pop('video_length')
video_length_parts = str_video_length.split(':')
video_length = datetime.timedelta(minutes=int(video_length_parts[0]), seconds=int(video_length_parts[1]), milliseconds=int(video_length_parts[2]))
lecture_video, created = LectureVideo.objects.update_or_create(
lecture_video_id=new_lecture_video_id,
lecturer=lecturer[0],
subject=subject[0],
date=validated_data.pop('date'),
video_name=validated_data.pop('video_name'),
video_length=video_length
)
# faculty_data = validated_data.pop('faculty')
# serialized_faculty = FacultySerializer(data=faculty_data)
#
# if (serialized_faculty.is_valid()):
# # faculty, faculty_created = Faculty.objects.get_or_create(defaults={}, faculty_id=serialized_faculty.data['faculty_id'])
# faculty = Faculty.objects.filter(faculty_id=serialized_faculty.data['faculty_id'])
#
# if (len(faculty) == 1):
# lecturer, created = Lecturer.objects.update_or_create(
# faculty=faculty[0],
# lecturer_id=validated_data.pop('lecturer_id'),
# fname=validated_data.pop('fname'),
# lname=validated_data.pop('lname'),
# email=validated_data.pop('email'),
# telephone=validated_data('telephone')
# )
#
# return lecturer
#
return lecture_video
return None
# lecture video time landmarks serializer
class LectureVideoTimeLandmarksSerializer(serializers.ModelSerializer):
......
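Both to_internal_value and create above parse video_length from a "minutes:seconds:milliseconds" string into a timedelta. A quick check of that conversion (the sample value is made up):

import datetime

str_video_length = '05:23:450'          # assumed format: minutes:seconds:milliseconds
parts = str_video_length.split(':')
video_length = datetime.timedelta(minutes=int(parts[0]),
                                  seconds=int(parts[1]),
                                  milliseconds=int(parts[2]))
print(video_length)                     # 0:05:23.450000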
This diff is collapsed.
@@ -241,7 +241,13 @@
//to handle the 'integrate' modal
$('#integrate_activity').click(function () {
//define the student video src
{#global_video_name = "Video_test_9.mp4";#}
{#global_video_name = "Video_test_9_annotated.mp4";#}
let video_src = "{% static '' %}FirstApp/videos/" + global_video_name;
{#let video_src = "{% static '' %}FirstApp/video/" + global_video_name;#}
{#let video_src = "{% static '' %}/FirstApp/activity/" + global_video_name;#}
{#let video_src = "{% static '' %}FirstApp/emotion/" + global_video_name;#}
//assign the video src
$('#student_video').attr('src', video_src);
@@ -1078,6 +1084,11 @@
type="video/mp4">
Your browser does not support the video tag.
</video>
{# <video width="500" height="300" id="student_video" controls>#}
{# <source src="{% static 'FirstApp/videos/Video_test_2.mp4' %}"#}
{# type="video/mp4">#}
{# Your browser does not support the video tag.#}
{# </video>#}
</div>
<!--end of student video section -->
......
@@ -74,6 +74,8 @@
real_class = '.' + real_class;
let date = e.target.parentNode.parentNode.firstChild.innerHTML;
//assign the date
global_lecture_date = date;
fetch('http://127.0.0.1:8000/get-lecture-emotion-availability/?lecturer=' + global_lecturer + '&date=' + date + '&index=' + global_lecturer_subject_index)
.then((res) => res.json())
@@ -143,7 +145,8 @@
$('#video_name').text(video.video_name);
$('#video_duration').text(video.video_length);
$('#video_date').text(video.date);
global_lecture_video_id = video.lecture_video_id;
{#global_lecture_video_id = video.lecture_video_id;#}
global_lecture_video_id = video.id;
global_video_name = video.video_name;
@@ -241,32 +244,67 @@
//define the student video src
let video_src = "{% static '' %}FirstApp/videos/" + global_video_name;
//assign the video src
$('#student_video').attr('src', video_src);
{#fetch('http://127.0.0.1:8000/get-random-number')#}
{#.then((res) => res.json())#}
{#.then((out) => alert(out.response))#}
{#.catch((err) => alert('err: ' + err));#}
//fetch the lecture recorded video name
fetch('http://127.0.0.1:8000/get-lecture-recorded-video-name/?lecturer=' + global_lecturer + '&subject=' + global_subject + '&date=' + global_lecture_date)
.then((res) => res.json())
.then((out) => assignLecturerRecordedVideoName(out))
.catch((err) => alert('error: ' + err));
{#global_lecturer_video_name = "Test_1.mp4";#} {#global_lecturer_video_name = "Test_1.mp4";#}
{#global_lecturer_video_name = "Test_2.mp4";#} {#global_lecturer_video_name = "Test_2.mp4";#}
global_lecturer_video_name = "Test_3.mp4"; {#global_lecturer_video_name = "Test_3.mp4";#}
{#global_lecturer_video_name = "Lecturer_Video_4.mp4";#}
{##}
{#//define the lecturer video src#}
{#let lecturer_video_src = "{% static '' %}FirstApp/lecturer_videos/" + global_lecturer_video_name;#}
{##}
{##}
{##}
{#//assign the video src#}
{#$('#lecturer_video').attr('src', lecturer_video_src);#}
{##}
{#$('#integrate_modal').modal();#}
{#//fetch data from the API#}
{#fetch('http://127.0.0.1:8000/get-lecture-emotion-for-frame?video_name=' + global_video_name)#}
{# .then((res) => res.json())#}
{# .then((out) => displayEmotionRecognitionForFrame(out.response))#}
{# .catch((err) => alert('error: ' + err));#}
});
//assign the lecturer recorded video name
function assignLecturerRecordedVideoName(res) {
global_lecturer_video_name = res.video_name;
//define the lecturer video src
let lecturer_video_src = "{% static '' %}FirstApp/lecturer_videos/" + global_lecturer_video_name;
//assign the video src
$('#student_video').attr('src', video_src);
//assign the video src
$('#lecturer_video').attr('src', lecturer_video_src);
$('#integrate_modal').modal();
//fetch data from the API
fetch('http://127.0.0.1:8000/get-lecture-emotion-for-frame?video_name=' + global_video_name)
.then((res) => res.json())
.then((out) => displayEmotionRecognitionForFrame(out.response))
.catch((err) => alert('error: ' + err));
}
});
//this function will display the emotion percentages for each frame
@@ -338,7 +376,7 @@
fetch('http://127.0.0.1:8000/lecturer/get-lecturer-video-frame-recognitions/?video_name=' + global_lecturer_video_name)
.then((res) => res.json())
.then((out) => displayLecturerEmotionRecognitionForFrame(out))
.catch((err) => alert('error: ' + err));
}
......
@@ -74,6 +74,8 @@
real_class = '.' + real_class;
let date = e.target.parentNode.parentNode.firstChild.innerHTML;
//assign the date
global_lecture_date = date;
fetch('http://127.0.0.1:8000/get-lecture-video-gaze-estimation-availability/?lecturer=' + global_lecturer + '&date=' + date + '&index=' + global_lecturer_subject_index)
.then((res) => res.json())
@@ -142,7 +144,8 @@
$('#video_name').text(video.video_name);
$('#video_duration').text(video.video_length);
$('#video_date').text(video.date);
global_lecture_video_id = video.lecture_video_id;
{#global_lecture_video_id = video.lecture_video_id;#}
global_lecture_video_id = video.id;
global_video_name = video.video_name;
@@ -239,21 +242,28 @@
//define the student video src
let video_src = "{% static '' %}FirstApp/videos/" + global_video_name;
{#global_lecturer_video_name = "Test_1.mp4";#}
{#global_lecturer_video_name = "Test_2.mp4";#}
global_lecturer_video_name = "Test_3.mp4";
//define the lecturer video src
let lecturer_video_src = "{% static '' %}FirstApp/lecturer_videos/" + global_lecturer_video_name;
//assign the video src
$('#student_video').attr('src', video_src);
//assign the video src
$('#lecturer_video').attr('src', lecturer_video_src);
//fetch the lecture recorded video name
fetch('http://127.0.0.1:8000/get-lecture-recorded-video-name/?lecturer=' + global_lecturer + '&subject=' + global_subject + '&date=' + global_lecture_date)
.then((res) => res.json())
.then((out) => assignLecturerRecordedVideoName(out))
.catch((err) => alert('error: ' + err));
{#global_lecturer_video_name = "Test_1.mp4";#}
{#global_lecturer_video_name = "Test_2.mp4";#}
{#global_lecturer_video_name = "Test_3.mp4";#}
{#global_lecturer_video_name = "Lecturer_Video_4.mp4";#}
{##}
{#//define the lecturer video src#}
{#let lecturer_video_src = "{% static '' %}FirstApp/lecturer_videos/" + global_lecturer_video_name;#}
{##}
{#//assign the video src#}
{#$('#lecturer_video').attr('src', lecturer_video_src);#}
{##}
{#$('#integrate_modal').modal();#}
$('#integrate_modal').modal();
//fetch data from the API
fetch('http://127.0.0.1:8000/get-lecture-gaze-estimation-for-frame/?video_name=' + global_video_name)
@@ -264,6 +274,23 @@
});
//assign the lecturer recorded video name
function assignLecturerRecordedVideoName(res) {
global_lecturer_video_name = res.video_name;
//define the lecturer video src
let lecturer_video_src = "{% static '' %}FirstApp/lecturer_videos/" + global_lecturer_video_name;
alert('hello');
//assign the video src
$('#lecturer_video').attr('src', lecturer_video_src);
$('#integrate_modal').modal();
}
//this function will load the gaze estimation for frames
function displayGazeEstimationForFrame(response) {
......
This diff is collapsed.
@@ -151,21 +151,30 @@ urlpatterns = [
# retrieves lecture activity summary
url(r'^get-lecture-activity-summary/$', api.GetLectureActivitySummary.as_view()),
# retrieves lecture emotion summary
url(r'^get-lecture-emotion-summary/$', api.GetLectureEmotionSummary.as_view()),
# retrieves lecture gaze estimation summary
url(r'^get-lecture-gaze-summary/$', api.GetLectureGazeSummary.as_view()),
# retrieves student activity correlations with lecturer activity
url(r'^get-activity-correlations/$', api.GetLectureActivityCorrelations.as_view()),
# retrieves student emotion correlations with lecturer activity
url(r'^get-emotion-correlations/$', api.GetLectureEmotionCorrelations.as_view()),
# retrieves student gaze estimation correlations with lecturer activity
url(r'^get-gaze-correlations/$', api.GetLectureGazeCorrelations.as_view()),
# retrieves student activity-emotion correlations
url(r'^get-student-activity-emotion-correlations/$', api.GetStudentActivityEmotionCorrelations.as_view()),
# retrieves student activity-gaze correlations
url(r'^get-student-activity-gaze-correlations/$', api.GetStudentActivityGazeCorrelations.as_view()),
# retrieves student emotion-gaze correlations
url(r'^get-student-emotion-gaze-correlations/$', api.GetStudentEmotionGazeCorrelations.as_view()),
##### OTHERS #####
@@ -173,6 +182,19 @@ urlpatterns = [
url(r'^get-lecture-recorded-video-name/$', api.GetLecturerRecordedVideo.as_view()),
##### BATCH PROCESS #####
# perform batch process for student behavior
url(r'^student-behavior-batch-process/$', api.BatchProcess.as_view()),
# check availability for student behavior components
url(r'^check-availability/$', api.CheckStudentBehaviorAvailability.as_view()),
# perform random task (delete later)
url(r'^get-random-number/$', api.TestRandom.as_view()),
# routers
# path('', include(router.urls)),
path('api-auth/', include('rest_framework.urls', namespace='rest_framework'))
......
@@ -189,6 +189,8 @@ def video_result(request):
data = serializer.data
print('data length: ', len(data))
# iterate through the existing lecture videos for the lecturer
for video in data:
video_id = video['id']
@@ -197,6 +199,8 @@ def video_result(request):
# check whether the video id exist in the Activity Recognition table
lec_activity = LectureActivity.objects.filter(lecture_video_id_id=video_id).exists()
print('lecture activity existence: ', lec_activity)
if lec_activity == False:
to_do_lecture_list.append({
"lecturer": lecturer,
@@ -227,11 +231,15 @@ def video_result(request):
# loop through the to-do lecture list
for item in to_do_lecture_list:
isDate = item['date'] == str(day_timetable['date'])
print('item date: ', item['date'])
print('timetable date: ', str(day_timetable['date']))
# isLecturer = item['lecturer'] ==
# check for the particular lecture on the day
if isDate:
slots = day_timetable['time_slots']
# loop through the slots
for slot in slots:
# check for the lecturer and subject
@@ -260,6 +268,8 @@ def video_result(request):
print('what is wrong?: ', exc)
return redirect('/500')
print('due lectures: ', due_lecture_list)
return render(request, "FirstApp/video_results.html", return render(request, "FirstApp/video_results.html",
{"lecturer": lecturer, "due_lectures": due_lecture_list}) {"lecturer": lecturer, "due_lectures": due_lecture_list})
......
@@ -15,20 +15,18 @@ import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECRET_KEY = 'typgn#m(t#byxnp#ut@^gfqyh*1doa28gkqu(ap*k4s5!q&oyo' #original one
SECRET_KEY = '!3-gwi-1#5-4**85xb#z(t-8#ayc#*gguw4v4+fkax4037sp=)' # exported one
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
@@ -36,6 +34,7 @@ INSTALLED_APPS = [
'AttendanceApp.apps.AttendanceappConfig',
'MonitorLecturerApp.apps.MonitorlecturerappConfig',
'LectureSummarizingApp.apps.LectureSummarizingAppConfig',
'corsheaders',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
@@ -48,6 +47,7 @@ INSTALLED_APPS = [
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
@@ -59,6 +59,9 @@ MIDDLEWARE = [
ROOT_URLCONF = 'integrated_slpes.urls'
# adding the CORS attributes
CORS_ALLOW_ALL_ORIGINS = True
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
@@ -78,7 +81,6 @@ TEMPLATES = [
WSGI_APPLICATION = 'integrated_slpes.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
@@ -93,7 +95,6 @@ DATABASES = {
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
@@ -112,7 +113,6 @@ AUTH_PASSWORD_VALIDATORS = [
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
@@ -126,7 +126,6 @@ USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
@@ -137,7 +136,6 @@ STATICFILES_DIRS = [
]
STATIC_ROOT = os.path.join(BASE_DIR, 'assets')
# media files
MEDIA_URL = '/media/'
@@ -145,7 +143,9 @@ MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# REST FRAMEWORK
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated',
]
}
\ No newline at end of file
# 'DEFAULT_PERMISSION_CLASSES': [
#     'rest_framework.permissions.IsAuthenticated',
# ]
'DEFAULT_AUTHENTICATION_CLASSES': [],
'DEFAULT_PERMISSION_CLASSES': []
}
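Commenting out IsAuthenticated and leaving DEFAULT_PERMISSION_CLASSES empty opens every DRF endpoint to anonymous requests. If open access is only needed for a few views, an alternative (a sketch only, not what this commit does) is to keep the global default and relax permissions per view:

from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.views import APIView

class InitiateLecture(APIView):
    # hypothetical override: open just this endpoint while the global default stays IsAuthenticated
    permission_classes = [AllowAny]

    def get(self, request):
        return Response({"response": "success"})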