Commit bdfebe71 authored by I.K Seneviratne

Committing the removal of unnecessary files and code segments, and code formatting in some of the main files in the logic folder.
parent 2eab3f22
import cv2
import os
import re
import base64
import shutil
def saveImage(response):
    dataUrlPattern = re.compile('data:image/(png|jpeg);base64,(.*)$')
    base_path = os.path.join(os.path.abspath(__file__))
    root_dir = os.path.dirname(os.path.dirname(base_path))
    new_dir_name = "static\\FirstApp\\images\\{}".format(response["imageName"])
    new_dir = os.path.join(root_dir, new_dir_name)

    # delete the previous directory, if it exists
    if os.path.isdir(new_dir):
        shutil.rmtree(new_dir)

    # create the new directory
    os.mkdir(new_dir)

    count = 0

    for url in response["ImageURLS"]:
        url = dataUrlPattern.match(url).group(2)
        encoded = url.encode()
        image = base64.b64decode(encoded)
        imageName = response["imageName"] + '_img_' + format(count) + '.png'
        new_file = os.path.join(new_dir, imageName)
        count += 1

        # saving the images (method 1)
        with open(new_file, "wb") as f:
            f.write(image)

    # respond 'yes' to the command line prompt
    p = os.popen('python manage.py collectstatic', "w")
    p.write("yes")
@@ -13,22 +13,16 @@ arbitrary media types.
"""
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from MonitorLecturerApp.models import LectureRecordedVideo, LecturerVideoMetaData
from MonitorLecturerApp.serializers import LectureRecordedVideoSerializer, LecturerVideoMetaDataSerializer
from .MongoModels import *
from rest_framework.views import *
from .logic import activity_recognition as ar
from .logic import posenet_calculation as pc
from . import emotion_detector as ed
from .logic import id_generator as ig
from .logic import pdf_file_generator as pdf
from .logic import head_gaze_estimation as hge
from .logic import video_extraction as ve
from .models import Teachers, Video, VideoMeta, RegisterUser
from .MongoModels import *
from .serializers import *
import datetime
@@ -240,16 +234,11 @@ class GetLectureActivityViewSet(APIView):

    def get(self, request):
        lecture_video_id = request.query_params.get('lecture_video_id')
        lecture_video_name = request.query_params.get('lecture_video_name')

        # retrieve the extracted frames
        extracted = ar.getExtractedFrames(lecture_video_name)

        lecture_activities = LectureActivity.objects.filter(lecture_video_id__lecture_video_id=lecture_video_id)
        serializer = LectureActivitySerializer(lecture_activities, many=True)

        return Response({
            "response": serializer.data,
            "extracted": extracted
        })
@@ -296,60 +285,6 @@ class LectureActivityProcess(APIView):

        ar.save_frame_groupings(video_name, frame_landmarks, frame_group_dict)
class GetLectureActivityDetections(APIView):

    def get(self, request):
        video_name = request.query_params.get('video_name')
        frame_name = request.query_params.get('frame_name')
        detections = ar.get_detections(video_name, frame_name)

        return Response({
            "detections": detections
        })


# the API class for getting student detections for a label
class GetLectureActvityDetectionsForLabel(APIView):

    def get(self, request):
        video_name = request.query_params.get('video_name')
        label = request.query_params.get('label')
        labelled_detections, detected_people = ar.get_detections_for_label(video_name, label)

        return Response({
            "response": labelled_detections,
            "people": detected_people
        })


# the API class for getting students activity evaluations
class GetLectureActivityStudentEvaluation(APIView):

    def get(self, request):
        video_name = request.query_params.get('video_name')
        labelled_detections, detected_people = ar.get_student_activity_evaluation(video_name)

        return Response({
            "response": labelled_detections,
            "people": detected_people
        })


# the API class to retrieve individual student evaluation (activity)
class GetLectureActivityIndividualStudentEvaluation(APIView):

    def get(self, request):
        video_name = request.query_params.get('video_name')
        student_name = request.query_params.get('student_name')
        meta_data = ar.get_individual_student_evaluation(video_name, student_name)

        return Response({
            "response": meta_data
        })
# API to retrieve activity detections for frames
class GetLectureActivityRecognitionsForFrames(APIView):
@@ -488,45 +423,16 @@ class GetLectureEmotionReportViewSet(APIView):

    def get(self, request):
        lecture_video_id = request.query_params.get('lecture_video_id')
        lecture_video_name = request.query_params.get('lecture_video_name')

        lecture_emotions = LectureEmotionReport.objects.filter(lecture_video_id__lecture_video_id=lecture_video_id)
        serializer = LectureEmotionSerializer(lecture_emotions, many=True)

        print(len(serializer.data))

        return Response({
            "response": serializer.data,
        })
# the API class for getting students activity evaluations (emotions)
class GetLectureEmotionStudentEvaluations(APIView):

    def get(self, request):
        video_name = request.query_params.get('video_name')
        labelled_detections, detected_people = ed.get_student_emotion_evaluations(video_name)

        return Response({
            "response": labelled_detections,
            "people": detected_people
        })


# the API class to retrieve individual student evaluation (emotion)
class GetLectureEmotionIndividualStudentEvaluation(APIView):

    def get(self, request):
        video_name = request.query_params.get('video_name')
        student_name = request.query_params.get('student_name')
        meta_data = ed.get_individual_student_evaluation(video_name, student_name)
        serialized = VideoMetaSerializer(meta_data)

        return Response({
            "response": serialized.data
        })
# API to retrieve emotion detections for frames
class GetLectureEmotionRecognitionsForFrames(APIView):
@@ -559,73 +465,6 @@ class GetLectureEmotionRecognitionsForFrames(APIView):

        })
##### POSE #####
class GetLectureVideoForPose(APIView):

    def get(self, request):
        lecturer = request.query_params.get('lecturer')
        date = request.query_params.get('date')
        index = int(request.query_params.get('index'))
        lecturer_video = LectureVideo.objects.filter(lecturer_id=lecturer, date=date)
        serializer = LectureVideoSerializer(lecturer_video, many=True)

        return Response({
            "response": serializer.data[index]
        })


# API to retrieve one lecture activity
class GetLectureVideoExtractedFrames(APIView):

    def get(self, request):
        lecture_video_id = request.query_params.get('lecture_video_id')
        lecture_video_name = request.query_params.get('lecture_video_name')

        # retrieve the extracted frames
        extracted = ar.getExtractedFrames(lecture_video_name)

        # lecture_activities = LectureActivity.objects.filter(lecture_video_id__lecture_video_id=lecture_video_id)
        # serializer = LectureActivitySerializer(lecture_activities, many=True)

        return Response({
            # "response": serializer.data,
            "extracted": extracted
        })


# API to retrieve individual student detections
class GetLectureVideoIndividualStudentFrames(APIView):

    def get(self, request):
        video_name = request.query_params.get('video_name')
        labelled_detections, detected_people = pc.get_pose_estimations(video_name)

        return Response({
            "response": labelled_detections,
            "people": detected_people
        })


# API to process pose estimation for an individual student
class ProcessIndividualStudentPoseEstimation(APIView):
    authentication_classes = [BasicAuthentication]
    permission_classes = [IsAuthenticated, IsAdminUser]

    def get(self):
        pass

    # POST method
    def post(self, request):
        video_name = request.data['video_name']
        student = request.data['student']
        poses = request.data['poses']
        pc.calculate_pose_estimation_for_student(video_name, student, poses)

        return Response({
            "response": video_name
        })
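
For orientation, a hedged client-side sketch of how the removed ProcessIndividualStudentPoseEstimation endpoint could have been called; the host, credentials, route prefix, and payload values are assumptions, not taken from the commit:

import requests

# Hypothetical request; BasicAuthentication with an admin account is required
# by the view's permission classes, and the route matches the pattern removed
# from urls.py further below.
resp = requests.post(
    "http://localhost:8000/process-lecture-video-individual-pose-estimation",
    auth=("admin", "password"),      # placeholder credentials
    json={
        "video_name": "lecture_01",  # placeholder video name
        "student": "student_1.png",  # placeholder detection image name
        "poses": [],                 # PoseNet keypoint dicts would go here
    },
)
print(resp.json())                   # {"response": "lecture_01"}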
##### GAZE ESTIMATION SECTION #####
class GetLectureGazeEstimationAvailaibility(APIView):
@@ -702,8 +541,6 @@ class GetLectureGazeEstimationViewSet(APIView):

    def get(self, request):
        lecture_video_id = request.query_params.get('lecture_video_id')
        lecture_video_name = request.query_params.get('lecture_video_name')
        lecture_gaze_estimations = LectureGazeEstimation.objects.filter(
            lecture_video_id__lecture_video_id=lecture_video_id)
        serializer = LectureGazeEstimationSerializer(lecture_gaze_estimations, many=True)
@@ -1251,6 +1088,7 @@ class GetLectureGazeSummary(APIView):

# =====OTHERS=====

# this API will retrieve the respective lecturer video name
class GetLecturerRecordedVideo(APIView):

    def get(self, request):
@@ -1263,10 +1101,12 @@ class GetLecturerRecordedVideo(APIView):
        lec_recorded_video_ser = LectureRecordedVideoSerializer(lec_recorded_video, many=True)
        lec_recorded_video_data = lec_recorded_video_ser.data[0]

        # extract the lecturer video name
        video_name = lec_recorded_video_data['lecture_video_name']
        print('lecturer recorded video name: ', video_name)

        # return the response
        return Response({
            "video_name": video_name
        })
@@ -1276,16 +1116,22 @@ class GetLecturerRecordedVideo(APIView):
class GetLectureActivityCorrelations(APIView):

    def get(self, request):

        # this variable defines the number of dates to be considered for activity correlations
        option = request.query_params.get('option')

        # the lecturer id
        lecturer = request.query_params.get('lecturer')

        int_option = int(option)

        # get the current date
        current_date = datetime.datetime.now().date()
        option_date = datetime.timedelta(days=int_option)

        # subtract the current date by the time period given
        previous_date = current_date - option_date

        # this list contains the student activities for each lecture
        individual_lec_activities = []

        # this list will contain the student activity-lecturer posture activity correlations
        activity_correlations = []

        # retrieving lecture activities
...
import os
import cv2
import numpy as np
import shutil
from .facial_landmarks import get2DPoints
from .classes import pose
# Read Image
def estimatePose(request):
    directory = request['directory']
    images = request['images']

    BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    IMAGE_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\images")
    SPEC_DIR = os.path.join(IMAGE_DIR, "{}".format(directory))

    # new directory will be created to store pose estimations
    new_dir_name = "static\\FirstApp\\poses\\{}".format(directory)
    new_dir = os.path.join(BASE_DIR, new_dir_name)

    face_count_response = 0
    pose_response_list = []

    # delete the previous directory, if it exists
    if os.path.isdir(new_dir):
        shutil.rmtree(new_dir)

    # create the new directory
    os.mkdir(new_dir)

    for im in images:
        IMAGE_PATH = os.path.join(SPEC_DIR, "{}".format(im))
        image = cv2.imread(IMAGE_PATH)
        size = image.shape

        left_corner, right_corner, nose_tip, right_mouth, left_mouth, chin, face_center_top, face_center_bottom, face_count = get2DPoints(image)

        # if faces are found
        if left_corner is not None:

            # 3D model points.
            model_points = np.array([
                (0.0, 0.0, 0.0),          # Nose tip
                (0.0, -330.0, -65.0),     # Chin
                (-225.0, 170.0, -135.0),  # Left eye left corner
                (225.0, 170.0, -135.0),   # Right eye right corner
                (-150.0, -150.0, -125.0), # Left mouth corner
                (150.0, -150.0, -125.0)   # Right mouth corner
            ])

            # Camera internals
            focal_length = size[1]
            center = (size[1] / 2, size[0] / 2)
            camera_matrix = np.array(
                [[focal_length, 0, center[0]],
                 [0, focal_length, center[1]],
                 [0, 0, 1]], dtype="double"
            )
            # print("Camera Matrix :\n {0}".format(camera_matrix))

            for i in range(face_count):
                text = ''

                # 2D image points. If you change the image, you need to change this vector
                image_points = np.array([
                    nose_tip[i],
                    chin[i],
                    left_corner[i],
                    right_corner[i],
                    left_mouth[i],
                    right_mouth[i]
                ], dtype="double")

                dist_coeffs = np.zeros((4, 1))  # Assuming no lens distortion
                (success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs,
                                                                              flags=cv2.SOLVEPNP_ITERATIVE)

                # Project a 3D point (0, 0, 1000.0) onto the image plane.
                # We use this to draw a line sticking out of the nose
                (nose_end_point2D, jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector, translation_vector,
                                                                 camera_matrix, dist_coeffs)

                # for p in image_points:
                #     cv2.circle(im, (int(p[0]), int(p[1])), 3, (0, 0, 255), -1)

                p1 = (int(image_points[0][0]), int(image_points[0][1]))
                p2 = (int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))

                # label the gaze direction by comparing the nose line end point with the face centre
                if p2[0] < face_center_top[i][0]:
                    text = 'RIGHT'
                else:
                    text = 'LEFT'

                cv2.putText(image, text, p2, cv2.FONT_HERSHEY_COMPLEX, 0.8, (0, 0, 255), 2)
                cv2.line(image, p1, p2, (255, 0, 0), 2)

                # saving the image
                new_file = os.path.join(new_dir, im)
                cv2.imwrite(new_file, image)

                face_count_response += 1

                # create a response object for the image
                pose_response = {}
                pose_response["directory"] = directory
                pose_response["image"] = im
                pose_response["label"] = text

                pose_response_list.append(pose_response)

        else:
            print('No faces found')

    # respond 'yes' to the command line prompt
    p = os.popen('python manage.py collectstatic', "w")
    p.write("yes")

    # returning the static path
    STATIC_POSE = os.path.join(BASE_DIR, "assets\\FirstApp\\pose")
    STATIC_SPEC = os.path.join(STATIC_POSE, "{}".format(directory))

    # if no images were created
    if face_count_response < 1:
        shutil.rmtree(new_dir)
        return "No faces were found"

    return pose_response_list
\ No newline at end of file
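
For context, a minimal sketch (not part of the commit) of how estimatePose above might be driven; the directory and image names are placeholders and assume the frames already exist under assets\FirstApp\images:

# Hypothetical input for estimatePose; values are placeholders.
sample_request = {
    "directory": "lecture_01",
    "images": ["frame-0.png", "frame-1.png"],
}

# Each successful detection would append an entry of the form
# {"directory": "lecture_01", "image": "frame-0.png", "label": "LEFT"}
# to the returned list.
# estimatePose(sample_request)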
import tensorflow as tf
import tensorflow.keras
from PIL import Image, ImageOps
import numpy as np
import cv2
import os
import math
import shutil
from . import custom_sorter as cs
def get_pose_estimations(video_name):

    BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    # face_classifier = cv2.CascadeClassifier(
    #     os.path.join(BASE_DIR, 'FirstApp\classifiers\haarcascade_frontalface_default.xml'))
    # classifier_path = os.path.join(BASE_DIR, 'FirstApp\classifiers\Emotion_little_vgg.h5')
    # classifier = load_model(classifier_path)

    EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
    class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']

    detections = []
    frames = []

    for frame_folder in os.listdir(EXTRACTED_DIR):

        FRAME_DIR = os.path.join(EXTRACTED_DIR, frame_folder)
        frame_details = {}
        frame_details['frame'] = frame_folder

        # for each detection in the frame directory
        detected_images = []

        for detection in os.listdir(FRAME_DIR):
            if "frame" not in detection:
                DETECTION_PATH = os.path.join(FRAME_DIR, detection)
                image = cv2.imread(DETECTION_PATH)
                # label = emotion_recognition(classifier, face_classifier, image)

                detected_images.append(detection)
                detections.append(detection)

        frame_details['detections'] = detected_images
        frames.append(frame_details)

    sorted_frames = cs.custom_object_sorter(frames)
    set_detections = set(detections)
    list_set_detections = list(set_detections)
    sorted_list_set_detections = cs.custom_sort(list_set_detections)

    return sorted_frames, sorted_list_set_detections
# calculate pose estimations for a student
def calculate_pose_estimation_for_student(video_name, student, poses):

    BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
    POSE_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\poses")
    POSE_VIDEO_DIR = os.path.join(POSE_DIR, video_name)

    pose_count = 0

    # checking whether the pose directory exists
    if os.path.isdir(POSE_VIDEO_DIR) == False:
        # create the pose directory
        os.mkdir(POSE_VIDEO_DIR)

    # loop through each frame of the directory
    for frame in os.listdir(VIDEO_DIR):

        FRAME_FOLDER = os.path.join(VIDEO_DIR, frame)

        for detection in os.listdir(FRAME_FOLDER):

            DETECTION_PATH = os.path.join(FRAME_FOLDER, detection)
            # detection image
            detection_img = cv2.imread(DETECTION_PATH)

            # checking for the given student
            if detection == student:

                # select the correct pose detection
                pose = poses[pose_count]

                # extract the shoulder keypoint coordinates
                x1 = int(pose['keypoints'][5]['position']['x'])
                y1 = int(pose['keypoints'][5]['position']['y'])
                x2 = int(pose['keypoints'][6]['position']['x'])
                y2 = int(pose['keypoints'][6]['position']['y'])

                # calculate the distance between the two keypoints
                x_diff = x1 - x2
                y_diff = y1 - y2
                x_pow = math.pow(x_diff, 2)
                y_pow = math.pow(y_diff, 2)
                summation = x_pow + y_pow
                distance = int(math.sqrt(summation))

                # defining the hyperparameter
                param = 0.6
                fraction = int(math.floor(distance * param)) if int(math.floor(distance * param)) > 0 else 1

                # derive the head region from the keypoints
                middle_x = x2 + fraction
                # middle_y = y2 - 20
                middle_y = y2
                head_x = middle_x
                head_y = 0 if (middle_y - fraction) < 0 else (middle_y - fraction)
                left_upper_x = 0 if (middle_x - fraction) < 0 else (middle_x - fraction)

                # extract the new image
                new_img = detection_img[head_y:head_y + fraction, left_upper_x:left_upper_x + distance]

                # new directory name
                # new_img_dir = os.path.join(POSE_VIDEO_DIR, frame)
                new_img_dir = os.path.join(POSE_VIDEO_DIR, detection)

                # check if the directory exists
                if os.path.isdir(new_img_dir) == False:
                    # create the new directory
                    os.mkdir(new_img_dir)

                # create new image name
                frame_name = frame + ".png"
                new_img_path = os.path.join(new_img_dir, frame_name)

                # saving the new image
                cv2.imwrite(new_img_path, new_img)

                # increment the count
                pose_count += 1

                print('saving the image')
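
A small worked example of the head-region arithmetic above; the shoulder keypoint values are made up:

import math

# made-up PoseNet shoulder keypoints (indices 5 and 6)
x1, y1 = 120, 200
x2, y2 = 80, 200

distance = int(math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2))  # 40
param = 0.6
fraction = max(int(math.floor(distance * param)), 1)        # 24

middle_x, middle_y = x2 + fraction, y2                      # 104, 200
head_y = max(middle_y - fraction, 0)                        # 176
left_upper_x = max(middle_x - fraction, 0)                  # 80

# the crop taken from the detection image would therefore be
# detection_img[176:200, 80:120]
print(head_y, head_y + fraction, left_upper_x, left_upper_x + distance)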
@@ -101,26 +101,14 @@ urlpatterns = [

    # lecture activity API (to retrieve a lecture activity)
    url(r'^process-lecture-activity/$', api.LectureActivityProcess.as_view()),

    # lecture activity detection API (to retrieve detections for a given lecture activity frame)
    url(r'^get-lecture-activity-frame-detection/$', api.GetLectureActivityDetections.as_view()),

    # lecture activity detection for label API (to retrieve detections for a certain label)
    url(r'^get-lecture-activity-detection-for-label/$', api.GetLectureActvityDetectionsForLabel.as_view()),

    # lecture activity student evaluation API (to retrieve student evaluations)
    url(r'^get-lecture-activity-student-evaluation/$', api.GetLectureActivityStudentEvaluation.as_view()),

    # lecture activity detection for frames API (to retrieve detections for each frame in lecture video)
    url(r'^get-lecture-activity-for-frame/$', api.GetLectureActivityRecognitionsForFrames.as_view()),

    # lecture activity evaluation for individual students
    url(r'^get-lecture-activity-individual-student-evaluation/$',
        api.GetLectureActivityIndividualStudentEvaluation.as_view()),

    # lecture activity report generation
    url(r'^lecture-activity-report-generation/$',
        api.GenerateActivityReport.as_view()),

    ###### EMOTION Section #####
    # getting lecture emotion record availability
    url(r'^get-lecture-emotion-availability/$', api.GetLectureEmotionAvailability.as_view()),
@@ -131,30 +119,10 @@ urlpatterns = [

    # process a lecture emotion record
    url(r'^process-lecture-emotion/$', api.LectureEmotionProcess.as_view()),

    # lecture emotion evaluation for students
    url(r'^get-lecture-emotion-student-evaluation/$', api.GetLectureEmotionStudentEvaluations.as_view()),

    # lecture emotion evaluation for an individual student
    url(r'^get-lecture-emotion-individual-student-evaluation/$',
        api.GetLectureEmotionIndividualStudentEvaluation.as_view()),

    # lecture emotion detection for frames API (to retrieve detections for each frame in lecture video)
    url(r'^get-lecture-emotion-for-frame/$', api.GetLectureEmotionRecognitionsForFrames.as_view()),

    ###### POSE Section #####
    # lecture video API (for Pose estimation)
    url(r'^get-lecture-video-for-pose/$', api.GetLectureVideoForPose.as_view()),

    # lecture video extracted frames API (for Pose estimation)
    url(r'^get-lecture-video-extracted-frames/$', api.GetLectureVideoExtractedFrames.as_view()),

    # lecture video individual student extracted frames API (for Pose estimation)
    url(r'^get-lecture-video-individual-student-frames/$', api.GetLectureVideoIndividualStudentFrames.as_view()),

    # lecture video individual student process pose estimation API (for Pose estimation)
    url(r'^process-lecture-video-individual-pose-estimation', api.ProcessIndividualStudentPoseEstimation.as_view()),

    ##### GAZE Section #####
    # lecture video Gaze estimation
...
@@ -29,20 +29,8 @@ from django.contrib.auth import (
    logout,
)
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import viewsets
from . models import Teachers, Video, VideoMeta, RegisterUser
from . MongoModels import *
from . serializers import *
from . emotion_detector import detect_emotion
from . ImageOperations import saveImage
from . logic import head_pose_estimation
from . logic import video_extraction
from . forms import *
import cv2
import os
from datetime import datetime
...