Commit 2dfede4d authored by SohanDanushka's avatar SohanDanushka

Merge remote-tracking branch 'origin/QA_RELEASE' into db_and_monitoring_IT17097284

parents 16102ea7 dc8d49fd
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
from imutils.video import VideoStream
import imutils
import cv2,os,urllib.request
import numpy as np
from django.conf import settings
from time import sleep
import random
face_detection_videocam = cv2.CascadeClassifier(os.path.join(
settings.BASE_DIR,'opencv_haarcascade_data/haarcascade_frontalface_default.xml'))
face_detection_webcam = cv2.CascadeClassifier(os.path.join(
settings.BASE_DIR,'opencv_haarcascade_data/haarcascade_frontalface_default.xml'))
# load our serialized face detector model from disk
prototxtPath = os.path.sep.join([settings.BASE_DIR, "face_detector/deploy.prototxt"])
weightsPath = os.path.sep.join([settings.BASE_DIR,"face_detector/res10_300x300_ssd_iter_140000.caffemodel"])
faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)
maskNet = load_model(os.path.join(settings.BASE_DIR,'face_detector/mask_detector.model'))
class IPWebCam(object):
def __init__(self):
self.url = "http://192.168.8.100:8080/shot.jpg"
self._count = 0
def __del__(self):
cv2.destroyAllWindows()
def get_frame(self):
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))
imgResp = urllib.request.urlopen(self.url)
imgNp = np.array(bytearray(imgResp.read()),dtype=np.uint8)
img= cv2.imdecode(imgNp,-1)
# We are using Motion JPEG, but OpenCV defaults to capture raw images,
# so we must encode it into JPEG in order to correctly display the
# video stream
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces_detected = face_detection_webcam.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
for (x, y, w, h) in faces_detected:
cv2.rectangle(img, pt1=(x, y), pt2=(x + w, y + h), color=(255, 0, 0), thickness=2)
resize = cv2.resize(img, (640, 480), interpolation = cv2.INTER_LINEAR)
frame_flip = cv2.flip(resize,1)
ret, jpeg = cv2.imencode('.jpg', frame_flip)
# capture a frame and save it periodically so that face recognition can run on it later
sleep(3)
cv2.imwrite("%d.jpg" % self._count, img)
self._count += 1
return jpeg.tobytes()
class MaskDetect(object):
def __init__(self):
self.vs = VideoStream(src=0).start()
def __del__(self):
cv2.destroyAllWindows()
def detect_and_predict_mask(self,frame, faceNet, maskNet):
# grab the dimensions of the frame and then construct a blob
# from it
(h, w) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),
(104.0, 177.0, 123.0))
# pass the blob through the network and obtain the face detections
faceNet.setInput(blob)
detections = faceNet.forward()
# initialize our list of faces, their corresponding locations,
# and the list of predictions from our face mask network
faces = []
locs = []
preds = []
# loop over the detections
for i in range(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated with
# the detection
confidence = detections[0, 0, i, 2]
# filter out weak detections by ensuring the confidence is
# greater than the minimum confidence
if confidence > 0.5:
# compute the (x, y)-coordinates of the bounding box for
# the object
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
# ensure the bounding boxes fall within the dimensions of
# the frame
(startX, startY) = (max(0, startX), max(0, startY))
(endX, endY) = (min(w - 1, endX), min(h - 1, endY))
# extract the face ROI, convert it from BGR to RGB channel
# ordering, resize it to 224x224, and preprocess it
face = frame[startY:endY, startX:endX]
face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
face = cv2.resize(face, (224, 224))
face = img_to_array(face)
face = preprocess_input(face)
# add the face and bounding boxes to their respective
# lists
faces.append(face)
locs.append((startX, startY, endX, endY))
# only make predictions if at least one face was detected
if len(faces) > 0:
# for faster inference we'll make batch predictions on *all*
# faces at the same time rather than one-by-one predictions
# in the above `for` loop
faces = np.array(faces, dtype="float32")
preds = maskNet.predict(faces, batch_size=32)
# return a 2-tuple of the face locations and their corresponding
# predictions
return (locs, preds)
def get_frame(self):
frame = self.vs.read()
frame = imutils.resize(frame, width=650)
frame = cv2.flip(frame, 1)
# detect faces in the frame and determine if they are wearing a
# face mask or not
(locs, preds) = self.detect_and_predict_mask(frame, faceNet, maskNet)
# loop over the detected face locations and their corresponding
# locations
for (box, pred) in zip(locs, preds):
# unpack the bounding box and predictions
(startX, startY, endX, endY) = box
(mask, withoutMask) = pred
# determine the class label and color we'll use to draw
# the bounding box and text
label = "Mask" if mask > withoutMask else "No Mask"
color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
# include the probability in the label
label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)
# display the label and bounding box rectangle on the output
# frame
cv2.putText(frame, label, (startX, startY - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
ret, jpeg = cv2.imencode('.jpg', frame)
return jpeg.tobytes()
import cv2
import os
import numpy as np
# This module contains the common functions called from the tester.py file
# Given an image, the function below returns the rectangles of the detected faces along with the grayscale image
def faceDetection(test_img):
gray_img=cv2.cvtColor(test_img,cv2.COLOR_BGR2GRAY)#convert color image to grayscale
gray_img=cv2.normalize(gray_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
gray_img=(255*gray_img).astype(np.uint8)
gray_img=cv2.fastNlMeansDenoising(gray_img)
face_haar_cascade=cv2.CascadeClassifier('HaarCascade/haarcascade_frontalface_default.xml')#Load haar classifier
faces=face_haar_cascade.detectMultiScale(gray_img,scaleFactor=1.32,minNeighbors=5)#detectMultiScale returns rectangles
return faces,gray_img
# Given a directory, the function below returns the face region of gray_img along with its label/ID
def labels_for_training_data(directory):
faces=[]
faceID=[]
for path,subdirnames,filenames in os.walk(directory):
for filename in filenames:
if filename.startswith("."):
print("Skipping system file")#Skipping files that startwith .
continue
id=os.path.basename(path)#fetching subdirectory names
img_path=os.path.join(path,filename)#fetching image path
print("img_path:",img_path)
print("id:",id)
test_img=cv2.imread(img_path)#loading each image one by one
if test_img is None:
print("Image not loaded properly")
continue
faces_rect,gray_img=faceDetection(test_img)#Calling faceDetection function to return faces detected in particular image
if len(faces_rect)!=1:
continue #Since we are assuming only single person images are being fed to classifier
(x,y,w,h)=faces_rect[0]
roi_gray=gray_img[y:y+h,x:x+w]# cropping the region of interest, i.e. the face area, from the grayscale image
faces.append(roi_gray)
faceID.append(int(id))
return faces,faceID
def train_classifier(faces,faceID):
face_recognizer=cv2.face.LBPHFaceRecognizer_create()
face_recognizer.train(faces,np.array(faceID))
return face_recognizer
# The function below draws a bounding box around a detected face in the image
def draw_rect(test_img,face):
(x,y,w,h)=face
cv2.rectangle(test_img,(x,y),(x+w,y+h),(255,0,0),thickness=4)
# The function below writes the person's name for the detected label
def put_text(test_img,text,x,y):
cv2.putText(test_img,text,(x,y),cv2.FONT_HERSHEY_DUPLEX,1,(255,0,0),3)
#Save video frames
def extractAndSaveFrames():
vidcap = cv2.VideoCapture('IT17098960.mp4')
success, image = vidcap.read()
count = 0
while success:
cv2.imwrite("frame%d.jpg" % count, image) # save frame as JPEG file
success, image = vidcap.read()
print('Read a new frame: ', success)
count += 1
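The helper above writes every frame of the lecture video to disk, which grows quickly for a long recording. A minimal sketch of a sampled variant (an assumption, not part of this commit), using the same OpenCV calls and a hypothetical frame_interval parameter:

def extractAndSaveFramesSampled(video_path, frame_interval=30):
    # save only every frame_interval-th frame to keep the output manageable
    vidcap = cv2.VideoCapture(video_path)
    success, image = vidcap.read()
    count, saved = 0, 0
    while success:
        if count % frame_interval == 0:
            cv2.imwrite("frame%d.jpg" % saved, image)  # save frame as a JPEG file
            saved += 1
        success, image = vidcap.read()
        count += 1
    vidcap.release()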
@@ -31,18 +31,18 @@
     </script>
-    <script type="text/javascript">
-        $(document).ready(function() {
-            $('#initiate_btn').click(function() {
-                fetch('http://127.0.0.1:8000/attendance/process-initiate-lecture')
-                    .then((res) => res.json())
-                    .then((out) => alert(out.response))
-                    .catch((err) => alert('error: ' + err))
-            });
-        })
+    <script>
+        function toggleLectureLive() {
+            var x = document.getElementById("liveStreamLecture");
+            var y = document.getElementById("liveStreamLectureStartButton");
+            if (x.style.display === "none") {
+                x.style.display = "block";
+                y.style.display = "block";
+            } else {
+                x.style.display = "none";
+                y.style.display = "none";
+            }
+        }
     </script>
 {% endblock %}
@@ -55,11 +55,19 @@
     <div class="text-center">
         <div class="card">
             <div class="card-header">
-                <h4 class="card-title">Starting the lecture....</h4>
+                <h4 class="card-title">Lecture Live</h4>
             </div>
             <div class="card-body">
-                <button type="button" class="btn btn-success" id="initiate_btn">Initiate Lecture</button>
+                <button type="button" class="btn btn-success" id="initiate_btn" onclick="toggleLectureLive()">Show Live Stream</button>
+            </div>
+            <div style="vertical-align: middle; border-style: none; background-color: #055270; height: 500px; width: 100%">
+                <div class="row justify-content-center">
+                    <img id="liveStreamLecture" style="display: none; height: inherit; margin-bottom: -25px;" src="{% url 'webcam_feed' %}">
+                </div>
+                <div class="row justify-content-center">
+                    <button style="display: none; width: 70px; height: 70px;" id="liveStreamLectureStartButton" class="btn btn-warning btn-circle"><i class="fas fa-video"></i></button>
+                </div>
             </div>
         </div>
     </div>
import cv2
from .faceRecognition import faceDetection, train_classifier, labels_for_training_data, draw_rect, put_text
# This module takes images stored on disk and performs face recognition
test_img=cv2.imread('TestImages/frame0.jpg')#test_img path
faces_detected,gray_img=faceDetection(test_img)
print("faces_detected:",faces_detected)
# Comment out the lines below when running this program a second time, since it saves the trainingData.yml file in the directory
faces,faceID=labels_for_training_data('trainingImages')
face_recognizer=train_classifier(faces,faceID)
face_recognizer.write('trainingData.yml')
# For subsequent runs
face_recognizer=cv2.face.LBPHFaceRecognizer_create()
face_recognizer.read('trainingData.yml')#use this to load training data for subsequent runs
name={0: "IT17138000", 1: "IT17100908", 2: "IT17098960"}#creating dictionary containing names for each label
file = open("attendance.txt", "w")
for face in faces_detected:
(x,y,w,h)=face
roi_gray=gray_img[y:y+h,x:x+w]
label,confidence=face_recognizer.predict(roi_gray)#predicting the label of given image
print("confidence:",confidence)
print("label:",label)
draw_rect(test_img,face)
predicted_name=name[label]
if(confidence>90):# if confidence is greater than 90, don't print the predicted name on screen
continue
file.write(predicted_name)
put_text(test_img,predicted_name,x,y)
file.close()
resized_img=cv2.resize(test_img,(700,700))
cv2.imshow("face dtecetion tutorial",resized_img)
cv2.waitKey(0)#Waits indefinitely until a key is pressed
cv2.destroyAllWindows
@@ -17,7 +17,7 @@ urlpatterns = [
     path('student/', StudentAPIView.as_view()),
     path('student/<str:pk>', StudentDetails.as_view()),
     url(r'^upload/$', FileView.as_view(), name='file-upload'),
+    path('webcam_feed', views.webcam_feed, name='webcam_feed'),
     # this url will initiate the lecture
     url(r'^process-initiate-lecture/$', InitiateLecture.as_view())
 ]
 from django.shortcuts import render
+from django.http.response import StreamingHttpResponse
+from AttendanceApp.camera import IPWebCam
 def initiate_lecture(request):
     return render(request, "AttendanceApp/Initiate_lecture.html")
+
+def gen(camera):
+    while True:
+        frame = camera.get_frame()
+        yield (b'--frame\r\n'
+               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
+
+def webcam_feed(request):
+    return StreamingHttpResponse(gen(IPWebCam()),
+                                 content_type='multipart/x-mixed-replace; boundary=frame')
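The same multipart streaming pattern could expose the mask-detection camera as well. A sketch only, assuming a hypothetical mask_feed view and URL entry that are not part of this commit:

from AttendanceApp.camera import MaskDetect

def mask_feed(request):
    # reuses gen() above with the MaskDetect camera class instead of IPWebCam
    return StreamingHttpResponse(gen(MaskDetect()),
                                 content_type='multipart/x-mixed-replace; boundary=frame')

# urls.py (sketch): path('mask_feed', views.mask_feed, name='mask_feed'),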
import cv2
import os
import re
import base64
import shutil
def saveImage(response):
dataUrlPattern = re.compile('data:image/(png|jpeg);base64,(.*)$')
base_path = os.path.join(os.path.abspath(__file__))
root_dir = os.path.dirname(os.path.dirname(base_path))
new_dir_name = "static\\FirstApp\\images\\{}".format(response["imageName"])
new_dir = os.path.join(root_dir, new_dir_name)
if (os.path.isdir(new_dir)):
# delete the previous directory
shutil.rmtree(new_dir)
# create the new directory
os.mkdir(new_dir)
count = 0
for url in response["ImageURLS"]:
url = dataUrlPattern.match(url).group(2)
encoded = url.encode()
image = base64.b64decode(encoded)
imageName = response["imageName"] + '_img_' + format(count) + '.png'
new_file = os.path.join(new_dir, imageName)
count += 1
# saving the images (method 1)
with open(new_file, "wb") as f:
f.write(image)
# respond 'yes' to the command line prompt
p = os.popen('python manage.py collectstatic', "w")
p.write("yes")
class PoseResponse:
# directory = ''
# image_name = ''
# label = ''
def __init__(self, directory, image_name, label):
self.directory = directory
self.image_name = image_name
self.labels = label
from imutils import face_utils
import os
import cv2
import dlib
import numpy as np
import imutils
def get2DPoints(image):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
CLASSIFIER_DIR = os.path.join(BASE_DIR, "FirstApp\\classifiers")
detector_path = os.path.join(CLASSIFIER_DIR, "shape_predictor_68_face_landmarks.dat")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(detector_path)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# detect faces in the grayscale image
rects = detector(gray, 1)
left_corner_arr = None
right_corner_arr = None
nose_tip_arr = None
right_mouth_arr = None
left_mouth_arr = None
chin_arr = None
face_center_arr = None
face_center_top_arr = None
face_center_bottom_arr = None
count = 0
print('no of faces: ', len(rects))
if (len(rects)):
left_corner_arr = np.zeros((len(rects), 2))
right_corner_arr = np.zeros((len(rects), 2))
nose_tip_arr = np.zeros((len(rects), 2))
right_mouth_arr = np.zeros((len(rects), 2))
left_mouth_arr = np.zeros((len(rects), 2))
chin_arr = np.zeros((len(rects), 2))
face_center_top_arr = np.zeros((len(rects), 2))
face_center_bottom_arr = np.zeros((len(rects), 2))
for (i, rect) in enumerate(rects):
left_corner = None
right_corner = None
nose_tip = None
right_mouth = None
left_mouth = None
chin = None
(fx, fy, fw, fh) = face_utils.rect_to_bb(rect)
cv2.rectangle(image, (fx, fy), (fx+fw, fy+fh), (0, 255, 0), 2)
face_center_top = [int(fx + fw/2), int(fy)]
face_center_bottom = [int(fx + fw/2), int(fy + fh)]
cv2.line(image, (int(fx + fw/2), int(fy)), (int(fx + fw/2), int(fy + fh)), (0, 255, 0), 2)
shape = predictor(gray, rect)
shape = face_utils.shape_to_np(shape)
# looping through each facial landmark category
for (name, (i, j)) in face_utils.FACIAL_LANDMARKS_IDXS.items():
# clone the original image so we can draw on it, then
# display the name of the face part on the image
clone = image
# loop over the subset of facial landmarks, drawing the
# specific face part
for (x, y) in shape[i:j]:
if (name == 'left_eye'):
# maxArr = np.amax(shape[i:j], axis=0)
# minArr = np.amin(shape[i:j], axis=0)
left_corner = np.amax(shape[i:j], axis=0)
elif (name == 'right_eye'):
# maxArr = np.amax(shape[i:j], axis=0)
# minArr = np.amin(shape[i:j], axis=0)
right_corner = np.amin(shape[i:j], axis=0)
elif (name == 'jaw'):
minArr = np.array(shape[i:j][8], dtype=int)
chin = np.array(shape[i:j][8], dtype=int)
# cv2.putText(clone, "Chin", (int(minArr[0]), int(minArr[1])), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
cv2.circle(image, (int(minArr[0]), int(minArr[1])), 3, (255, 0, 255), -1)
# cv2.circle(clone, (int(minArr[0]), int(minArr[1])), 3, (0, 255, 255), -1)
elif (name == 'nose'):
# nose_tip = np.array(shape[i:j][3], dtype=int)
nose_tip = np.array(shape[i:j][3], dtype=int)
# cv2.putText(clone, "Nose tip", (int(nose_tip[0]), int(nose_tip[1])), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255),
# 2)
# cv2.circle(clone, (int(nose_tip[0]), int(nose_tip[1])), 3, (255, 0, 255), -1)
elif (name == 'inner_mouth'):
# maxArr = np.amax(shape[i:j], axis=0)
# minArr = np.amin(shape[i:j], axis=0)
right_mouth = np.amin(shape[i:j], axis=0)
left_mouth = np.amax(shape[i:j], axis=0)
# cv2.circle(clone, (maxArr[0], maxArr[1]), 3, (127, 0, 255), -1)
# cv2.circle(clone, (minArr[0], minArr[1]), 3, (127, 0, 255), -1)
# else:
# cv2.circle(image, (x, y), 3, (255, 0, 255), -1)
left_corner_arr[count] = left_corner
right_corner_arr[count] = right_corner
nose_tip_arr[count] = nose_tip
right_mouth_arr[count] = right_mouth
left_mouth_arr[count] = left_mouth
chin_arr[count] = chin
face_center_top_arr[count] = face_center_top
face_center_bottom_arr[count] = face_center_bottom
count += 1
return left_corner_arr, right_corner_arr, nose_tip_arr, right_mouth_arr, left_mouth_arr, chin_arr, face_center_top_arr, face_center_bottom_arr, count
# extract the ROI of the face region as a separate image
# (x, y, w, h) = cv2.boundingRect(np.array([shape[i:j]]))
# roi = image[y:y + h, x:x + w]
# roi = imutils.resize(roi, width=250, inter=cv2.INTER_CUBIC)
#
# # show the particular face part
# cv2.imshow("ROI", roi)
# cv2.imshow("Image", clone)
# cv2.waitKey(0)
#
# # visualize all facial landmarks with a transparent overlay
# output = face_utils.visualize_facial_landmarks(image, shape)
# cv2.imshow("Image", output)
# cv2.waitKey(0)
\ No newline at end of file
import os
import cv2
import numpy as np
import shutil
from .facial_landmarks import get2DPoints
from .classes import pose
# Read Image
def estimatePose(request):
directory = request['directory']
images = request['images']
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
IMAGE_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\images")
SPEC_DIR = os.path.join(IMAGE_DIR, "{}".format(directory))
# new directory will be created to store pose estimations
new_dir_name = "static\\FirstApp\\poses\\{}".format(directory)
new_dir = os.path.join(BASE_DIR, new_dir_name)
face_count_response = 0
pose_response_list = []
if (os.path.isdir(new_dir)):
# delete the previous directory
shutil.rmtree(new_dir)
# create the new directory
os.mkdir(new_dir)
for im in images:
IMAGE_PATH = os.path.join(SPEC_DIR, "{}".format(im))
image = cv2.imread(IMAGE_PATH)
size = image.shape
left_corner, right_corner, nose_tip, right_mouth, left_mouth, chin, face_center_top, face_center_bottom, face_count = get2DPoints(image)
# if faces are found
if left_corner is not None:
# 3D model points.
model_points = np.array([
(0.0, 0.0, 0.0), # Nose tip
(0.0, -330.0, -65.0), # Chin
(-225.0, 170.0, -135.0), # Left eye left corner
(225.0, 170.0, -135.0), # Right eye right corner
(-150.0, -150.0, -125.0), # Left Mouth corner
(150.0, -150.0, -125.0) # Right mouth corner
])
# Camera internals
focal_length = size[1]
center = (size[1] / 2, size[0] / 2)
camera_matrix = np.array(
[[focal_length, 0, center[0]],
[0, focal_length, center[1]],
[0, 0, 1]], dtype="double"
)
# print("Camera Matrix :\n {0}".format(camera_matrix))
for i in range (face_count):
text = ''
# 2D image points. If you change the image, you need to change vector
image_points = np.array([
nose_tip[i],
chin[i],
left_corner[i],
right_corner[i],
left_mouth[i],
right_mouth[i]
], dtype="double")
dist_coeffs = np.zeros((4, 1)) # Assuming no lens distortion
(success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs,
flags=cv2.SOLVEPNP_ITERATIVE)
# Project a 3D point (0, 0, 1000.0) onto the image plane.
# We use this to draw a line sticking out of the nose
(nose_end_point2D, jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector, translation_vector,
camera_matrix, dist_coeffs)
# for p in image_points:
# cv2.circle(im, (int(p[0]), int(p[1])), 3, (0, 0, 255), -1)
p1 = (int(image_points[0][0]), int(image_points[0][1]))
p2 = (int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
if (p2[0] < face_center_top[i][0]):
text = 'RIGHT'
else:
text = 'LEFT'
cv2.putText(image, text, p2, cv2.FONT_HERSHEY_COMPLEX, 0.8, (0, 0, 255), 2)
cv2.line(image, p1, p2, (255, 0, 0), 2)
# saving the image
new_file = os.path.join(new_dir, im)
cv2.imwrite(new_file, image)
face_count_response += 1
# create a response object for the image
pose_response = {}
pose_response["directory"] = directory
pose_response["image"] = im
pose_response["label"] = text
pose_response_list.append(pose_response)
else:
print('No faces found')
# respond 'yes' to the command line prompt
p = os.popen('python manage.py collectstatic', "w")
p.write("yes")
# returning the static path
STATIC_POSE = os.path.join(BASE_DIR, "assets\\FirstApp\\pose")
STATIC_SPEC = os.path.join(STATIC_POSE, "{}".format(directory))
# if no images were created
if (face_count_response < 1):
shutil.rmtree(new_dir)
return "No faces were found"
return pose_response_list
\ No newline at end of file
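A hypothetical invocation of estimatePose, assuming the extracted frames already exist under assets\FirstApp\images\<directory> (the directory and image names below are placeholders):

request = {
    "directory": "lecture01",
    "images": ["frame0.jpg", "frame1.jpg"],
}
responses = estimatePose(request)
for res in responses:
    print(res["image"], res["label"])  # e.g. frame0.jpg LEFT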
import tensorflow as tf
import tensorflow.keras
from PIL import Image, ImageOps
import numpy as np
import cv2
import os
import math
import shutil
from . import custom_sorter as cs
def get_pose_estimations(video_name):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# face_classifier = cv2.CascadeClassifier(
# os.path.join(BASE_DIR, 'FirstApp\classifiers\haarcascade_frontalface_default.xml'))
# classifier_path = os.path.join(BASE_DIR, 'FirstApp\classifiers\Emotion_little_vgg.h5')
# classifier = load_model(classifier_path)
EXTRACTED_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
detections = []
frames = []
for frame_folder in os.listdir(EXTRACTED_DIR):
FRAME_DIR = os.path.join(EXTRACTED_DIR, frame_folder)
frame_details = {}
frame_details['frame'] = frame_folder
# for each detection in the frame directory
detected_images = []
for detection in os.listdir(FRAME_DIR):
if "frame" not in detection:
DETECTION_PATH = os.path.join(FRAME_DIR, detection)
image = cv2.imread(DETECTION_PATH)
# label = emotion_recognition(classifier, face_classifier, image)
detected_images.append(detection)
detections.append(detection)
frame_details['detections'] = detected_images
frames.append(frame_details)
sorted_frames = cs.custom_object_sorter(frames)
set_detections = set(detections)
list_set_detections = list(set_detections)
sorted_list_set_detections = cs.custom_sort(list_set_detections)
return sorted_frames, sorted_list_set_detections
# calculate pose estimations for a student
def calculate_pose_estimation_for_student(video_name, student, poses):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
VIDEO_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\activity\\{}".format(video_name))
POSE_DIR = os.path.join(BASE_DIR, "assets\\FirstApp\\poses")
POSE_VIDEO_DIR = os.path.join(POSE_DIR, video_name)
pose_count = 0
# checking whether the pose directory exists
if os.path.isdir(POSE_VIDEO_DIR) == False:
# create the pose directory
os.mkdir(POSE_VIDEO_DIR)
# loop through each frame of the directory
for frame in os.listdir(VIDEO_DIR):
FRAME_FOLDER = os.path.join(VIDEO_DIR, frame)
for detection in os.listdir(FRAME_FOLDER):
DETECTION_PATH = os.path.join(FRAME_FOLDER, detection)
# detection image
detection_img = cv2.imread(DETECTION_PATH)
# checking for the given student
if detection == student:
# select the correct pose detection
pose = poses[pose_count]
# extract the coordinates
x1 = int(pose['keypoints'][5]['position']['x'])
y1 = int(pose['keypoints'][5]['position']['y'])
x2 = int(pose['keypoints'][6]['position']['x'])
y2 = int(pose['keypoints'][6]['position']['y'])
# extract the head positions
x_diff = x1 - x2
y_diff = y1 - y2
x_pow = math.pow(x_diff, 2)
y_pow = math.pow(y_diff, 2)
summation = x_pow + y_pow
distance = int(math.sqrt(summation))
# defining the hyperparameter
param = 0.6
fraction = int(math.floor(distance * param)) if int(math.floor(distance * param)) > 0 else 1
middle_x = x2 + fraction
# middle_y = y2 - 20
middle_y = y2
head_x = middle_x
head_y = 0 if (middle_y - fraction) < 0 else (middle_y - fraction)
left_upper_x = 0 if (middle_x - fraction) < 0 else (middle_x - fraction)
# extract the new image
new_img = detection_img[head_y:head_y+fraction, left_upper_x:left_upper_x+distance]
# new directory name
# new_img_dir = os.path.join(POSE_VIDEO_DIR, frame)
new_img_dir = os.path.join(POSE_VIDEO_DIR, detection)
# check if the directory exists
if os.path.isdir(new_img_dir) == False:
# create the new directory
os.mkdir(new_img_dir)
# create new image name
frame_name = frame + ".png"
new_img_path = os.path.join(new_img_dir, frame_name)
# saving the new image
cv2.imwrite(new_img_path, new_img)
# increment the count
pose_count += 1
print('saving the image')
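To make the head-crop arithmetic above concrete, here is a small sketch with made-up shoulder keypoints (positions 5 and 6 in the assumed PoseNet-style keypoint list):

import math

x1, y1 = 220, 310  # left shoulder (keypoint 5), hypothetical values
x2, y2 = 160, 306  # right shoulder (keypoint 6), hypothetical values

distance = int(math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2))  # 60 px between the shoulders
fraction = max(int(math.floor(distance * 0.6)), 1)          # 36 px, the assumed head height

head_y = max(y2 - fraction, 0)        # 270: top edge of the head crop
left_upper_x = max(x2 - fraction, 0)  # 124: left edge of the head crop
# the cropped head region is detection_img[head_y:head_y+fraction, left_upper_x:left_upper_x+distance]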
def get_redundant_pairs(df):
'''Get diagonal and lower triangular pairs of correlation matrix'''
pairs_to_drop = set()
cols = df.columns
for i in range(0, df.shape[1]):
for j in range(0, i+1):
pairs_to_drop.add((cols[i], cols[j]))
return pairs_to_drop
def get_top_abs_correlations(df, n):
au_corr = df.corr().abs().unstack()
labels_to_drop = get_redundant_pairs(df)
au_corr = au_corr.drop(labels=labels_to_drop).sort_values(ascending=False)
return au_corr[0:n]
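A quick usage sketch for the correlation helpers above, with a toy pandas DataFrame (the column names are arbitrary):

import pandas as pd

df = pd.DataFrame({
    "happy":    [0.1, 0.4, 0.5, 0.9],
    "neutral":  [0.8, 0.5, 0.4, 0.1],
    "surprise": [0.2, 0.3, 0.1, 0.4],
})
# prints the two strongest absolute pairwise correlations, excluding self- and duplicate pairs
print(get_top_abs_correlations(df, 2))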
"""
This file is responsible for implementing the serializer classes for each model class;
each serializer class extends the djangorestframework serializers class.

There should be an inner class named "Meta" implemented inside each serializer class.
There are two fields inside the "Meta" class, as follows:
model: the relevant model class that needs to be serialized
fields: the fields that need to be displayed when serializing the model class
('__all__' indicates that all the fields are required to be displayed)
"""
from rest_framework import serializers
from .models import Teachers, RegisterUser
from .MongoModels import *
from .logic import classes
from . models import VideoMeta

class TeachersSerializer(serializers.ModelSerializer):
    class Meta:
        model = Teachers
        fields = ('firstName', 'lastName')
        # fields = __all__

class RegisterUserSerializer(serializers.ModelSerializer):
    class Meta:
        model = RegisterUser
        fields = ('firstName', 'lastName', 'email', 'password')

# image serializer
class ImageSerializer(serializers.Serializer):
    metaData = serializers.CharField()

# pose serializer
class PoseSerializer(serializers.Serializer):
    directory = serializers.CharField()
    image_name = serializers.CharField()
    text = serializers.CharField()

# lecture serializer
<!DOCTYPE html>
<html lang="en">
<head>
<link rel="stylesheet" href="style.css">
<title>{% block title %}My amazing site{% endblock %}</title>
</head>
<body>
<div id="sidebar">
{% block sidebar %}
<ul>
<li><a href="/">Home</a></li>
<li><a href="/blog/">Blog</a></li>
</ul>
{% endblock %}
</div>
<div id="content">
{% block content %}{% endblock %}
</div>
</body>
</html>
\ No newline at end of file
{% extends "FirstApp/base.html" %}
{% block title %}My amazing blog{% endblock %}
{% block content %}
{% for entry in blog_entries %}
<h2>{{ entry.title }}</h2>
<p>{{ entry.body }}</p>
{% endfor %}
{% endblock %}
\ No newline at end of file
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<meta name="description" content="">
<meta name="author" content="">
<title>SB Admin 2 - Forgot Password</title>
<!-- Custom fonts for this template-->
<link href="vendor/fontawesome-free/css/all.min.css" rel="stylesheet" type="text/css">
<link href="https://fonts.googleapis.com/css?family=Nunito:200,200i,300,300i,400,400i,600,600i,700,700i,800,800i,900,900i" rel="stylesheet">
<!-- Custom styles for this template-->
<link href="css/sb-admin-2.min.css" rel="stylesheet">
</head>
<body class="bg-gradient-primary">
<div class="container">
<!-- Outer Row -->
<div class="row justify-content-center">
<div class="col-xl-10 col-lg-12 col-md-9">
<div class="card o-hidden border-0 shadow-lg my-5">
<div class="card-body p-0">
<!-- Nested Row within Card Body -->
<div class="row">
<div class="col-lg-6 d-none d-lg-block bg-password-image"></div>
<div class="col-lg-6">
<div class="p-5">
<div class="text-center">
<h1 class="h4 text-gray-900 mb-2">Forgot Your Password?</h1>
<p class="mb-4">We get it, stuff happens. Just enter your email address below and we'll send you a link to reset your password!</p>
</div>
<form class="user">
<div class="form-group">
<input type="email" class="form-control form-control-user" id="exampleInputEmail" aria-describedby="emailHelp" placeholder="Enter Email Address...">
</div>
<a href="login.html" class="btn btn-primary btn-user btn-block">
Reset Password
</a>
</form>
<hr>
<div class="text-center">
<a class="small" href="register.html">Create an Account!</a>
</div>
<div class="text-center">
<a class="small" href="login.html">Already have an account? Login!</a>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
<!-- Bootstrap core JavaScript-->
<script src="vendor/jquery/jquery.min.js"></script>
<script src="vendor/bootstrap/js/bootstrap.bundle.min.js"></script>
<!-- Core plugin JavaScript-->
<script src="vendor/jquery-easing/jquery.easing.min.js"></script>
<!-- Custom scripts for all pages-->
<script src="js/sb-admin-2.min.js"></script>
</body>
</html>
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Title</title>
</head>
<body>
<p>This is my username: {{ username }}</p>
</body>
</html>
\ No newline at end of file
import spacy
from spacy.lang.pt.stop_words import STOP_WORDS
from sklearn.feature_extraction.text import CountVectorizer
import pt_core_news_sm

# Reading the file
nlp = pt_core_news_sm.load()
with open("audioToText01.txt", "r", encoding="utf-8") as f:
    text = " ".join(f.readlines())
doc = nlp(text)

# calculating the word frequency
corpus = [sent.text.lower() for sent in doc.sents]
cv = CountVectorizer(stop_words=list(STOP_WORDS))
cv_fit = cv.fit_transform(corpus)
@@ -18,6 +18,7 @@
word_list = cv.get_feature_names()
count_list = cv_fit.toarray().sum(axis=0)
word_frequency = dict(zip(word_list, count_list))
val = sorted(word_frequency.values())
higher_word_frequencies = [word for word, freq in word_frequency.items() if freq in val[-3:]]
print("\nWords with higher frequencies: ", higher_word_frequencies)
@@ -26,6 +27,7 @@
higher_frequency = val[-1]
for word in word_frequency.keys():
    word_frequency[word] = (word_frequency[word]/higher_frequency)

# calculating sentence rank and taking the top ranked sentences for the summary
sentence_rank = {}
for sent in doc.sents:
    for word in sent:
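The diff stops mid-loop. A typical frequency-based completion of the sentence-ranking step, shown only as a sketch and not necessarily the committed code:

for sent in doc.sents:
    for word in sent:
        if word.text.lower() in word_frequency.keys():
            if sent in sentence_rank.keys():
                sentence_rank[sent] += word_frequency[word.text.lower()]
            else:
                sentence_rank[sent] = word_frequency[word.text.lower()]

# keep the top-ranked sentences as the summary
top_sentences = sorted(sentence_rank, key=sentence_rank.get, reverse=True)[:3]
print(" ".join(sent.text for sent in top_sentences))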
What is OOP OBJECT ORIENTED PROGRAMMING is a programming concept that works on the principles of abstraction, encapsulation, inheritance, and polymorphism. The basic concept of OOPs is to create objects, re-use them throughout the program, and manipulate these objects to get results. Object Oriented Programming popularly known as OOP, is used in a modern programming language like Java

OBJECT ORIENTED PROGRAMMING is a programming concept that works on the principles of abstraction, encapsulation, inheritance and polymorphism the basic concept of OOPs is to create objects re-use them in the program and manipulate these objects to get results Object Oriented Programming popularly known as OOP, is used in programming language like Java. Core OOPS concepts are as follow. While the methods may be performed with these cars are driving, reverse, braking etc. next is Object An object can be defined as an instance of a class, and there can be multiple instances of a class in a program.
\ No newline at end of file
@@ -9,7 +9,7 @@ from LectureSummarizingApp.serializer import LectureAudioSerializer, LectureAudi
 from . import speech_to_text as stt

-# this API will retrieve lecture audio details
+# APIs used in Lecture Summarizing Component
 class LectureAudioAPI(APIView):
     def get(self, request):
 # from django.db import models
 from djongo import models

-# Create your models here.
+# Models used in Lecture Summarization Component
 from FirstApp.MongoModels import Lecturer, Subject
-from django.contrib.auth.decorators import login_required
-from django.http import HttpResponse
 from django.shortcuts import get_object_or_404, render
 from rest_framework.views import APIView
 from rest_framework.response import Response
@@ -9,7 +7,7 @@ from .serializer import LectureAudioSerializer, LectureAudioNoiseRemovedSerializ
     LectureSpeechToTextSerializer, LectureNoticesSerializer

-# Create your views here.
+# Views used in Lecture Summarization
 def summarization(request):