Commit cfc9b11f authored by Shenthuri Vimaleshwaran

Merge branch 'revert-37cb5aa6' into 'master'

Revert "Merge branch 'jebarsandias' into 'master'"

See merge request !3
parents 37cb5aa6 34d2d062
TANU,08:26:49
import cv2
import numpy as np
import face_recognition
import os
from datetime import datetime
# from PIL import ImageGrab
path = 'C:/Users/jebar/Desktop/attendanemark/attendanemark/studensimages'
images = []
classNames = []
myList = os.listdir(path)
print(myList)
for cl in myList:
    curImg = cv2.imread(f'{path}/{cl}')
    images.append(curImg)
    classNames.append(os.path.splitext(cl)[0])
print(classNames)
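# Compute one 128-d face encoding per known image; this assumes every image
# in the folder contains exactly one detectable face, otherwise the [0]
# index below raises IndexError.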
def findEncodings(images):
    encodeList = []
    for img in images:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        encode = face_recognition.face_encodings(img)[0]
        encodeList.append(encode)
    return encodeList
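# Append "NAME,HH:MM:SS" to Att.csv the first time a name appears; existing
# rows are read first so each person is only marked once per file.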
def markAttendance(name):
    with open('Att.csv', 'r+') as f:
        myDataList = f.readlines()
        nameList = []
        for line in myDataList:
            entry = line.split(',')
            nameList.append(entry[0])
        if name not in nameList:
            now = datetime.now()
            dtString = now.strftime('%H:%M:%S')
            f.write(f'\n{name},{dtString}')
#### FOR CAPTURING SCREEN RATHER THAN WEBCAM
# def captureScreen(bbox=(300, 300, 690 + 300, 530 + 300)):
#     capScr = np.array(ImageGrab.grab(bbox))
#     capScr = cv2.cvtColor(capScr, cv2.COLOR_RGB2BGR)
#     return capScr
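# Encode all known faces once at startup; the loop below compares each
# webcam face against this list.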
encodeListKnown = findEncodings(images)
print('Encoding complete - press Q or q to close the webcam window')
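# Main loop: each frame is downscaled to 1/4 size for faster detection,
# so the detected face coordinates are scaled back up by 4 before drawing.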
cap = cv2.VideoCapture(0)
while True:
    success, img = cap.read()
    if not success:  # no frame available from the camera
        break
    # img = captureScreen()
    imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)
    imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
    facesCurFrame = face_recognition.face_locations(imgS)
    encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)
    for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):
        matches = face_recognition.compare_faces(encodeListKnown, encodeFace)
        faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)
        # print(faceDis)
        matchIndex = np.argmin(faceDis)
        if matches[matchIndex]:
            name = classNames[matchIndex].upper()
            print(name)
            y1, x2, y2, x1 = faceLoc
            y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4
            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.rectangle(img, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)
            cv2.putText(img, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
            markAttendance(name)
    cv2.imshow('Webcam', img)
    b = cv2.waitKey(1)
    if b == 81 or b == 113:  # ord('Q') == 81, ord('q') == 113; the original tested 31, which matches neither
        print("End Face Detection")
        break
cap.release()
cv2.destroyAllWindows()

import cv2
import os
import sys
import numpy as np
from datetime import datetime
from PyQt5 import QtGui, QtCore
from PyQt5.QtWidgets import QDialog, QApplication, QMainWindow, QMessageBox
from PyQt5.uic import loadUi
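# PyQt5 front end (second file in this diff): USER is a small dialog for
# entering a name/key pair; AUFR is the main window that drives dataset
# generation, training and recognition. Only part of AUFR appears here.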
class USER(QDialog):
    def __init__(self):
        super(USER, self).__init__()
        loadUi("user_info.ui", self)

    def get_name_key(self):
        name = self.name_label.text()
        key = int(self.key_label.text())
        return name, key

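# Main window: loads the Haar cascade classifiers, wires the UI buttons and
# menu actions, and owns the capture timer defined further down.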
class AUFR(QMainWindow):
    def __init__(self):
        super(AUFR, self).__init__()
        loadUi("mainwindow.ui", self)
        self.face_classifier = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
        self.eye_classifier = cv2.CascadeClassifier("haarcascade_eye.xml")
        self.smile_classifier = cv2.CascadeClassifier("haarcascade_smile.xml")
        self.camera_id = 0  # can also be the URL of a video stream
        self.dataset_per_subject = 50
        self.ret = False
        self.trained_model = 0
        # Blank placeholder frame shown before the camera starts; the original
        # called cv2.imread("", 1), which returns None and crashes on .copy().
        self.image = np.zeros((480, 640, 3), dtype=np.uint8)
        self.modified_image = self.image.copy()
        self.draw_text("fyh", 40, 30, 1, (255, 255, 255))
        self.display()
        # Actions
        self.generate_dataset_btn.setCheckable(True)
        self.train_model_btn.setCheckable(True)
        self.recognize_face_btn.setCheckable(True)
        # Menu
        self.about_menu = self.menu_bar.addAction("About")
        self.help_menu = self.menu_bar.addAction("Help")
        self.about_menu.triggered.connect(self.about_info)
        self.help_menu.triggered.connect(self.help_info)
        # Algorithms
        self.algo_radio_group.buttonClicked.connect(self.algorithm_radio_changed)
        # Rectangle
        self.face_rect_radio.setChecked(True)
        self.eye_rect_radio.setChecked(False)
        self.smile_rect_radio.setChecked(False)
        # Events
        self.generate_dataset_btn.clicked.connect(self.generate)
        self.train_model_btn.clicked.connect(self.train)
        self.recognize_face_btn.clicked.connect(self.recognize)
        self.save_image_btn.clicked.connect(self.save_image)
        self.video_recording_btn.clicked.connect(self.save_video)
        # Recognizers
        self.update_recognizer()
        self.assign_algorithms()

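    # A QTimer drives the capture loop: each tick grabs one frame and either
    # saves it to the dataset or runs recognition, depending on which button
    # is checked.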
    def start_timer(self):  # start the timer that drives frame capture
        self.capture = cv2.VideoCapture(self.camera_id)
        self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        self.timer = QtCore.QTimer()
        if self.generate_dataset_btn.isChecked():
            self.timer.timeout.connect(self.save_dataset)
        elif self.recognize_face_btn.isChecked():
            self.timer.timeout.connect(self.update_image)
        self.timer.start(5)

    def stop_timer(self):  # stop the timer and release the camera
        self.timer.stop()
        self.ret = False
        self.capture.release()

    def update_image(self):  # refresh the canvas on each timer tick
        if self.recognize_face_btn.isChecked():
            self.ret, self.image = self.capture.read()
            self.image = cv2.flip(self.image, 1)
            faces = self.get_faces()
            self.draw_rectangle(faces)
            if self.video_recording_btn.isChecked():
                self.recording()
            self.display()