Commit f6f50c02 authored by Gunasena D K B

Upload New File

parent 59b94e68
import cv2
import numpy as np
import face_recognition
import os
from datetime import datetime
path = 'ImagesAttendance'
images = []
classNames = []
myList = os.listdir(path)
# print(myList)
for cls in myList:
    curImg = cv2.imread(f'{path}/{cls}')
    images.append(curImg)
    classNames.append(os.path.splitext(cls)[0])
# print(classNames)
def findEncodings(images):
    encodeList = []
    for img in images:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # assumes each reference image contains exactly one detectable face
        encode = face_recognition.face_encodings(img)[0]
        encodeList.append(encode)
    return encodeList
def unknown_image_encoded(images):
    """
    Encode a face given the file name (standalone helper; not called in the main loop below).
    """
    face = face_recognition.load_image_file("faces/" + images)
    encoding = face_recognition.face_encodings(face)[0]
    return encoding
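# Example usage (hypothetical file name; expects the image to exist under a faces/ folder):
# encoding = unknown_image_encoded("person1.jpg")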
# def markAttendance(name):
#     with open('Attendance.csv', 'r+') as f:
#         myDataList = f.readlines()
#         print(myDataList)
#         nameList = []
#         for line in myDataList:
#             entry = line.split(',')
#             nameList.append(entry[0])
#         if name not in nameList:
#             now = datetime.now()
#             dtString = now.strftime('%H:%M:%S')
#             f.writelines(f'\n{name}, {dtString}')
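# If re-enabled, each new name would append a row to Attendance.csv of the form
# "<NAME>, HH:MM:SS", e.g. "ELON, 10:15:42" (the name comes from the image file
# names in ImagesAttendance; the example value here is hypothetical).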
encodeListKnown = findEncodings(images)
print('Encoding Complete')
cap = cv2.VideoCapture(0)
while True:
    success, img = cap.read()
    if not success:
        continue  # skip frames that could not be read from the webcam
    # downscale to 1/4 size for faster detection and convert BGR -> RGB
    imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)
    imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
    facesCurFrame = face_recognition.face_locations(imgS)
    encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)
    for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):
        # see if the face is a match for the known face(s)
        matches = face_recognition.compare_faces(encodeListKnown, encodeFace)
        # use the known face with the smallest distance to the new face
        faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)
        # print(faceDis)
        matchIndex = np.argmin(faceDis)
        if matches[matchIndex]:
            name = classNames[matchIndex].upper()
            # print(name)
            # locations were found on the 1/4-scale image, so scale coordinates back up
            y1, x2, y2, x1 = faceLoc
            y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4
            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.rectangle(img, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)
            cv2.putText(img, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
            # markAttendance(name)
    cv2.imshow('Webcam', img)
    cv2.waitKey(1)
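# Optional cleanup sketch: a common way to end the preview cleanly is to break
# out of the loop on a key press and release the capture afterwards, e.g.
#
#     if cv2.waitKey(1) & 0xFF == ord('q'):
#         break
#
#     cap.release()
#     cv2.destroyAllWindows()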