Commit 6e650bf8 authored by Wijesundara W.M.S.G

Merge branch 'Kasun' into 'master'

Kasun

See merge request !3
parents f9a6637f 0ec547bd

import cv2
import numpy as np
import face_recognition

# Load the reference image and the test image
imgElon = face_recognition.load_image_file('ImagesBasic/Me Test.JPG')
imgElon = cv2.cvtColor(imgElon, cv2.COLOR_BGR2RGB)
imgTest = face_recognition.load_image_file('ImagesBasic/Hesith.jpg')
imgTest = cv2.cvtColor(imgTest, cv2.COLOR_BGR2RGB)

# Locate and encode the first face in each image and draw a box around it
# (face_locations returns (top, right, bottom, left) tuples)
faceLoc = face_recognition.face_locations(imgElon)[0]
encodeElon = face_recognition.face_encodings(imgElon)[0]
cv2.rectangle(imgElon, (faceLoc[3], faceLoc[0]), (faceLoc[1], faceLoc[2]), (255, 0, 255), 2)

faceLocTest = face_recognition.face_locations(imgTest)[0]
encodeTest = face_recognition.face_encodings(imgTest)[0]
cv2.rectangle(imgTest, (faceLocTest[3], faceLocTest[0]), (faceLocTest[1], faceLocTest[2]), (255, 0, 255), 2)

# Compare the two encodings and report both the boolean match and the face distance
results = face_recognition.compare_faces([encodeElon], encodeTest)
faceDis = face_recognition.face_distance([encodeElon], encodeTest)
print(results, faceDis)

cv2.putText(imgTest, f'{results} {round(faceDis[0], 2)}', (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
cv2.imshow('Original', imgElon)
cv2.imshow('Test', imgTest)
cv2.waitKey(0)
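
# --- Added sketch (not in the original script): face_recognition.face_encodings() returns a
# plain list of encodings and is empty when no face is detected, so the [0] indexing above
# raises IndexError on a face-less image. A minimal guard could look like the helper below;
# the name encode_first_face and the error message are illustrative assumptions, not project code.
def encode_first_face(image_path):
    """Return the encoding of the first detected face, or raise a clear error."""
    image = face_recognition.load_image_file(image_path)
    encodings = face_recognition.face_encodings(image)
    if not encodings:
        raise ValueError(f'No face detected in {image_path}')
    return encodings[0]
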
import cv2
import numpy as np
import face_recognition
import os
from datetime import datetime

# Load every image in ImagesAttendance and use its file name (without extension) as the class name
path = 'ImagesAttendance'
images = []
classNames = []
myList = os.listdir(path)
# print(myList)
for cls in myList:
    curImg = cv2.imread(f'{path}/{cls}')
    images.append(curImg)
    classNames.append(os.path.splitext(cls)[0])
# print(classNames)


def findEncodings(images):
    """Return a face encoding for each image in the given list."""
    encodeList = []
    for img in images:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        encode = face_recognition.face_encodings(img)[0]
        encodeList.append(encode)
    return encodeList


def unknown_image_encoded(images):
    """
    encode a face given the file name
    """
    face = face_recognition.load_image_file("faces/" + images)
    encoding = face_recognition.face_encodings(face)[0]
    return encoding


# def markAttendance(name):
#     with open('Attendance.csv', 'r+') as f:
#         myDataList = f.readlines()
#         print(myDataList)
#         nameList = []
#         for line in myDataList:
#             entry = line.split(',')
#             nameList.append(entry[0])
#         if name not in nameList:
#             now = datetime.now()
#             dtString = now.strftime('%H:%M:%S')
#             f.writelines(f'\n{name}, {dtString}')
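

# --- Added sketch: a working version of the commented-out markAttendance() above, mirroring
# its logic (read Attendance.csv, append "name, time" only for names not seen yet). It stays
# unused unless the '# markAttendance(name)' call further down is uncommented; the file name
# Attendance.csv and the time format come from the commented block, nothing else is assumed.
def markAttendance(name):
    # 'r+' mirrors the commented block: Attendance.csv must already exist
    with open('Attendance.csv', 'r+') as f:
        myDataList = f.readlines()
        nameList = [line.split(',')[0] for line in myDataList]
        if name not in nameList:
            dtString = datetime.now().strftime('%H:%M:%S')
            f.writelines(f'\n{name}, {dtString}')
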
encodeListKnown = findEncodings(images)
print('Encoding Complete')
cap = cv2.VideoCapture(0)
while True:
    success, img = cap.read()
    # Work on a quarter-size copy of the frame to speed up detection
    imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)
    imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
    facesCurFrame = face_recognition.face_locations(imgS)
    encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)

    # (this block recomputes the detections already held in facesCurFrame / encodesCurFrame)
    face_locations = face_recognition.face_locations(imgS)
    unknown_face_encodings = face_recognition.face_encodings(imgS, face_locations)
    for face_encoding in unknown_face_encodings:
        # See if the face is a match for the known face(s)
        matches = face_recognition.compare_faces(encodeListKnown, face_encoding)
        name = "Unknown"
        # use the known face with the smallest distance to the new face
        face_distances = face_recognition.face_distance(encodeListKnown, face_encoding)
        best_match_index = np.argmin(face_distances)
        if matches[best_match_index]:
            # look up the class name rather than the raw encoding vector
            name = classNames[best_match_index]

    for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):
        matches = face_recognition.compare_faces(encodeListKnown, encodeFace)
        faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)
        # print(faceDis)
        matchIndex = np.argmin(faceDis)
        if matches[matchIndex]:
            name = classNames[matchIndex].upper()
            # print(name)
            # Scale the face location back up to the full-size frame before drawing
            y1, x2, y2, x1 = faceLoc
            y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4
            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.rectangle(img, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)
            cv2.putText(img, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
            # markAttendance(name)

    cv2.imshow('Webcam', img)
    cv2.waitKey(1)


import face_recognition as fr
import os
import cv2
import face_recognition
import numpy as np
from time import sleep
print("Project Start")


def get_encoded_faces():
    """
    looks through the faces folder and encodes all
    the faces
    :return: dict of (name, image encoded)
    """
    encoded = {}
    for dirpath, dnames, fnames in os.walk("./faces"):
        for f in fnames:
            if f.endswith(".jpg") or f.endswith(".png") or f.endswith(".jpeg"):
                face = fr.load_image_file("faces/" + f)
                encoding = fr.face_encodings(face)[0]
                encoded[f.split(".")[0]] = encoding
    return encoded


def unknown_image_encoded(img):
    """
    encode a face given the file name
    """
    face = fr.load_image_file("faces/" + img)
    encoding = fr.face_encodings(face)[0]
    return encoding


def classify_face(im):
    """
    will find all of the faces in a given image and label
    them if it knows what they are
    :param im: str of file path
    :return: list of face names
    """
    faces = get_encoded_faces()
    faces_encoded = list(faces.values())
    known_face_names = list(faces.keys())

    img = cv2.imread(im, 1)
    # img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5)
    # img = img[:,:,::-1]

    face_locations = face_recognition.face_locations(img)
    unknown_face_encodings = face_recognition.face_encodings(img, face_locations)

    face_names = []
    for face_encoding in unknown_face_encodings:
        # See if the face is a match for the known face(s)
        matches = face_recognition.compare_faces(faces_encoded, face_encoding)
        name = "Unknown"

        # use the known face with the smallest distance to the new face
        face_distances = face_recognition.face_distance(faces_encoded, face_encoding)
        best_match_index = np.argmin(face_distances)
        if matches[best_match_index]:
            name = known_face_names[best_match_index]
        face_names.append(name)

    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Draw a box around the face
        # cv2.rectangle(img, (left-20, top-20), (right+20, bottom+20), (255, 0, 0), 2)
        cv2.rectangle(img, (left - 20, top - 20), (right + 20, bottom + 20), (0, 225, 0), 2)

        # Draw a label with a name below the face
        # cv2.rectangle(img, (left-20, bottom -15), (right+20, bottom+20), (255, 0, 0), cv2.FILLED)
        cv2.rectangle(img, (left - 20, bottom - 20), (right + 20, bottom + 20), (0, 225, 0), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        # cv2.putText(img, name, (left -20, bottom + 15), font, 1.0, (255, 255, 255), 2)
        cv2.putText(img, name, (left - 20, bottom + 8), font, 0.8, (255, 255, 255), 2)

    # Display the resulting image
    while True:
        cv2.imshow('Video', img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            return face_names


# print(classify_face("me.jpeg"))
print(classify_face("./Images/test3.jpg"))
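

# --- Added sketch (hypothetical helper, not part of the script above): get_encoded_faces()
# walks ./faces and re-encodes every image on every classify_face() call. If many images were
# classified in a row, the known encodings could be computed once and reused, for example:
def classify_face_cached(im, known):
    """Match faces in the image at `im` against a precomputed {name: encoding} dict."""
    faces_encoded = list(known.values())
    known_face_names = list(known.keys())
    img = cv2.imread(im, 1)
    face_locations = face_recognition.face_locations(img)
    unknown_face_encodings = face_recognition.face_encodings(img, face_locations)
    face_names = []
    for face_encoding in unknown_face_encodings:
        matches = face_recognition.compare_faces(faces_encoded, face_encoding)
        name = "Unknown"
        face_distances = face_recognition.face_distance(faces_encoded, face_encoding)
        best_match_index = np.argmin(face_distances)
        if matches[best_match_index]:
            name = known_face_names[best_match_index]
        face_names.append(name)
    return face_names
# Example usage (assumed paths): known = get_encoded_faces(); classify_face_cached("./Images/test3.jpg", known)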