Commit 9fd9b207 authored by Fernando P.I.S.P's avatar Fernando P.I.S.P

1st

parent 621187b3
{
"cells": [
{
"cell_type": "markdown",
"id": "e3cee31f",
"metadata": {},
"source": [
"Create dataset"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c6811868",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "af2467d4",
"metadata": {},
"outputs": [],
"source": [
"face_id = input('enter your id ')\n",
"# Start capturing video\n",
"vid_cam = cv2.VideoCapture(0)\n",
"\n",
"# Detect object in video stream using Haarcascade Frontal Face\n",
"face_detector = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\n",
"\n",
"# Initialize sample face image\n",
"count = 0\n",
"\n",
"\n",
"\n",
"# Start looping\n",
"while (True):\n",
"\n",
" # Capture video frame\n",
" _, image_frame = vid_cam.read()\n",
"\n",
" # Convert frame to grayscale\n",
" gray = cv2.cvtColor(image_frame, cv2.COLOR_BGR2GRAY)\n",
"\n",
" # Detect frames of different sizes, list of faces rectangles\n",
" faces = face_detector.detectMultiScale(gray, 1.3, 5)\n",
"\n",
"    # Loop over each detected face\n",
" for (x, y, w, h) in faces:\n",
" # Crop the image frame into rectangle\n",
" cv2.rectangle(image_frame, (x, y), (x + w, y + h), (255, 0, 0), 2)\n",
"\n",
" # Increment sample face image\n",
" count += 1\n",
"\n",
" # Save the captured image into the datasets folder\n",
" cv2.imwrite(\"data/User.\" + str(face_id) + '.' + str(count) + \".jpg\", gray[y:y + h, x:x + w])\n",
"\n",
" # Display the video frame, with bounded rectangle on the person's face\n",
" cv2.imshow(\"frame\", image_frame)\n",
" \n",
" \n",
"\n",
"    # To stop taking video, press Enter (key code 13)\n",
" if cv2.waitKey(1)==13 or int(count)==100:\n",
" #if cv2.waitKey(100) & 0xFF == ord('q'):\n",
" break\n",
"\n",
"    # If images taken reach 50, stop taking video\n",
" elif count >= 50:\n",
" print(\"Successfully Captured\")\n",
" break\n",
"\n",
"# Stop video\n",
"vid_cam.release()\n",
"\n",
"# Close all started windows\n",
"cv2.destroyAllWindows()\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7715ea46",
"metadata": {},
"outputs": [],
"source": [
"##Train Dataset"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "36639b47",
"metadata": {},
"outputs": [],
"source": [
"import os,cv2\n",
"import numpy as np\n",
"from PIL import Image"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8e9bd387",
"metadata": {},
"outputs": [],
"source": [
"recognizer = cv2.face.LBPHFaceRecognizer_create()\n",
"detector= cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\");\n",
"\n",
"def getImagesAndLabels(path):\n",
" #get the path of all the files in the folder\n",
" imagePaths=[os.path.join(path,f) for f in os.listdir(path)]\n",
"    #create empty face list\n",
" faceSamples=[]\n",
" #create empty ID list\n",
" Ids=[]\n",
" #now looping through all the image paths and loading the Ids and the images\n",
" for imagePath in imagePaths:\n",
" #loading the image and converting it to gray scale\n",
" pilImage=Image.open(imagePath).convert('L')\n",
" #Now we are converting the PIL image into numpy array\n",
" imageNp=np.array(pilImage,'uint8')\n",
" #getting the Id from the image\n",
" Id=int(os.path.split(imagePath)[-1].split(\".\")[1])\n",
" # extract the face from the training image sample\n",
" faces=detector.detectMultiScale(imageNp)\n",
" #If a face is there then append that in the list as well as Id of it\n",
" for (x,y,w,h) in faces:\n",
" faceSamples.append(imageNp[y:y+h,x:x+w])\n",
" Ids.append(Id)\n",
" return faceSamples,Ids\n",
"\n",
"faces,Ids = getImagesAndLabels(\"data\")\n",
"s = recognizer.train(faces, np.array(Ids))\n",
"print(\"Successfully trained\")\n",
"recognizer.write(\"trainer/trainer.yml\")\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "61d56dba",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f4f63033",
"metadata": {},
"outputs": [],
"source": [
"face_detector = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\n",
"cap = cv2.VideoCapture(0);\n",
"recognizer = cv2.face.LBPHFaceRecognizer_create();\n",
"recognizer.read(\"trainer/trainer.yml\")\n",
"id = 0\n",
"font = cv2.FONT_HERSHEY_SIMPLEX\n",
"while True:\n",
" ret, img = cap.read();\n",
" gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n",
" faces = face_detector.detectMultiScale(gray, 1.3, 5)\n",
" for (x, y, w, h) in faces:\n",
" roi_gray = gray[y:y + h, x:x + w]\n",
" cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\n",
" id, conf = recognizer.predict(roi_gray)\n",
" if(id==1):\n",
" id=\"Harsha\"\n",
" elif(id==2):\n",
" id=\"Ishara\"\n",
"        elif(id==3):\n",
" id=\"Saluk\"\n",
" else:\n",
" id = \"Unknown, can not recognize\"\n",
" cv2.putText(img, str(id) + \" \" + str(conf), (x, y - 10), font, 0.55, (120, 255, 120), 1)\n",
" #cv2.cv.PutText(cv2.cv.fromarray(img),str(id),(x,y+h),font,255)\n",
" \n",
" cv2.imshow(\"frame\", img)\n",
" if cv2.waitKey(100) & 0xFF == ord('q'):\n",
" \n",
" break\n",
"cap.release()\n",
"cv2.destroyAllWindows()\n",
" \n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a11261e4",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "f76ddfe4",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment