Commit 367894f2 authored by DougDeMuro

waste detection completed

parent 0f3def2a
#! /usr/bin/python
# import the necessary packages
from gpiozero import MotionSensor, Servo
from imutils.video import VideoStream
from imutils.video import FPS
import face_recognition
import imutils
import pickle
import time
import cv2
import serial
import RPi.GPIO as GPIO
import pynmea2
import requests

# ultrasonic distance sensor pins (BCM numbering)
TRIG = 21
ECHO = 20
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)  # ignore channel-in-use warnings for now
# gas sensor digital output wired to GPIO 18
GPIO.setup(18, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
servo = Servo(17)  # lid servo
# load the class labels the detector was trained on, one per line
classFile = "/home/pi/Desktop/Project/coco.names"
with open(classFile, "rt") as f:
    classNames = f.read().rstrip("\n").split("\n")
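# coco.names is assumed to be the standard 80-label COCO list ("person",
# "bicycle", ..., "toothbrush"); COCO class ids are 1-based, which is why
# classId - 1 below indexes straight into classNames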
# paths to the SSD MobileNet v3 model files
configPath = "/home/pi/Desktop/Project/ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt"
weightsPath = "/home/pi/Desktop/Project/frozen_inference_graph.pb"
net = cv2.dnn_DetectionModel(weightsPath, configPath)
net.setInputSize(320, 320)
net.setInputScale(1.0 / 127.5)
net.setInputMean((127.5, 127.5, 127.5))
net.setInputSwapRB(True)
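# scale 1/127.5 with mean (127.5, 127.5, 127.5) maps 8-bit pixels from
# [0, 255] to [-1, 1], the range this MobileNet graph expects; swapRB
# converts OpenCV's BGR frames to the RGB order the model was trained on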
def getObjects(img, thres, nms, draw=True, objects=None):
    # run the detector and return the names of any requested objects found;
    # objects=None (rather than a mutable default) means "accept every class"
    classIds, confs, bbox = net.detect(img, confThreshold=thres, nmsThreshold=nms)
    #print(classIds,bbox)
    if not objects:
        objects = classNames
    objectInfo = []
    if len(classIds) != 0:
        for classId, confidence, box in zip(classIds.flatten(), confs.flatten(), bbox):
            className = classNames[classId - 1]
            if className in objects:
                objectInfo.append(className)
                if draw:
                    cv2.rectangle(img, box, color=(0, 255, 0), thickness=2)
                    cv2.putText(img, className.upper(), (box[0] + 10, box[1] + 30),
                                cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
                    cv2.putText(img, str(round(confidence * 100, 2)), (box[0] + 200, box[1] + 30),
                                cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
    return objectInfo
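# e.g. getObjects(frame, 0.60, 0.2, objects=["bottle", "cup"]) would return
# something like ["bottle"] and draw a labelled box on the frame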
# initialize 'currentname' so a report triggers only when a new person is identified
currentname = "unknown"
# face encodings model created by train_model.py
encodingsP = "encodings.pickle"
# load the known faces and embeddings
print("[INFO] loading encodings + face detector...")
with open(encodingsP, "rb") as f:
    data = pickle.loads(f.read())
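# data is expected to be a dict of parallel lists, {"encodings": [...128-d
# face vectors...], "names": [...]}, matching the data["encodings"] and
# data["names"] lookups below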
# initialize the video stream and allow the camera sensor to warm up
# set src as follows:
# src = 0 : the built-in webcam, e.g. a laptop camera
# src = 2 : needed to reach a USB webcam attached to the laptop
#vs = VideoStream(src=2,framerate=10).start()
vs = VideoStream(usePiCamera=True).start()
time.sleep(2.0)
# start the FPS counter
fps = FPS().start()
pir = MotionSensor(4)  # PIR motion sensor on GPIO 4 gates each detection cycle
# open the GPS serial port once; reopening it on every pass leaks handles
port = "/dev/ttyAMA0"
ser = serial.Serial(port, baudrate=9600, timeout=0.5)
while True:
    newdata = ser.readline().decode('cp1252')
    #print(newdata)
    # fallback coordinates used until a $GPRMC sentence gives a real fix
    lat = 9.37
    lng = 80.41
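    # a $GPRMC sentence looks like, for example:
    # $GPRMC,123519,A,4807.038,N,01131.000,E,022.4,084.4,230394,003.1,W*6A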
    if newdata[0:6] == "$GPRMC":
        newmsg = pynmea2.parse(newdata)
        lat = newmsg.latitude
        lng = newmsg.longitude
    gps = "Latitude=" + str(lat) + " and Longitude=" + str(lng)
    #print(gps)
#print("distance measurement in progress")
GPIO.setup(TRIG,GPIO.OUT)
GPIO.setup(ECHO,GPIO.IN)
GPIO.output(TRIG,False)
#print("waiting for sensor to settle")
time.sleep(0.2)
GPIO.output(TRIG,True)
time.sleep(0.00001)
GPIO.output(TRIG,False)
while GPIO.input(ECHO)==0:
pulse_start=time.time()
while GPIO.input(ECHO)==1:
pulse_end=time.time()
pulse_duration=pulse_end-pulse_start
distance=pulse_duration*17150
distance=round(distance,2)
persentage=round(abs((100-(distance/6.67)*100)),2)
#persentage=0
#print("distance:",distance,"cm")
    # read the gas sensor's digital output
    if GPIO.input(18) == GPIO.HIGH:
        gas = 1
    else:
        gas = 0
    # block here until the PIR sensor reports motion, then open the lid
    pir.wait_for_motion()
    i = 0
    servo.min()  # open the lid
    # loop over frames from the video stream, at most 10 per motion event
    while i < 10:
        i += 1
        # grab the frame from the threaded video stream and resize it
        # to 500px (to speed up processing)
        frame = vs.read()
        frame = imutils.resize(frame, width=500)
        objectInfo = getObjects(frame, 0.60, 0.2)
        # keep only the first detected object's name for the report
        if len(objectInfo) > 0:
            objectInfo = objectInfo[0]
        else:
            objectInfo = "NULL object"
        # detect the face boxes
        boxes = face_recognition.face_locations(frame)
        # compute the facial embeddings for each face bounding box
        encodings = face_recognition.face_encodings(frame, boxes)
        names = []
        # loop over the facial embeddings
        for encoding in encodings:
            # attempt to match each face in the input image to our known
            # encodings
            matches = face_recognition.compare_faces(data["encodings"], encoding)
            name = "Unknown"  # if the face is not recognized, report Unknown
            # check to see if we have found a match
            if True in matches:
                # find the indexes of all matched faces, then count the
                # votes for each candidate name; the loop variable is idx,
                # not i, so the frame counter above is not clobbered
                matchedIdxs = [idx for (idx, b) in enumerate(matches) if b]
                counts = {}
                for idx in matchedIdxs:
                    name = data["names"][idx]
                    counts[name] = counts.get(name, 0) + 1
                # take the name with the most votes (an unlikely tie falls
                # back to whichever entry max() sees first)
                name = max(counts, key=counts.get)
                # remember the last person identified
                if currentname != name:
                    currentname = name
                    #print(currentname)
            # update the list of names
            names.append(name)
        # loop over the recognized faces
        for ((top, right, bottom, left), name) in zip(boxes, names):
            # draw the predicted face name on the image - color is in BGR
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 225), 2)
            y = top - 15 if top - 15 > 15 else top + 15
            cv2.putText(frame, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX,
                        .8, (0, 255, 255), 2)
        # display the image to our screen
        cv2.imshow("Facial Recognition is Running", frame)
        key = cv2.waitKey(1) & 0xFF
        print(str(lng), str(lat), "dustbin level:", distance, currentname, objectInfo, gas)
        # report this frame's readings to the backend
        url = 'https://testing-api-laravel.herokuapp.com/api/bbot'
        myobj = {
            'name': currentname,
            'object': objectInfo,
            'bin_longitude': str(lng),
            'bin_latitude': str(lat),
            'bin_level': percentage,
            'is_gas': gas,
        }
        requests.post(url, json=myobj)
        # quit when 'q' key is pressed
        if key == ord("q"):
            break
        # update the FPS counter
        fps.update()
    cv2.destroyAllWindows()
    # the video stream is left running so vs.read() still delivers fresh
    # frames on the next motion event
    servo.max()  # close the lid
    #print("distance:",distance,"cm")
    #pir.wait_for_no_motion()
# stop the timer and display FPS information
fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()