Commit f5f3f112 authored by IT19110530-Pramodini A.A.D.A

Merge branch 'feature/gaze-following' into 'master'

Feature/gaze following

See merge request !2
parents f1bfc10c 41670218
import cv2
import numpy as np
import argparse
import time
import pandas as pd
parser = argparse.ArgumentParser()
parser.add_argument('--webcam', help="True/False", default=False)
parser.add_argument('--play_video', help="True/False", default=False)
parser.add_argument('--image', help="True/False", default=False)
parser.add_argument('--video_path', help="Path of video file", default=r"Videos\DataCollection\ASD2.mp4")
parser.add_argument('--image_path', help="Path of image to detect objects", default=r"Images\_000050.jpg")
parser.add_argument('--verbose', help="To print statements", default=True)
args = parser.parse_args()
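# Note: argparse returns these flags as strings when they are passed on the command
# line, so e.g. `--webcam False` still yields the truthy string "False". A minimal
# normalisation sketch (an addition, not part of the original script) could be:
#   webcam = str(args.webcam).lower() in ('true', '1', 'yes')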
data = {'frame_number':[],'x_center':[], 'y_center':[]} #sr
#Load yolo
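# The custom Darknet weights, cfg and class-name files below are loaded with relative
# paths, so they are expected to sit in the working directory the script is run from.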
def load_yolo():
    net = cv2.dnn.readNet("yolov3_custom_last_eyediapBall.weights", "yolov3_custom_eyediapball.cfg")
    classes = []
    with open("obj.names", "r") as f:
        classes = [line.strip() for line in f.readlines()]
    output_layers = net.getUnconnectedOutLayersNames()
    colors = np.random.uniform(0, 255, size=(len(classes), 3))
    return net, classes, colors, output_layers
def load_image(img_path):
    # image loading
    img = cv2.imread(img_path)
    img = cv2.resize(img, None, fx=0.4, fy=0.4)
    height, width, channels = img.shape
    return img, height, width, channels
def start_webcam():
    cap = cv2.VideoCapture(0)
    return cap
def display_blob(blob):
    '''
    Three images each for RED, GREEN, BLUE channel
    '''
    for b in blob:
        for n, imgb in enumerate(b):
            cv2.imshow(str(n), imgb)
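# blobFromImage in detect_objects below scales pixel values by 1/255 (0.00392),
# resizes the frame to the 320x320 network input, and swaps BGR to RGB
# (swapRB=True) since Darknet models expect RGB input; crop=False means the
# image is resized without cropping.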
def detect_objects(img, net, outputLayers):
    blob = cv2.dnn.blobFromImage(img, scalefactor=0.00392, size=(320, 320), mean=(0, 0, 0), swapRB=True, crop=False)
    net.setInput(blob)
    outputs = net.forward(outputLayers)
    return blob, outputs
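# Each detection row produced by the YOLO output layers is
# [center_x, center_y, width, height, objectness, class_0_score, class_1_score, ...],
# with the box coordinates normalised to [0, 1]; get_box_dimensions below reads the
# per-class scores from index 5 onwards and rescales the boxes to pixel coordinates.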
def get_box_dimensions(outputs, height, width):
    boxes = []
    confs = []
    class_ids = []
    for output in outputs:
        for detect in output:
            scores = detect[5:]
            class_id = np.argmax(scores)
            conf = scores[class_id]
            if conf > 0.3:
                center_x = int(detect[0] * width)
                center_y = int(detect[1] * height)
                w = int(detect[2] * width)
                h = int(detect[3] * height)
                x = int(center_x - w / 2)
                y = int(center_y - h / 2)
                boxes.append([x, y, w, h])
                confs.append(float(conf))
                class_ids.append(class_id)
                # print(center_x)
                # print(center_y)
    return boxes, confs, class_ids
def draw_labels(boxes, confs, colors, class_ids, classes, img):
    # Non-maximum suppression: score threshold 0.5, NMS (IoU) threshold 0.4
    indexes = cv2.dnn.NMSBoxes(boxes, confs, 0.5, 0.4)
    font = cv2.FONT_HERSHEY_PLAIN
    # print(len(boxes))
    print('Boxes:', boxes)
    print('Indexes:', indexes)
    print('Classes:', classes, 'class_ids: ', class_ids)
    if (len(boxes) == 0) or (len(indexes) == 0):
        # No detection survived: record a gap for this frame
        data['x_center'].append(np.nan)  # sr
        data['y_center'].append(np.nan)  # sr
    for i in range(len(boxes)):
        if i in indexes:
            x, y, w, h = boxes[i]
            label = str(classes[class_ids[i]])
            # print(colors)
            color = colors[0]
            cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
            cv2.putText(img, label, (x, y - 5), font, 1, color, 1)
            data['x_center'].append(x + (w / 2))  # sr
            data['y_center'].append(y + (h / 2))  # sr
            # print('x+w/2', (x+(w/2)))
            # print('y+h/2', (y+(h/2)))
    # cv2.imshow("Image", img)
    # Always return the (possibly annotated) frame so callers can write it out
    return img
def image_detect(img_path):
    model, classes, colors, output_layers = load_yolo()
    image, height, width, channels = load_image(img_path)
    blob, outputs = detect_objects(image, model, output_layers)
    boxes, confs, class_ids = get_box_dimensions(outputs, height, width)
    draw_labels(boxes, confs, colors, class_ids, classes, image)
    cv2.imshow("Image", image)  # show the annotated image until ESC is pressed
    while True:
        key = cv2.waitKey(1)
        if key == 27:
            break
def webcam_detect():
    model, classes, colors, output_layers = load_yolo()
    cap = start_webcam()
    while True:
        _, frame = cap.read()
        height, width, channels = frame.shape
        blob, outputs = detect_objects(frame, model, output_layers)
        boxes, confs, class_ids = get_box_dimensions(outputs, height, width)
        draw_labels(boxes, confs, colors, class_ids, classes, frame)
        cv2.imshow("Webcam", frame)  # show the annotated frame; ESC exits
        key = cv2.waitKey(1)
        if key == 27:
            break
    cap.release()
#resizing arrays which are not same length
# def f(x):
# vals = x[~x.isnull()].values
# vals = np.resize(vals,len(x))
# return vals
def start_video(video_path):
    model, classes, colors, output_layers = load_yolo()
    cap = cv2.VideoCapture(video_path)
    current_frame = 0  # sr
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    size = (frame_width, frame_height)
    # The VideoWriter below writes the annotated frames, at the size defined above, to an .avi file.
    result = cv2.VideoWriter(r'Output Videos\DC\output_ASD2_FullyTrainedModel_er5.avi',
                             cv2.VideoWriter_fourcc(*'MJPG'),
                             30, size)
    while cap.isOpened():
        print(current_frame, len(data['x_center']), len(data['y_center']))
        data['frame_number'].append(current_frame)
        _, frame = cap.read()
        try:
            height, width, channels = frame.shape
        except AttributeError:
            print('NoneType frame reached!')
            break
        blob, outputs = detect_objects(frame, model, output_layers)
        boxes, confs, class_ids = get_box_dimensions(outputs, height, width)
        temp_image = draw_labels(boxes, confs, colors, class_ids, classes, frame)
        key = cv2.waitKey(1)
        if key == 27:
            break
        current_frame += 1  # sr
        result.write(temp_image)
    # The lists in `data` can end up with different lengths (e.g. a frame number is
    # appended before a failed read), so build the DataFrame index-wise and transpose;
    # missing values are padded with NaN.
    # df_results = pd.DataFrame(data=data)
    df_results = pd.DataFrame.from_dict(data=data, orient='index')
    df_results = df_results.transpose()
    print(df_results)
    # df_results.to_csv('csv\_530COD_output_1_A_FT_S.csv', index=False, header=True, encoding='utf-8')
    df_results.to_csv(r'csv\DC\Output_ASD2_FullyTrainedModel_er5.csv', index=False)
    result.release()
    cap.release()
if __name__ == '__main__':
    webcam = args.webcam
    video_play = args.play_video
    image = args.image
    if webcam:
        if args.verbose:
            print('---- Starting Web Cam object detection ----')
        webcam_detect()
    if video_play:
        video_path = args.video_path
        if args.verbose:
            print('Opening ' + video_path + " .... ")
        start_video(video_path)
    if image:
        image_path = args.image_path
        if args.verbose:
            print("Opening " + image_path + " .... ")
        image_detect(image_path)
    cv2.destroyAllWindows()
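# Example invocations (the script filename below is a placeholder; adjust to the actual file name):
#   python object_detection.py --play_video True --video_path "Videos\DataCollection\ASD2.mp4"
#   python object_detection.py --image True --image_path "Images\_000050.jpg"
#   python object_detection.py --webcam True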