Commit d627e898 authored by IT19110530-Pramodini A.A.D.A's avatar IT19110530-Pramodini A.A.D.A

Merge branch 'feature/gaze-following' into 'master'

Feature/gaze following

See merge request !1
parents 4540c4e6 6d4649d5
<mxfile host="app.diagrams.net" modified="2022-05-24T05:42:08.306Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.67 Safari/537.36" etag="mJFzNh-2MzGtQxGA6ogV" version="17.5.0" type="device"><diagram id="q3JA0qPTVl4SIV4RplZb" name="Page-1">7Vpbb6M4FP41eczKYG55bNN0ZqWO1J2u1PbRYAe84+CsMbnsr18bDIRAOokakp1oHyLw8ZXzne9crIzgdLH5ItAy+cYxYSMb4M0IPoxs2wIwUA8t2RqJ51ulJBYUG1kjeKH/kGqqkeYUk6w1UHLOJF22hRFPUxLJlgwJwdftYXPO2rsuUUw6gpcIsa70lWKZlNLABY38K6FxUu1sAdOzQNVgI8gShPl6RwRnIzgVnMvybbGZEqa1V+mlnPd4oLc+mCCpPGaC65JZNHnL2WsYfg9W769/vPljp1xlhVhuPtgcVm4rDRCsFGKaXMiExzxFbNZI7wXPU0z0NkC1mjFPnC+V0FLCv4iUW4MuyiVXokQumOmd81SaTstR7fIMeuOD32pEGc9FRD74wMpmkIiJ/GAcrBFRtkz4gkixVfMEYUjSVfscyNhUXI9r1K5ejOZPQMG/htKVKsX2zcwvGu+68ZtbNR82u50PW9P6D4DlXhOs4H+wTgLLuyZYVo9/85g0mmnB6P2d86pjnBU6u1MDLGe5aTrVW6yfs0xS5eGJGpAQhLUKeaZbeUbTWD2h/po0Zip2mQ3V+cs9yxU6VtTYiAZ8nVBJXpaogGCtguuex6SMTTnjopgLQ0Ag8ZQ8k4L/IDs9gAQgCOr9VkRIsvkY+y5W1YQqlJlgbjumvW4iozUxsmQnKnpgIHjtg/CGjZ5PxDvow/trCfJzCbJBn/L0ILi1ODwIuAJCtlFFjMapeo8UIEQBeK/hoioZuTMdC4px6VCIOjEKi6U0z5ecprLQrns/ch/0WsqHZIb5HcNIeUr2rMiIzmImbstMxhB07cQGPXZiD2UncCA38I7Wx9L7J4Rus3+P3hEmYRD20Rt6cALxeXCDkz1698BWu4Bd2JyhYHMHgu2Zyii5YeDcawPnDQTcd673vlXc+vzkZXGz+vKlm6oHJ0dmrRboh+4yaeukA8PvKq3nOI90+vGIIor0Gk8oxQskfmSfTCb31L1HBmJhl/h9ZJh4PkTeMMnluM4ar5ZdWrd/OwKOpcNV70eqY+4AMeWpMsc80sHkC0mJKIsxZhhR1mVyoUz9xLiwr+0P48Q8iEgU9VEjDFzHBQNRw4JXp0a38qLpnIuqKlJ1MNEFU12OiSpOqzqF6xMVVpdpoDK6WDI6p6ri+SVKpH4+fgrh+rq7RnjSg7Bz0VSgL4e7LecHj3V+V71vtIYqXp8FWVGeZ/oAirsNaQuCpprMTC9hgzkq8o4sQUtSjC5GREgRQlOv6TpLZn6SBw5RFGDY54Ft6DjumTL1bnJypAP2B6Onf/P0dI+l51XZOdQdxU5Sgw4kNiBEJWGLiCsFSjMdggvRvKoRWFMjXJ6dc4R91J8fOZ4LzhQ999lp9dXRfeycDMbOoGMV34giFy5xommB0Vj9VM3/On56/iwKe3pXBVuAnT69B3YIvTOVbI5/TMnmXzQv7VbOf5JMNiy5e3x6tQEAY0PVX17nvZnimXSums3/E4q+nb95wNm/</diagram></mxfile>
\ No newline at end of file
Object Tracker @ f59d49e9
Subproject commit f59d49e90ff184fc8d2798040dd31f813643757b
# REFERENCE MATERIAL
# https://livecodestream.dev/post/object-tracking-with-opencv/
import cv2
import sys
import pandas as pd

(major_ver, minor_ver, subminor_ver) = cv2.__version__.split('.')

if __name__ == '__main__':
    # Set up tracker
    tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT']
    tracker_type = tracker_types[7]
    # cv2.Tracker_create only exists on OpenCV 3.2 and earlier, so check the
    # major version as well (checking only the minor version breaks on 4.x)
    if int(major_ver) == 3 and int(minor_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        elif tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        elif tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        elif tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        elif tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        elif tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
        elif tracker_type == 'MOSSE':
            tracker = cv2.TrackerMOSSE_create()
        elif tracker_type == 'CSRT':
            tracker = cv2.TrackerCSRT_create()

    # Read video (raw string so the backslashes in the Windows path are not treated as escapes)
    video = cv2.VideoCapture(r'EYEDIAP\EYEDIAP\_6_A_FT_M\_6_A_FT_M.mov')

    # Exit if video not opened.
    if not video.isOpened():
        print("Could not open video")
        sys.exit()

    # Read first frame.
    ok, frame = video.read()
    if not ok:
        print('Cannot read video file')
        sys.exit()

    # Define an initial bounding box (x, y, width, height)
    bbox = (287, 23, 86, 320)
    # Select a bounding box interactively; comment this out to keep the default above
    bbox = cv2.selectROI(frame, False)

    # Initialize tracker with first frame and bounding box
    ok = tracker.init(frame, bbox)

    # Lists to accumulate per-frame values for the CSV output
    df_frame, df_centerX, df_centerY, df_x1, df_y1, df_x2, df_y2 = [], [], [], [], [], [], []
    frameNo = 0

    while True:
        # Frame count
        frameNo += 1

        # Read a new frame
        ok, frame = video.read()
        if not ok:
            break

        # Start timer
        timer = cv2.getTickCount()

        # Update tracker
        ok, bbox = tracker.update(frame)

        # Calculate frames per second (FPS)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        # Draw bounding box
        if ok:
            # Tracking success: top-left (p1) and bottom-right (p2) corners
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            x1, y1 = p1
            x2, y2 = p2
            # Centroid calculation
            avgX = (p1[0] + p2[0]) / 2
            avgY = (p1[1] + p2[1]) / 2
            cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
        else:
            # Tracking failure: fall back to zeroed values so the CSV rows stay
            # aligned and the prints below remain defined
            cv2.putText(frame, "Tracking failure detected", (100, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
            avgX = avgY = 0
            x1 = y1 = x2 = y2 = 0
            p1, p2 = (0, 0), (0, 0)

        df_frame.append(frameNo)
        df_centerX.append(avgX)
        df_centerY.append(avgY)
        df_x1.append(x1)
        df_y1.append(y1)
        df_x2.append(x2)
        df_y2.append(y2)

        print("Frame No : " + str(int(frameNo)))
        print("FPS : " + str(int(fps)), "----", p1, " , ", p2)
        print("X displacement = ", avgX)
        print("Y displacement = ", avgY)
        print("-----------------------------------------------")

        # Display tracker type on frame
        cv2.putText(frame, tracker_type + " Tracker", (100, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
        # Display FPS on frame
        cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        # Display result
        cv2.imshow("Tracking", frame)

        # Press 'q' to quit
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # CSV file generation
    df_dict = {'Frame_number': df_frame, 'Tracker_center(x)': df_centerX, 'Tracker_center(y)': df_centerY,
               'Tracker_x1': df_x1, 'Tracker_y1': df_y1, 'Tracker_x2': df_x2, 'Tracker_y2': df_y2}
    df = pd.DataFrame(df_dict)
    print(df.head())
    df.to_csv("csv_files/Eyediap_csv/Tracker_output_6_A_FT_M.csv", index=False)
    video.release()
    cv2.destroyAllWindows()
\ No newline at end of file
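Note on newer OpenCV builds: from opencv-contrib-python 4.5.1 onward, several of these constructors (BOOSTING, TLD, MEDIANFLOW, MOSSE) were moved into the cv2.legacy namespace, so the elif chain above raises AttributeError there. Below is a minimal, version-tolerant factory sketch; verify the names against the cv2 build actually installed.

import cv2

def create_tracker(name):
    # constructors still exposed at the top level in recent contrib builds
    factories = {
        'MIL': cv2.TrackerMIL_create,
        'KCF': cv2.TrackerKCF_create,
        'CSRT': cv2.TrackerCSRT_create,
    }
    # trackers relocated to cv2.legacy in opencv-contrib-python >= 4.5.1
    if hasattr(cv2, 'legacy'):
        factories['BOOSTING'] = cv2.legacy.TrackerBoosting_create
        factories['TLD'] = cv2.legacy.TrackerTLD_create
        factories['MEDIANFLOW'] = cv2.legacy.TrackerMedianFlow_create
        factories['MOSSE'] = cv2.legacy.TrackerMOSSE_create
    return factories[name]()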
Splitting data in a CSV into different columns (a pandas equivalent is sketched after the steps below)
https://support.microsoft.com/en-us/office/split-text-into-different-columns-with-the-convert-text-to-columns-wizard-30b14928-5550-41f5-97ca-7a3e9c363ed7
1) Select the cell or column that contains the text you want to split.
2) Select Data > Text to Columns.
3) In the Convert Text to Columns Wizard, select Delimited > Next.
4) Select the Delimiters for your data. For example, Comma and Space. You can see a preview of your data in the Data preview window.
5) Select Next.
6) Select the Destination in your worksheet, which is where you want the split data to appear.
7) Select Finish.
\ No newline at end of file
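For batch processing, roughly the same split can be done in pandas. A minimal sketch, assuming a hypothetical column named coordinates whose cells each hold exactly two comma-separated fields; the file and column names here are illustrative only.

import pandas as pd

df = pd.read_csv('tracker_output.csv')
# one new column per comma-separated field, mirroring the wizard's Delimited mode
parts = df['coordinates'].str.split(',', expand=True)
df[['x', 'y']] = parts
df.to_csv('tracker_output_split.csv', index=False)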
# Link to tutorial: https://datatofish.com/convert-text-file-to-csv-using-python-tool-included/
import pandas as pd

read_file = pd.read_csv(r'EYEDIAP\EYEDIAP\2_A_FT_M\ball_tracking.txt')
read_file.to_csv(r'csv_files\Plotting Graphs\2_A_FT_M\ball_tracking.csv', index=False)
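pd.read_csv defaults to splitting on commas. If ball_tracking.txt turns out to be space- or tab-separated instead (an assumption, so check the file first), pass an explicit separator:

import pandas as pd

read_file = pd.read_csv(r'EYEDIAP\EYEDIAP\2_A_FT_M\ball_tracking.txt', sep=r'\s+')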
Frame_number,x_displacement,y_displacement
1,207.0,335.5
2,212.5,335.5
3,217.5,336.0
4,223.0,335.5
5,228.5,337.0
6,234.5,338.0
7,240.0,338.5
8,245.0,339.5
9,250.5,340.0
10,256.5,341.0
11,263.5,342.0
12,269.5,342.0
13,275.0,342.5
14,280.5,343.0
15,288.0,345.5
16,293.5,346.0
17,298.5,347.0
18,306.0,347.5
19,311.0,349.5
20,318.5,350.0
21,326.0,351.5
22,332.5,352.0
23,339.0,352.5
24,345.5,353.0
25,352.5,353.0
26,359.5,353.0
27,367.0,352.5
28,374.0,353.5
29,381.0,352.5
30,388.5,354.0
31,396.0,355.5
32,404.0,354.5
33,411.0,355.5
34,418.5,357.0
35,425.5,357.0
36,433.0,357.5
37,440.5,359.0
38,448.0,358.5
39,455.5,361.0
40,464.0,361.5
41,471.5,362.0
42,478.5,362.0
43,487.5,362.0
44,494.5,361.0
45,503.5,359.0
46,511.5,356.0
47,519.0,353.5
48,528.5,353.0
49,537.5,352.0
50,546.5,352.0
51,555.5,353.0
52,566.5,352.0
53,575.0,353.5
54,585.0,353.5
55,594.5,351.0
56,604.5,349.0
57,612.5,344.0
58,618.5,336.0
59,626.5,331.5
60,634.5,324.5
61,638.5,315.5
62,639.5,308.5
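For a quick visual check of the trajectory recorded above, the CSV can be plotted directly. A minimal sketch, assuming the data is saved as ball_displacement.csv (a name chosen here for illustration):

import pandas as pd
import matplotlib.pyplot as plt

df = pd.read_csv('ball_displacement.csv')
plt.plot(df['x_displacement'], df['y_displacement'], marker='.')
plt.gca().invert_yaxis()  # image coordinates: y grows downward
plt.xlabel('x_displacement (px)')
plt.ylabel('y_displacement (px)')
plt.title('Ball centre trajectory')
plt.show()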
## REFERENCE
# LINK: https://pyimagesearch.com/2016/11/07/intersection-over-union-iou-for-object-detection/
# Original Code
# import the necessary packages
from collections import namedtuple
import cv2

# define the `Detection` object: an image path plus ground-truth and predicted
# boxes, each as (x1, y1, x2, y2) corner coordinates
Detection = namedtuple("Detection", ["image_path", "gt", "pred"])

def bb_intersection_over_union(boxA, boxB):
    # determine the (x, y)-coordinates of the intersection rectangle
    xA = max(boxA[0], boxB[0])
    yA = max(boxA[1], boxB[1])
    xB = min(boxA[2], boxB[2])
    yB = min(boxA[3], boxB[3])
    # compute the area of the intersection rectangle; interArea is the numerator
    # of the Intersection over Union calculation (clamped at 0 when the boxes
    # do not overlap)
    interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
    # compute the area of both the prediction and ground-truth rectangles
    # (the denominator needs the area of the predicted bounding box and of the
    # ground-truth bounding box)
    boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
    boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
    # compute the intersection over union by taking the intersection area and
    # dividing it by the sum of prediction + ground-truth areas minus the
    # intersection area
    iou = interArea / float(boxAArea + boxBArea - interArea)
    # return the intersection over union value
    return iou

# define the list of example detections
examples = [
    Detection("IoU_Sample_Images/car1_1.png", [64, 83, 223, 125], [77, 83, 203, 124]),
    Detection("IoU_Sample_Images/car2.png", [58, 96, 247, 155], [53, 98, 227, 156])]
    # Detection("image_0075.jpg", [31, 69, 201, 125], [18, 63, 235, 135]),
    # Detection("image_0090.jpg", [50, 72, 197, 121], [54, 72, 198, 120]),
    # Detection("image_0120.jpg", [35, 51, 196, 110], [36, 60, 180, 108])]

# loop over the example detections
for detection in examples:
    # load the image
    image = cv2.imread(detection.image_path)
    # draw the ground-truth bounding box along with the predicted bounding box
    cv2.rectangle(image, tuple(detection.gt[:2]),
                  tuple(detection.gt[2:]), (0, 255, 0), 2)
    cv2.rectangle(image, tuple(detection.pred[:2]),
                  tuple(detection.pred[2:]), (0, 0, 255), 2)
    # compute the intersection over union and display it
    iou = bb_intersection_over_union(detection.gt, detection.pred)
    cv2.putText(image, "IoU: {:.4f}".format(iou), (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
    print("{}: {:.4f}".format(detection.image_path, iou))
    # show the output image
    cv2.imshow("Image", image)
    cv2.waitKey(0)
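A quick hand-computed check of the formula: two 5x5 boxes (under the inclusive +1 convention used above) overlapping in a 3x3 region give interArea = 9 and union = 25 + 25 - 9 = 41, so the IoU is 9/41 ≈ 0.2195. The toy boxes below are illustrative, not project data.

# sanity check of bb_intersection_over_union with hand-computed numbers
boxA = [2, 2, 6, 6]   # area = 5 * 5 = 25
boxB = [4, 4, 8, 8]   # area = 5 * 5 = 25
# intersection = 3 * 3 = 9, union = 25 + 25 - 9 = 41
print(bb_intersection_over_union(boxA, boxB))  # -> 0.21951...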
# REFERENCE BOXPLOT: https://www.geeksforgeeks.org/box-plot-in-python-using-matplotlib/
# REFERENCE EXCEL: https://stackabuse.com/reading-and-writing-excel-files-in-python-with-the-pandas-library/
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

# importing csv file
df = pd.read_csv(r'csv\csv_eyediap_n_detector\1_A_FT_M_DnE.csv')
print(df)

# per-frame error between the EYEDIAP ground-truth centre and the detector centre
detector_x_error = df['eyediap_center_x'] - df['center_x']
detector_y_error = df['eyediap_center_y'] - df['center_y']
print('detector_x_error', detector_x_error, 'detector_y_error', detector_y_error)

# plotting boxplot graph
# Creating dataset
# np.random.seed(10)
# data = np.random.normal(y.x1, 20, 200)
# fig = plt.figure(figsize=(10, 7))
# # Creating plot
# plt.boxplot(data)
# # show plot
# plt.show()
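The commented-out demo above box-plots random data; what the analysis presumably wants is a box plot of the two error series just computed. A minimal sketch of that, assuming the columns read above exist in the CSV:

fig, ax = plt.subplots(figsize=(10, 7))
ax.boxplot([detector_x_error.dropna(), detector_y_error.dropna()])
ax.set_xticklabels(['x error', 'y error'])
ax.set_ylabel('error (pixels)')
plt.show()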
#############################################
# Object detection - YOLO - OpenCV
# Author : Arun Ponnusamy (July 16, 2018)
# Website : http://www.arunponnusamy.com
############################################
import argparse
import cv2
import numpy as np
import pandas as pd

# Ref for mov to mp4: http://www.legendu.net/en/blog/python-opencv-python/
VIDEO_STREAM = r'Input_Videos\eyediap\A_1_A_FT_M.mov'
VIDEO_STREAM_OUT = r'Processed_Videos\Datacollection\output_1_A_FT_M2.mov'

fourcc = cv2.VideoWriter_fourcc(*"XVID")
cap = cv2.VideoCapture(VIDEO_STREAM)
# read one frame up front to size the output writer (this frame is consumed)
ret, frame = cap.read()
# frame = cv2.resize(frame, (640,480), interpolation=cv2.INTER_AREA)
writer = cv2.VideoWriter(VIDEO_STREAM_OUT, fourcc, 30, (frame.shape[1], frame.shape[0]), True)

ap = argparse.ArgumentParser()
# note: --image is kept from the original single-image script but is unused here;
# frames are read from VIDEO_STREAM instead
ap.add_argument('-i', '--image', required=True,
                help='path to input image')
ap.add_argument('-c', '--config', required=True,
                help='path to yolo config file')
ap.add_argument('-w', '--weights', required=True,
                help='path to yolo pre-trained weights')
ap.add_argument('-cl', '--classes', required=True,
                help='path to text file containing class names')
args = ap.parse_args()

def get_output_layers(net):
    # names of the unconnected output layers (the YOLO detection layers)
    layer_names = net.getLayerNames()
    # output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
    output_layers = [layer_names[i - 1] for i in net.getUnconnectedOutLayers()]
    return output_layers

def draw_prediction(img, class_id, confidence, x, y, x_plus_w, y_plus_h):
    print('x - ', x, 'y - ', y, 'x+w = ', x_plus_w, 'y+h = ', y_plus_h)
    # label = str(classes[class_id])
    label = 'ball'
    # color = COLORS[class_id]
    color = (255, 0, 0)
    img = cv2.rectangle(img, (x, y), (x_plus_w, y_plus_h), color, 2)
    img = cv2.putText(img, label, (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
    return img

# image = cv2.imread(args.image)
Width = frame.shape[1]
Height = frame.shape[0]
scale = 0.00392  # 1/255: rescales pixel values to [0, 1]
print(frame.shape)

# class names only need to be loaded once, not once per frame
with open(args.classes, 'r') as f:
    classes = [line.strip() for line in f.readlines()]

frame_count = 0
df_center_x, df_center_y, df_width, df_height, df_frame_num, df_x1, df_y1, df_x2, df_y2 = [], [], [], [], [], [], [], [], []

net = cv2.dnn.readNet(args.weights, args.config)
# COLORS = np.random.uniform(0, 255, size=(len(classes), 3))

while True:
    ret, image = cap.read()
    if not ret:
        break

    image = cv2.resize(image, (640, 360), interpolation=cv2.INTER_AREA)
    frame_count += 1
    print(frame_count)
    center_x, center_y, x, y, w, h = 0, 0, 0, 0, 0, 0

    blob = cv2.dnn.blobFromImage(image, scale, (416, 416), (0, 0, 0), True, crop=False)
    net.setInput(blob)
    outs = net.forward(get_output_layers(net))

    class_ids = []
    confidences = []
    boxes = []
    conf_threshold = 0.5
    nms_threshold = 0.4

    for out in outs:
        for detection in out:
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            # keep only round-object classes: sports ball (32), frisbee (29),
            # apple (47) and cup (41) in the COCO class list
            if confidence > 0.5 and class_id in (32, 29, 47, 41):
                center_x = int(detection[0] * Width)
                center_y = int(detection[1] * Height)
                print(class_id, center_x, center_y)
                w = int(detection[2] * Width)
                h = int(detection[3] * Height)
                x = center_x - w / 2
                y = center_y - h / 2
                class_ids.append(class_id)
                confidences.append(float(confidence))
                boxes.append([x, y, w, h])
                df_frame_num.append(frame_count)
                df_width.append(w)
                df_height.append(h)
                df_center_x.append(center_x)
                df_center_y.append(center_y)
                x1 = int(x)
                y1 = int(y)
                x2 = int(x + w)
                y2 = int(y + h)
                df_x1.append(x1)
                df_y1.append(y1)
                df_x2.append(x2)
                df_y2.append(y2)
                print(x1, ',', y1, '-----', x2, ',', y2)
                print("Center x:", center_x, " center y:", center_y)

    # non-maximum suppression removes overlapping duplicate detections
    indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)
    for i in indices:
        # i = i[0]  # needed on older OpenCV versions that return nested indices
        box = boxes[i]
        x, y, w, h = box
        img = draw_prediction(image, class_ids[i], confidences[i],
                              round(x), round(y), round(x + w), round(y + h))
        # cv2.imshow("object detection", img)
        cv2.waitKey(10)

    writer.write(image)

# CSV file generation
df_dict = {'Frame_number': df_frame_num, 'Detector_center(x)': df_center_x, 'Detector_center(y)': df_center_y,
           'detector_bb_height': df_height, 'detector_bb_width': df_width,
           'Detector_x1': df_x1, 'Detector_y1': df_y1, 'Detector_x2': df_x2, 'Detector_y2': df_y2}
df = pd.DataFrame(df_dict)
print(df.head())
df.to_csv("csv/csv_eyediap/detector_output_1_A_FT_MTest.csv", index=False)
cap.release()
writer.release()
cv2.destroyAllWindows()
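Since the script declares --image, --config, --weights and --classes as required, it must be launched with all four flags (the image path is parsed but unused). A hedged example invocation, where the script name and the model files (yolov3.cfg, yolov3.weights, coco.names) are assumptions standing in for the project's actual files:

python yolo_ball_detector.py --image unused.jpg --config yolov3.cfg --weights yolov3.weights --classes coco.names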