23_22 - J 01 / Smart E- Learn Tracer · Commits

Commit 278d59e2, authored Jan 25, 2023 by Shenthuri Vimaleshwaran
parent 9e3e58c6
Showing 1 changed file with 224 additions and 0 deletions (+224 −0)
eyemove_ment/eyetrack.py  0 → 100644
import face_recognition  # required: getWebcam() and getEye() below call face_recognition.face_landmarks()
import numpy as np
import cv2 as cv
import copy
from matplotlib import pyplot as plt
import pyautogui
import time
import os
import cv2  # the module is used under both the cv and cv2 names below
# Load the cascades
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
eye_cascade = cv2.CascadeClassifier("haarcascade_eye.xml")

# Open the default webcam
cap = cv2.VideoCapture(0)

# Variables to store the last position of the eyes
eye_pos_x = None
eye_pos_y = None
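# Note: as written, CascadeClassifier expects the XML files in the working
# directory. OpenCV also ships copies of these cascades; a sketch of the
# bundled-path alternative (same file names, path taken from cv2.data):
#   face_cascade = cv2.CascadeClassifier(
#       cv2.data.haarcascades + "haarcascade_frontalface_default.xml")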
while True:
    # Read a frame from the webcam
    ret, frame = cap.read()
    if not ret:
        break  # camera stopped delivering frames
    # Convert to grayscale
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Detect faces
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
    # Loop over the face detections
    for (x, y, w, h) in faces:
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = frame[y:y + h, x:x + w]
        # Detect eyes within the face region
        eyes = eye_cascade.detectMultiScale(roi_gray)
        # Loop over the eye detections
        for (ex, ey, ew, eh) in eyes:
            # Draw a rectangle around the eye
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
            # Skip detections too narrow to be an open eye
            if ew > 20:
                # First detection: record the reference eye position
                if eye_pos_x is None and eye_pos_y is None:
                    eye_pos_x = ex + ew / 2
                    eye_pos_y = ey + eh / 2
                else:
                    # Calculate the displacement of the eye centre
                    displacement_x = (ex + ew / 2) - eye_pos_x
                    displacement_y = (ey + eh / 2) - eye_pos_y
                    # Draw the displacement vector
                    cv2.arrowedLine(roi_color,
                                    (int(eye_pos_x), int(eye_pos_y)),
                                    (int(eye_pos_x + displacement_x),
                                     int(eye_pos_y + displacement_y)),
                                    (255, 0, 0), 2)
                    # Check if the eyes are looking to the left or right
                    if displacement_x < 0:
                        print("Eyes are looking to the left.")
                    elif displacement_x > 0:
                        print("Eyes are looking to the right.")
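# Because eye_pos_x/eye_pos_y are set only once, every displacement above is
# measured from the first eye ever detected, not from the previous frame. The
# loop itself runs until the camera stops delivering frames or the process is
# interrupted, so the calibration code further down only runs after that.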
def maxAndMin(featCoords, mult=1):
    """Return a padded bounding box [x1, y1, x2, y2] around the landmark
    coordinates (scaled by mult) and the landmark centroid relative to it."""
    adj = 10 / mult
    listX = [tup[0] for tup in featCoords]
    listY = [tup[1] for tup in featCoords]
    maxminList = np.array([min(listX) - adj, min(listY) - adj,
                           max(listX) + adj, max(listY) + adj])
    print(maxminList)
    bbox = (maxminList * mult).astype(int)
    center = (np.array([sum(listX) / len(listX) - maxminList[0],
                        sum(listY) / len(listY) - maxminList[1]]) * mult).astype(int)
    return bbox, center
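# Worked example (hypothetical landmarks): maxAndMin([(10, 12), (20, 16)], mult=2)
# gives adj = 5 and maxminList = [5, 7, 25, 21], so it returns the scaled box
# [10, 14, 50, 42] and the centre offset [20, 14] (i.e. (15-5)*2, (14-7)*2).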
def findCircs(img):
    circles = cv.HoughCircles(img, cv.HOUGH_GRADIENT, 2, 20,
                              param1=200, param2=50, minRadius=1, maxRadius=40)
    # alternative: minRadius = 0, maxRadius = 30
    # circles = np.uint16(np.around(circles))
    return circles
def findBlobs(img):
    params = cv.SimpleBlobDetector_Params()
    params.minThreshold = 10
    params.maxThreshold = 200
    # params.filterByColor = True
    # params.blobColor = 0
    params.filterByArea = True
    params.maxArea = 3000
    # params.filterByCircularity = True
    # params.minCircularity = 0.1
    detector = cv.SimpleBlobDetector_create(params)
    keypoints = detector.detect(img)
    # imkeypoints = cv.drawKeypoints(img, keypoints, np.array([]), (0, 0, 255),
    #                                cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    return keypoints
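# findCircs and findBlobs are alternative pupil detectors; neither is called
# elsewhere in this file. A hypothetical use on a thresholded eye image:
#   for kp in findBlobs(thresh):
#       print(kp.pt, kp.size)  # blob centre (x, y) and diameter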
def getWebcam(feed=False):
    """Track the left eye on the webcam feed and return the annotated crop."""
    webcam = cv.VideoCapture(0)
    # Frame coordinates go frame[y][x]
    haventfoundeye = True
    screenw = 1440
    screenh = 900
    while True:
        ret, frame = webcam.read()
        smallframe = cv.resize(copy.deepcopy(frame), (0, 0), fy=.15, fx=.15)
        smallframe = cv.cvtColor(smallframe, cv.COLOR_BGR2GRAY)
        feats = face_recognition.face_landmarks(smallframe)
        if len(feats) > 0:
            # Left-eye bounding box and centre, scaled back to full frame size
            leBds, leCenter = maxAndMin(feats[0]['left_eye'], mult=1 / .15)
            # reBds, _ = maxAndMin(feats[0]['right_eye'])
            # print(leBds)
            left_eye = frame[leBds[1]:leBds[3], leBds[0]:leBds[2]]
            # right_eye = frame[reBds[1]:reBds[3], reBds[0]:reBds[2]]
            left_eye = cv.cvtColor(left_eye, cv.COLOR_BGR2GRAY)
            ret, thresh = cv.threshold(left_eye, 50, 255, 0)
            # Find weighted average for center of the eye
            TMP = 255 - np.copy(thresh)  # .astype(int)
            # TMP = TMP[0:-1, 10:-10]
            # cv.imshow("tmp", TMP)
            # TMP = cv.blur(TMP, (3, 3))
            y = np.sum(TMP, axis=1)
            x = np.sum(TMP, axis=0)
            # x = TMP[int(len(TMP)/2)]
            y = y / len(TMP[0])
            x = x / len(TMP)
            # Keep only rows/columns clearly darker than average
            y = y > np.average(y) + np.std(y)  # *1.2
            x = x > np.average(x) + np.std(x)  # *1.2
            # Centroid (1-based index) of the masked rows/columns; when the
            # mask is empty the centroid is undefined, so fall back to 0
            ysum = int(np.sum(y))
            y = int(np.dot(np.arange(1, len(y) + 1), y) / ysum) if ysum else 0
            xsum = int(np.sum(x))
            x = int(np.dot(np.arange(1, len(x) + 1), x) / xsum) if xsum else 0
            haventfoundeye = False
            left_eye = cv.cvtColor(left_eye, cv.COLOR_GRAY2BGR)
            cv.circle(left_eye, (x, y), 2, (20, 20, 120), 3)
            cv.circle(left_eye, (int(leCenter[0]), int(leCenter[1])), 2, (120, 20, 20), 3)
        if feed and not haventfoundeye:  # skip display until an eye crop exists
            cv.imshow('frame', left_eye)
            if cv.waitKey(1) & 0xFF == ord('q'):
                break
        elif not haventfoundeye:
            plt.imshow(left_eye)
            plt.show()
            return left_eye
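# The pupil estimate above is a masked centroid: np.dot(np.arange(1, n + 1), mask)
# divided by mask.sum() is the mean (1-based) index of the True entries. For a
# mask like [False, True, True, False] that is (2 + 3) / 2 = 2.5 -> int -> 2.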
def getEye(times=1, frameShrink=0.15, coords=(0, 0), counterStart=0, folder="eyes"):
    """Capture `times` crops of the left eye and save them to `folder`,
    named <x>.<y>.<counter>.jpg after the on-screen target coordinates."""
    os.makedirs(folder, exist_ok=True)
    webcam = cv.VideoCapture(0)
    counter = counterStart
    ims = []
    while counter < counterStart + times:
        ret, frame = webcam.read()
        smallframe = cv.resize(copy.deepcopy(frame), (0, 0), fy=frameShrink, fx=frameShrink)
        smallframe = cv.cvtColor(smallframe, cv.COLOR_BGR2GRAY)
        feats = face_recognition.face_landmarks(smallframe)
        if len(feats) > 0:
            leBds, leCenter = maxAndMin(feats[0]['left_eye'], mult=1 / frameShrink)
            left_eye = frame[leBds[1]:leBds[3], leBds[0]:leBds[2]]
            # right_eye = frame[reBds[1]:reBds[3], reBds[0]:reBds[2]]
            left_eye = cv.cvtColor(left_eye, cv.COLOR_BGR2GRAY)
            left_eye = cv.resize(left_eye, dsize=(100, 50))
            # Display the image - DEBUGGING ONLY
            cv.imshow('frame', left_eye)
            if cv.waitKey(1) & 0xFF == ord('q'):
                break
            cv.imwrite(folder + "/" + str(coords[0]) + "." + str(coords[1]) + "."
                       + str(counter) + ".jpg", left_eye)
            counter += 1
    webcam.release()  # free the camera so later VideoCapture(0) calls can reopen it
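# Each saved file name doubles as its label: "404.383.7.jpg" in testeyes/ is
# the 8th capture taken while the cursor (and, presumably, the gaze) was at
# screen point (404, 383), so eye crops can later be mapped back to targets.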
# Calibration sweep: move the cursor to each screen target, wait for the user
# to look at it, then capture eye images labelled with that target.
# # 1440x900
# for i in [0, 720, 1440]:
#     for j in [0, 450, 900]:
for i in [404, 951]:
    for j in [383, 767]:
        print(i, j)
        pyautogui.moveTo(i, j)
        input("Press Enter to continue...")
        pyautogui.moveTo(i, j)
        getEye(times=10, coords=(i, j), counterStart=0, folder="testeyes")
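The capture loop above writes labelled crops, but nothing in this commit reads
them back. A minimal sketch of a loader (not part of the commit; the loadEyes
name and the parsing are assumptions based on getEye's naming scheme):

import os
import cv2 as cv
import numpy as np

def loadEyes(folder="testeyes"):
    # Collect (image, (x, y)) pairs from files named <x>.<y>.<counter>.jpg
    images, labels = [], []
    for name in sorted(os.listdir(folder)):
        if not name.endswith(".jpg"):
            continue
        x, y, _ = name[:-len(".jpg")].split(".", 2)
        images.append(cv.imread(os.path.join(folder, name), cv.IMREAD_GRAYSCALE))
        labels.append((int(x), int(y)))
    return np.array(images), np.array(labels)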