Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
S
Smart E- Learn Tracer
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
1
Merge Requests
1
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Analytics
Analytics
CI / CD
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
23_22 - J 01
Smart E- Learn Tracer
Commits
d31413ec
Commit
d31413ec
authored
Jan 31, 2023
by
Shenthuri Vimaleshwaran
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Revert "Merge branch 'revert-
37cb5aa6
' into 'master'"
This reverts merge request
!3
parent
cfc9b11f
Changes
4
Hide whitespace changes
Inline
Side-by-side
Showing
4 changed files
with
177 additions
and
0 deletions
+177
-0
attendance /.gitkeep
attendance /.gitkeep
+0
-0
attendance /Att.csv
attendance /Att.csv
+3
-0
attendance /attendence.py
attendance /attendence.py
+81
-0
attendance /notupdate.py
attendance /notupdate.py
+93
-0
No files found.
attendance /.gitkeep
0 → 100644
View file @
d31413ec
attendance /Att.csv
0 → 100644
View file @
d31413ec
TANU,08:26:49
\ No newline at end of file
attendance /attendence.py
0 → 100644
View file @
d31413ec
import
cv2
import
numpy
as
np
import
face_recognition
import
os
from
datetime
import
datetime
# from PIL import ImageGrab
# Folder holding one reference image per student; each file name (minus its
# extension) becomes the class name used for attendance.
path = 'C:/Users/jebar/Desktop/attendanemark/attendanemark/studensimages'
myList = os.listdir(path)
print(myList)
# Load every reference image and derive the matching class-name list.
images = [cv2.imread(f'{path}/{cl}') for cl in myList]
classNames = [os.path.splitext(cl)[0] for cl in myList]
print(classNames)
def findEncodings(images):
    """Compute one face encoding per reference image.

    Parameters:
        images: list of BGR images (as returned by cv2.imread).

    Returns:
        A list of face encodings aligned index-for-index with *images*
        (and therefore with the module-level ``classNames`` list).

    Raises:
        ValueError: if no face can be detected in some image. The original
            code indexed ``face_encodings(img)[0]`` directly and crashed
            with an opaque IndexError in that case.
    """
    encodeList = []
    for idx, img in enumerate(images):
        # face_recognition expects RGB while OpenCV loads BGR.
        rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        encodings = face_recognition.face_encodings(rgb)
        if not encodings:
            # Fail loudly with context instead of a bare IndexError, so the
            # offending reference image can be identified and replaced.
            raise ValueError(f'No face found in reference image at index {idx}')
        encodeList.append(encodings[0])
    return encodeList
def markAttendance(name):
    """Append *name* with the current time to Att.csv, at most once.

    Parameters:
        name: the person's name (first CSV column).

    Raises:
        FileNotFoundError: 'r+' mode requires Att.csv to already exist.

    A row is only written when *name* is not already present in the first
    column, so each person is recorded a single time per file.
    """
    with open('Att.csv', 'r+') as f:
        # Names already recorded: first CSV column, whitespace-stripped so a
        # hand-edited file with padding still deduplicates correctly.
        nameList = [line.split(',')[0].strip() for line in f.readlines()]
        if name not in nameList:
            # Time-of-day only — entries never reset across days.
            # NOTE(review): add the date if per-day attendance is intended.
            dtString = datetime.now().strftime('%H:%M:%S')
            # File pointer sits at EOF after readlines(), so this appends.
            # (write, not writelines: the original passed a single string to
            # writelines, which only works by accident.)
            f.write(f'\n{name},{dtString}')
#### FOR CAPTURING SCREEN RATHER THAN WEBCAM
# def captureScreen(bbox=(300,300,690+300,530+300)):
# capScr = np.array(ImageGrab.grab(bbox))
# capScr = cv2.cvtColor(capScr, cv2.COLOR_RGB2BGR)
# return capScr
# Build the reference encodings once up front; the webcam loop below only
# compares incoming frames against this fixed list.
encodeListKnown = findEncodings(images)
print('Encoding Complete- Press Q or q to CLOSE WEBCAM')

cap = cv2.VideoCapture(0)
try:
    while True:
        success, img = cap.read()
        if not success:
            # Camera gone / grab failed: stop instead of crashing inside
            # cv2.resize on a None frame.
            break
        # Detect on a quarter-size RGB copy for speed; face_recognition
        # expects RGB while OpenCV delivers BGR.
        imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)
        imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
        facesCurFrame = face_recognition.face_locations(imgS)
        encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)
        for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):
            matches = face_recognition.compare_faces(encodeListKnown, encodeFace)
            faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)
            matchIndex = np.argmin(faceDis)
            if matches[matchIndex]:
                name = classNames[matchIndex].upper()
                print(name)
                # Locations came from the quarter-size frame; scale back up
                # by 4 to draw on the full-size image.
                y1, x2, y2, x1 = faceLoc
                y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4
                cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
                cv2.rectangle(img, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)
                cv2.putText(img, name, (x1 + 6, y2 - 6),
                            cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
                markAttendance(name)
        cv2.imshow('Webcam', img)
        # Mask to 8 bits for portability. BUG FIX: the original tested
        # b == 31, which is neither 'Q' (81) nor 'q' (113), so the prompt
        # "Press Q or q" was only half true.
        b = cv2.waitKey(1) & 0xFF
        if b in (ord('Q'), ord('q')):
            print("End Face Detection")
            break
finally:
    # Always release the camera and close the window, even on an error
    # inside the loop (the original leaked both).
    cap.release()
    cv2.destroyAllWindows()
attendance /notupdate.py
0 → 100644
View file @
d31413ec
import
cv2
import
os
import
sys
import
numpy
as
np
from
datetime
import
datetime
from
PyQt5
import
QtGui
,
QtCore
from
PyQt5.QtWidgets
import
QDialog
,
QApplication
,
QMainWindow
,
QMessageBox
from
PyQt5.uic
import
loadUi
class USER(QDialog):
    """Dialog (loaded from user_info.ui) that collects a name and numeric key."""

    def __init__(self):
        super(USER, self).__init__()
        loadUi("user_info.ui", self)

    def get_name_key(self):
        """Return (name, key) read from the dialog's input widgets.

        The key field is converted with int(), so non-numeric input
        raises ValueError.
        """
        user_name = self.name_label.text()
        user_key = int(self.key_label.text())
        return user_name, user_key
class
AUFR
(
QMainWindow
):
def
__init__
(
self
):
super
(
AUFR
,
self
)
.
__init__
()
loadUi
(
"mainwindow.ui"
,
self
)
self
.
face_classifier
=
cv2
.
CascadeClassifier
(
"haarcascade_frontalface_default.xml"
)
self
.
eye_classifier
=
cv2
.
CascadeClassifier
(
"haarcascade_eye.xml"
)
self
.
smile_classifier
=
cv2
.
CascadeClassifier
(
"haarcascade_smile.xml"
)
self
.
camera_id
=
0
# can also be a url of Video
self
.
dataset_per_subject
=
50
self
.
ret
=
False
self
.
trained_model
=
0
self
.
image
=
cv2
.
imread
(
""
,
1
)
self
.
modified_image
=
self
.
image
.
copy
()
self
.
draw_text
(
"fyh"
,
40
,
30
,
1
,
(
255
,
255
,
255
))
self
.
display
()
# Actions
self
.
generate_dataset_btn
.
setCheckable
(
True
)
self
.
train_model_btn
.
setCheckable
(
True
)
self
.
recognize_face_btn
.
setCheckable
(
True
)
# Menu
self
.
about_menu
=
self
.
menu_bar
.
addAction
(
"About"
)
self
.
help_menu
=
self
.
menu_bar
.
addAction
(
"Help"
)
self
.
about_menu
.
triggered
.
connect
(
self
.
about_info
)
self
.
help_menu
.
triggered
.
connect
(
self
.
help_info
)
# Algorithms
self
.
algo_radio_group
.
buttonClicked
.
connect
(
self
.
algorithm_radio_changed
)
# Recangle
self
.
face_rect_radio
.
setChecked
(
True
)
self
.
eye_rect_radio
.
setChecked
(
False
)
self
.
smile_rect_radio
.
setChecked
(
False
)
# Events
self
.
generate_dataset_btn
.
clicked
.
connect
(
self
.
generate
)
self
.
train_model_btn
.
clicked
.
connect
(
self
.
train
)
self
.
recognize_face_btn
.
clicked
.
connect
(
self
.
recognize
)
self
.
save_image_btn
.
clicked
.
connect
(
self
.
save_image
)
self
.
video_recording_btn
.
clicked
.
connect
(
self
.
save_video
)
# Recognizers
self
.
update_recognizer
()
self
.
assign_algorithms
()
def
start_timer
(
self
):
# start the timeer for execution.
self
.
capture
=
cv2
.
VideoCapture
(
self
.
camera_id
)
self
.
capture
.
set
(
cv2
.
CAP_PROP_FRAME_HEIGHT
,
480
)
self
.
capture
.
set
(
cv2
.
CAP_PROP_FRAME_WIDTH
,
640
)
self
.
timer
=
QtCore
.
QTimer
()
if
self
.
generate_dataset_btn
.
isChecked
():
self
.
timer
.
timeout
.
connect
(
self
.
save_dataset
)
elif
self
.
recognize_face_btn
.
isChecked
():
self
.
timer
.
timeout
.
connect
(
self
.
update_image
)
self
.
timer
.
start
(
5
)
def
stop_timer
(
self
):
# stop timer or come out of the loop.
self
.
timer
.
stop
()
self
.
ret
=
False
self
.
capture
.
release
()
    def update_image(self):
        """Timer callback: grab a frame, overlay detections, and repaint.

        Only does work while the recognize button is toggled on. The frame
        is mirrored (cv2.flip with flipCode=1 flips horizontally) so the
        preview behaves like a mirror.

        NOTE(review): reconstructed from a token-mangled source — the
        nesting of the trailing self.display() call (inside vs. outside the
        recognize-checked branch) should be confirmed against the original
        file; it is placed inside here, where the freshly drawn frame is.
        """
        # update canvas every time according to time set in the timer.
        if self.recognize_face_btn.isChecked():
            self.ret, self.image = self.capture.read()
            self.image = cv2.flip(self.image, 1)
            # get_faces()/draw_rectangle() are defined elsewhere in this
            # class (not visible in this chunk) — presumably Haar-cascade
            # detection plus overlay drawing; verify in the full file.
            faces = self.get_faces()
            self.draw_rectangle(faces)
            # Append the frame to the video file while recording is on.
            if self.video_recording_btn.isChecked():
                self.recording()
            self.display()
\ No newline at end of file
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment