Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
2
2021-005
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Analytics
Analytics
CI / CD
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
Hasini Piumika Alwis
2021-005
Commits
a38d604e
Commit
a38d604e
authored
Nov 26, 2021
by
Hasini Piumika Alwis
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
realtime_baby_emotion_recognition.py file
parent
ee99a779
Changes
1
Show whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
49 additions
and
0 deletions
+49
-0
Backend/realtime.py
Backend/realtime.py
+49
-0
No files found.
Backend/realtime.py
0 → 100644
View file @
a38d604e
import
cv2
import
numpy
as
np
from
tensorflow.keras.models
import
model_from_json
# Map the model's output class index (0-6) to a human-readable emotion label.
# Index order must match the order the network was trained with.
faceExpression = dict(
    enumerate(("Angry", "Disgusted", "Fearful", "Happy", "Neutral", "Sad", "Surprised"))
)
# Rebuild the CNN architecture from its JSON description, then load the
# trained weights (Keras stores architecture and weights separately here).
# A context manager guarantees the JSON file handle is closed even if
# read() raises — the original open()/close() pair leaked it on error.
with open('model/emotion_model.json', 'r') as json_file:
    loaded_model_json = json_file.read()
emotion_model = model_from_json(loaded_model_json)
# load weights into the reconstructed architecture
emotion_model.load_weights("model/emotion_model.h5")
print("Loaded model from disk")
# capture from the default PC webcam (device index 0)
cap = cv2.VideoCapture(0)

# Load the Haar cascade ONCE, before the frame loop. The original
# constructed the classifier inside the loop, re-parsing the XML file
# on every frame — pure loop-invariant work, hoisted here.
face_detector = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_default.xml')

while True:
    ret, frame = cap.read()
    # Check the read result BEFORE touching the frame: when the read
    # fails, frame is None and the original's resize-before-check
    # crashed instead of exiting the loop cleanly.
    if not ret:
        break
    frame = cv2.resize(frame, (1280, 720))
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # detect every face visible in the current frame
    num_faces = face_detector.detectMultiScale(gray_frame, scaleFactor=1.3, minNeighbors=5)
    for (x, y, w, h) in num_faces:
        # bounding box, extended above the face to leave room for the label
        cv2.rectangle(frame, (x, y - 50), (x + w, y + h + 10), (0, 255, 0), 4)
        roi_gray_frame = gray_frame[y:y + h, x:x + w]
        # the model expects a single (1, 48, 48, 1) grayscale sample
        cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray_frame, (48, 48)), -1), 0)
        # emotion prediction: pick the class with the highest score
        emotion_prediction = emotion_model.predict(cropped_img)
        maxindex = int(np.argmax(emotion_prediction))
        cv2.putText(frame, faceExpression[maxindex], (x + 5, y - 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
    cv2.imshow('Emotion Detection', frame)
    # quit when the user presses 'q'
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# release the camera and close the preview window on exit
cap.release()
cv2.destroyAllWindows()
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment