Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
2
2021-060
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Analytics
Analytics
CI / CD
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
Chalika Mihiran
2021-060
Commits
2751651f
Commit
2751651f
authored
Jul 18, 2021
by
Dhananjaya Jayashanka
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Updated textAnalyze(NLTK).py.py
parent
8e1b96c1
Changes
1
Hide whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
83 additions
and
41 deletions
+83
-41
videoAnalyzing(expressions).py
videoAnalyzing(expressions).py
+83
-41
No files found.
videoAnalyzing(expressions).py
View file @
2751651f
...
...
@@ -9,65 +9,107 @@ from keras.models import Sequential, load_model
from
keras.preprocessing.image
import
load_img
from
keras.preprocessing.image
import
img_to_array
import
matplotlib.pyplot
as
plt
from
skimage
import
io
import
os
import
cv2
import
numpy
as
np
# --- Model and capture setup (module-level script state) ---
# NOTE(review): the original loaded two models back-to-back ('./new model8.h5'
# then './emotion_lts.h5'); the first handle was immediately overwritten, so
# only the final, effective load is kept here.
Savedmodel = tf.keras.models.load_model('./emotion_lts.h5')
Savedmodel.summary()

# Class labels, indexed in the same order as the model's output vector.
# NOTE(review): a 7-class tuple ('Angry', 'Disgust', 'Fear', 'Happy', 'Sad',
# 'Surprise', 'Neutral') was assigned and then overwritten by this 4-class
# tuple — confirm the 4-class labels match the loaded model's output layer.
objects = ('Angry', 'Happy', 'Sad', 'Neutral')

# Webcam capture handle, read by run() below.
vid = cv2.VideoCapture(0)
def emotion_analysis(emotions):
    """Draw a bar chart of per-class emotion scores.

    emotions: sequence of scores, one per label — assumed to line up with
    the 4-class label order below (TODO confirm against the model output).
    Builds the plot on the current matplotlib figure; does not call show().
    """
    labels = ['Angry', 'Happy', 'Sad', 'Neutral']
    positions = np.arange(len(labels))
    plt.bar(positions, emotions, align='center', alpha=0.9)
    plt.tick_params(axis='x', which='both', pad=10, width=4, length=10)
    plt.xticks(positions, labels)
    plt.ylabel('percentage')
    plt.title('emotion')
def run():
    """Show live webcam frames in a window until 'q' is pressed.

    Reads from the module-level `vid` capture handle.
    NOTE(review): `imutils` is used here but no import for it is visible in
    this chunk — confirm it is imported at the top of the file.
    """
    while True:
        # The original discarded the success flag; check it so a failed
        # camera read stops the loop instead of crashing on a None frame.
        ret, frame = vid.read()
        if not ret:
            break
        frame = imutils.resize(frame, width=500)
        cv2.imshow("frame", frame)
        # Poll for ~20 ms; quit on 'q'.
        if cv2.waitKey(20) & 0XFF == ord('q'):
            break
# Directory holding the speech-video assets.
imgdir = './speechVideo'

# Open the pre-recorded speech video for frame-by-frame analysis.
cap = cv2.VideoCapture('./speechVideo/speech.mp4')

# Release the webcam handle and close its windows now that the
# file-based capture is in use.
vid.release()
cv2.destroyAllWindows()
# --- Frame-by-frame emotion prediction over the speech video ---
# NOTE(review): the original interleaved a dead nested `getPrediction`
# definition (whose `x /= 255` before assignment would raise
# UnboundLocalError) and a duplicated predict/analyse sequence — both
# removed here; only the effective per-frame pipeline is kept.
while cap.isOpened():
    ret, frame = cap.read()
    # Stop at end of video (or decode failure) instead of crashing when
    # cv2.resize receives a None frame.
    if not ret:
        break

    # The model expects 48x48 single-channel input.
    frame = cv2.resize(frame, (48, 48))
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # `img_to_array` is imported directly from keras.preprocessing.image at
    # the top of the file; the original called it via an undefined `image`
    # module name.
    x = img_to_array(frame)
    x = np.expand_dims(x, axis=0)  # add batch dimension
    x /= 255  # scale pixel values to [0, 1]

    custom = Savedmodel.predict(x)
    emotion_analysis(custom[0])

    # np.argmax replaces the original manual max-scan, which could leave
    # `ind` unbound if every score were below its tiny initial threshold.
    ind = int(np.argmax(custom[0]))
    print('Expression Prediction:', objects[ind])

    if cv2.waitKey(20) & 0XFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
def emotion_analysis(emotions):
    """Draw a bar chart of per-class emotion scores (7-class label set).

    NOTE(review): this redefines the earlier emotion_analysis with a
    different label list; being defined later, this version is the one in
    effect when run() executes. It looks like diff residue — confirm which
    label set matches the loaded model and delete the other definition.
    """
    labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
    positions = np.arange(len(labels))
    plt.bar(positions, emotions, align='center', alpha=0.9)
    plt.tick_params(axis='x', which='both', pad=10, width=4, length=10)
    plt.xticks(positions, labels)
    plt.ylabel('percentage')
    plt.title('emotion')
# Launch the live webcam preview.
# NOTE(review): `vid` was already released earlier in the file, so run()
# will read from a closed capture — confirm the intended execution order.
run()
\ No newline at end of file
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment