Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
2
2021-060
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Analytics
Analytics
CI / CD
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
Chalika Mihiran
2021-060
Commits
f808ffe9
Commit
f808ffe9
authored
Jul 18, 2021
by
Dhananjaya Jayashanka
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Updated videoAnalyzing(expressions).py
parent
2751651f
Changes
1
Hide whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
2 additions
and
58 deletions
+2
-58
videoAnalyzing(expressions).py
videoAnalyzing(expressions).py
+2
-58
No files found.
videoAnalyzing(expressions).py
View file @
f808ffe9
# from skimage import io
import
cv2
import
imutils
import
numpy
as
np
import
tensorflow
as
tf
import
tensorflow
as
tf
from
tensorflow
import
keras
from
keras.preprocessing
import
image
from
keras.preprocessing
import
image
from
keras.models
import
Sequential
,
load_model
from
keras.preprocessing.image
import
load_img
from
keras.preprocessing.image
import
img_to_array
import
matplotlib.pyplot
as
plt
import
matplotlib.pyplot
as
plt
from
skimage
import
io
import
os
import
cv2
import
cv2
import
numpy
as
np
import
numpy
as
np
...
@@ -19,24 +9,7 @@ Savedmodel.summary()
...
@@ -19,24 +9,7 @@ Savedmodel.summary()
# Emotion class labels; the index order must line up with the units of the
# model's output vector (argmax over predictions indexes into this tuple).
objects = ('Angry', 'Happy', 'Sad', 'Neutral')
# Open the default webcam (device index 0).
# NOTE(review): this handle appears unused in the visible excerpt — the
# file-based analysis below opens a separate `cap`; confirm before removing.
vid = cv2.VideoCapture(0)
#
# def run():
# while True:
#
# _, frame = vid.read()
# frame = imutils.resize(frame, width=500)
#
# # result = api(frame)
#
# cv2.imshow("frame",frame)
# # getPrediction(frame)
#
# # cv.waitKey(0)
# if cv2.waitKey(20) & 0XFF == ord('q'):
# break
#
# vid.release()
# cv2.destroyAllWindows()
def
emotion_analysis
(
emotions
):
def
emotion_analysis
(
emotions
):
objects
=
[
'Angry'
,
'Happy'
,
'Sad'
,
'Neutral'
]
objects
=
[
'Angry'
,
'Happy'
,
'Sad'
,
'Neutral'
]
y_pos
=
np
.
arange
(
len
(
objects
))
y_pos
=
np
.
arange
(
len
(
objects
))
...
@@ -47,35 +20,7 @@ def emotion_analysis(emotions):
...
@@ -47,35 +20,7 @@ def emotion_analysis(emotions):
plt
.
title
(
'emotion'
)
plt
.
title
(
'emotion'
)
# Directory that holds the speech video consumed by the file-based capture
# further down in this script.
videoDir = './speechVideo'

# NOTE(review): a per-frame `getPrediction` helper (normalize frame to [0,1],
# run Savedmodel.predict, take the max-probability index into `objects`) was
# commented out in this revision of the file.
imgdir = './speechVideo'
cap
=
cv2
.
VideoCapture
(
'./speechVideo/speech.mp4'
)
cap
=
cv2
.
VideoCapture
(
'./speechVideo/speech.mp4'
)
while
(
cap
.
isOpened
()):
while
(
cap
.
isOpened
()):
...
@@ -108,7 +53,6 @@ while(cap.isOpened()):
...
@@ -108,7 +53,6 @@ while(cap.isOpened()):
if
cv2
.
waitKey
(
20
)
&
0XFF
==
ord
(
'q'
):
if
cv2
.
waitKey
(
20
)
&
0XFF
==
ord
(
'q'
):
break
break
# Tear down: release the file-based video capture and close every window
# OpenCV opened during playback/analysis.
cap.release()
cv2.destroyAllWindows()
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment