Commit f808ffe9 authored by Dhananjaya Jayashanka's avatar Dhananjaya Jayashanka

Updated videoAnalyzing(expressions).py

parent 2751651f
# from skimage import io
import cv2
import imutils
import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras.preprocessing import image
from keras.models import Sequential, load_model
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
import matplotlib.pyplot as plt
from skimage import io
import os
@@ -19,24 +9,7 @@ Savedmodel.summary()
objects = ('Angry', 'Happy', 'Sad', 'Neutral')
vid = cv2.VideoCapture(0)
#
# def run():
# while True:
#
# _, frame = vid.read()
# frame = imutils.resize(frame, width=500)
#
# # result = api(frame)
#
# cv2.imshow("frame",frame)
# # getPrediction(frame)
#
# # cv.waitKey(0)
# if cv2.waitKey(20) & 0XFF == ord('q'):
# break
#
# vid.release()
# cv2.destroyAllWindows()
def emotion_analysis(emotions):
    objects = ['Angry', 'Happy', 'Sad', 'Neutral']
    y_pos = np.arange(len(objects))
@@ -47,35 +20,7 @@ def emotion_analysis(emotions):
    plt.title('emotion')
videoDir = './speechVideo'
# def getPrediction(img):
#
# x = image.img_to_array(img)
# x = np.expand_dims(x, axis=0)
#
# x /= 255
#
# custom = Savedmodel.predict(x)
# # print(custom[0])
# emotion_analysis(custom[0])
#
# x = np.array(x, 'float32')
# x = x.reshape([48, 48]);
#
# plt.gray()
# plt.show()
#
# m = 0.000000000000000000001
# a = custom[0]
# for i in range(0, len(a)):
# if a[i] > m:
# m = a[i]
# ind = i
#
# print('Expression Prediction:', objects[ind])
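# --- Editor's sketch (not part of this commit): a runnable version of the
# --- commented-out prediction helper above. It assumes the loaded Savedmodel
# --- expects a 48x48 grayscale input scaled to [0, 1]; the exact input shape
# --- is not confirmed by the diff, so treat the preprocessing as a guess.
def get_prediction(img):
    face = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)   # drop colour channels
    face = cv2.resize(face, (48, 48))              # match assumed model input size
    x = face.astype('float32') / 255.0             # normalise pixel values
    x = x.reshape(1, 48, 48, 1)                    # add batch and channel axes
    custom = Savedmodel.predict(x)                 # class scores for one frame
    ind = int(np.argmax(custom[0]))                # replaces the manual max-search loop
    print('Expression Prediction:', objects[ind])
    return objects[ind]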
imgdir='./speechVideo'
cap = cv2.VideoCapture('./speechVideo/speech.mp4')
while(cap.isOpened()):
@@ -108,7 +53,6 @@ while(cap.isOpened()):
    if cv2.waitKey(20) & 0XFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
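# --- Editor's sketch (not part of this commit): how the collapsed frame loop
# --- above likely fits together, using the hypothetical get_prediction() helper
# --- sketched earlier. It is defined as a function and never called here, so it
# --- does not change what the script does when run.
def run_video_analysis(path='./speechVideo/speech.mp4'):
    cap = cv2.VideoCapture(path)
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:                                # end of video or read failure
            break
        frame = imutils.resize(frame, width=500)   # keep the display window manageable
        get_prediction(frame)                      # per-frame expression prediction
        cv2.imshow("frame", frame)
        if cv2.waitKey(20) & 0xFF == ord('q'):     # press 'q' to stop early
            break
    cap.release()
    cv2.destroyAllWindows()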