Commit 2751651f authored by Dhananjaya Jayashanka

Updated textAnalyze(NLTK).py.py

parent 8e1b96c1
@@ -9,65 +9,107 @@ from keras.models import Sequential, load_model
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
import matplotlib.pyplot as plt
from skimage import io
import os
import cv2
import numpy as np
Savedmodel = tf.keras.models.load_model('./emotion_lts.h5')
Savedmodel.summary()
objects = ('Angry', 'Happy', 'Sad', 'Neutral')
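# A quick sanity-check sketch (an assumption for illustration, not part of the
# original script): the preprocessing below implies the model takes 48x48
# single-channel input and emits one score per label in `objects`. If so, the
# shapes can be confirmed with:
#
#     print(Savedmodel.input_shape)    # e.g. (None, 48, 48, 1)
#     print(Savedmodel.output_shape)   # e.g. (None, len(objects))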
vid = cv2.VideoCapture(0)
#
# def run():
#     while True:
#
#         _, frame = vid.read()
#         frame = imutils.resize(frame, width=500)
#
#         # result = api(frame)
#
#         cv2.imshow("frame", frame)
#         # getPrediction(frame)
#
#         # cv.waitKey(0)
#         if cv2.waitKey(20) & 0XFF == ord('q'):
#             break
#
#     vid.release()
#     cv2.destroyAllWindows()
def emotion_analysis(emotions):
    # plot the predicted class scores for one frame as a bar chart
    objects = ['Angry', 'Happy', 'Sad', 'Neutral']
    y_pos = np.arange(len(objects))
    plt.bar(y_pos, emotions, align='center', alpha=0.9)
    plt.tick_params(axis='x', which='both', pad=10, width=4, length=10)
    plt.xticks(y_pos, objects)
    plt.ylabel('percentage')
    plt.title('emotion')
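# Note: emotion_analysis() only builds the bar chart; nothing appears on screen
# until matplotlib renders it. A minimal sketch of how the chart could be shown
# without blocking the frame loop (hypothetical usage, not in the original code):
#
#     emotion_analysis(custom[0])
#     plt.pause(0.001)   # draw the current chart and yield control back
#     plt.clf()          # clear the figure before the next frame's bars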
# def getPrediction(img):
#
#     x = image.img_to_array(img)
#     x = np.expand_dims(x, axis=0)
#
#     x /= 255
#
#     custom = Savedmodel.predict(x)
#     # print(custom[0])
#     emotion_analysis(custom[0])
#
#     x = np.array(x, 'float32')
#     x = x.reshape([48, 48]);
#
#     plt.gray()
#     plt.show()
#
#     m = 0.000000000000000000001
#     a = custom[0]
#     for i in range(0, len(a)):
#         if a[i] > m:
#             m = a[i]
#             ind = i
#
#     print('Expression Prediction:', objects[ind])
#     # cv.waitKey(0)
imgdir = './speechVideo'
cap = cv2.VideoCapture('./speechVideo/speech.mp4')
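# cv2.VideoCapture does not raise if the path is wrong; it just returns an
# unopened handle and the loop below is skipped silently. A minimal guard
# sketch (an addition for illustration, not in the original script):
#
#     if not cap.isOpened():
#         raise IOError('cannot open ./speechVideo/speech.mp4')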
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        # stop when the video has no more frames
        break
    # img = image.load_img(frame, grayscale=True, target_size=(48, 48))
    frame = cv2.resize(frame, (48, 48))
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    x = image.img_to_array(frame)
    x = np.expand_dims(x, axis=0)
    x /= 255

    custom = Savedmodel.predict(x)
    # print(custom[0])
    emotion_analysis(custom[0])

    x = np.array(x, 'float32')
    x = x.reshape([48, 48])

    # find the index of the highest-scoring emotion
    m = 0.000000000000000000001
    a = custom[0]
    for i in range(0, len(a)):
        if a[i] > m:
            m = a[i]
            ind = i
    # plt.gray()
    # plt.show()
    print('Expression Prediction:', objects[ind])

    if cv2.waitKey(20) & 0XFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
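# The manual maximum search inside the loop is, for practical purposes, the
# same as numpy's argmax. A minimal alternative sketch (assuming custom[0] is
# the 1-D score vector produced above):
#
#     ind = int(np.argmax(custom[0]))
#     print('Expression Prediction:', objects[ind])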
\ No newline at end of file