Commit dee7b0ab authored by Dhananjaya Jayashanka

added Python files for detecting emotions

parent 95785624
import string
from collections import Counter


def textAnalyze(speech):
    text = speech
    # Convert to lowercase so lexicon matching is case-insensitive.
    lower_case = text.lower()
    # Remove all punctuation from the full text.
    cleaned_text = lower_case.translate(str.maketrans('', '', string.punctuation))
    # Split the text into words.
    tokenized_words = cleaned_text.split()
    stop_words = ["i", "me", "my", "myself", "we", "our", "ours", "ourselves", "you", "your", "yours",
                  "yourself", "yourselves", "he", "him", "his", "himself", "she", "her", "hers", "herself",
                  "it", "its", "itself", "they", "them", "their", "theirs", "themselves", "what", "which",
                  "who", "whom", "this", "that", "these", "those", "am", "is", "are", "was", "were", "be",
                  "been", "being", "have", "has", "had", "having", "do", "does", "did", "doing", "a", "an",
                  "the", "and", "but", "if", "or", "because", "as", "until", "while", "of", "at", "by",
                  "for", "with", "about", "against", "between", "into", "through", "during", "before",
                  "after", "above", "below", "to", "from", "up", "down", "in", "out", "on", "off", "over",
                  "under", "again", "further", "then", "once", "here", "there", "when", "where", "why",
                  "how", "all", "any", "both", "each", "few", "more", "most", "other", "some", "such",
                  "no", "nor", "not", "only", "own", "same", "so", "than", "too", "very", "s", "t", "can",
                  "will", "just", "don", "should", "now"]
    # Remove stop words from the tokenized word list.
    final_words = [word for word in tokenized_words if word not in stop_words]

    # Look the remaining words up in the word:emotion lexicon.
    emotion_list = []
    with open('Emotion/emotions.txt', 'r') as file:
        for line in file:
            clear_line = line.replace("\n", '').replace(",", '').replace("'", '').strip()
            if ':' not in clear_line:
                continue  # skip blank or malformed lines
            word, emotion = clear_line.split(':')
            if word in final_words:
                # strip() drops the space left after the colon so the
                # Counter keys below stay clean.
                emotion_list.append(emotion.strip())

    print(emotion_list)
    # Tally how often each emotion was triggered.
    w = Counter(emotion_list)
    print(w)
    return {
        "message": w
    }
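

# Minimal usage sketch. Assumption: each line of Emotion/emotions.txt is shaped
# like 'alone': 'lonely', (a quoted word, a colon, a quoted emotion); the sample
# sentence and the printed tally below are hypothetical.
if __name__ == "__main__":
    result = textAnalyze("Today I felt happy, though yesterday I was alone and hurt")
    print(result["message"])  # e.g. Counter({'happy': 1, 'lonely': 1, ...}) -- depends on the lexicon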
# from skimage import io
import cv2
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from keras.preprocessing import image

# Load the trained CNN once at import time and print its architecture.
Savedmodel = tf.keras.models.load_model('emotion_lts.h5')
Savedmodel.summary()

# Class labels, in the same order as the model's output units.
objects = ('Angry', 'Happy', 'Sad', 'Neutral')

# Default webcam; only used by the commented-out run() helper below.
vid = cv2.VideoCapture(0)
# def run():
#     while True:
#         _, frame = vid.read()
#         frame = imutils.resize(frame, width=500)  # would also need `import imutils`
#
#         # result = api(frame)
#
#         cv2.imshow("frame", frame)
#         # getPrediction(frame)
#
#         # cv.waitKey(0)
#         if cv2.waitKey(20) & 0XFF == ord('q'):
#             break
#
#     vid.release()
#     cv2.destroyAllWindows()
def emotion_analysis(emotions):
    # Draw a bar chart of the prediction scores for the four classes.
    objects = ['Angry', 'Happy', 'Sad', 'Neutral']
    y_pos = np.arange(len(objects))
    plt.bar(y_pos, emotions, align='center', alpha=0.9)
    plt.tick_params(axis='x', which='both', pad=10, width=4, length=10)
    plt.xticks(y_pos, objects)
    plt.ylabel('percentage')
    plt.title('emotion')
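

# A standalone sketch of the chart helper (the probability vector here is made
# up; plt.show() is needed to actually render the figure):
#
#   emotion_analysis([0.05, 0.80, 0.10, 0.05])
#   plt.show()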
def getEmotions(filePath):
    # Run the model over every frame of the video and collect the most
    # likely label for each frame.
    cap = cv2.VideoCapture(filePath)
    emotions = []
    while cap.isOpened():
        try:
            ret, frame = cap.read()
            if not ret:
                break  # no more frames
            # img = image.load_img(frame, grayscale=True, target_size=(48, 48))
            # The model expects a 48x48 grayscale image, shaped (1, 48, 48, 1).
            frame = cv2.resize(frame, (48, 48))
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            x = image.img_to_array(frame)
            x = np.expand_dims(x, axis=0)
            x /= 255  # scale pixel values to [0, 1]
            custom = Savedmodel.predict(x)
            # print(custom[0])
            emotion_analysis(custom[0])
            # Take the class with the highest score.
            ind = int(np.argmax(custom[0]))
            print('Expression Prediction:', objects[ind])
            emotions.append(objects[ind])
            if cv2.waitKey(20) & 0XFF == ord('q'):
                break
        except Exception:
            print("Damaged frame")
            break
    cap.release()
    return emotions
if __name__ == "__main__":
    getEmotions("speech.mp4")
    cv2.destroyAllWindows()
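
# A possible follow-up (sketch): aggregate the per-frame labels the same way
# textAnalyze tallies word-level emotions; the printed counts are hypothetical.
#
#   from collections import Counter
#   print(Counter(getEmotions("speech.mp4")))  # e.g. Counter({'Neutral': 120, 'Happy': 43})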