Commit 10bb062c authored by Chalika Mihiran's avatar Chalika Mihiran

Merge branch 'IT18121902' into 'master'

It18121902

See merge request !7
parents 6364ad5e 23ef18a4
@@ -2,7 +2,7 @@
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$" />
<orderEntry type="inheritedJdk" />
<orderEntry type="jdk" jdkName="Python 3.7" jdkType="Python SDK" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
</module>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.9" project-jdk-type="Python SDK" />
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.7" project-jdk-type="Python SDK" />
<component name="PyCharmProfessionalAdvertiser">
<option name="shown" value="true" />
</component>
</project>
\ No newline at end of file
import string
from collections import Counter


def textAnalyze(speech):
    text = speech
    # converting to lowercase
    lower_case = text.lower()
    # removing punctuation from the full text
    cleaned_text = lower_case.translate(str.maketrans('', '', string.punctuation))
    # splitting text into words
    tokenized_words = cleaned_text.split()
    stop_words = ["i", "me", "my", "myself", "we", "our", "ours", "ourselves", "you", "your", "yours", "yourself",
                  "yourselves", "he", "him", "his", "himself", "she", "her", "hers", "herself", "it", "its", "itself",
                  "they", "them", "their", "theirs", "themselves", "what", "which", "who", "whom", "this", "that", "these",
                  "those", "am", "is", "are", "was", "were", "be", "been", "being", "have", "has", "had", "having", "do",
                  "does", "did", "doing", "a", "an", "the", "and", "but", "if", "or", "because", "as", "until", "while",
                  "of", "at", "by", "for", "with", "about", "against", "between", "into", "through", "during", "before",
                  "after", "above", "below", "to", "from", "up", "down", "in", "out", "on", "off", "over", "under", "again",
                  "further", "then", "once", "here", "there", "when", "where", "why", "how", "all", "any", "both", "each",
                  "few", "more", "most", "other", "some", "such", "no", "nor", "not", "only", "own", "same", "so", "than",
                  "too", "very", "s", "t", "can", "will", "just", "don", "should", "now"]
    # removing stop words from the tokenized words list
    final_words = []
    for word in tokenized_words:
        if word not in stop_words:
            final_words.append(word)
    # collecting the emotion label of every lexicon word that appears in the speech
    emotion_list = []
    with open('Emotion/emotions.txt', 'r') as file:
        for line in file:
            clear_line = line.replace("\n", '').replace(",", '').replace("'", '').strip()
            word, emotion = clear_line.split(':')
            if word in final_words:
                emotion_list.append(emotion)
    print(emotion_list)
    # counting how often each emotion occurs in the speech
    w = Counter(emotion_list)
    print(w)
    return {
        "message": w
    }
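A minimal usage sketch for the module above. The sample speech text and the `__main__` guard are illustrative additions, and the lexicon format (one `'word': 'emotion',` pair per line in `Emotion/emotions.txt`) is inferred from the parsing code, not confirmed by the commit:

# sketch only: assumes Emotion/emotions.txt is present in the working directory
if __name__ == '__main__':
    sample_speech = "Today I felt cheated by the delays, but I was happy with the final result."
    result = textAnalyze(sample_speech)
    # result["message"] is a Counter of matched emotion labels; which labels appear
    # depends entirely on the lexicon content
    print(result["message"].most_common())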
# from skimage import io
import cv2
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from keras.preprocessing import image
Savedmodel = tf.keras.models.load_model('emotion_lts.h5')
Savedmodel.summary()
objects = ('Angry', 'Happy', 'Sad', 'Neutral')
vid = cv2.VideoCapture(0)
#
# def run():
# while True:
#
# _, frame = vid.read()
# frame = imutils.resize(frame, width=500)
#
# # result = api(frame)
#
# cv2.imshow("frame",frame)
# # getPrediction(frame)
#
# # cv.waitKey(0)
# if cv2.waitKey(20) & 0XFF == ord('q'):
# break
#
# vid.release()
# cv2.destroyAllWindows()
def emotion_analysis(emotions):
    objects = ['Angry', 'Happy', 'Sad', 'Neutral']
    y_pos = np.arange(len(objects))
    plt.bar(y_pos, emotions, align='center', alpha=0.9)
    plt.tick_params(axis='x', which='both', pad=10, width=4, length=10)
    plt.xticks(y_pos, objects)
    plt.ylabel('percentage')
    plt.title('emotion')
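# Note (sketch, not part of the commit): emotion_analysis() only builds the bar chart;
# the caller has to display or save it explicitly, e.g. after a prediction:
#     emotion_analysis(custom[0])
#     plt.show()            # or plt.savefig('frame_emotions.png') to keep a copy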
def getEmotions(filePath):
    cap = cv2.VideoCapture(filePath)
    emotions = []
    while cap.isOpened():
        try:
            ret, frame = cap.read()
            if not ret:  # no more frames to read
                break
            # img = image.load_img(frame,grayscale=True, target_size=(48, 48))
            frame = cv2.resize(frame, (48, 48))
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            x = image.img_to_array(frame)
            x = np.expand_dims(x, axis=0)
            x /= 255
            custom = Savedmodel.predict(x)
            # print(custom[0])
            emotion_analysis(custom[0])
            x = np.array(x, 'float32')
            x = x.reshape([48, 48])
            # pick the class with the highest predicted probability
            m = 0.000000000000000000001
            a = custom[0]
            ind = 0
            for i in range(0, len(a)):
                if a[i] > m:
                    m = a[i]
                    ind = i
            print('Expression Prediction:', objects[ind])
            emotions.append(objects[ind])
            if cv2.waitKey(20) & 0XFF == ord('q'):
                break
        except Exception:
            print("Damaged frame")
            break
    cap.release()
    return emotions


getEmotions("speech.mp4")
cv2.destroyAllWindows()
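A rough sketch of how the per-frame predictions returned by getEmotions() could be summarised for the feedback step. The Counter-based aggregation mirrors the text-analysis module but is an assumption, not part of this commit:

from collections import Counter

frame_emotions = getEmotions("speech.mp4")   # e.g. ['Happy', 'Happy', 'Neutral', ...]
summary = Counter(frame_emotions)            # frames per predicted emotion
total = sum(summary.values())
if total:
    for label, count in summary.most_common():
        print(f"{label}: {count / total:.0%} of analysed frames")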
<<<<<<< README.md
# Main Objective
The main objective of this research component is to provide customized, fully detailed feedback to the user (speaker) about the use of correct facial expressions and eye contact throughout the speech.
=======
**Main objective**
Build the best modern solution for practicing English public speaking in a proper way.
**Main Research question**
What is the best modern solution for practicing English public speaking in a proper way?
**Individual research questions**
* How to analyze the content of the speech and give proper feedback to the speaker?
* How to check the grammar and the richness of grammar usage and give proper feedback to the speaker?
* How to check the user’s facial expressions against the speech content and give proper feedback to the speaker?
* How to check the user’s eye contact continuity throughout the speech and give proper feedback to the speaker?
* How to identify the user’s pause fillers, filler words, interjections, exclamations, etc.?
**Individual Objectives**
* Analyze the content of the speech and give proper feedback to the speaker.
* Check the grammar and the richness of grammar usage and give proper feedback to the speaker.
* Check the user’s facial expressions against the speech content and give proper feedback to the speaker.
* Check the user’s eye contact continuity throughout the speech and give proper feedback to the speaker.
* Identify the user’s pause fillers, filler words, interjections, exclamations, etc.
>>>>>>> README.md
import string
from collections import Counter

# import matplotlib.pyplot as plt

# reading the text file
text = open("read.txt", encoding="utf-8").read()
# converting to lowercase
lower_case = text.lower()
# removing punctuation from the full text
cleaned_text = lower_case.translate(str.maketrans('', '', string.punctuation))
# splitting text into words
tokenized_words = cleaned_text.split()
stop_words = ["i", "me", "my", "myself", "we", "our", "ours", "ourselves", "you", "your", "yours", "yourself",
              "yourselves", "he", "him", "his", "himself", "she", "her", "hers", "herself", "it", "its", "itself",
              "they", "them", "their", "theirs", "themselves", "what", "which", "who", "whom", "this", "that", "these",
              "those", "am", "is", "are", "was", "were", "be", "been", "being", "have", "has", "had", "having", "do",
              "does", "did", "doing", "a", "an", "the", "and", "but", "if", "or", "because", "as", "until", "while",
              "of", "at", "by", "for", "with", "about", "against", "between", "into", "through", "during", "before",
              "after", "above", "below", "to", "from", "up", "down", "in", "out", "on", "off", "over", "under", "again",
              "further", "then", "once", "here", "there", "when", "where", "why", "how", "all", "any", "both", "each",
              "few", "more", "most", "other", "some", "such", "no", "nor", "not", "only", "own", "same", "so", "than",
              "too", "very", "s", "t", "can", "will", "just", "don", "should", "now"]
# removing stop words from the tokenized words list
final_words = []
for word in tokenized_words:
    if word not in stop_words:
        final_words.append(word)
# collecting the emotion label of every lexicon word that appears in the text
emotion_list = []
with open('emotions.txt', 'r') as file:
    for line in file:
        clear_line = line.replace("\n", '').replace(",", '').replace("'", '').strip()
        word, emotion = clear_line.split(':')
        if word in final_words:
            emotion_list.append(emotion)
print(emotion_list)
w = Counter(emotion_list)
print(w)
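The commented-out matplotlib import above suggests the counts were meant to be plotted; a minimal sketch of that step (an assumption, not part of the committed script), using the Counter `w` built above:

import matplotlib.pyplot as plt

fig, ax1 = plt.subplots()
ax1.bar(list(w.keys()), list(w.values()))  # one bar per detected emotion
fig.autofmt_xdate()                        # tilt the emotion labels so they do not overlap
plt.savefig('graph.png')
plt.show()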
import tensorflow as tf
from keras.preprocessing import image
import matplotlib.pyplot as plt
import cv2
import numpy as np
from collections import Counter

Savedmodel = tf.keras.models.load_model('./emotion_lts.h5')
Savedmodel.summary()

objects = ('Angry', 'Happy', 'Sad', 'Neutral')
vid = cv2.VideoCapture(0)


def emotion_analysis(emotions):
    objects = ['Angry', 'Happy', 'Sad', 'Neutral']
    y_pos = np.arange(len(objects))
    plt.bar(y_pos, emotions, align='center', alpha=0.9)
    plt.tick_params(axis='x', which='both', pad=10, width=4, length=10)
    plt.xticks(y_pos, objects)
    plt.ylabel('percentage')
    plt.title('emotion')


videoDir = './speechVideo'
cap = cv2.VideoCapture('./speechVideo/speech.mp4')
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:  # no more frames to read
        break
    # img = image.load_img(frame,grayscale=True, target_size=(48, 48))
    frame = cv2.resize(frame, (48, 48))
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    x = image.img_to_array(frame)
    x = np.expand_dims(x, axis=0)
    x /= 255
    custom = Savedmodel.predict(x)
    # print(custom[0])
    emotion_analysis(custom[0])
    x = np.array(x, 'float32')
    x = x.reshape([48, 48])
    # pick the class with the highest predicted probability
    m = 0.000000000000000000001
    a = custom[0]
    ind = 0
    for i in range(0, len(a)):
        if a[i] > m:
            m = a[i]
            ind = i
    print('Expression Prediction:', objects[ind])
    if cv2.waitKey(20) & 0XFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()