Commit cc7f90b1 authored by NaweenTharuka's avatar NaweenTharuka

updated: prosodic features

parent d84ae894
# -*- coding: utf-8 -*-
"""emotionDetection.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1o8opa083KfLohUfWs-oGfkdo9kuRN_eS
**Importing dependencies**
* pip install fer
* List item
"""
from fer import Video from fer import Video
from fer import FER from fer import FER
import os import os
...@@ -20,13 +5,9 @@ import sys ...@@ -20,13 +5,9 @@ import sys
import pandas as pd import pandas as pd
import numpy as np import numpy as np
"""Upload the video to the Google colab and get the path of the video and xml file"""
location_videofile = "F:\\CDAP-PRESENTLY\\21_22-j-02\\Presently\\presently\\media\\video\\22\\testvideo.mp4" location_videofile = "F:\\CDAP-PRESENTLY\\21_22-j-02\\Presently\\presently\\media\\video\\22\\testvideo.mp4"
cascPath = "F:\\CDAP-PRESENTLY\\21_22-j-02\\Presently\\presently\\users\\models\\abc.xml" cascPath = "F:\\CDAP-PRESENTLY\\21_22-j-02\\Presently\\presently\\users\\models\\abc.xml"
"""Get labels to each emotion"""
def _get_labels(): def _get_labels():
return { return {
0: "angry", 0: "angry",
...@@ -38,10 +19,7 @@ def _get_labels(): ...@@ -38,10 +19,7 @@ def _get_labels():
6: "neutral", 6: "neutral",
} }
"""Build a square (around the face)"""
def tosquare(bbox): def tosquare(bbox):
"""Convert bounding box to square by elongating shorter side."""
x, y, w, h = bbox x, y, w, h = bbox
if h > w: if h > w:
diff = h - w diff = h - w
...@@ -56,15 +34,11 @@ def tosquare(bbox): ...@@ -56,15 +34,11 @@ def tosquare(bbox):
return (x, y, w, h) return (x, y, w, h)
"""Builded square around the face"""
def __apply_offsets(face_coordinates):
    """Grow a face bounding box by a fixed 10-pixel margin on every side.

    Args:
        face_coordinates: (x, y, width, height) of the detected face.

    Returns:
        (x1, x2, y1, y2) edge coordinates of the enlarged box; x1/y1 may go
        negative near the image border — callers must clip if needed.
    """
    left, top, width, height = face_coordinates
    margin_x, margin_y = (10, 10)
    x1 = left - margin_x
    x2 = left + width + margin_x
    y1 = top - margin_y
    y2 = top + height + margin_y
    return (x1, x2, y1, y2)
"""To correct numbers"""
def __preprocess_input(x, v2=False): def __preprocess_input(x, v2=False):
x = x.astype("float32") x = x.astype("float32")
x = x / 255.0 x = x / 255.0
...@@ -73,8 +47,6 @@ def __preprocess_input(x, v2=False): ...@@ -73,8 +47,6 @@ def __preprocess_input(x, v2=False):
x = x * 2.0 x = x * 2.0
return x return x
"""Add padding to the image"""
def pad(image): def pad(image):
PADDING = 40 PADDING = 40
row, col = image.shape[:2] row, col = image.shape[:2]
...@@ -106,7 +78,6 @@ def detect_emotions(location_videofile, NumberofFrames): ...@@ -106,7 +78,6 @@ def detect_emotions(location_videofile, NumberofFrames):
success,image = vidcap.read() success,image = vidcap.read()
frame_count = vidcap.get(cv2.CAP_PROP_FRAME_COUNT) frame_count = vidcap.get(cv2.CAP_PROP_FRAME_COUNT)
print("Frame Count: ", frame_count)
count = 0 count = 0
faceCascade = cv2.CascadeClassifier(cascPath) faceCascade = cv2.CascadeClassifier(cascPath)
...@@ -116,7 +87,7 @@ def detect_emotions(location_videofile, NumberofFrames): ...@@ -116,7 +87,7 @@ def detect_emotions(location_videofile, NumberofFrames):
if success: if success:
if frame_count > NumberofFrames+1: if frame_count > NumberofFrames+1:
count += frame_count/(NumberofFrames+1) # i.e. at 30 fps, this advances one second count += frame_count/(NumberofFrames+1)
else: else:
count += 1 count += 1
vidcap.set(cv2.CAP_PROP_POS_FRAMES, count) vidcap.set(cv2.CAP_PROP_POS_FRAMES, count)
...@@ -135,7 +106,6 @@ def detect_emotions(location_videofile, NumberofFrames): ...@@ -135,7 +106,6 @@ def detect_emotions(location_videofile, NumberofFrames):
face_coordinates = tosquare(face_coordinates) face_coordinates = tosquare(face_coordinates)
x1, x2, y1, y2 = __apply_offsets(face_coordinates) x1, x2, y1, y2 = __apply_offsets(face_coordinates)
# adjust for padding
x1 += PADDING x1 += PADDING
x2 += PADDING x2 += PADDING
y1 += PADDING y1 += PADDING
...@@ -154,10 +124,8 @@ def detect_emotions(location_videofile, NumberofFrames): ...@@ -154,10 +124,8 @@ def detect_emotions(location_videofile, NumberofFrames):
try: try:
gray_face = cv2.resize(gray_face, model.input_shape[1:3]) gray_face = cv2.resize(gray_face, model.input_shape[1:3])
except Exception as e: except Exception as e:
#print("Cannot resize")
continue continue
# Local Keras model
gray_face = __preprocess_input(gray_face, True) gray_face = __preprocess_input(gray_face, True)
gray_face = np.expand_dims(np.expand_dims(gray_face, 0), -1) gray_face = np.expand_dims(np.expand_dims(gray_face, 0), -1)
...@@ -170,9 +138,6 @@ def detect_emotions(location_videofile, NumberofFrames): ...@@ -170,9 +138,6 @@ def detect_emotions(location_videofile, NumberofFrames):
emotions.append( emotions.append(
dict(box=face_coordinates, emotions=labelled_emotions) dict(box=face_coordinates, emotions=labelled_emotions)
) )
#print("Prediction : ", emotions[0]["emotions"])
#plt.imshow(gray_img, interpolation='nearest')
#plt.show()
top_emotions = [max(e["emotions"], key=lambda key: e["emotions"][key]) for e in emotions] top_emotions = [max(e["emotions"], key=lambda key: e["emotions"][key]) for e in emotions]
if len(top_emotions): if len(top_emotions):
for top_emotion in emotions[0]["emotions"]: for top_emotion in emotions[0]["emotions"]:
...@@ -190,4 +155,3 @@ def detect_emotions(location_videofile, NumberofFrames): ...@@ -190,4 +155,3 @@ def detect_emotions(location_videofile, NumberofFrames):
return max(arry, key=arry.get), arry return max(arry, key=arry.get), arry
emo, arr1 = detect_emotions(location_videofile, 300) emo, arr1 = detect_emotions(location_videofile, 300)
# print(emo, arr1)
\ No newline at end of file
This diff is collapsed.
import myprosody as mysp
import pickle

# Run the full battery of myprosody feature extractors over one recording.
# NOTE(review): each mysp.* call prints/derives a prosodic metric as a side
# effect; return values are discarded, exactly as in the original script.
p = "happy"  # base name of the wav file inside the myprosody dataset folder
c = "F:\\CDAP-PRESENTLY\\21_22-j-02\\Presently\\presently\\users\\myprosody"

# Extractor names, in the original invocation order.
_FEATURE_FUNCS = (
    "myspsyl", "mysppaus", "myspsr", "myspatc", "myspst", "myspod",
    "myspbala", "myspf0mean", "myspf0sd", "myspf0med", "myspf0min",
    "myspf0max", "myspf0q25", "myspf0q75", "mysptotal", "myspgend",
    "mysppron", "myprosody", "mysplev",
)
for _name in _FEATURE_FUNCS:
    getattr(mysp, _name)(p, c)
...@@ -7,8 +7,7 @@ ...@@ -7,8 +7,7 @@
<link rel="stylesheet" href="{% static 'users/css/main.css' %}" type="text/css"> <link rel="stylesheet" href="{% static 'users/css/main.css' %}" type="text/css">
<link rel="stylesheet" href="{% static 'users/css/plugin.css' %}" type="text/css"> <link rel="stylesheet" href="{% static 'users/css/plugin.css' %}" type="text/css">
<link rel="stylesheet" href="{% static 'users/css/style.css' %}" type="text/css"> <link rel="stylesheet" href="{% static 'users/css/style.css' %}" type="text/css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css">
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.0.0-beta3/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-eOJMYsd53ii+scO/bJGFsiCZc+5NDVN2yr8+0RDqr0Ql0h+rP48ckxlpbzKgwra6" crossorigin="anonymous"> <link href="https://cdn.jsdelivr.net/npm/bootstrap@5.0.0-beta3/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-eOJMYsd53ii+scO/bJGFsiCZc+5NDVN2yr8+0RDqr0Ql0h+rP48ckxlpbzKgwra6" crossorigin="anonymous">
<link rel="preconnect" href="https://fonts.gstatic.com"> <link rel="preconnect" href="https://fonts.gstatic.com">
<link href="https://fonts.googleapis.com/css2?family=Montserrat:wght@500&family=Open+Sans:wght@800&display=swap" rel="stylesheet"> <link href="https://fonts.googleapis.com/css2?family=Montserrat:wght@500&family=Open+Sans:wght@800&display=swap" rel="stylesheet">
......
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
{% block content %} {% block content %}
<h2> {{ var1 }} {{ var2 }}</h2> <h2> {{ var1 }} {{ var2 }}</h2>
<!-- {% for key, value in predictions.items %} <!-- {% for key, value in var2.items %}
<h2>{{ forloop.counter }}. {{ key }} : {{ value }}</h2> <h2>{{ forloop.counter }}. {{ key }} : {{ value }}</h2>
{% endfor %} --> {% endfor %} -->
......
...@@ -10,11 +10,16 @@ from .models import Video ...@@ -10,11 +10,16 @@ from .models import Video
from django.template import Template, Context from django.template import Template, Context
import datetime import datetime
from . import emotiondetectionvideo from . import emotiondetectionvideo
from . import emotiondetectionaudio # from . import emotiondetectionaudio
import myprosody as mysp
import pickle
from . import myprosody
def test(request): def test(request):
var1= emotiondetectionaudio.emo_list p="happy"
var2= emotiondetectionaudio.total_predictions_np c="F:\\CDAP-PRESENTLY\\21_22-j-02\\Presently\\presently\\users\\myprosody"
var2= myprosody.myspgend(p,c)
var1= myprosody.myprosody(p,c)
context= { context= {
'var1': var1, 'var1': var1,
'var2': var2 'var2': var2
...@@ -60,7 +65,46 @@ def emotionvideo(request): ...@@ -60,7 +65,46 @@ def emotionvideo(request):
return render(request, 'users/emotionvideo.html',context) return render(request, 'users/emotionvideo.html',context)
def emotionaudioprosody(request):
    """Compute prosodic speech features and render the prosody report page.

    Calls each myprosody extractor once for the hard-coded recording and
    passes the results to the template under the same key names the template
    already expects.

    Args:
        request: the incoming Django HttpRequest.

    Returns:
        HttpResponse rendering 'users/prosody.html' with one context entry
        per prosodic feature.
    """
    # TODO(review): recording name and dataset path are hard-coded — these
    # presumably should come from the uploaded video/audio; confirm and wire up.
    p = "happy"
    c = "F:\\CDAP-PRESENTLY\\21_22-j-02\\Presently\\presently\\users\\myprosody"

    # Feature extractors whose context key equals the function name,
    # in the original invocation order.
    feature_names = [
        "myspsyl", "mysppaus", "myspsr", "myspatc", "myspst", "myspod",
        "myspbala", "myspf0mean", "myspf0sd", "myspf0med", "myspf0min",
        "myspf0max", "myspf0q25", "myspf0q75", "myspgend", "mysppron",
    ]
    context = {name: getattr(myprosody, name)(p, c) for name in feature_names}
    # The overall summary is exposed under a shorter key than its function name.
    context["prosody"] = myprosody.myprosody(p, c)
    return render(request, 'users/prosody.html', context)
def overallfeedback(request):
    """Serve the static overall-feedback page; no template context is needed.

    Args:
        request: the incoming Django HttpRequest.

    Returns:
        HttpResponse rendering 'users/overallfeedback.html'.
    """
    template_name = 'users/overallfeedback.html'
    return render(request, template_name)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment