Project: 2023-029

Commit 64b9fa32, authored Sep 05, 2023 by janithgamage1.ed

Merge branch 'master' into feature/UI-API-Connect

Parents: d2f07a4e, cfe3d3bb

Showing 9 changed files with 34652 additions and 1080 deletions:
Project/Backend/ML_Models/Emotion_Detection_Model/emotion_model.json                    +1      -0
Project/Backend/ML_Models/Emotion_Detection_Model/haarcascade_frontalface_default.xml   +33271  -0
Project/Backend/Server_Node/yarn.lock                                                   +1073   -1078
Project/Backend/Server_Python/controllers/audio_detect_controler.py                     +44     -0
Project/Backend/Server_Python/controllers/video_detect_controler.py                     +61     -0
Project/Backend/Server_Python/main.py                                                   +5      -1
Project/Backend/Server_Python/requirements.txt                                          +2      -1
Project/Backend/Server_Python/services/audio_detect_service.py                          +80     -0
Project/Backend/Server_Python/services/video_detection_service.py                       +115    -0
Project/Backend/ML_Models/Emotion_Detection_Model/emotion_model.json (new file, mode 100644)
{"class_name": "Sequential", "config": {"name": "sequential", "layers": [
{"class_name": "InputLayer", "config": {"batch_input_shape": [null, 48, 48, 1], "dtype": "float32", "sparse": false, "ragged": false, "name": "conv2d_input"}},
{"class_name": "Conv2D", "config": {"name": "conv2d", "trainable": true, "batch_input_shape": [null, 48, 48, 1], "dtype": "float32", "filters": 32, "kernel_size": [3, 3], "strides": [1, 1], "padding": "valid", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}},
{"class_name": "Conv2D", "config": {"name": "conv2d_1", "trainable": true, "dtype": "float32", "filters": 64, "kernel_size": [3, 3], "strides": [1, 1], "padding": "valid", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}},
{"class_name": "MaxPooling2D", "config": {"name": "max_pooling2d", "trainable": true, "dtype": "float32", "pool_size": [2, 2], "padding": "valid", "strides": [2, 2], "data_format": "channels_last"}},
{"class_name": "Dropout", "config": {"name": "dropout", "trainable": true, "dtype": "float32", "rate": 0.25, "noise_shape": null, "seed": null}},
{"class_name": "Conv2D", "config": {"name": "conv2d_2", "trainable": true, "dtype": "float32", "filters": 128, "kernel_size": [3, 3], "strides": [1, 1], "padding": "valid", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}},
{"class_name": "MaxPooling2D", "config": {"name": "max_pooling2d_1", "trainable": true, "dtype": "float32", "pool_size": [2, 2], "padding": "valid", "strides": [2, 2], "data_format": "channels_last"}},
{"class_name": "Conv2D", "config": {"name": "conv2d_3", "trainable": true, "dtype": "float32", "filters": 128, "kernel_size": [3, 3], "strides": [1, 1], "padding": "valid", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}},
{"class_name": "MaxPooling2D", "config": {"name": "max_pooling2d_2", "trainable": true, "dtype": "float32", "pool_size": [2, 2], "padding": "valid", "strides": [2, 2], "data_format": "channels_last"}},
{"class_name": "Dropout", "config": {"name": "dropout_1", "trainable": true, "dtype": "float32", "rate": 0.25, "noise_shape": null, "seed": null}},
{"class_name": "Flatten", "config": {"name": "flatten", "trainable": true, "dtype": "float32", "data_format": "channels_last"}},
{"class_name": "Dense", "config": {"name": "dense", "trainable": true, "dtype": "float32", "units": 1024, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}},
{"class_name": "Dropout", "config": {"name": "dropout_2", "trainable": true, "dtype": "float32", "rate": 0.5, "noise_shape": null, "seed": null}},
{"class_name": "Dense", "config": {"name": "dense_1", "trainable": true, "dtype": "float32", "units": 7, "activation": "softmax", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}
]}, "keras_version": "2.4.0", "backend": "tensorflow"}
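This config describes a compact sequential CNN: a 48x48 single-channel (grayscale) input, four Conv2D layers (32, 64, 128, 128 filters) interleaved with max pooling and dropout, a 1024-unit ReLU dense layer, and a 7-way softmax output. As a minimal sketch (assuming TensorFlow/Keras is installed and the path is resolved from the repository root), the architecture can be rebuilt directly from this file:

from keras.models import model_from_json

# Rebuild the architecture only; the weights live separately in
# emotion_model.h5 and are loaded by the services further below.
with open("Project/Backend/ML_Models/Emotion_Detection_Model/emotion_model.json") as f:
    model = model_from_json(f.read())

model.summary()  # 48x48x1 input -> conv/pool/dropout stack -> Dense(1024) -> 7-way softmax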
Project/Backend/ML_Models/Emotion_Detection_Model/haarcascade_frontalface_default.xml (new file, mode 100644)
(diff collapsed: +33271 / -0)
Project/Backend/Server_Node/yarn.lock
(diff collapsed: +1073 / -1078)
Project/Backend/Server_Python/controllers/audio_detect_controler.py (new file, mode 100644)
from fastapi import APIRouter, FastAPI, UploadFile, File, HTTPException
from fastapi.responses import FileResponse
import os

from core.logger import setup_logger
from services.audio_detect_service import EmotionPredictionService
import tensorflow as tf

app = FastAPI()
router = APIRouter()

logger = setup_logger()

# Load the trained audio emotion model and wrap it in the prediction service
model = tf.keras.models.load_model('../ML_Models/Emotion_Detection_Model/mymodel.h5')
prediction_service = EmotionPredictionService(model)


@router.post("/upload_emotion/audio", tags=["Emotion Detection"])
async def upload_audio(audio: UploadFile = File(...)):
    # Persist the uploaded audio file so it can be analysed later
    try:
        file_location = f"files/emotion/audio/{audio.filename}"
        with open(file_location, "wb") as file:
            file.write(audio.file.read())
        return {"text": "OK"}
    except Exception as e:
        logger.info(f"Failed to upload file. {e}")
        raise HTTPException(status_code=500, detail="Failed to upload the audio")


@router.post('/predict_emotion/audio', tags=["Emotion Detection"])
def predict_using_audio(audio_request: UploadFile = File(...)):
    # Run the uploaded audio through the emotion prediction service
    try:
        return prediction_service.predict_emotion_detection_audio_new(audio_request)
    except Exception as e:
        logger.info(f"Error. {e}")
        raise HTTPException(status_code=500, detail="Request Failed.")
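A hedged sketch of calling the prediction route from a client; the host, port, and sample.wav are assumptions, while the path and multipart field name (audio_request) follow from the handler above:

import requests

# Hypothetical client call; assumes the API is served at localhost:8000
# and a local sample.wav exists.
with open("sample.wav", "rb") as f:
    resp = requests.post(
        "http://localhost:8000/predict_emotion/audio",
        files={"audio_request": ("sample.wav", f, "audio/wav")},
    )
print(resp.json())  # e.g. {"predicted_emotion": "happy"}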
Project/Backend/Server_Python/controllers/video_detect_controler.py (new file, mode 100644)
from fastapi import APIRouter, FastAPI, UploadFile, File, HTTPException
from fastapi.responses import FileResponse
from keras.models import model_from_json
import os

from core.logger import setup_logger
from services.video_detection_service import EmotionPredictionService
import tensorflow as tf

# Get the absolute path to the 'model' directory
model_directory = os.path.abspath('model')

# Construct the absolute path to 'emotion_model.json'
json_file_path = os.path.join(model_directory, 'emotion_model.json')

# Open the JSON file
# json_file = open(json_file_path, 'r')

app = FastAPI()
router = APIRouter()

logger = setup_logger()

# Load the emotion detection model: architecture from JSON, weights from HDF5
json_file = open('../ML_Models/Emotion_Detection_Model/emotion_model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
emotion_model = model_from_json(loaded_model_json)
emotion_model.load_weights("../ML_Models/Emotion_Detection_Model/emotion_model.h5")

prediction_service = EmotionPredictionService(emotion_model)


@router.post("/upload_emotion/video", tags=["Emotion Detection"])
async def upload_video(video: UploadFile = File(...)):
    # Persist the uploaded video file so it can be analysed later
    try:
        file_location = f"files/emotion/video/{video.filename}"
        with open(file_location, "wb") as file:
            file.write(video.file.read())
        return {"text": "OK2"}
    except Exception as e:
        logger.info(f"Failed to upload file. {e}")
        raise HTTPException(status_code=500, detail="Failed to upload the video")


@router.post('/predict_emotion/video', tags=["Emotion Detection"])
def predict_using_video(video_request: UploadFile = File(...)):
    # Run the uploaded video through the emotion prediction service
    try:
        return prediction_service.predict_emotion_detection_video_new(video_request=video_request)
    except Exception as e:
        logger.info(f"Error. {e}")
        raise HTTPException(status_code=500, detail="Request Failed.")
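The video route can be exercised the same way; again the host, port, and sample.mp4 are assumptions, while the path and field name (video_request) come from the handler above:

import requests

# Hypothetical client call; assumes the API is served at localhost:8000
# and a local sample.mp4 exists.
with open("sample.mp4", "rb") as f:
    resp = requests.post(
        "http://localhost:8000/predict_emotion/video",
        files={"video_request": ("sample.mp4", f, "video/mp4")},
    )
print(resp.json())  # e.g. {"predicted_emotions": ["Happy", "Neutral", ...]}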
Project/Backend/Server_Python/main.py
@@ -3,6 +3,8 @@ from controllers import (
     translate_controler,
     users_controller,
     video_to_sign_language_controller,
+    audio_detect_controler,
+    video_detect_controler
 )
 from fastapi.responses import RedirectResponse
 from fastapi.middleware.cors import CORSMiddleware

@@ -42,6 +44,8 @@ logger = setup_logger()
 app.include_router(users_controller.router)
 app.include_router(translate_controler.router)
 app.include_router(video_to_sign_language_controller.router)
+app.include_router(audio_detect_controler.router)
+app.include_router(video_detect_controler.router)
 # Add cores middleware
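For context, a minimal sketch of how the two new routers slot into a FastAPI app next to the CORS middleware that the trailing "# Add cores middleware" comment refers to; the CORS settings and entry point here are placeholder assumptions, not the project's actual values:

import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
# from controllers import audio_detect_controler, video_detect_controler

app = FastAPI()
# Routers added by this commit:
# app.include_router(audio_detect_controler.router)
# app.include_router(video_detect_controler.router)

# Permissive placeholder CORS configuration
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)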
Project/Backend/Server_Python/requirements.txt
@@ -8,3 +8,4 @@ SpeechRecognition==3.10.0
 tk==0.1.0
 requests==2.31.0
 pymongo==4.5.0
+librosa==0.10.1
\ No newline at end of file
Project/Backend/Server_Python/services/audio_detect_service.py (new file, mode 100644)
# from fastapi.types import ModelNameMap
# from sklearn import model_selection
# import tensorflow as tf
import numpy as np
import librosa
from fastapi import HTTPException, UploadFile
from typing import Dict
import os

from core.logger import setup_logger

logger = setup_logger()


class EmotionPredictionService:
    def __init__(self, model):
        self.model = model

    def predict_emotion_detection_audio(self, audio_request: UploadFile) -> Dict[str, str]:
        try:
            # Create a temporary file to save the audio
            audio_location = f"files/emotion/audio/{audio_request.filename}"
            with open(audio_location, "wb") as file:
                file.write(audio_request.file.read())

            # Load the saved audio and average 40 MFCC coefficients over time
            y, sr = librosa.load(audio_location)
            mfccs = np.mean(librosa.feature.mfcc(y=y, sr=sr, n_mfcc=40).T, axis=0)

            # Shape the features as a single (1, 40, 1) test point for the model
            test_point = np.reshape(mfccs, newshape=(1, 40, 1))
            predictions = self.model.predict(test_point)

            # Map the 0-based argmax to the 1-based emotion keys
            emotions = {1: 'neutral', 2: 'calm', 3: 'happy', 4: 'sad',
                        5: 'angry', 6: 'fearful', 7: 'disgust', 8: 'surprised'}
            predicted_emotion = emotions[np.argmax(predictions[0]) + 1]

            return {"predicted_emotion": predicted_emotion}
        except Exception as e:
            logger.error(f"Failed to make predictions. {str(e)}")
            raise HTTPException(status_code=500,
                                detail=f"Failed to make predictions. Error: {str(e)}")

    # Note: currently an exact duplicate of predict_emotion_detection_audio;
    # this is the variant the controller calls.
    def predict_emotion_detection_audio_new(self, audio_request: UploadFile) -> Dict[str, str]:
        try:
            # Create a temporary file to save the audio
            audio_location = f"files/emotion/audio/{audio_request.filename}"
            with open(audio_location, "wb") as file:
                file.write(audio_request.file.read())

            # Load the saved audio and average 40 MFCC coefficients over time
            y, sr = librosa.load(audio_location)
            mfccs = np.mean(librosa.feature.mfcc(y=y, sr=sr, n_mfcc=40).T, axis=0)

            # Shape the features as a single (1, 40, 1) test point for the model
            test_point = np.reshape(mfccs, newshape=(1, 40, 1))
            predictions = self.model.predict(test_point)

            # Map the 0-based argmax to the 1-based emotion keys
            emotions = {1: 'neutral', 2: 'calm', 3: 'happy', 4: 'sad',
                        5: 'angry', 6: 'fearful', 7: 'disgust', 8: 'surprised'}
            predicted_emotion = emotions[np.argmax(predictions[0]) + 1]

            return {"predicted_emotion": predicted_emotion}
        except Exception as e:
            logger.error(f"Failed to make predictions. {str(e)}")
            raise HTTPException(status_code=500,
                                detail=f"Failed to make predictions. Error: {str(e)}")
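To make the feature contract explicit: each clip is reduced to 40 time-averaged MFCCs reshaped to a (1, 40, 1) test point, and since np.argmax is 0-based while the emotions dict is keyed 1-8, the service adds 1 before the lookup. A standalone sketch (sample.wav is an assumed local file; the one-hot vector stands in for a real model prediction):

import numpy as np
import librosa

# 40 MFCC coefficients, averaged over time, shaped for the model
y, sr = librosa.load("sample.wav")
mfccs = np.mean(librosa.feature.mfcc(y=y, sr=sr, n_mfcc=40).T, axis=0)
test_point = np.reshape(mfccs, newshape=(1, 40, 1))
print(test_point.shape)  # (1, 40, 1)

# Index arithmetic used by the service: 0-based argmax -> 1-based label key
fake_predictions = np.eye(8)[2][None, :]   # pretend class index 2 won
print(np.argmax(fake_predictions[0]) + 1)  # 3, which maps to 'happy'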
Project/Backend/Server_Python/services/video_detection_service.py (new file, mode 100644)
from fastapi import FastAPI, UploadFile, HTTPException
from typing import Dict
import cv2
import numpy as np
from keras.models import model_from_json
import os

from core.logger import setup_logger

app = FastAPI()
logger = setup_logger()

# Define the emotion labels, indexed by the model's output class
emotion_dict = {0: "Angry", 1: "Disgusted", 2: "Fearful", 3: "Happy",
                4: "Neutral", 5: "Sad", 6: "Surprised"}

# Load the emotion detection model: architecture from JSON, weights from HDF5
json_file = open('../ML_Models/Emotion_Detection_Model/emotion_model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
emotion_model = model_from_json(loaded_model_json)
emotion_model.load_weights("../ML_Models/Emotion_Detection_Model/emotion_model.h5")


class EmotionPredictionService:
    def __init__(self, model):
        self.model = model

    def predict_emotion_detection_video(self, video_request: UploadFile) -> Dict[str, str]:
        try:
            # Create a temporary file to save the video
            video_location = f"files/emotion/video/{video_request.filename}"
            with open(video_location, "wb") as file:
                file.write(video_request.file.read())

            # Initialize video capture
            cap = cv2.VideoCapture(video_location)
            if not cap.isOpened():
                raise HTTPException(status_code=400, detail="Failed to open video file.")

            # Classify the faces found in every frame
            predicted_emotions = []
            while True:
                ret, frame = cap.read()
                if not ret:
                    break
                emotions = predict_emotion_from_frame(frame)
                predicted_emotions.extend(emotions)

            cap.release()
            os.remove(video_location)

            return {"predicted_emotions": predicted_emotions}
        except Exception as e:
            logger.error(f"Failed to make predictions. {str(e)}")
            raise HTTPException(status_code=500,
                                detail=f"Failed to make predictions. Error: {str(e)}")

    # Note: currently an exact duplicate of predict_emotion_detection_video;
    # this is the variant the controller calls.
    def predict_emotion_detection_video_new(self, video_request: UploadFile) -> Dict[str, str]:
        try:
            # Create a temporary file to save the video
            video_location = f"files/emotion/video/{video_request.filename}"
            with open(video_location, "wb") as file:
                file.write(video_request.file.read())

            # Initialize video capture
            cap = cv2.VideoCapture(video_location)
            if not cap.isOpened():
                raise HTTPException(status_code=400, detail="Failed to open video file.")

            # Classify the faces found in every frame
            predicted_emotions = []
            while True:
                ret, frame = cap.read()
                if not ret:
                    break
                emotions = predict_emotion_from_frame(frame)
                predicted_emotions.extend(emotions)

            cap.release()
            os.remove(video_location)

            return {"predicted_emotions": predicted_emotions}
        except Exception as e:
            logger.error(f"Failed to make predictions. {str(e)}")
            raise HTTPException(status_code=500,
                                detail=f"Failed to make predictions. Error: {str(e)}")


# Function to predict emotion from a single video frame
def predict_emotion_from_frame(frame):
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    face_detector = cv2.CascadeClassifier(
        '../ML_Models/Emotion_Detection_Model/haarcascade_frontalface_default.xml')
    num_faces = face_detector.detectMultiScale(gray_frame, scaleFactor=1.3, minNeighbors=5)

    emotions = []
    for (x, y, w, h) in num_faces:
        # Crop the face region and resize to the model's 48x48 grayscale input
        roi_gray_frame = gray_frame[y:y + h, x:x + w]
        cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray_frame, (48, 48)), -1), 0)
        emotion_prediction = emotion_model.predict(cropped_img)
        maxindex = int(np.argmax(emotion_prediction))
        emotions.append(emotion_dict[maxindex])
    return emotions
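The per-frame helper can also be used on its own, e.g. on a single still image; "face.jpg" is an assumed file, and the relative model paths must resolve the same way they do when the service runs:

import cv2
from services.video_detection_service import predict_emotion_from_frame

# A BGR image, exactly what cv2.VideoCapture.read() would yield per frame
frame = cv2.imread("face.jpg")
if frame is not None:
    print(predict_emotion_from_frame(frame))  # e.g. ['Happy'], one label per detected face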