Commit 174e7029 authored by janithgamage1.ed

Merge branch 'IT20251000' of http://gitlab.sliit.lk/tmp-23-029/2023-029 into IT20251000

parents 3df10e0a e844e5d9
{"class_name": "Sequential", "config": {"name": "sequential", "layers": [{"class_name": "InputLayer", "config": {"batch_input_shape": [null, 48, 48, 1], "dtype": "float32", "sparse": false, "ragged": false, "name": "conv2d_input"}}, {"class_name": "Conv2D", "config": {"name": "conv2d", "trainable": true, "batch_input_shape": [null, 48, 48, 1], "dtype": "float32", "filters": 32, "kernel_size": [3, 3], "strides": [1, 1], "padding": "valid", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "Conv2D", "config": {"name": "conv2d_1", "trainable": true, "dtype": "float32", "filters": 64, "kernel_size": [3, 3], "strides": [1, 1], "padding": "valid", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "MaxPooling2D", "config": {"name": "max_pooling2d", "trainable": true, "dtype": "float32", "pool_size": [2, 2], "padding": "valid", "strides": [2, 2], "data_format": "channels_last"}}, {"class_name": "Dropout", "config": {"name": "dropout", "trainable": true, "dtype": "float32", "rate": 0.25, "noise_shape": null, "seed": null}}, {"class_name": "Conv2D", "config": {"name": "conv2d_2", "trainable": true, "dtype": "float32", "filters": 128, "kernel_size": [3, 3], "strides": [1, 1], "padding": "valid", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "MaxPooling2D", "config": {"name": "max_pooling2d_1", "trainable": true, "dtype": "float32", "pool_size": [2, 2], "padding": "valid", "strides": [2, 2], "data_format": "channels_last"}}, {"class_name": "Conv2D", "config": {"name": "conv2d_3", "trainable": true, "dtype": "float32", "filters": 128, "kernel_size": [3, 3], "strides": [1, 1], "padding": "valid", "data_format": "channels_last", "dilation_rate": [1, 1], "groups": 1, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "MaxPooling2D", "config": {"name": "max_pooling2d_2", "trainable": true, "dtype": "float32", "pool_size": [2, 2], "padding": "valid", "strides": [2, 2], "data_format": "channels_last"}}, {"class_name": "Dropout", "config": {"name": "dropout_1", "trainable": true, "dtype": "float32", "rate": 0.25, "noise_shape": null, "seed": null}}, {"class_name": "Flatten", "config": {"name": "flatten", "trainable": true, "dtype": "float32", "data_format": "channels_last"}}, {"class_name": 
"Dense", "config": {"name": "dense", "trainable": true, "dtype": "float32", "units": 1024, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "Dropout", "config": {"name": "dropout_2", "trainable": true, "dtype": "float32", "rate": 0.5, "noise_shape": null, "seed": null}}, {"class_name": "Dense", "config": {"name": "dense_1", "trainable": true, "dtype": "float32", "units": 7, "activation": "softmax", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}]}, "keras_version": "2.4.0", "backend": "tensorflow"}
\ No newline at end of file
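The JSON above is the standard Keras serialization of a small Sequential CNN: a 48x48 grayscale input, Conv2D(32) and Conv2D(64) 3x3 layers, three MaxPooling stages around two Conv2D(128) layers with 0.25 dropout, then Flatten, Dense(1024), 0.5 dropout, and a Dense(7) softmax head for seven emotion classes. A minimal sketch of how such a config is typically rebuilt; the file names here (emotion_model.json, emotion_model.h5) are assumptions for illustration, not taken from this commit:

# Sketch: rebuild the serialized Sequential CNN from its Keras JSON config.
# The file names below are assumptions, not part of this commit.
import tensorflow as tf

with open("emotion_model.json", "r") as f:
    model = tf.keras.models.model_from_json(f.read())
model.load_weights("emotion_model.h5")  # weights are stored separately from the config

model.summary()  # ends in Dense(7, softmax): seven output classes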
@@ -830,6 +830,15 @@
"integrity" "sha512-0X2CImDkJGApiAlcf0ODKIneSwBPhqJawOa5wCtKbu7ZECrmS26NvtSILynQ66cgkT/RJ4LidJOc3bUESwmU8g=="
"resolved" "https://registry.npmjs.org/react/-/react-16.14.0.tgz"
"version" "16.14.0"
dependencies:
"loose-envify" "^1.1.0"
"object-assign" "^4.1.1"
"prop-types" "^15.6.2"
"readable-stream@^2.2.2":
"integrity" "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA=="
"resolved" "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz"
"version" "2.3.8"
dependencies:
"core-util-is" "~1.0.0"
"inherits" "~2.0.3"
@@ -107,6 +107,7 @@ target/
# IPython
profile_default/
ipython_config.py
app.log
# pyenv
# For a library or package, you might want to ignore these files since the code is
from fastapi import APIRouter, FastAPI, File, HTTPException, UploadFile
import tensorflow as tf

from core.logger import setup_logger
from services.audio_detect_service import EmotionPredictionService

app = FastAPI()
router = APIRouter()
logger = setup_logger()

# Load the trained audio emotion model once at startup and share it via the service.
model = tf.keras.models.load_model('../ML_Models/Emotion_Detection_Model/mymodel.h5')
prediction_service = EmotionPredictionService(model)
@router.post("/upload_emotion/audio", tags=["Emotion Detection"])
async def upload_audio(audio: UploadFile = File(...)):
try:
file_location = f"files/emotion/audio/{audio.filename}"
with open(file_location, "wb") as file:
file.write(audio.file.read())
return {"text": "OK"}
except Exception as e:
logger.info(f"Failed to upload file. {e}")
raise HTTPException(
status_code=500,
detail="Failed to upload the audio"
)
@router.post('/predict_emotion/audio', tags=["Emotion Detection"])
def predict_using_audio(audio_request: UploadFile = File(...)):
    try:
        return prediction_service.predict_emotion_detection_audio_new(audio_request)
    except HTTPException:
        # Let the service's own HTTP errors pass through unchanged.
        raise
    except Exception as e:
        logger.error(f"Error. {e}")
        raise HTTPException(
            status_code=500,
            detail="Request Failed."
        )
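For reference, a hedged client-side sketch of exercising these two endpoints with the requests library; the base URL and sample.wav are assumptions, not part of this commit. Note the multipart field names must match the endpoint parameter names (audio, audio_request):

# Sketch: call the audio endpoints. BASE_URL and sample.wav are assumed.
import requests

BASE_URL = "http://localhost:8000"  # assumed local dev server

with open("sample.wav", "rb") as f:
    r = requests.post(f"{BASE_URL}/upload_emotion/audio",
                      files={"audio": ("sample.wav", f, "audio/wav")})
print(r.json())  # {"text": "OK"}

with open("sample.wav", "rb") as f:
    r = requests.post(f"{BASE_URL}/predict_emotion/audio",
                      files={"audio_request": ("sample.wav", f, "audio/wav")})
print(r.json())  # e.g. {"predicted_emotion": "happy"}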
from fastapi import APIRouter, FastAPI, File, HTTPException, UploadFile
from keras.models import model_from_json

from core.logger import setup_logger
from services.video_detection_service import EmotionPredictionService

app = FastAPI()
router = APIRouter()
logger = setup_logger()

# Load the emotion detection model: architecture from JSON, weights from HDF5.
with open('../ML_Models/Emotion_Detection_Model/emotion_model.json', 'r') as json_file:
    loaded_model_json = json_file.read()
emotion_model = model_from_json(loaded_model_json)
emotion_model.load_weights("../ML_Models/Emotion_Detection_Model/emotion_model.h5")
prediction_service = EmotionPredictionService(emotion_model)
@router.post("/upload_emotion/video", tags=["Emotion Detection"])
async def upload_video(video: UploadFile = File(...)):
try:
file_location = f"files/emotion/video/{video.filename}"
with open(file_location, "wb") as file:
file.write(video.file.read())
return {"text": "OK2"}
except Exception as e:
logger.info(f"Failed to upload file. {e}")
raise HTTPException(
status_code=500,
detail="Failed to upload the video"
)
@router.post('/predict_emotion/video', tags=["Emotion Detection"])
def predict_using_video(video_request: UploadFile = File(...)):
    try:
        return prediction_service.predict_emotion_detection_video_new(video_request=video_request)
    except HTTPException:
        # Let the service's own HTTP errors pass through unchanged.
        raise
    except Exception as e:
        logger.error(f"Error. {e}")
        raise HTTPException(
            status_code=500,
            detail="Request Failed."
        )
@@ -84,7 +84,7 @@ async def uploaded_video(file: UploadFile = File(...)):
        return JSONResponse(content={"error": str(e)}, status_code=500)
unicode_to_int_mapping = {"මම": 1, "හෙට": 2, "යනවා": 3, "මං": 4, "ගුඩ්": 5, "මෝනිං": 6, "උඹ": 7, "ආවේ": 8, "ඇයි": 9}
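As an aside, one plausible way this extended mapping is applied; a hedged sketch only, since the body of translate_text is truncated in this hunk. Whitespace tokenization and the 0 fallback are assumptions:

# Sketch: map a whitespace-tokenized Sinhala sentence to the integer IDs above.
# Tokenization scheme and the 0 fallback for unknown tokens are assumptions.
def sentence_to_ids(sentence):
    return [unicode_to_int_mapping.get(token, 0) for token in sentence.split()]

print(sentence_to_ids("මම හෙට යනවා"))  # [1, 2, 3]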
def translate_text(text, target_language):
@@ -3,6 +3,8 @@ from controllers import (
    translate_controler,
    users_controller,
    video_to_sign_language_controller,
    audio_detect_controler,
    video_detect_controler
)
from fastapi.responses import RedirectResponse
from fastapi.middleware.cors import CORSMiddleware
@@ -42,6 +44,8 @@ logger = setup_logger()
app.include_router(users_controller.router)
app.include_router(translate_controler.router)
app.include_router(video_to_sign_language_controller.router)
app.include_router(audio_detect_controler.router)
app.include_router(video_detect_controler.router)
# Add CORS middleware
@@ -57,7 +61,8 @@ origins = [
"http://localhost:51373",
"http://localhost:51489",
"https://v6p9d9t4.ssl.hwcdn.net",
"https://64f66d39fdef493229b2ddd9--lambent-unicorn-97396a.netlify.app"
# "https://64f66d39fdef493229b2ddd9--lambent-unicorn-97396a.netlify.app",
"https://64f7cfd336356b18eb42de2b--lambent-unicorn-97396a.netlify.app"
]
app.add_middleware(
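The add_middleware call is truncated in this hunk; a minimal sketch of how it is conventionally completed with FastAPI's CORSMiddleware (imported above), where the specific flag values are assumptions rather than this repository's settings:

# Sketch: conventional CORS setup; the exact flag values are assumptions.
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,       # the whitelist defined above
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)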
@@ -8,3 +8,4 @@ SpeechRecognition==3.10.0
tk==0.1.0
requests==2.31.0
pymongo==4.5.0
librosa==0.10.1
\ No newline at end of file
from typing import Dict

import librosa
import numpy as np
from fastapi import HTTPException, UploadFile

from core.logger import setup_logger

logger = setup_logger()


class EmotionPredictionService:
    def __init__(self, model):
        self.model = model
    def predict_emotion_detection_audio(self, audio_request: UploadFile) -> Dict[str, str]:
        try:
            # Save the uploaded audio to a temporary location.
            audio_location = f"files/emotion/audio/{audio_request.filename}"
            with open(audio_location, "wb") as file:
                file.write(audio_request.file.read())

            # Summarise the clip as 40 MFCC coefficients averaged over time,
            # matching the model's expected (1, 40, 1) input shape.
            y, sr = librosa.load(audio_location)
            mfccs = np.mean(librosa.feature.mfcc(y=y, sr=sr, n_mfcc=40).T, axis=0)
            test_point = np.reshape(mfccs, newshape=(1, 40, 1))

            predictions = self.model.predict(test_point)
            emotions = {
                1: 'neutral', 2: 'calm', 3: 'happy', 4: 'sad',
                5: 'angry', 6: 'fearful', 7: 'disgust', 8: 'surprised'
            }
            predicted_emotion = emotions[np.argmax(predictions[0]) + 1]
            return {"predicted_emotion": predicted_emotion}
        except Exception as e:
            logger.error(f"Failed to make predictions. {str(e)}")
            raise HTTPException(
                status_code=500,
                detail=f"Failed to make predictions. Error: {str(e)}"
            )
    # The controller calls the *_new variant, which was a verbatim copy of the
    # method above; expose it as an alias instead of duplicating the body.
    predict_emotion_detection_audio_new = predict_emotion_detection_audio
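To make the feature pipeline above concrete: librosa.feature.mfcc with n_mfcc=40 returns a (40, T) matrix over T frames, the time-axis mean collapses it to a 40-vector, and the reshape produces the single-example batch the model expects. A standalone sketch, where sample.wav is an assumed test file:

# Sketch: the same MFCC summary used above, with the shapes spelled out.
import librosa
import numpy as np

y, sr = librosa.load("sample.wav")      # assumed test file
mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=40)
print(mfcc.shape)                       # (40, T) - one column per frame
features = np.mean(mfcc.T, axis=0)      # average over time -> (40,)
test_point = features.reshape(1, 40, 1) # batch of one for the model
print(test_point.shape)                 # (1, 40, 1)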
import os
from typing import Dict

import cv2
import numpy as np
from fastapi import HTTPException, UploadFile
from keras.models import model_from_json

from core.logger import setup_logger

logger = setup_logger()

# Emotion labels in the order of the model's softmax outputs.
emotion_dict = {0: "Angry", 1: "Disgusted", 2: "Fearful", 3: "Happy", 4: "Neutral", 5: "Sad", 6: "Surprised"}

# Load the emotion detection model: architecture from JSON, weights from HDF5.
with open('../ML_Models/Emotion_Detection_Model/emotion_model.json', 'r') as json_file:
    loaded_model_json = json_file.read()
emotion_model = model_from_json(loaded_model_json)
emotion_model.load_weights("../ML_Models/Emotion_Detection_Model/emotion_model.h5")

# Load the Haar cascade face detector once, rather than once per frame.
face_detector = cv2.CascadeClassifier('../ML_Models/Emotion_Detection_Model/haarcascade_frontalface_default.xml')


class EmotionPredictionService:
    def __init__(self, model):
        self.model = model
    def predict_emotion_detection_video(self, video_request: UploadFile) -> Dict[str, str]:
        try:
            # Save the uploaded video to a temporary location.
            video_location = f"files/emotion/video/{video_request.filename}"
            with open(video_location, "wb") as file:
                file.write(video_request.file.read())

            cap = cv2.VideoCapture(video_location)
            if not cap.isOpened():
                raise HTTPException(
                    status_code=400,
                    detail="Failed to open video file."
                )

            # Classify emotions frame by frame until the stream ends.
            predicted_emotions = []
            while True:
                ret, frame = cap.read()
                if not ret:
                    break
                emotions = predict_emotion_from_frame(frame)
                predicted_emotions.extend(emotions)

            cap.release()
            os.remove(video_location)
            return {"predicted_emotions": predicted_emotions}
        except HTTPException:
            # Preserve the 400 raised above instead of rewrapping it as a 500.
            raise
        except Exception as e:
            logger.error(f"Failed to make predictions. {str(e)}")
            raise HTTPException(
                status_code=500,
                detail=f"Failed to make predictions. Error: {str(e)}"
            )
    # The controller calls the *_new variant, which matched the method above;
    # expose it as an alias instead of duplicating the body.
    predict_emotion_detection_video_new = predict_emotion_detection_video
# Predict emotions for each face detected in a single video frame.
def predict_emotion_from_frame(frame):
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_detector.detectMultiScale(gray_frame, scaleFactor=1.3, minNeighbors=5)
    emotions = []
    for (x, y, w, h) in faces:
        # Crop the face region and resize to the 48x48 grayscale input the
        # model expects, adding batch and channel dimensions.
        roi_gray_frame = gray_frame[y:y + h, x:x + w]
        cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray_frame, (48, 48)), -1), 0)
        emotion_prediction = emotion_model.predict(cropped_img)
        maxindex = int(np.argmax(emotion_prediction))
        emotions.append(emotion_dict[maxindex])
    return emotions
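A quick sketch of calling the frame-level helper on a single still image instead of a video; test.jpg is an assumed local file:

# Sketch: run the per-frame detector on one image. test.jpg is assumed.
import cv2

frame = cv2.imread("test.jpg")  # BGR array, same layout cv2.VideoCapture yields
if frame is not None:
    print(predict_emotion_from_frame(frame))  # e.g. ['Happy'] for one face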
@@ -31,3 +31,5 @@ source-folder > yarn start
- Support for react-script.
- Code splitting.
- CSS-in-JS.
- test
@@ -271,7 +271,8 @@ const VideoTranslate = () => {
        {/* Conditionally render the Unity WebGL build */}
        {showUnityWebGL && (
          <iframe
            // src="https://64f66d39fdef493229b2ddd9--lambent-unicorn-97396a.netlify.app/"
            src="https://64f7cfd336356b18eb42de2b--lambent-unicorn-97396a.netlify.app/"
            width="700px"
            height="700px" // Adjust the height as needed
            title="Unity WebGL"