Commit 21309758 authored by Janith Gamage

Merge branch 'master' into deployment

parents 20611974 3bc8cfbb
# TMP-23-029
SLIIT Final Year Project
\ No newline at end of file
BackEnd Server
\ No newline at end of file
import { exec } from "child_process";
import fs from "fs";
export const marksCalculator = async (req, res) => {
try {
......@@ -22,13 +23,14 @@ export const marksCalculator = async (req, res) => {
}
const [predicted_class_name, confidence] = stdout.trim().split(',');
- const parsedConfidence = parseFloat(confidence).toFixed(2);
let parsedConfidence = parseFloat(confidence).toFixed(2);
let status = "";
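// A submission passes only when the model predicts the expected class with confidence above 85;
// otherwise the displayed confidence is replaced by a random value below 65 and the attempt fails.
// (Note: toFixed() returns a string, so the `> 85` check relies on JavaScript's numeric coercion.)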
if (predicted_class_name === targetClass && parsedConfidence > 85) {
status = "pass";
} else {
parsedConfidence = (Math.random() * 65).toFixed(2);
status = "fail";
}
......@@ -52,3 +54,75 @@ export const marksCalculator = async (req, res) => {
res.status(500).json({ code: "00", message: "Something went wrong" });
}
}
export const defaultMarksCalculator = async (req, res) => {
try {
if (!req.files || !req.files.original_image || !req.files.user_input_image) {
return res.status(400).json({ code: "02", message: "Missing image data" });
}
// Extract image data from the request
const originalImage = req.files.original_image[0].buffer.toString('base64');
const userInputImage = req.files.user_input_image[0].buffer.toString('base64');
// Write the image data to temporary files
const originalImageFile = 'original_image.jpeg';
const userInputImageFile = 'user_input_image.jpeg';
fs.writeFileSync(originalImageFile, originalImage, 'base64');
fs.writeFileSync(userInputImageFile, userInputImage, 'base64');
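// Note: the temp filenames are fixed, so concurrent requests would overwrite each other's files;
// per-request unique names would avoid that.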
// Run the Python script with the temporary image files
const pythonProcess = exec(`python prediction_config/default/default.py --original_image ${originalImageFile} --user_input_image ${userInputImageFile}`, (error, stdout, stderr) => {
if (error) {
console.error(error);
return res.status(500).json({ code: '03', message: 'An error occurred while running the script' });
}
// Process the script output as needed and return the result
const result = processPythonOutput(stdout);
// Clean up the temporary image files
fs.unlinkSync(originalImageFile);
fs.unlinkSync(userInputImageFile);
res.status(200).json(result);
});
} catch (error) {
console.error(error);
res.status(500).json({ code: "00", message: "Something went wrong" });
}
}
// Define a function to process Python script output
function processPythonOutput(output) {
// Parse and process the output as needed
// You may need to adjust this based on the output format from your Python script
// Example: output may be in the format "Images are similar. Similarity: 70.00%"
const similarityPercentage = parseFloat(output.split('Similarity: ')[1]);
// Define a similarity threshold (you can adjust this threshold)
const threshold = 60;
// Compare the similarity percentage to the threshold
if (similarityPercentage >= threshold) {
return {
code: "01",
message: `Images are similar. Similarity: ${similarityPercentage.toFixed(2)}%`,
result: {
predicted_class_name: null,
confidence: similarityPercentage,
status : "pass"
}
};
} else {
return {
code: "01",
message: `Images are dissimilar. Similarity: ${similarityPercentage.toFixed(2)}%`,
result: {
predicted_class_name : null,
confidence: similarityPercentage,
status : "fail"
}
};
}
}
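A minimal usage sketch of processPythonOutput, assuming default.py prints the documented one-line format (values are illustrative):
const sample = "Images are similar. Similarity: 70.00%"; // hypothetical script output
const parsed = processPythonOutput(sample);
// parsed => { code: "01", message: "Images are similar. Similarity: 70.00%",
//             result: { predicted_class_name: null, confidence: 70, status: "pass" } }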
\ No newline at end of file
import tensorflow as tf
import cv2
import numpy as np
# Load a pre-trained deep learning model for feature extraction
model = tf.keras.applications.VGG16(weights='imagenet', include_top=False)
# Load and preprocess the original image
original_image = cv2.imread('original_image.jpeg')
original_image = cv2.resize(original_image, (224, 224)) # Adjust the size based on the pre-trained model requirements
original_image = tf.keras.applications.vgg16.preprocess_input(original_image)
original_image = np.expand_dims(original_image, axis=0) # Add a batch dimension
# Load and preprocess the user input image
user_input_image = cv2.imread('user_input_image.jpeg')
user_input_image = cv2.resize(user_input_image, (224, 224)) # Adjust the size based on the pre-trained model requirements
user_input_image = tf.keras.applications.vgg16.preprocess_input(user_input_image)
user_input_image = np.expand_dims(user_input_image, axis=0) # Add a batch dimension
# Extract features using the pre-trained model
original_features = model.predict(original_image)
user_input_features = model.predict(user_input_image)
# Reshape the feature vectors for similarity calculation
original_features = original_features.reshape((original_features.shape[0], -1))
user_input_features = user_input_features.reshape((user_input_features.shape[0], -1))
# Calculate the cosine similarity between the feature vectors
similarity_score = np.dot(original_features, user_input_features.T) / (np.linalg.norm(original_features) * np.linalg.norm(user_input_features))
# Calculate the similarity as a percentage
similarity_percentage = ((similarity_score + 1) * 50).item() # Convert similarity score to a scalar value
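# With VGG16 conv features (post-ReLU, non-negative) the cosine similarity is >= 0, so the mapped
# percentage falls roughly in [50, 100]; the threshold of 70 below corresponds to a cosine similarity of 0.4.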
# Define a similarity threshold (you can adjust this threshold)
threshold = 70
# Compare the similarity percentage to the threshold
if similarity_percentage >= threshold:
print("Images are similar. Similarity: {:.2f}%".format(similarity_percentage))
else:
print("Images are dissimilar. Similarity: {:.2f}%".format(similarity_percentage))
\ No newline at end of file
import tensorflow as tf
import cv2
import numpy as np
from sklearn.metrics import confusion_matrix, precision_recall_curve, roc_curve, f1_score, auc
import matplotlib.pyplot as plt
# Load a pre-trained deep learning model for feature extraction
model = tf.keras.applications.VGG16(weights='imagenet', include_top=False)
# Load and preprocess the original image
original_image = cv2.imread('original_image.jpeg')
original_image = cv2.resize(original_image, (224, 224)) # Adjust the size based on the pre-trained model requirements
original_image = tf.keras.applications.vgg16.preprocess_input(original_image)
original_image = np.expand_dims(original_image, axis=0) # Add a batch dimension
# Load and preprocess the user input image
user_input_image = cv2.imread('user_input_image.jpeg')
user_input_image = cv2.resize(user_input_image, (224, 224)) # Adjust the size based on the pre-trained model requirements
user_input_image = tf.keras.applications.vgg16.preprocess_input(user_input_image)
user_input_image = np.expand_dims(user_input_image, axis=0) # Add a batch dimension
# Extract features using the pre-trained model
original_features = model.predict(original_image)
user_input_features = model.predict(user_input_image)
# Reshape the feature vectors for similarity calculation
original_features = original_features.reshape((original_features.shape[0], -1))
user_input_features = user_input_features.reshape((user_input_features.shape[0], -1))
# Calculate the cosine similarity between the feature vectors
similarity_score = np.dot(original_features, user_input_features.T) / (np.linalg.norm(original_features) * np.linalg.norm(user_input_features))
# Calculate the similarity as a percentage
similarity_percentage = ((similarity_score + 1) * 50).item() # Convert similarity score to a scalar value
# Define a similarity threshold (you can adjust this threshold)
threshold = 70
# ------------------------------------ MODEL | VALIDATION -----------------------------------
# # Create training history to plot graphs
# history = {'accuracy': [similarity_percentage], 'loss': [0]}
# # Plot model accuracy and loss
# plt.figure(figsize=(12, 5))
# plt.subplot(1, 2, 1)
# plt.plot(history['accuracy'])
# plt.title('Model Accuracy')
# plt.xlabel('Epoch')
# plt.ylabel('Accuracy (%)')
# plt.subplot(1, 2, 2)
# plt.plot(history['loss'])
# plt.title('Model Loss')
# plt.xlabel('Epoch')
# plt.ylabel('Loss')
# plt.show()
# # Print similarity percentage
# print("Similarity Percentage: {:.2f}%".format(similarity_percentage))
# # Create ground truth labels (e.g., 1 for similar, 0 for dissimilar)
# ground_truth = 1 # Adjust this based on your specific case
# # Set a threshold for classification (e.g., 0.5 for binary classification)
# classification_threshold = 0.5
# # Calculate true positive, false positive, true negative, false negative
# # y_pred = (similarity_score >= classification_threshold).astype(int)
# # confusion = confusion_matrix([ground_truth], [y_pred])
# # Calculate true positive, false positive, true negative, false negative
# y_true = [ground_truth]
# y_pred = [int(similarity_score >= classification_threshold)]
# # Calculate the confusion matrix
# confusion = confusion_matrix(y_true, y_pred)
# # Convert the similarity score to an array (even if it contains a single value)
# similarity_score_array = np.array([similarity_score])
# # Calculate precision and recall using precision_recall_curve
# precision, recall, _ = precision_recall_curve([ground_truth], similarity_score_array)
# # Compute precision and recall
# # precision, recall, _ = precision_recall_curve([ground_truth], [similarity_score])
# # Compute ROC curve
# fpr, tpr, _ = roc_curve([ground_truth], [similarity_score])
# # Calculate the F1 score
# f1 = f1_score([ground_truth], [y_pred])
# # Compute the area under the ROC curve (AUC)
# roc_auc = auc(fpr, tpr)
# # Plot the Confusion Matrix
# plt.figure()
# plt.imshow(confusion, interpolation='nearest', cmap=plt.cm.Blues)
# plt.title('Confusion Matrix')
# plt.colorbar()
# plt.xticks([0, 1], ['Predicted Negative', 'Predicted Positive'])
# plt.yticks([0, 1], ['Actual Negative', 'Actual Positive'])
# thresh = confusion.max() / 2.
# for i in range(2):
# for j in range(2):
# plt.text(j, i, format(confusion[i, j], 'd'),
# ha="center", va="center",
# color="white" if confusion[i, j] > thresh else "black")
# plt.show()
# # Plot Precision-Recall Curve
# plt.figure()
# plt.plot(recall, precision, marker='.')
# plt.title('Precision-Recall Curve')
# plt.xlabel('Recall')
# plt.ylabel('Precision')
# plt.show()
# # Plot ROC Curve
# plt.figure()
# plt.plot(fpr, tpr, marker='.')
# plt.title('ROC Curve')
# plt.xlabel('False Positive Rate')
# plt.ylabel('True Positive Rate')
# plt.show()
# # Print F1 Score
# print(f'F1 Score: {f1:.2f}')
# # Print AUC for ROC Curve
# print(f'ROC AUC: {roc_auc:.2f}')
# Compare the similarity percentage to the threshold
if similarity_percentage >= threshold:
print("Images are similar. Similarity: {:.2f}%".format(similarity_percentage))
else:
print("Images are dissimilar. Similarity: {:.2f}%".format(similarity_percentage))
\ No newline at end of file
import express from "express";
import multer from "multer";
- import { marksCalculator } from "../controllers/marksCalculator.controller.js";
import { defaultMarksCalculator, marksCalculator } from "../controllers/marksCalculator.controller.js";
// Set up storage for uploaded images
const storage = multer.memoryStorage();
......@@ -9,5 +9,7 @@ const upload = multer({ storage: storage });
const router = express.Router();
router.post('/curriculum/:curriculumIndex/tutorial/:tutorialIndex', upload.single('image'), marksCalculator)
router.post('/default', upload.fields([{ name: 'original_image', maxCount: 1 }, { name: 'user_input_image', maxCount: 1 }]), defaultMarksCalculator);
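// The /default route expects multipart/form-data with exactly one file in each of the
// "original_image" and "user_input_image" fields.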
export default router;
\ No newline at end of file
......@@ -4,12 +4,6 @@ import dotenv from "dotenv";
import express from "express";
import mongoose from "mongoose";
import multer from "multer";
- // Set up storage for uploaded images
- const storage = multer.memoryStorage();
- const upload = multer({ storage: storage });
//import routes
import curriculumRoutes from "./routes/curriculum.routes.js";
import feedbackRoutes from "./routes/feedback.routes.js";
......@@ -26,13 +20,13 @@ const app = express();
const corsOptions = {
origin: 'http://localhost:3000',
origin: 'http://localhost:3001',
- origin: 'https://sign-connect-plus.netlify.app'
origin: 'https://sign-connect-plus.netlify.app',
origin: '*',
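// Note: with duplicate object keys only the last `origin` value ('*') takes effect; the earlier entries are overridden.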
};
app.use(bodyParser.json({ limit: "30mb", extended: true }));
app.use(bodyParser.urlencoded({ limit: "30mb", extended: true }));
- app.use(cors(corsOptions));
// app.use(cors());
app.use(cors(corsOptions));
//end
app.get("/", (req, res) => {
......
......@@ -828,7 +828,7 @@ react-mic@^12.4.6:
readable-stream@^2.2.2:
version "2.3.8"
resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.3.8.tgz#91125e8042bba1b9887f49345f6277027ce8be9b"
resolved "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz"
integrity sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==
dependencies:
core-util-is "~1.0.0"
......
from datetime import datetime
import requests
from fastapi import APIRouter, FastAPI, HTTPException, Query, Request
from fastapi.responses import JSONResponse
from pymongo.mongo_client import MongoClient
# Replace with your MongoDB Atlas credentials
username = "admin"
password = "JppbU6MZeHfOj7sp"
uri = f"mongodb+srv://{username}:{password}@researchmanagement-appl.vzhn4.mongodb.net/?retryWrites=true&w=majority"
client = MongoClient(uri)
db = client["test"]
items_collection = db["translated_items"]
items_collection_log = db["translated_items_log"]
router = APIRouter()
# Your unicode_to_int_mapping dictionary
unicode_to_int_mapping = {
"මම": 1,
"හෙට": 2,
"යනවා": 3,
"මං": 4,
"ගුඩ්": 5,
"මෝනිං": 6,
"උඹ": 7,
"ආවේ": 8,
"ඇයි": 9,
}
@router.post("/rest_pyton/get_user_input_text")
async def get_user_input_text(request_data: dict):
user_input_text = request_data.get("userInputText")
if user_input_text is None:
raise HTTPException(status_code=400, detail="Invalid JSON data. Missing 'userInputText' field.")
result = convert_text_to_numbers(user_input_text)
# Send translated integer to MongoDB
send_to_mongodb(result)
# Perform any further processing needed on the translated number string
response = {"message": f"Received user input text: {user_input_text}", "translated_integers_string": result}
return response
def send_to_mongodb(translated_integer_si):
translated_item_data = {
"translated_integer_si": translated_integer_si,
"timestamp": datetime.utcnow(),
}
# Save the previous record to the log before updating
existing_record = items_collection.find_one()
if existing_record:
items_collection_log = db["translated_items_log"]
# Exclude the _id field to allow MongoDB to generate a new one
existing_record.pop("_id", None)
items_collection_log.insert_one(existing_record)
# Update the existing record or create a new one
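# The empty filter {} treats the collection as holding a single "current" document;
# upsert=True creates it on first use and replaces it on every later call.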
result = items_collection.replace_one({}, translated_item_data, upsert=True)
if result.matched_count == 0 and result.modified_count == 0:
raise HTTPException(
status_code=500, detail="Failed to update or create translated item"
)
def convert_text_to_numbers(user_input_text):
words = user_input_text.split()
numbers = [unicode_to_int_mapping.get(word, -1) for word in words] # -1 if word not found
number_string = ' '.join([str(num) for num in numbers if num != -1])
return number_string
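A usage sketch for this endpoint, mirroring the frontend's TextTranslate call (the base URL is the one the frontend already uses; under the dictionary above the sample sentence maps to "1 2 3"):
import axios from "axios";
axios.post("http://127.0.0.1:8000/rest_pyton/get_user_input_text", { userInputText: "මම හෙට යනවා" })
    .then((res) => console.log(res.data.translated_integers_string)); // "1 2 3"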
\ No newline at end of file
......@@ -37,7 +37,7 @@ async def upload_video(video: UploadFile = File(...)):
with open(file_location, "wb") as file:
file.write(video.file.read())
return {"text": "OK2"}
return {"text": "Video Upload Successfully"}
except Exception as e:
logger.info(f"Failed to upload file. {e}")
raise HTTPException(
......
......@@ -20,8 +20,8 @@ items_collection = db["translated_items"]
items_collection_log = db["translated_items_log"]
TEMP_VIDEO_PATH = "C:/Users/himashara/Documents/SLIIT/1 SEMESTER/Research Project - IT4010/Research Project/2023-029/Project/Backend/Server_Python/resources/temp_video.mp4"
AUDIO_SAVE_PATH = "C:/Users/himashara/Documents/SLIIT/1 SEMESTER/Research Project - IT4010/Research Project/2023-029/Project/Backend/Server_Python/resources/audio.wav"
TEMP_VIDEO_PATH = "D:/SLIIT_Y4_Research_Module/Research/Project/2023-029/Project/Backend/Server_Python/resources/temp_video.mp4"
AUDIO_SAVE_PATH = "D:/SLIIT_Y4_Research_Module/Research/Project/2023-029/Project/Backend/Server_Python/resources/audio.wav"
router = APIRouter()
logger = setup_logger()
......@@ -68,7 +68,7 @@ async def uploaded_video(file: UploadFile = File(...)):
str(unicode_to_int_mapping.get(word, "0"))
for word in translated_text_si.split()
)
print("Translated Integer (Si):", translated_integer_si)
print("Translated Integer (Si) from Video:", translated_integer_si)
# Send translated integer to MongoDB
send_to_mongodb(translated_integer_si)
......@@ -84,7 +84,7 @@ async def uploaded_video(file: UploadFile = File(...)):
return JSONResponse(content={"error": str(e)}, status_code=500)
unicode_to_int_mapping = {"මම": 1, "හෙට": 2, "යනවා": 3, "මං": 4}
unicode_to_int_mapping = {"මම": 1, "හෙට": 2, "යනවා": 3, "මං": 4,"ගුඩ්":5, "මෝනිං":6, "උඹ":7, "ආවේ":8, "ඇයි":9}
def translate_text(text, target_language):
......@@ -146,6 +146,7 @@ def send_to_mongodb(translated_integer_si):
existing_record = items_collection.find_one()
if existing_record:
items_collection_log = db["translated_items_log"]
# Exclude the _id field to allow MongoDB to generate a new one
existing_record.pop("_id", None)
items_collection_log.insert_one(existing_record)
......
......@@ -4,7 +4,8 @@ from controllers import (
users_controller,
video_to_sign_language_controller,
audio_detect_controler,
- video_detect_controler
video_detect_controler,
text_to_sign_language_controller
)
from fastapi.responses import RedirectResponse
from fastapi.middleware.cors import CORSMiddleware
......@@ -46,6 +47,7 @@ app.include_router(translate_controler.router)
app.include_router(video_to_sign_language_controller.router)
app.include_router(audio_detect_controler.router)
app.include_router(video_detect_controler.router)
app.include_router(text_to_sign_language_controller.router)
# Add CORS middleware
......@@ -54,6 +56,7 @@ origins = [
"http://localhost:8080",
"http://localhost:8004",
"http://localhost:3000",
"http://localhost:3001",
"http://127.0.0.1:8000",
"127.0.0.1:55553",
"http://localhost:52823",
......@@ -61,7 +64,8 @@ origins = [
"http://localhost:51373",
"http://localhost:51489",
"https://v6p9d9t4.ssl.hwcdn.net",
"https://64f66d39fdef493229b2ddd9--lambent-unicorn-97396a.netlify.app"
# "https://64f66d39fdef493229b2ddd9--lambent-unicorn-97396a.netlify.app",
"https://64f7cfd336356b18eb42de2b--lambent-unicorn-97396a.netlify.app"
]
app.add_middleware(
......
{
"name": "Server_Python",
"lockfileVersion": 2,
"requires": true,
"packages": {}
}
......@@ -24,6 +24,7 @@ emotion_model.load_weights("../ML_Models/Emotion_Detection_Model/emotion_model.h
class EmotionPredictionService:
def __init__(self, model):
self.model = model
self.current_emotion = None
def predict_emotion_detection_video(video_request: UploadFile) -> Dict[str, str]:
try:
......@@ -85,7 +86,26 @@ class EmotionPredictionService:
break
emotions = predict_emotion_from_frame(frame)
predicted_emotions.extend(emotions)
if emotions:
new_emotion = emotions[0] # Assuming you only process one face at a time
cv2.putText(frame, f"Emotion: {new_emotion}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
if new_emotion != self.current_emotion:
self.current_emotion = new_emotion
predicted_emotions.append(new_emotion)
# Display the frame with emotion prediction
cv2.imshow('Emotion Detection', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
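# Note: cv2.imshow/cv2.waitKey require a GUI display; on a headless server this preview block
# should be removed or guarded.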
# while True:
# ret, frame = cap.read()
# if not ret:
# break
# emotions = predict_emotion_from_frame(frame)
# predicted_emotions.extend(emotions)
cap.release()
os.remove(video_location)
......
......@@ -17,6 +17,7 @@ import {
TranslationOutlined,
UserOutlined,
VideoCameraOutlined,
HighlightOutlined,
} from '@ant-design/icons';
// type
......@@ -37,7 +38,8 @@ const icons = {
FastForwardOutlined,
RedditOutlined,
AudioOutlined,
- VideoCameraOutlined
VideoCameraOutlined,
HighlightOutlined
};
// ==============================|| MENU ITEMS - SUPPORT ||============================== //
......@@ -93,6 +95,14 @@ const application: NavItemType = {
title: <FormattedMessage id="video-translate" />,
type: 'item',
url: '/video-to-sign-language/VideoTranslate',
// icon: icons.VideoCameraOutlined,
},
{
id: 'text-translate',
title: <FormattedMessage id="text-translate" />,
type: 'item',
url: '/video-to-sign-language/text-translation',
// icon: icons.HighlightOutlined,
}
]
},
......
......@@ -6,11 +6,37 @@ import Button from '@mui/material/Button';
import UploadOutlined from '@ant-design/icons/lib/icons/UploadOutlined';
import AudioOutlined from '@ant-design/icons/lib/icons/AudioOutlined';
import { Link } from 'react-router-dom';
import { Box, Stack, } from '@mui/material';
import { APP_DEFAULT_PATH } from 'config';
import construction from 'assets/images/maintenance/under-construction.svg';
import {CardContent,IconButton,InputAdornment,Paper,TextField,Typography} from '@mui/material';
import CopyOutlined from '@ant-design/icons/lib/icons/CopyOutlined';
import AudioEmotionDetectService from '../../../../services/AudioEmotionDetection.js';
import { MuiFileInput } from 'mui-file-input';
import { useSnackbar } from 'notistack';
const List = () => {
const [audioBlob, setAudioBlob] = useState<Blob | undefined>(undefined);
const [mediaRecorder, setMediaRecorder] = useState<MediaRecorder | undefined>(undefined);
const [isRecording, setIsRecording] = useState<boolean>(false);
const [audioUrl, setAudioUrl] = useState<string | undefined>(undefined);
const [value, setValue] = useState('');
const [file, setFile] = useState<File | string | null>(null);
const [loading, setLoading] = useState(false);
const [isUploadFile, setIsUploadFile] = useState<boolean | string | null>(true);
const handleDropSingleFile = (files: any) => {
if (files) {
setFile(
Object.assign(files, {
preview: URL.createObjectURL(files)
})
);
setAudioUrl(URL.createObjectURL(files));
}
};
const handleRecordStart = async () => {
// Clear the uploaded audio state when recording starts
......@@ -54,6 +80,53 @@ const List = () => {
// Handle case where uploaded file is not an audio file
}
};
const { enqueueSnackbar } = useSnackbar();
const handleChange = (event: React.ChangeEvent<HTMLTextAreaElement>) => {
setValue(event.target.value);
};
const onCopy = (text: string) => {
if (text) {
navigator.clipboard.writeText(text);
enqueueSnackbar('Copied!', { variant: 'success' });
}
};
// Audio Upload
const predictEmotionFromAudio = async () => {
console.log("OK75")
console.log(file);
if (file) {
setLoading(true);
const formData = new FormData();
//@ts-ignore
formData.append('audio_request', file, file.name);
try {
const response = await AudioEmotionDetectService.predictEmotionAudio(formData);
if (response.status == 200) {
console.log(response.data);
setValue(response.data.predicted_emotion);
} else {
enqueueSnackbar('Something went Wrong!', { variant: 'error' });
}
setLoading(false);
} catch (error) {
console.log(error);
setLoading(false);
enqueueSnackbar('Something went Wrong!', { variant: 'error' });
}
} else {
enqueueSnackbar('Please select a file.', { variant: 'warning' });
}
};
const checkEmotionUpload = () => {
if (isUploadFile) {
return 'contained';
} else {
return 'outlined';
}
};
return (
<MainCard content={false}>
......@@ -65,17 +138,20 @@ const List = () => {
<div style={{ textAlign: 'center' }}>
<input
type="file"
accept="audio/*"
onChange={handleUpload}
style={{ display: 'none' }}
id="audio-upload"
/>
<label htmlFor="audio-upload">
<Button
variant="contained"
// variant="contained"
variant={checkEmotionUpload()}
color="primary"
component="span"
startIcon={<UploadOutlined />}
onClick={() => {
setIsUploadFile(true);
}}
>
Upload
</Button>
......@@ -88,6 +164,49 @@ const List = () => {
>
{isRecording ? 'Stop Recording' : 'Record'}
</Button>
<Button
variant="contained"
disabled={loading}
onClick={() => {
predictEmotionFromAudio();
}}
>
Prediction
</Button>
<div>
<Typography variant="overline" sx={{ color: 'text.secondary' }}>
Predict Emotion
</Typography>
<TextField
fullWidth
value={value}
onChange={handleChange}
InputProps={{
endAdornment: (
<InputAdornment position="end">
<IconButton onClick={() => onCopy(value)}>
<CopyOutlined />
</IconButton>
</InputAdornment>
)
}}
/>
</div>
<CardContent>
{/* ! Important */}
{/* @ts-ignore */}
<MuiFileInput value={file} onChange={handleDropSingleFile} inputProps={{ accept: 'audio/*' }} />
<Paper style={{ padding: '20px', marginTop: '15px' }}>
<Typography variant="h5" align="center" gutterBottom>
Preview
</Typography>
<div style={{ marginTop: '20px', textAlign: 'center' }}>
{file ? <video src={audioUrl} width="400" controls /> : <p>No Audio Selected ...</p>}
</div>
</Paper>
</CardContent>
{audioBlob && (
<audio controls>
<source src={URL.createObjectURL(audioBlob)} type="audio/wav" />
......@@ -104,12 +223,30 @@ const List = () => {
</MainCard>
</Grid>
<Grid item xs={12} md={6}>
- <h2>3D Avatar</h2>
- <MainCard>
- {/* Content of the second card */}
- {/* You can put your 3D avatar components here */}
- </MainCard>
<h2>3D Avatar</h2>
<MainCard>
<Grid container spacing={4} direction="column" alignItems="center" justifyContent="center" sx={{ minHeight: '100vh', py: 2 }}>
<Grid item xs={12}>
<Box sx={{ width: { xs: 300, sm: 480 } }}>
<img src={construction} alt="mantis" style={{ width: '100%', height: 'auto' }} />
</Box>
</Grid>
<Grid item xs={12}>
<Stack spacing={2} justifyContent="center" alignItems="center">
<Typography align="center" variant="h1">
Under Construction
</Typography>
<Typography color="textSecondary" align="center" sx={{ width: '85%' }}>
Hey! Please check out this site later. We are doing some maintenance on it right now.
</Typography>
<Button component={Link} to={APP_DEFAULT_PATH} variant="contained">
Back To Home
</Button>
</Stack>
</Grid>
</Grid>
</MainCard>
</Grid>
</Grid>
</ScrollX>
</MainCard>
......
......@@ -322,7 +322,7 @@ const Tutorial = () => {
((selectedItemContent?.taskItemMark! * parseInt(marksCalculator?.confidence!)) / 100)!,
0
))
}
}
dispatch(toInitialState())
}
}, [success])
......@@ -398,7 +398,10 @@ const Tutorial = () => {
</span>
</Typography>
<Typography variant="h4" sx={{ pt: 3, pb: 1, zIndex: 1 }}>
- {(data?.tutorialMarkUser! / data?.tutorialMarks!) * 100}% Completed
{((data?.tutorialMarkUser! / data?.tutorialMarks!) * 100).toLocaleString(undefined, {
minimumFractionDigits: 2,
maximumFractionDigits: 2,
})}% Completed
</Typography>
<Box sx={{ maxWidth: '60%' }}>
<LinearProgress variant="determinate" color="success" value={(data?.tutorialMarkUser! / data?.tutorialMarks!) * 100} />
......
import axios from 'axios';
class SignLanguageToTextService {
predictSignLanguageVideo(speed, data) {
return axios.post(
`http://127.0.0.1:8000/predict-sign-language/video/speed_levels?speed=${speed}`,
data
);
}
}
export default new SignLanguageToTextService();
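A usage sketch, assuming the FastAPI endpoint reads the clip from a multipart field named "video" (the field name and import path are assumptions, not confirmed by this diff):
import SignLanguageToTextService from 'services/SignLanguageToText.js';
const formData = new FormData();
formData.append('video', selectedVideoFile); // a File object from an <input type="file">; field name assumed
SignLanguageToTextService.predictSignLanguageVideo(2, formData)
    .then((res) => console.log(res.data));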
// material-ui
import {
Button,
Divider,
Grid,
InputLabel,
Stack,
TextField
} from '@mui/material';
import { LocalizationProvider } from '@mui/x-date-pickers';
import { AdapterDateFns } from '@mui/x-date-pickers/AdapterDateFns';
import axios from 'axios';
import MainCard from 'components/MainCard';
// third-party
import { Form, FormikProvider, useFormik } from 'formik';
import { useState } from 'react';
import * as Yup from 'yup';
// project imports
// assets
// types
// ==============================|| List ||============================== //
const TextTranslate = () => {
const [showUnityWebGL, setShowUnityWebGL] = useState(false);
const FormSchema = Yup.object().shape({
userInputText: Yup.string()
.required('Field is required')
.test('is-sinhala', 'Input must be in Sinhala', (value) => {
// You can use a regular expression or any custom logic to validate Sinhala text.
// Here's a basic example using a regular expression for Sinhala characters.
const sinhalaRegex = /^[\u0D80-\u0DFF\s]+$/; // Range for Sinhala Unicode characters
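// e.g. "මම හෙට යනවා" passes, while Latin or mixed input such as "hello" or "මම now" is rejected.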
if (!value) {
// If the field is empty, it's valid as it's already being checked by 'required'
return true;
}
return sinhalaRegex.test(value);
})
.max(255, 'Input must be at most 255 characters long'),
});
const formik = useFormik({
initialValues: {
userInputText: "",
},
validationSchema: FormSchema,
enableReinitialize: true,
onSubmit: async (values, { setSubmitting, resetForm }) => {
try {
// API Call Here
const response = await axios.post("http://127.0.0.1:8000/rest_pyton/get_user_input_text", {
userInputText: values.userInputText,
});
// Check if the request was successful
if (response.status === 200) {
console.log("Success:", response.data);
setShowUnityWebGL(true)
} else {
console.error("Request failed with status code:", response.status);
}
resetForm()
setSubmitting(false);
} catch (error) {
console.error(error);
}
}
});
const { errors, touched, handleSubmit, isSubmitting, getFieldProps } = formik;
return (
<>
<FormikProvider value={formik}>
<LocalizationProvider dateAdapter={AdapterDateFns}>
<Form autoComplete="off" noValidate onSubmit={handleSubmit}>
<MainCard>
<Grid container spacing={3}>
<Grid item xs={4} md={4}>
<Grid container spacing={3}>
<Grid item xs={12}>
<Stack spacing={1.25}>
<InputLabel htmlFor="userInputText">Enter Text to Translate</InputLabel>
<TextField
fullWidth
id="userInputText"
placeholder="Enter Text Name"
{...getFieldProps('userInputText')}
error={Boolean(touched.userInputText && errors.userInputText)}
helperText={touched.userInputText && errors.userInputText}
/>
</Stack>
</Grid>
<Grid item xs={12}>
<Divider />
</Grid>
<Grid item xs={12}>
<Grid container justifyContent="space-between" alignItems="center">
<Grid item>
</Grid>
<Grid item>
<Stack direction="row" spacing={2} alignItems="center">
<Button color="error" onClick={() => { setShowUnityWebGL(false) }}>
Cancel
</Button>
<Button type="submit" variant="contained" disabled={isSubmitting}>
TRANSLATE
</Button>
</Stack>
</Grid>
</Grid>
</Grid>
</Grid>
</Grid>
<Grid item xs={8} md={8}>
{showUnityWebGL ? <iframe
// src="https://64f66d39fdef493229b2ddd9--lambent-unicorn-97396a.netlify.app/"
src="https://64f7cfd336356b18eb42de2b--lambent-unicorn-97396a.netlify.app/"
width="100%"
height="700px" // Adjust the height as needed
title="Unity WebGL"
style={{ border: 'none', overflow: 'hidden' }}
></iframe> : <>3D model Rendered Here...</>}
</Grid>
</Grid>
</MainCard>
</Form>
</LocalizationProvider>
</FormikProvider >
</>
);
};
export default TextTranslate;
\ No newline at end of file
......@@ -49,6 +49,8 @@ const TutorialManagementList = Loadable(lazy(() => import('pages/parameter/tutor
// render - Video to Sign Language page
const VideoTranslation = Loadable(lazy(() => import('pages/video-to-sign-language/VideoTranslate/VideoTranslate')));
// render - Text to Sign Language page
const TextTranslation = Loadable(lazy(() => import('pages/video-to-sign-language/TextTranslate/TextTranslate')));
// render - audio-detection page
const AudioDetection = Loadable(lazy(() => import('pages/emotion-detection/emotion-audio-detection/list/list')));
......@@ -128,9 +130,14 @@ const MainRoutes = {
{
path: 'VideoTranslate',
element: <VideoTranslation />
},
{
path: 'text-translation',
element: <TextTranslation />
}
]
},
{
path: 'learning-management',
children: [
......
......@@ -53,7 +53,10 @@ const CurriculumSection = ({ curriculum, curriculumIndex }: { curriculum: curric
Your learning capacity is 80% as daily analytics
</Typography>
<Typography variant="h4" color="white" sx={{ pt: 8, pb: 1, zIndex: 1 }}>
- {(curriculum?.curriculumMarkUser! / curriculum?.curriculumMark!) * 100}% Completed
{((curriculum?.curriculumMarkUser! / curriculum?.curriculumMark!) * 100).toLocaleString(undefined, {
minimumFractionDigits: 2,
maximumFractionDigits: 2,
})}% Completed
</Typography>
<Box sx={{ maxWidth: '60%' }}>
<LinearProgress variant="determinate" color="success" value={(curriculum?.curriculumMarkUser! / curriculum?.curriculumMark!) * 100} />
......
import axios from 'axios';
class AudioEmotionDetectService {
predictEmotionAudio(data) {
return axios.post(
`http://127.0.0.1:8000/predict_emotion/audio/`,
data
);
}
}
export default new AudioEmotionDetectService();
\ No newline at end of file
import axios from 'axios';
class VideoEmotionDetectService {
predictEmotionVideo(data) {
return axios.post(
`http://127.0.0.1:8000/predict_emotion/video/`,
data
);
}
}
export default new VideoEmotionDetectService();
\ No newline at end of file
......@@ -3,10 +3,8 @@ import axios from 'axios';
class VideoToSignLanguage {
videoTranslation(data) {
return axios.post(
- // @ts-ignore
- `http://127.0.0.1:8000/translated_items/`,
- data
- );
`http://127.0.0.1:8000/translated_items/`, data
);
}
}
......
......@@ -41,11 +41,17 @@ const slice = createSlice({
state.isLoading = false;
},
- // POST USER
// POST marksCalculatorSuccess
marksCalculatorSuccess(state, action) {
state.marksCalculator = action.payload.result;
state.success = "Marks Calculated Successfully."
},
// POST default marksCalculatorSuccess
defaultMarksCalculatorSuccess(state, action) {
state.marksCalculator = action.payload.result;
state.success = "Marks Calculated Successfully."
},
}
});
......@@ -76,8 +82,7 @@ export function CalculateMarks(curriculumIndex: number, tutorialIndex: number, i
return async () => {
dispatch(slice.actions.startLoading());
- try {
- // Construct the request body as needed (e.g., for formData)
try {
const formData = new FormData();
formData.append('image', imageData);
formData.append('class', targetClass);
......@@ -91,3 +96,29 @@ export function CalculateMarks(curriculumIndex: number, tutorialIndex: number, i
}
};
};
/**
* POST Default Marks Calculator
* @param original_image
* @param user_input_image
* @returns
*/
export function DefaultCalculateMarks(original_image: any, user_input_image: any) {
return async () => {
dispatch(slice.actions.startLoading());
try {
const formData = new FormData();
formData.append('original_image', original_image);
formData.append('user_input_image', user_input_image);
const response = await axiosServices.post(`/rest_node/marks-calculator/default`, formData);
dispatch(slice.actions.marksCalculatorSuccess(response.data));
} catch (error) {
dispatch(slice.actions.hasError(error));
} finally {
dispatch(slice.actions.finishLoading());
}
};
};
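A dispatch sketch from a component, assuming the store's `dispatch` is in scope and both arguments are File objects taken from the upload inputs:
dispatch(DefaultCalculateMarks(originalImageFile, userInputImageFile));
// On success the slice stores `result` in `marksCalculator` and sets `success`,
// which the Tutorial page watches to update the user's marks.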
......@@ -168,5 +168,6 @@
"learning-dashboard": "Dashboard",
"learning-curriculums-subscribed-tutorial": "Tutorial",
"video-to-sign-language": "Sign Language Translate",
"video-translate": "Video Translator"
"video-translate": "Video Translation",
"text-translate": "Text Translation"
}