Commit e3c48555 authored by Gamage B.G.J

Merge branch 'IT20251000' into 'master'

It20251000

See merge request !19
parents a3bf1116 32b029be
@@ -4,5 +4,9 @@
    "Janith",
    "leaderboard",
    "SLIIT"
  ],
  "[python]": {
    "editor.defaultFormatter": "ms-python.black-formatter"
  },
  "python.formatting.provider": "none"
}
\ No newline at end of file
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "4a663198",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from PIL import Image\n",
"import numpy as np\n",
"from sklearn.model_selection import train_test_split\n",
"from sklearn.preprocessing import LabelEncoder\n",
"from tensorflow import keras\n",
"from tensorflow.keras import layers\n",
"from keras.models import load_model"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "6a893211",
"metadata": {},
"outputs": [],
"source": [
"# Set the dataset path and image size\n",
"dataset_path = \"C:/Users/himashara/Documents/signlanguage/training/\"\n",
"image_size = (480, 480) # Adjust the image size as per your requirements\n",
"\n",
"# Load and preprocess the dataset\n",
"def load_dataset():\n",
" labels = []\n",
" images = []\n",
" for label in os.listdir(dataset_path):\n",
" label_path = os.path.join(dataset_path, label)\n",
" if os.path.isdir(label_path):\n",
" for image_file in os.listdir(label_path):\n",
" image_path = os.path.join(label_path, image_file)\n",
" if os.path.isfile(image_path):\n",
" image = Image.open(image_path)\n",
" image = image.resize(image_size)\n",
" image = np.array(image)\n",
" images.append(image)\n",
" labels.append(label)\n",
" images = np.array(images)\n",
" labels = np.array(labels)\n",
" return images, labels\n",
"\n",
"images, labels = load_dataset()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "ef0e4281",
"metadata": {},
"outputs": [],
"source": [
"# Encode the labels into numerical values\n",
"label_encoder = LabelEncoder()\n",
"labels = label_encoder.fit_transform(labels)\n",
"\n",
"# Split the dataset into training and testing sets\n",
"X_train, X_test, y_train, y_test = train_test_split(images, labels, test_size=0.2, random_state=42)\n",
"\n",
"# Preprocess the input data\n",
"X_train = X_train.astype(\"float32\") / 255.0\n",
"X_test = X_test.astype(\"float32\") / 255.0\n",
"num_classes = len(np.unique(labels))\n",
"y_train = keras.utils.to_categorical(y_train, num_classes)\n",
"y_test = keras.utils.to_categorical(y_test, num_classes)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "555ffeee",
"metadata": {},
"outputs": [],
"source": [
"# Define the model architecture\n",
"model = keras.Sequential([\n",
" layers.Conv2D(32, (3, 3), activation=\"relu\", input_shape=(image_size[0], image_size[1], 3)),\n",
" layers.MaxPooling2D(pool_size=(2, 2)),\n",
" layers.Flatten(),\n",
" layers.Dense(64, activation=\"relu\"),\n",
" layers.Dense(num_classes, activation=\"softmax\")\n",
"])"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "eb07946c",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/10\n",
"22/22 [==============================] - 58s 3s/step - loss: 58.0361 - accuracy: 0.1509 - val_loss: 2.3127 - val_accuracy: 0.2059\n",
"Epoch 2/10\n",
"22/22 [==============================] - 59s 3s/step - loss: 2.3554 - accuracy: 0.2278 - val_loss: 2.0756 - val_accuracy: 0.1824\n",
"Epoch 3/10\n",
"22/22 [==============================] - 67s 3s/step - loss: 2.0049 - accuracy: 0.3195 - val_loss: 2.0513 - val_accuracy: 0.2765\n",
"Epoch 4/10\n",
"22/22 [==============================] - 65s 3s/step - loss: 1.9182 - accuracy: 0.3580 - val_loss: 1.8208 - val_accuracy: 0.3235\n",
"Epoch 5/10\n",
"22/22 [==============================] - 87s 4s/step - loss: 1.7040 - accuracy: 0.3905 - val_loss: 1.7384 - val_accuracy: 0.3294\n",
"Epoch 6/10\n",
"22/22 [==============================] - 70s 3s/step - loss: 1.6378 - accuracy: 0.4024 - val_loss: 1.9270 - val_accuracy: 0.3059\n",
"Epoch 7/10\n",
"22/22 [==============================] - 71s 3s/step - loss: 1.6423 - accuracy: 0.3905 - val_loss: 1.7039 - val_accuracy: 0.3529\n",
"Epoch 8/10\n",
"22/22 [==============================] - 66s 3s/step - loss: 1.5635 - accuracy: 0.4083 - val_loss: 1.6209 - val_accuracy: 0.3294\n",
"Epoch 9/10\n",
"22/22 [==============================] - 67s 3s/step - loss: 1.4948 - accuracy: 0.4038 - val_loss: 1.5666 - val_accuracy: 0.3765\n",
"Epoch 10/10\n",
"22/22 [==============================] - 74s 3s/step - loss: 1.4630 - accuracy: 0.4615 - val_loss: 1.5821 - val_accuracy: 0.3588\n"
]
},
{
"data": {
"text/plain": [
"<keras.callbacks.History at 0x26f794884c0>"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Compile and train the model\n",
"model.compile(loss=\"categorical_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n",
"model.fit(X_train, y_train, batch_size=32, epochs=10, validation_data=(X_test, y_test))"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "81b6613d",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"6/6 [==============================] - 7s 1s/step - loss: 1.5821 - accuracy: 0.3588\n",
"Test Loss: 1.5821244716644287\n",
"Test Accuracy: 0.3588235378265381\n"
]
}
],
"source": [
"# Evaluate the model on the test dataset\n",
"loss, accuracy = model.evaluate(X_test, y_test)\n",
"print(\"Test Loss:\", loss)\n",
"print(\"Test Accuracy:\", accuracy)"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "65ccc47b",
"metadata": {},
"outputs": [],
"source": [
"# Save the trained model\n",
"model.save(\"letter_classification_model.h5\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0db64f31",
"metadata": {},
"outputs": [],
"source": [
"# ---------------new updated ---------------"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d0ce9c60",
"metadata": {},
"outputs": [],
"source": [
"# import os\n",
"# from PIL import Image\n",
"# import numpy as np\n",
"# from keras.models import load_model\n",
"# from sinling import SinhalaTokenizer\n",
"# from IPython.display import Image as DisplayImage, display\n",
"\n",
"# image_size = (480, 480)\n",
"\n",
"# # Dataset path\n",
"# dataset_path = \"C:/Users/himashara/Documents/signlanguage/training/\"\n",
"\n",
"# # Translation mapping from Sinhala Unicode to Singlish\n",
"# sinhala_to_singlish = {\n",
"# \"අ\": \"A\",\n",
"# \"ආ\": \"Aah\",\n",
"# \"ඇ\": \"Aeh\",\n",
"# \"ඉ\": \"Ee\",\n",
"# \"ඊ\": \"Eeh\",\n",
"# \"උ\": \"Uh\",\n",
"# \"ඌ\": \"Uhh\",\n",
"# \"එ\": \"A\",\n",
"# \"ඒ\": \"Ae\",\n",
"# \"ඔ\": \"O\",\n",
"# \"ඕ\": \"Ohh\",\n",
"# \"ක්\": \"K\",\n",
"# \"ග්\": \"Ig\",\n",
"# \"ටී\": \"Tee\",\n",
"# \"ට\": \"T\"\n",
"# }\n",
"\n",
"# # Function to retrieve the letter image\n",
"# def get_letter_image(letter):\n",
"# if os.path.exists(os.path.join(dataset_path, letter)):\n",
"# letter_path = os.path.join(dataset_path, letter)\n",
"# image_files = os.listdir(letter_path)\n",
"# image_path = os.path.join(letter_path, image_files[0]) # Assuming only one image per letter\n",
"# return Image.open(image_path)\n",
"# return None\n",
"\n",
"# # Load the pre-trained model\n",
"# model = load_model(\"letter_classification_model.h5\")\n",
"\n",
"# # Sinhala tokenizer\n",
"# tokenizer = SinhalaTokenizer()\n",
"\n",
"# # Function to preprocess Sinhala text\n",
"# def preprocess_sinhala_text(text):\n",
"# tokens = tokenizer.tokenize(text)\n",
"# return ' '.join(tokens)\n",
"\n",
"# # User input\n",
"# unicode_input = input(\"Enter a Sinhala Unicode string: \")\n",
"\n",
"# # Remove spaces from the user input\n",
"# unicode_input = unicode_input.replace(\" \", \"\")\n",
"\n",
"# # Translate Sinhala Unicode to Singlish\n",
"# singlish_input = ''.join([sinhala_to_singlish.get(c, c) for c in unicode_input])\n",
"\n",
"# # Print the translated Singlish string\n",
"# print(\"Translated Singlish string:\", singlish_input)\n",
"\n",
"# # Retrieve letter images for the Singlish input\n",
"# letter_images = []\n",
"# combined_string = \"\"\n",
"# for letter in singlish_input:\n",
"# combined_string += letter\n",
"# letter_image = get_letter_image(combined_string)\n",
"# if letter_image is not None:\n",
"# letter_image = letter_image.resize(image_size) # Adjust the size if needed\n",
"# letter_image = np.array(letter_image) / 255.0 # Normalize the image if needed\n",
"# letter_images.append(letter_image)\n",
"# combined_string = \"\"\n",
"\n",
"# # Combine the letter images into a GIF\n",
"# gif_images = [Image.fromarray(np.uint8(letter_image * 255)) for letter_image in letter_images]\n",
"\n",
"# # Save the combined images as a GIF\n",
"# output_path = \"C:/Users/himashara/Documents/signlanguage/generated-gif/generated.gif\"\n",
"# gif_images[0].save(output_path, save_all=True, append_images=gif_images[1:], loop=0, duration=1200)\n",
"\n",
"# # Display the generated GIF\n",
"# display(DisplayImage(filename=output_path))\n",
"# print(\"GIF saved successfully!\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ea87d3ca",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 67,
"id": "fc22ffc8",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import tkinter as tk\n",
"from tkinter import ttk\n",
"from PIL import Image, ImageTk, ImageSequence\n",
"import numpy as np\n",
"from keras.models import load_model\n",
"from sinling import SinhalaTokenizer\n",
"\n",
"# Create Tkinter window\n",
"window = tk.Tk()\n",
"window.title(\"Sinhala to Singlish Translator\")\n",
"\n",
"# Set up the layout using Tkinter's grid system\n",
"# Step 1: Text area to enter Sinhala Unicode string\n",
"unicode_label = ttk.Label(window, text=\"Enter a Sinhala Unicode string:\")\n",
"unicode_label.grid(row=0, column=0, sticky=tk.W)\n",
"unicode_entry = ttk.Entry(window, width=30)\n",
"unicode_entry.grid(row=0, column=1, columnspan=2, pady=10)\n",
"\n",
"# Step 2: Convert button\n",
"def convert_unicode_to_singlish():\n",
" # Retrieve the Unicode input from the text entry\n",
" unicode_input = unicode_entry.get()\n",
"\n",
" # Remove spaces from the user input\n",
" unicode_input = unicode_input.replace(\" \", \"\")\n",
"\n",
" # Translate Sinhala Unicode to Singlish\n",
" singlish_input = ''.join([sinhala_to_singlish.get(c, c) for c in unicode_input])\n",
"\n",
" # Update the Singlish text display\n",
" singlish_text.delete(1.0, tk.END)\n",
" singlish_text.insert(tk.END, singlish_input)\n",
"\n",
" # Retrieve letter images for the Singlish input\n",
" letter_images = []\n",
" combined_string = \"\"\n",
" for letter in singlish_input:\n",
" combined_string += letter\n",
" letter_image = get_letter_image(combined_string)\n",
" if letter_image is not None:\n",
" letter_image = letter_image.resize(image_size) # Adjust the size if needed\n",
" letter_image = np.array(letter_image) / 255.0 # Normalize the image if needed\n",
" letter_images.append(letter_image)\n",
" combined_string = \"\"\n",
"\n",
" # Combine the letter images into a GIF\n",
" gif_images = []\n",
" for letter_image in letter_images:\n",
" image_pil = Image.fromarray((letter_image * 255).astype(np.uint8))\n",
" gif_images.append(image_pil)\n",
"\n",
" # Save the combined images as a GIF\n",
" output_path = \"C:/Users/himashara/Documents/signlanguage/generated-gif/generated.gif\"\n",
" gif_images[0].save(output_path, save_all=True, append_images=gif_images[1:], loop=0, duration=1200)\n",
"\n",
" # Display the generated GIF\n",
" display_gif(output_path)\n",
"\n",
"def clear_results():\n",
" # Clear the entered Unicode string\n",
" unicode_entry.delete(0, tk.END)\n",
"\n",
" # Clear the Singlish text display\n",
" singlish_text.delete(1.0, tk.END)\n",
"\n",
" # Clear the GIF display\n",
" gif_label.configure(image=None)\n",
" gif_label.image = None\n",
"\n",
" # Cancel the pending after() calls\n",
" window.after_cancel(gif_animation_id)\n",
"\n",
"def display_gif(output_path):\n",
" # Load the GIF and extract frames using ImageSequence module\n",
" gif_image = Image.open(output_path)\n",
" frames = [frame.copy() for frame in ImageSequence.Iterator(gif_image)]\n",
"\n",
" # Create a Tkinter label widget to display the GIF frames\n",
" global gif_label\n",
" gif_label = ttk.Label(window)\n",
" gif_label.grid(row=3, column=1, columnspan=2, pady=10)\n",
"\n",
" # Recursive function to animate the GIF frames\n",
" def animate_frame(frame_index):\n",
" # Display the current frame on the label\n",
" frame = frames[frame_index]\n",
" frame_image = ImageTk.PhotoImage(frame)\n",
" gif_label.configure(image=frame_image)\n",
" gif_label.image = frame_image\n",
"\n",
" # Calculate the index of the next frame\n",
" next_frame_index = (frame_index + 1) % len(frames)\n",
"\n",
" # Call the function again after a certain delay\n",
" global gif_animation_id\n",
" gif_animation_id = window.after(1200, animate_frame, next_frame_index)\n",
"\n",
" # Start the animation by calling the recursive function with the first frame index\n",
" animate_frame(0)\n",
"\n",
"convert_button = ttk.Button(window, text=\"Convert\", command=convert_unicode_to_singlish)\n",
"convert_button.grid(row=1, column=0, pady=10)\n",
"\n",
"clear_button = ttk.Button(window, text=\"Clear Results\", command=clear_results)\n",
"clear_button.grid(row=1, column=1, pady=10)\n",
"\n",
"# Step 3: View area to display Translated Singlish string\n",
"singlish_label = ttk.Label(window, text=\"Translated Singlish string:\")\n",
"singlish_label.grid(row=2, column=0, sticky=tk.W)\n",
"singlish_text = tk.Text(window, height=1, width=30)\n",
"singlish_text.grid(row=2, column=1, columnspan=2, pady=10)\n",
"\n",
"# Dataset path\n",
"dataset_path = \"C:/Users/himashara/Documents/signlanguage/training/\"\n",
"\n",
"# Translation mapping from Sinhala Unicode to Singlish\n",
"sinhala_to_singlish = {\n",
" \"අ\": \"A\",\n",
" \"ආ\": \"Aah\",\n",
" \"ඇ\": \"Aeh\",\n",
" \"ඉ\": \"Ee\",\n",
" \"ඊ\": \"Eeh\",\n",
" \"උ\": \"Uh\",\n",
" \"ඌ\": \"Uhh\",\n",
" \"එ\": \"A\",\n",
" \"ඒ\": \"Ae\",\n",
" \"ඔ\": \"O\",\n",
" \"ඕ\": \"Ohh\",\n",
" \"ක්\": \"K\",\n",
" \"ග්\": \"Ig\",\n",
" \"ටී\": \"Tee\",\n",
" \"ට\": \"T\"\n",
"}\n",
"\n",
"# Image size\n",
"image_size = (480, 480)\n",
"\n",
"# Function to retrieve the letter image\n",
"def get_letter_image(letter):\n",
" if os.path.exists(os.path.join(dataset_path, letter)):\n",
" letter_path = os.path.join(dataset_path, letter)\n",
" image_files = os.listdir(letter_path)\n",
" image_path = os.path.join(letter_path, image_files[0]) # Assuming only one image per letter\n",
" return Image.open(image_path)\n",
" return None\n",
"\n",
"# Load the pre-trained model\n",
"model = load_model(\"letter_classification_model.h5\")\n",
"\n",
"# Sinhala tokenizer\n",
"tokenizer = SinhalaTokenizer()\n",
"\n",
"# Function to preprocess Sinhala text\n",
"def preprocess_sinhala_text(text):\n",
" tokens = tokenizer.tokenize(text)\n",
" return ' '.join(tokens)\n",
"\n",
"# Start the Tkinter event loop\n",
"window.mainloop()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b2efcfc9",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "0e25256c",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "1028b834",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -11,9 +11,11 @@ from utils import mappings
router = APIRouter()
logger = setup_logger()

class ImageRequest(BaseModel):
    image: UploadFile

# Load your Keras model
# model = tf.keras.models.load_model('../ML_Models/sign_language_to_text/models/sign_language_model.h5')
model = None
@@ -29,7 +31,6 @@ prediction_service = SignLanguagePredictionService(model, CLASSES, mappings,spee
@router.post("/upload/video", tags=["Sign Language"])
async def upload_video(video: UploadFile = File(...)):
    try:
        file_location = f"files/{video.filename}"
        with open(file_location, "wb") as file:
            file.write(video.file.read())
@@ -69,7 +70,4 @@ def predict_using_video(video_request: UploadFile = File(...), speed: int = Quer
        return prediction_service.predict_sign_language_video_with_speed_levels(video_request, speed=speed)
    except Exception as e:
        logger.info(f"Error. {e}")
        raise HTTPException(status_code=500, detail="Request Failed.")
\ No newline at end of file
from datetime import datetime
import moviepy.editor as mp
import requests
import speech_recognition as sr
from fastapi import APIRouter, File, HTTPException, UploadFile
from fastapi.responses import JSONResponse
from pymongo.mongo_client import MongoClient
from core.logger import setup_logger
# Replace with your MongoDB Atlas credentials
username = "admin"
password = "JppbU6MZeHfOj7sp"
uri = f"mongodb+srv://{username}:{password}@researchmanagement-appl.vzhn4.mongodb.net/?retryWrites=true&w=majority"
client = MongoClient(uri)
db = client["test"]
items_collection = db["translated_items"]
items_collection_log = db["translated_items_log"]
TEMP_VIDEO_PATH = "C:/Users/himashara/Documents/SLIIT/1 SEMESTER/Research Project - IT4010/Research Project/2023-029/Project/Backend/Server_Python/resources/temp_video.mp4"
AUDIO_SAVE_PATH = "C:/Users/himashara/Documents/SLIIT/1 SEMESTER/Research Project - IT4010/Research Project/2023-029/Project/Backend/Server_Python/resources/audio.wav"
router = APIRouter()
logger = setup_logger()
@router.post("/rest_pyton/uploaded_video")
async def uploaded_video(file: UploadFile = File(...)):
    try:
        # Process the uploaded video
        video_content = await file.read()
        temp_video_path = TEMP_VIDEO_PATH
        with open(temp_video_path, "wb") as temp_video_file:
            temp_video_file.write(video_content)

        audio_save_path = AUDIO_SAVE_PATH
        video = mp.VideoFileClip(temp_video_path)
        audio = video.audio
        audio.write_audiofile(audio_save_path)

        r = sr.Recognizer()
        with sr.AudioFile(audio_save_path) as source:
            audio = r.record(source)

        recognized_text = r.recognize_google(audio, language="si-LK")

        translated_text_si = translate_text(recognized_text, "si")
        translated_text_en = translate_text(recognized_text, "en")

        translated_integer_si = " ".join(
            str(unicode_to_int_mapping.get(word, "0"))
            for word in translated_text_si.split()
        )

        print("Translated Integer (Si):", translated_integer_si)

        # Send translated integer to MongoDB
        send_to_mongodb(translated_integer_si)

        return JSONResponse(
            content={
                "translated_text_si": translated_text_si,
                "translated_text_en": translated_text_en,
            }
        )
        # return JSONResponse(content={"translated_text_si": "test"})
    except Exception as e:
        return JSONResponse(content={"error": str(e)}, status_code=500)
unicode_to_int_mapping = {"මම": 1, "හෙට": 2, "යනවා": 3, "මං": 4}
def translate_text(text, target_language):
    url = "https://translate.googleapis.com/translate_a/single"
    params = {
        "client": "gtx",
        "sl": "auto",
        "tl": target_language,
        "dt": "t",
        "q": text,
    }
    response = requests.get(url, params=params)
    translation_data = response.json()
    translated_text = translation_data[0][0][0]
    return translated_text
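# Illustrative usage (hypothetical; translate_text() relies on the unofficial
# Google Translate web endpoint, so availability and exact wording can vary):
#   translate_text("මම හෙට යනවා", "en")  # -> roughly "I am going tomorrow"
#   translate_text("මම හෙට යනවා", "si")  # -> the recognized Sinhala text, normalized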
# v1
# def send_to_mongodb(translated_integer_si):
#     translated_item_data = {
#         "translated_integer_si": translated_integer_si,
#         "timestamp": datetime.utcnow(),
#     }
#     result = items_collection.insert_one(translated_item_data)
#     if not result.inserted_id:
#         raise HTTPException(status_code=500, detail="Failed to create translated item")

# v2
# def send_to_mongodb(translated_integer_si):
#     translated_item_data = {
#         "translated_integer_si": translated_integer_si,
#         "timestamp": datetime.utcnow(),
#     }
#     # Save the previous record to the log before updating
#     existing_record = items_collection.find_one()
#     if existing_record:
#         items_collection_log = db["translated_items_log"]
#         items_collection_log.insert_one(existing_record)
#     # Update the existing record or create a new one
#     result = items_collection.replace_one({}, translated_item_data, upsert=True)
#     if result.matched_count == 0 and result.modified_count == 0:
#         raise HTTPException(
#             status_code=500, detail="Failed to update or create translated item"
#         )

# v3
def send_to_mongodb(translated_integer_si):
    translated_item_data = {
        "translated_integer_si": translated_integer_si,
        "timestamp": datetime.utcnow(),
    }

    # Save the previous record to the log before updating
    existing_record = items_collection.find_one()
    if existing_record:
        items_collection_log = db["translated_items_log"]
        # Exclude the _id field to allow MongoDB to generate a new one
        existing_record.pop("_id", None)
        items_collection_log.insert_one(existing_record)

    # Update the existing record or create a new one
    result = items_collection.replace_one({}, translated_item_data, upsert=True)
    if result.matched_count == 0 and result.modified_count == 0:
        raise HTTPException(
            status_code=500, detail="Failed to update or create translated item"
        )
from fastapi import FastAPI, HTTPException
from pymongo.mongo_client import MongoClient
from pydantic import BaseModel
from typing import List
from bson import ObjectId
from datetime import datetime
app = FastAPI()
# Replace with your MongoDB Atlas credentials
username = "admin"
password = "JppbU6MZeHfOj7sp"
uri = f"mongodb+srv://{username}:{password}@researchmanagement-appl.vzhn4.mongodb.net/?retryWrites=true&w=majority"
client = MongoClient(uri)
db = client["test"]
items_collection = db["translated_items"]
class ItemCreate(BaseModel):
    item_name: str
    item_description: str

class Item(BaseModel):
    item_name: str
    item_description: str
    _id: str

class TranslatedItemCreate(BaseModel):
    translated_integer_si: str

@app.on_event("startup")
async def startup_db_client():
    app.mongodb_client = MongoClient(uri)
    try:
        app.mongodb_client.admin.command("ping")
        print("Pinged your deployment. You successfully connected to MongoDB!")
    except Exception as e:
        print(e)

@app.on_event("shutdown")
async def shutdown_db_client():
    app.mongodb_client.close()

@app.get("/")
async def read_root():
    return {"message": "FastAPI with MongoDB integration"}
# Send translated integer to MongoDB
@app.post("/translated_items/", response_model=TranslatedItemCreate)
async def create_translated_item(translated_item_create: TranslatedItemCreate):
    translated_item_data = {
        "translated_integer_si": translated_item_create.translated_integer_si,
        "timestamp": datetime.utcnow(),
    }
    result = items_collection.insert_one(translated_item_data)
    if result.inserted_id:
        # return translated_item_data
        return {**translated_item_data, "_id": str(result.inserted_id)}
    else:
        raise HTTPException(status_code=500, detail="Failed to create translated item")
@app.put("/items/{item_id}", response_model=Item)
async def update_item(item_id: str, item_update: ItemCreate):
    item_object_id = ObjectId(item_id)  # Convert item_id to ObjectId
    update_data = {
        "item_name": item_update.item_name,
        "item_description": item_update.item_description,
    }
    result = items_collection.update_one({"_id": item_object_id}, {"$set": update_data})
    if result.modified_count > 0:
        updated_item = items_collection.find_one({"_id": item_object_id})
        return {**updated_item, "_id": str(updated_item["_id"])}
    else:
        raise HTTPException(status_code=404, detail="Item not found")

@app.get("/items/", response_model=List[Item])
async def get_all_items():
    items = list(items_collection.find())
    items_with_string_id = [{**item, "_id": str(item["_id"])} for item in items]
    print(items_with_string_id)
    return items_with_string_id

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="127.0.0.1", port=8000)
from fastapi import FastAPI
from controllers import translate_controler, users_controller, video_to_sign_language_controller
from fastapi.responses import RedirectResponse
from fastapi.middleware.cors import CORSMiddleware
from pymongo.mongo_client import MongoClient
from core.logger import setup_logger

app = FastAPI()

# Replace with your MongoDB Atlas credentials
username = "admin"
password = "JppbU6MZeHfOj7sp"
uri = f"mongodb+srv://{username}:{password}@researchmanagement-appl.vzhn4.mongodb.net/?retryWrites=true&w=majority"
client = MongoClient(uri)
db = client["test"]
items_collection = db["translated_items"]

@app.on_event("startup")
async def startup_db_client():
    app.mongodb_client = MongoClient(uri)
    try:
        app.mongodb_client.admin.command("ping")
        print("Pinged your deployment. You successfully connected to MongoDB!")
    except Exception as e:
        print(e)

@app.on_event("shutdown")
async def shutdown_db_client():
    app.mongodb_client.close()

logger = setup_logger()

app.include_router(users_controller.router)
app.include_router(translate_controler.router)
app.include_router(video_to_sign_language_controller.router)

# Add cores middleware
@@ -21,6 +47,7 @@ origins = [
    "http://localhost:8080",
    "http://localhost:8004",
    "http://localhost:3000",
    "http://127.0.0.1:8000"
]

app.add_middleware(CORSMiddleware,
@@ -32,4 +59,4 @@ app.add_middleware(CORSMiddleware,
@app.get('/')
async def root():
    url = app.docs_url or '/docs'
    return RedirectResponse(url)
\ No newline at end of file
@@ -2,4 +2,8 @@ fastapi
uvicorn
python-multipart==0.0.6
tensorflow==2.12.0
opencv-python==4.7.0.72
moviepy==1.0.3
SpeechRecognition==3.10.0
tk==0.1.0
requests==2.31.0
\ No newline at end of file
@@ -110,7 +110,11 @@
    "util": "^0.12.5",
    "uuid": "^9.0.0",
    "web-vitals": "^3.3.1",
    "yup": "^1.1.1",
    "@material-ui/core": "^4.12.4",
    "@mui/icons-material": "^5.14.6",
    "react-material-file-upload": "^0.0.4",
    "react-webcam": "^7.1.1"
  },
  "scripts": {
    "start": "react-app-rewired start",
...
@@ -3,19 +3,20 @@ import { FormattedMessage } from 'react-intl';
// assets
import {
  AudioOutlined,
  BookOutlined,
  CarryOutOutlined,
  CreditCardOutlined,
  FastForwardOutlined,
  FileTextOutlined,
  LoginOutlined,
  RedditOutlined,
  RocketOutlined,
  ShoppingOutlined,
  TeamOutlined,
  TranslationOutlined,
  UserOutlined,
  VideoCameraOutlined,
} from '@ant-design/icons';

// type
@@ -33,9 +34,10 @@ const icons = {
  RocketOutlined,
  BookOutlined,
  TranslationOutlined,
  FastForwardOutlined,
  RedditOutlined,
  AudioOutlined,
  VideoCameraOutlined
};

// ==============================|| MENU ITEMS - SUPPORT ||============================== //
@@ -80,13 +82,27 @@ const application: NavItemType = {
      icon: icons.TranslationOutlined,
      url: '/ssl-translate/process',
    },
    {
      id: 'video-to-sign-language',
      title: <FormattedMessage id="video-to-sign-language" />,
      type: 'collapse',
      icon: icons.RedditOutlined,
      children: [
        {
          id: 'video-translate',
          title: <FormattedMessage id="video-translate" />,
          type: 'item',
          url: '/video-to-sign-language/VideoTranslate',
        }
      ]
    },
    {
      id: 'emotion-detection',
      title: <FormattedMessage id="emotion-detection" />,
      type: 'collapse',
      icon: icons.RedditOutlined,
      children: [
        {
          id: 'audio-detection',
          title: <FormattedMessage id="audio-detection" />,
@@ -101,7 +117,7 @@ const application: NavItemType = {
          icon: icons.VideoCameraOutlined,
          url: '/emotion-detection/video-detection',
        },
      ]
    },
@@ -137,7 +153,7 @@ const application: NavItemType = {
      type: 'item',
      url: '/learning-management/curriculums-subscribed',
    },
    {
      id: 'learning-curriculums-subscribed-tutorial',
      title: <FormattedMessage id="learning-curriculums-subscribed-tutorial" />,
      type: 'item',
...
import axios from 'axios';

class SignLanguageToTextService {
  predictSignLanguageVideo(speed, data) {
    return axios.post(
      `http://127.0.0.1:8000/predict-sign-language/video/speed_levels?speed=${speed}`,
      data
    );
  }
}

export default new SignLanguageToTextService();

import axios from 'axios';

class VideoToSignLanguage {
  videoTranslation(data) {
    return axios.post(
      // @ts-ignore
      `http://127.0.0.1:8000/translated_items/`,
      data
    );
  }
}

export default new VideoToSignLanguage();
// project import
import {
Box,
Button,
ButtonGroup,
Card,
CardContent,
CardHeader,
Container,
Grid,
IconButton,
InputAdornment,
LinearProgress,
Paper,
// Slider,
Stack,
TextField,
Typography
} from '@mui/material';
import MainCard from 'components/MainCard';
import ScrollX from 'components/ScrollX';
import { CloudUploadOutlined, CopyOutlined, HighlightOutlined, TranslationOutlined } from '@ant-design/icons';
import axios from 'axios';
import { MuiFileInput } from 'mui-file-input';
import { useSnackbar } from 'notistack';
import { useState } from 'react';
// ==============================|| List ||============================== //
const VideoTranslate = () => {
const [file, setFile] = useState<File | string | null>(null);
const [videoUrl, setVideoUrl] = useState('');
const [loading, setLoading] = useState(false);
const [value, setValue] = useState('');
const [translatedTextSi, setTranslatedTextSi] = useState('');
const [translatedTextEn, setTranslatedTextEn] = useState('');
const handleDropSingleFile = (files: any) => {
if (files) {
setFile(
Object.assign(files, {
preview: URL.createObjectURL(files)
})
);
setVideoUrl(URL.createObjectURL(files));
}
};
const handleChange = (event: React.ChangeEvent<HTMLTextAreaElement>) => {
setValue(event.target.value);
};
// ----------------- Video Upload ------------------------------------------------
// const TranslateVideoToSignLanguage = async () => {
// if (file) {
// setLoading(true);
// const formData = new FormData();
// //@ts-ignore
// formData.append('video', file, file.name);
// try {
// const response = await VideoToSignLanguageService.videoTranslation(formData);
// const { translated_text_si, translated_text_en } = response.data;
// setTranslatedTextSi(translated_text_si);
// setTranslatedTextEn(translated_text_en);
// if (response.status == 200) {
// console.log(response.data);
// // setValue(response.data.predictions);
// } else {
// enqueueSnackbar('Something went Wrong!', { variant: 'error' });
// }
// // setLoading(false);
// } catch (error) {
// console.log(error);
// setLoading(false);
// enqueueSnackbar('Something went Wrong!', { variant: 'error' });
// }
// } else {
// enqueueSnackbar('Please select a file.', { variant: 'warning' });
// }
// };
async function uploadVideo() {
setLoading(true)
if (file) {
const formData = new FormData();
formData.append('file', file);
try {
const response = await axios.post('http://127.0.0.1:8000/rest_pyton/uploaded_video', formData, {
headers: {
'Content-Type': 'multipart/form-data',
},
});
setTranslatedTextEn(response.data.translated_text_en)
setTranslatedTextSi(response.data.translated_text_si)
setLoading(false)
} catch (error) {
console.error('Error:', error);
setLoading(false)
}
} else {
console.error('No file selected.');
setLoading(false)
}
}
const { enqueueSnackbar } = useSnackbar();
const onCopy = (text: string) => {
if (text) {
navigator.clipboard.writeText(text);
enqueueSnackbar('Copied!', { variant: 'success' });
}
};
return (
<MainCard content={false}>
<ScrollX>
{/* Content Here */}
<Container
sx={{
padding: 3,
bgcolor: '#625D5D',
color: '#fafafb',
borderRadius: 6,
// boxShadow: '0px 4px 6px rgba(0, 0, 0, 0.1)' // Subtle box shadow
}}
>
{/* Double Button Here */}
<ButtonGroup
disableElevation
variant="contained"
aria-label="Customized buttons"
sx={{
marginBottom: '20px',
backgroundColor: '#ff3c3c', // Change background color
'& .MuiButton-root': { // Apply styles to individual buttons
color: 'white', // Text color
'&:hover': {
backgroundColor: '#000000' // Change color on hover
}
}
}}
>
<Button
sx={{
bgcolor: '#ff3c3c',
padding: '10px 50px',
fontSize: '1.05rem', // Larger font size
'& .anticon': {
fontSize: '1.2rem', // Larger icon size
},
}}
// variant={checkTranalationTypeForUpload()}
startIcon={<CloudUploadOutlined />}
// onClick={() => {
// setIsUploadFile(true);
// }}
>
Upload
</Button>
<Button
sx={{
bgcolor: '#ff3c3c',
padding: '10px 50px',
fontSize: '1.05rem', // Larger font size
'& .anticon': {
fontSize: '1.2rem', // Larger icon size
},
}}
// variant={checkTranalationTypeForRecord()}
startIcon={<HighlightOutlined />}
// onClick={() => {
// setIsUploadFile(false);
// }}
>
Text
</Button>
</ButtonGroup>
{/* Video uploading */}
<Box sx={{ flexGrow: 1 }}>
<Card>
<CardHeader title="Upload a video | Drag & Drop or Select File" />
<Grid container spacing={2}>
<Grid item xs={12} md={6}>
<Card sx={{ marginBottom: '20px', marginLeft: '10px', padding: '35px 10px' }}>
<CardContent>
{/* ! Important */}
{/* @ts-ignore */}
<MuiFileInput value={file} onChange={handleDropSingleFile} inputProps={{ accept: 'video/*' }} />
<Paper style={{ padding: '20px', marginTop: '15px' }}>
<Typography variant="h5" align="center" gutterBottom>
Preview
</Typography>
<div style={{ marginTop: '20px', textAlign: 'center' }}>
{file ? <video src={videoUrl} width="400" controls /> : <p>No Video Selected ...</p>}
</div>
</Paper>
</CardContent>
</Card>
</Grid>
<Grid item xs={12} md={6}>
<Card sx={{ p: 5, minHeight: 300, marginBottom: '10px', marginRight: '10px' }}>
<Box display="grid" gap={5}>
<Stack spacing={2}>
<Grid container spacing={1}>
{/* <Grid item xs={12} md={6}> */}
{/* <h3>Set Sign Speed </h3> */}
{/* <Slider
defaultValue={30}
getAriaValueText={valuetext}
valueLabelDisplay="auto"
step={10}
marks
min={10}
max={110}
/> */}
{/* <h4>Speed - {speed}</h4> */}
{/* </Grid> */}
<Grid item xs={12} md={6} container direction="row" justifyContent="flex-start" alignItems="center">
<Button
variant="contained"
style={{ width: '200px', height: '60px', fontSize: '20px' }}
sx={{
mb: 3
}}
disabled={loading}
onClick={uploadVideo}
endIcon={<TranslationOutlined />}
>
Translate
</Button>
</Grid>
</Grid>
{loading ? (
<Card>
<CardContent>
<LinearProgress />
<center>
<Typography variant="h5" component="div" sx={{ marginTop: 2 }}>
Loading...
</Typography>
</center>
</CardContent>
</Card>
) : (
<div>
{/* -------- Translated Avatar ------------------------- */}
<Typography variant="overline" sx={{ color: 'text.secondary', marginBottom: 2 }}>
Translated Avatar
</Typography>
<Paper elevation={3} sx={{ p: 2, maxWidth: 600, margin: '0 auto', marginBottom: 3 }}>
<video controls width="100%" height="auto">
{/* <source src="your-video-url.mp4" type="video/mp4" /> */}
Your browser does not support the video tag.
</video>
</Paper>
{/* -------- Translated Sinhala Unicode ------------------------- */}
<Typography variant="overline" sx={{ color: 'text.secondary', fontStyle: 'italic', marginBottom: 2 }}>
Sinhala Unicode
</Typography>
<TextField
sx={{
marginBottom: 2
}}
fullWidth
value={translatedTextSi}
onChange={handleChange}
InputProps={{
endAdornment: (
<InputAdornment position="end">
<IconButton onClick={() => onCopy(translatedTextSi)}>
<CopyOutlined />
</IconButton>
</InputAdornment>
)
}}
/>
{/* -------- Translated English Unicode ------------------------- */}
<Typography variant="overline" sx={{ color: 'text.secondary', fontStyle: 'italic' }}>
English Unicode
</Typography>
<TextField
fullWidth
value={translatedTextEn}
onChange={handleChange}
InputProps={{
endAdornment: (
<InputAdornment position="end">
<IconButton onClick={() => onCopy(translatedTextEn)}>
<CopyOutlined />
</IconButton>
</InputAdornment>
)
}}
/>
</div>
)}
</Stack>
</Box>
</Card>
</Grid>
</Grid>
</Card>
</Box>
</Container>
</ScrollX>
</MainCard>
);
};
export default VideoTranslate;
// // project import
// import { useState } from 'react';
// import MainCard from 'components/MainCard';
// import ScrollX from 'components/ScrollX';
// // assets
// //types
// // ==============================|| List ||============================== //
// const VideoTranslate = () => {
// const [sinhalaTranslation, setSinhalaTranslation] = useState('');
// const [singlishTranslation, setSinglishTranslation] = useState('');
// const handleConvertClick = () => {
// // Perform the video translation logic here
// // You can use the values from `sinhalaTranslation` and `singlishTranslation`
// };
// return (
// <MainCard content={false}>
// <ScrollX>
// <h3> Video Translation here </h3>
// <div>
// <h4>Sinhala Unicode Translation</h4>
// <textarea
// value={sinhalaTranslation}
// onChange={(e) => setSinhalaTranslation(e.target.value)}
// />
// </div>
// <div>
// <h4>Singlish Translation</h4>
// <textarea
// value={singlishTranslation}
// onChange={(e) => setSinglishTranslation(e.target.value)}
// />
// </div>
// <div>
// <button onClick={handleConvertClick}>Convert Video</button>
// </div>
// </ScrollX>
// </MainCard>
// );
// };
// export default VideoTranslate;
// import { Alert, Button, Card, CardActions, CardContent, CardHeader, Container, Grid, Typography } from '@mui/material';
// import Head from 'next/head';
// import { useCallback, useState } from 'react';
// import { ComingSoonIllustration } from 'assets/illustrations';
// import CustomBreadcrumbs from 'components/custom-breadcrumbs/CustomBreadcrumbs';
// import Iconify from 'components/iconify/Iconify';
// import HomeWidget from 'sections/@dashboard/spokenLanguageTranslation-module/HomeWidget';
// import _mock from '../../../_mock';
// import Editor from '../../../components/editor';
// import { useSettingsContext } from '../../../components/settings';
// import { Upload } from '../../../components/upload';
// import DashboardLayout from '../../../layout/dashboard';
// import {
// CarouselBasic3
// } from '../../../sections/_examples/extra/carousel';
// SpokenLanguageTranslationHomePage.getLayout = (page: React.ReactElement) => <DashboardLayout>{page}</DashboardLayout>;
// export default function SpokenLanguageTranslationHomePage() {
// const { themeStretch } = useSettingsContext();
// const [translateType, setTranslateType] = useState<"video" | "text">()
// const [file, setFile] = useState<File | string | null>(null);
// const [quillSimple, setQuillSimple] = useState('');
// const handleHomeWidgetOnClick = (value: string) => {
// if (!value) return
// if (value == "Video Translate") {
// setTranslateType("video")
// return
// }
// if (value == "Text Translate") {
// setTranslateType("text")
// return
// }
// }
// const handleDropSingleFile = useCallback((acceptedFiles: File[]) => {
// const file = acceptedFiles[0];
// if (file) {
// setFile(
// Object.assign(file, {
// preview: URL.createObjectURL(file),
// })
// );
// }
// }, []);
// const _carouselsExample = [...Array(5)].map((_, index) => ({
// id: _mock.id(index),
// title: _mock.text.title(index),
// image: _mock.image.cover(index),
// description: _mock.text.description(index),
// }));
// return (
// <>
// <Head>
// <title> Spoken Language Translation Home | SignLink </title>
// </Head>
// <Container maxWidth={themeStretch ? false : 'xl'}>
// <CustomBreadcrumbs
// heading="Spoken Language Translation"
// links={[
// {
// name: 'Dashboard',
// href: '#',
// icon: <Iconify icon="eva:home-fill" />,
// },
// { name: 'Spoken Language Translation Module', href: '#', icon: <Iconify icon="eva:cube-outline" /> },
// { name: 'Spoken Language Translation', icon: <Iconify icon="eva:cube-outline" /> },
// ]}
// />
// </Container>
// <Container maxWidth={themeStretch ? false : 'xl'}>
// <Grid container spacing={2} rowSpacing={3}>
// <Grid item xs={12} md={6} lg={6}>
// <HomeWidget
// title="Video Translate"
// subTitle="Spoken Language Video Convert to sign language"
// icon="eva:video-fill"
// color='warning'
// handleClick={handleHomeWidgetOnClick}
// />
// </Grid>
// <Grid item xs={12} md={6} lg={6}>
// <HomeWidget
// title="Text Translate"
// subTitle="Spoken Language Text Convert to sign language"
// icon="eva:text-fill"
// color="success"
// handleClick={handleHomeWidgetOnClick}
// />
// </Grid>
// {!translateType && <>
// <Grid item xs={12} md={12} lg={12}>
// <Alert severity='info' >Select a Translation type</Alert>
// </Grid>
// </>}
// {translateType == "video" && <>
// <Grid item xs={12} md={8} lg={8}>
// <Card>
// <CardHeader
// title="Upload Video"
// subheader="Upload your video to convert to sign language"
// />
// <CardContent>
// <Upload file={file} onDrop={handleDropSingleFile} onDelete={() => setFile(null)} />
// </CardContent>
// <CardActions>
// <Button variant="contained" color='error' fullWidth onClick={() => { setFile(null) }}>
// Reset
// </Button>
// <Button variant="contained" fullWidth>
// Convert
// </Button>
// </CardActions>
// </Card>
// </Grid>
// <Grid item xs={12} md={4} lg={4}>
// <Card>
// <CardHeader title="3D Model" />
// <CardContent>
// <Typography variant="h3" paragraph>
// Coming Soon!
// </Typography>
// <Typography sx={{ color: 'text.secondary' }}>
// We are currently working hard on this page!
// </Typography>
// <ComingSoonIllustration sx={{ my: 1, height: 200 }} />
// </CardContent>
// </Card>
// </Grid>
// </>}
// {translateType == "text" && <>
// <Grid item xs={12} md={8} lg={8}>
// <Card sx={{ height: "100%" }}>
// <CardHeader
// title="Enter text"
// subheader="Enter your text to convert to sign language"
// />
// <CardContent>
// <Editor
// simple
// id="simple-editor"
// value={quillSimple}
// onChange={(value) => setQuillSimple(value)}
// />
// </CardContent>
// <CardActions>
// <Button variant="contained" color='error' fullWidth onClick={() => { setQuillSimple('') }}>
// Reset
// </Button>
// <Button variant="contained" fullWidth>
// Convert
// </Button>
// </CardActions>
// </Card>
// </Grid>
// <Grid item xs={12} md={4} lg={4}>
// <Card>
// <CardHeader title="Converted Output" />
// <CardContent>
// <CarouselBasic3 data={_carouselsExample} />
// </CardContent>
// </Card>
// </Grid>
// </>}
// </Grid>
// </Container>
// </>
// );
// }
@@ -46,6 +46,8 @@ const CurriculumManagementList = Loadable(lazy(() => import('pages/parameter/cur
// render - parameter tutorial management page
const TutorialManagementList = Loadable(lazy(() => import('pages/parameter/tutorial-management/list/list')));

// render - Video to Sign Language page
const VideoTranslation = Loadable(lazy(() => import('pages/video-to-sign-language/VideoTranslate/VideoTranslate')));

// render - audio-detection page
@@ -120,6 +122,15 @@ const MainRoutes = {
        ]
      },
      {
        path: 'video-to-sign-language',
        children: [
          {
            path: 'VideoTranslate',
            element: <VideoTranslation />
          }
        ]
      },
      {
        path: 'learning-management',
        children: [
...
@@ -166,5 +166,7 @@
  "audio-detection": "Audio Detection",
  "video-detection": "Video Detection",
  "learning-dashboard": "Dashboard",
  "learning-curriculums-subscribed-tutorial": "Tutorial",
  "video-to-sign-language": "Sign Language Translate",
  "video-translate": "Video Translator"
}
\ No newline at end of file
@@ -31,15 +31,40 @@ The main objective of this project is to develop a Sign Language Translation Sys

### Member: Ranaweera R M S H (IT20251000)

**Research Question:**
- How can sign language translation be achieved by identifying the audio components in a video and the text, and translating them into sign language using 3D components?

**Objectives:**
- Pre-process video data to extract audio components (a minimal pipeline sketch follows this list).
- Develop techniques to translate audio into sign language.
- Develop techniques to translate text into sign language.
- Integrate sign language translation with 3D components to create visually appealing sign language videos.
- Evaluate the accuracy of sign language translation.
- Evaluate the overall effectiveness of the sign language translation demonstration.
- Apply computer graphics techniques to enhance the realism and visual quality of the sign language animations.
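The backend added in this merge request implements these objectives as a video → audio → Sinhala text → sign-ID pipeline. The following is a minimal, condensed sketch of that flow, assuming `moviepy` and `SpeechRecognition` as used in the new video-to-sign-language controller; the helper name `video_to_sign_ids` is illustrative, and the intermediate `translate_text` normalization step is omitted for brevity:

```python
import moviepy.editor as mp
import speech_recognition as sr

# Word -> sign-animation ID map (subset of unicode_to_int_mapping in the controller).
unicode_to_int_mapping = {"මම": 1, "හෙට": 2, "යනවා": 3, "මං": 4}

def video_to_sign_ids(video_path: str, audio_path: str = "audio.wav") -> str:
    # 1. Extract the audio track from the uploaded video.
    mp.VideoFileClip(video_path).audio.write_audiofile(audio_path)

    # 2. Transcribe the Sinhala speech (Google Web Speech API, language code "si-LK").
    recognizer = sr.Recognizer()
    with sr.AudioFile(audio_path) as source:
        audio = recognizer.record(source)
    text = recognizer.recognize_google(audio, language="si-LK")

    # 3. Map each recognized word to its sign ID; unknown words become "0".
    return " ".join(str(unicode_to_int_mapping.get(word, "0")) for word in text.split())
```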
## Novelty and Contributions
- **Novel Approach:**
> The research question proposes a novel approach to sign language translation by utilizing audio components from a video and text input. This approach combines audio analysis and text translation with 3D components to create visually appealing sign language videos.
- **Audio Component Extraction:**
> The research aims to develop techniques to pre-process video data and extract audio components. This extraction process is crucial for identifying and analyzing the audio elements necessary for sign language translation.
- **Audio-to-Sign Language Translation:**
> The research contributes to the development of techniques that translate audio components into sign language. By analyzing the audio data, the system can generate accurate sign language gestures and expressions corresponding to the spoken words or sounds.
- **Text-to-Sign Language Translation:**
> Another significant contribution is the development of techniques to translate text input into sign language. This enables users to input text directly, which the system converts into sign language gestures and expressions, providing a means of communication for individuals who are deaf or hard of hearing.
- **Integration of 3D Components:**
> The research focuses on integrating sign language translation with 3D components to enhance the visual quality and realism of the sign language animations. This integration adds depth and realism to the sign language gestures and creates visually appealing videos that effectively convey the intended message.
- **Evaluation of Translation Accuracy:**
> The research evaluates the accuracy of the sign language translation by comparing the generated sign language gestures with established sign language conventions. This evaluation ensures that the translations are linguistically and culturally appropriate, enhancing the overall effectiveness of the system.
- **Computer Graphics Enhancements:**
> The research contributes to the application of computer graphics techniques to enhance the realism and visual quality of the sign language animations. By leveraging graphics capabilities, the system can create visually engaging and expressive sign language videos, improving the user experience.
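As a rough illustration of the text-to-sign-language contribution above: the notebook added in this merge request maps Sinhala Unicode characters to Singlish labels, looks up one sign image per label from the training folders, and stitches the images into an animated GIF. A minimal sketch under those assumptions (the mapping and paths are abbreviated; `text_to_sign_gif` is an illustrative name):

```python
import os
from PIL import Image

# Subset of the sinhala_to_singlish map used in the notebook.
sinhala_to_singlish = {"අ": "A", "ආ": "Aah", "ඉ": "Ee", "ට": "T"}
dataset_path = "training/"  # one sub-folder of sign images per Singlish label

def text_to_sign_gif(text: str, output_path: str = "generated.gif") -> None:
    singlish = "".join(sinhala_to_singlish.get(ch, ch) for ch in text.replace(" ", ""))
    frames, buffer = [], ""
    for ch in singlish:
        # Accumulate characters so multi-letter labels such as "Aah" are matched.
        buffer += ch
        letter_dir = os.path.join(dataset_path, buffer)
        if os.path.isdir(letter_dir):
            files = os.listdir(letter_dir)
            if files:
                frames.append(Image.open(os.path.join(letter_dir, files[0])).resize((480, 480)))
            buffer = ""
    if frames:
        # 1200 ms per frame, looping forever, mirroring the notebook's GIF settings.
        frames[0].save(output_path, save_all=True, append_images=frames[1:], loop=0, duration=1200)
```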
### Member: A V R Dilshan (IT20005276)

**Research Question:**
...