Commit f8c53ceb authored by janithgamage1.ed's avatar janithgamage1.ed

Merge branch 'master' into IT20005276

parents 6366744b 68684f39
{
"cells": [
{
"cell_type": "code",
"execution_count": 4,
"id": "4e8edf74",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "e11e6436",
"metadata": {},
"outputs": [],
"source": [
"image = cv2.imread('D:/RP/project/2023-029/Project/Backend\\ML_Models/sign_language_to_text/scene00001.png')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"hand_shape = extract_hand_shape(image)\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "157e18d5",
"metadata": {},
"outputs": [],
"source": [
"# Convert the image to grayscale\n",
"gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n",
"\n",
"# Apply Gaussian blur to reduce noise\n",
"blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
"\n",
"# Apply adaptive thresholding to segment the hand from the background\n",
"_, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
"\n",
"# Find contours of the hand\n",
"contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
"\n",
"# Sort contours by area in descending order\n",
"contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
"\n",
"# Extract the largest contour (hand region)\n",
"hand_contour = contours[0]\n",
"\n",
"# Create a black image of the same size as the input image\n",
"hand_shape = np.zeros_like(image)\n",
"\n",
"# Draw the hand contour on the black image\n",
"cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "93d1ae9d",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import numpy as np\n",
"\n",
"\n",
"IMG_SIZE = 224\n",
"\n",
"frame = cv2.imread('D:/RP/project/2023-029/Project/Backend/ML_Models/sign_language_to_text/test_image.png')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n",
"frame = cv2.resize(frame, (IMG_SIZE, IMG_SIZE))\n",
"# frame = extract_hand_shape(frame)\n",
"# frame = np.array([frame], dtype=np.float32) / 255.0\n",
"\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', frame)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "e7e05125",
"metadata": {},
"outputs": [
{
"ename": "error",
"evalue": "OpenCV(4.7.0) D:/a/opencv-python/opencv-python/opencv/modules/highgui/src/precomp.hpp:155: error: (-215:Assertion failed) src_depth != CV_16F && src_depth != CV_32S in function 'convertToShow'\n",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31merror\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m~\\AppData\\Local\\Temp\\ipykernel_19904\\1480809324.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 31\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 32\u001b[0m \u001b[1;31m# Display the hand shape with white background\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 33\u001b[1;33m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mimshow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'Hand Shape with White Background'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhand_shape_with_white_bg\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 34\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwaitKey\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 35\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdestroyAllWindows\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31merror\u001b[0m: OpenCV(4.7.0) D:/a/opencv-python/opencv-python/opencv/modules/highgui/src/precomp.hpp:155: error: (-215:Assertion failed) src_depth != CV_16F && src_depth != CV_32S in function 'convertToShow'\n"
]
}
],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
{
"cells": [
{
"cell_type": "code",
"execution_count": 17,
"id": "4e8edf74",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": 55,
"id": "e11e6436",
"metadata": {},
"outputs": [],
"source": [
"# works well with white background buth have some issues eith other backgrounds \n",
"\n",
"# image = cv2.imread('C:/Users/HP Pavilion/Downloads/images (1).jpg')\n",
"image = cv2.imread('D:/RP/project/2023-029/Project/Backend\\ML_Models/sign_language_to_text/scene00001.png')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"hand_shape = extract_hand_shape(image)\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": 57,
"id": "157e18d5",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 19,
"id": "e7e05125",
"metadata": {},
"outputs": [],
"source": [
"# This works for any background, but does not hapture the fist area\n",
"import cv2\n",
"import numpy as np\n",
"# image = cv2.imread('D:/RP/project/2023-029/Project/Backend\\ML_Models/sign_language_to_text/scene00001.png')\n",
"image = cv2.imread('C:/Users/HP Pavilion/Downloads/images (1).jpg')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" thresholded = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"hand_shape = extract_hand_shape(image)\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "markdown",
"id": "ae0a7cb5",
"metadata": {},
"source": [
"### Capture Hand"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "1c04daef",
"metadata": {},
"outputs": [
{
"ename": "error",
"evalue": "OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\highgui\\src\\window.cpp:971: error: (-215:Assertion failed) size.width>0 && size.height>0 in function 'cv::imshow'\n",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31merror\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m~\\AppData\\Local\\Temp\\ipykernel_3024\\2889311055.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 29\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 30\u001b[0m \u001b[1;31m# Display the result\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 31\u001b[1;33m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mimshow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"Area Above Elbow\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0marea_above_elbow\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 32\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwaitKey\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 33\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdestroyAllWindows\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31merror\u001b[0m: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\highgui\\src\\window.cpp:971: error: (-215:Assertion failed) size.width>0 && size.height>0 in function 'cv::imshow'\n"
]
}
],
"source": [
"import cv2\n",
"import numpy as np\n",
"import mediapipe as mp\n",
"# Load the image\n",
"image = cv2.imread('C:/Users/HP Pavilion/Downloads/test_sign.jpeg')\n",
"\n",
"image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n",
"\n",
"# Initialize Mediapipe solutions\n",
"mp_holistic = mp.solutions.holistic\n",
"\n",
"# Initialize the holistic model\n",
"holistic = mp_holistic.Holistic()\n",
"\n",
"# Process the image with Mediapipe\n",
"results = holistic.process(image_rgb)\n",
"\n",
"# Extract the left elbow and hand landmarks\n",
"left_elbow_landmark = results.left_hand_landmarks.landmark[mp_holistic.HandLandmark.WRIST]\n",
"left_hand_landmark = results.left_hand_landmarks.landmark[mp_holistic.HandLandmark.PINKY_MCP]\n",
"\n",
"# Convert the landmark coordinates to image pixels\n",
"image_height, image_width, _ = image.shape\n",
"left_elbow_px = int(left_elbow_landmark.x * image_width), int(left_elbow_landmark.y * image_height)\n",
"left_hand_px = int(left_hand_landmark.x * image_width), int(left_hand_landmark.y * image_height)\n",
"\n",
"# Calculate the area above the elbow\n",
"area_above_elbow = image[0:left_elbow_px[1], left_elbow_px[0]:left_hand_px[0]]\n",
"\n",
"# Display the result\n",
"cv2.imshow(\"Area Above Elbow\", area_above_elbow)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3ead61b7",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"# This works for any background, but does not hapture the fist area\n",
"import cv2\n",
"import numpy as np\n",
"\n",
"IMG_SIZE = 224 # image size\n",
"\n",
"# image = cv2.imread('C:/Users/HP Pavilion/Downloads/images (1).jpg')\n",
"\n",
"\n",
"image = cv2.imread('D:/RP/project/2023-029/Project/Backend/ML_Models/sign_language_to_text/test_image.png')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"\n",
"frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n",
"frame = cv2.resize(frame, (IMG_SIZE, IMG_SIZE))\n",
"frame = extract_hand_shape(frame)\n",
"frame = np.array([frame], dtype=np.float32) / 255.0\n",
"\n",
"\n",
"\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
{
"cells": [
{
"cell_type": "code",
"execution_count": 4,
"id": "4e8edf74",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "e11e6436",
"metadata": {},
"outputs": [],
"source": [
"image = cv2.imread('D:/RP/project/2023-029/Project/Backend\\ML_Models/sign_language_to_text/scene00001.png')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"hand_shape = extract_hand_shape(image)\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "157e18d5",
"metadata": {},
"outputs": [],
"source": [
"# Convert the image to grayscale\n",
"gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n",
"\n",
"# Apply Gaussian blur to reduce noise\n",
"blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
"\n",
"# Apply adaptive thresholding to segment the hand from the background\n",
"_, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
"\n",
"# Find contours of the hand\n",
"contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
"\n",
"# Sort contours by area in descending order\n",
"contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
"\n",
"# Extract the largest contour (hand region)\n",
"hand_contour = contours[0]\n",
"\n",
"# Create a black image of the same size as the input image\n",
"hand_shape = np.zeros_like(image)\n",
"\n",
"# Draw the hand contour on the black image\n",
"cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "93d1ae9d",
"metadata": {},
"outputs": [
{
"ename": "error",
"evalue": "OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\highgui\\src\\window_w32.cpp:124: error: (-215:Assertion failed) bmi && width >= 0 && height >= 0 && (bpp == 8 || bpp == 24 || bpp == 32) in function 'FillBitmapInfo'\n",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31merror\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m~\\AppData\\Local\\Temp\\ipykernel_16572\\1170496581.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 25\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 26\u001b[0m \u001b[1;31m# Display the hand shape\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 27\u001b[1;33m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mimshow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'Hand Shape'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mframe\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 28\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwaitKey\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 29\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdestroyAllWindows\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31merror\u001b[0m: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\highgui\\src\\window_w32.cpp:124: error: (-215:Assertion failed) bmi && width >= 0 && height >= 0 && (bpp == 8 || bpp == 24 || bpp == 32) in function 'FillBitmapInfo'\n"
]
}
],
"source": [
"import cv2\n",
"import numpy as np\n",
"\n",
"\n",
"IMG_SIZE = 224\n",
"\n",
"frame = cv2.imread('D:/RP/project/2023-029/Project/Backend/ML_Models/sign_language_to_text/test_image.png')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n",
"frame = cv2.resize(frame, (IMG_SIZE, IMG_SIZE))\n",
"frame = extract_hand_shape(frame)\n",
"frame = np.array([frame], dtype=np.float32) / 255.0\n",
"\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', frame)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
{
"cells": [
{
"cell_type": "code",
"execution_count": 17,
"id": "4e8edf74",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": 55,
"id": "e11e6436",
"metadata": {},
"outputs": [],
"source": [
"# works well with white background buth have some issues eith other backgrounds \n",
"\n",
"# image = cv2.imread('C:/Users/HP Pavilion/Downloads/images (1).jpg')\n",
"image = cv2.imread('D:/RP/project/2023-029/Project/Backend\\ML_Models/sign_language_to_text/scene00001.png')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"hand_shape = extract_hand_shape(image)\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": 57,
"id": "157e18d5",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 19,
"id": "e7e05125",
"metadata": {},
"outputs": [],
"source": [
"# This works for any background, but does not hapture the fist area\n",
"import cv2\n",
"import numpy as np\n",
"# image = cv2.imread('D:/RP/project/2023-029/Project/Backend\\ML_Models/sign_language_to_text/scene00001.png')\n",
"image = cv2.imread('C:/Users/HP Pavilion/Downloads/images (1).jpg')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" thresholded = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"hand_shape = extract_hand_shape(image)\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "markdown",
"id": "ae0a7cb5",
"metadata": {},
"source": [
"### Capture Hand"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "1c04daef",
"metadata": {},
"outputs": [
{
"ename": "error",
"evalue": "OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\highgui\\src\\window.cpp:971: error: (-215:Assertion failed) size.width>0 && size.height>0 in function 'cv::imshow'\n",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31merror\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m~\\AppData\\Local\\Temp\\ipykernel_3024\\2889311055.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 29\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 30\u001b[0m \u001b[1;31m# Display the result\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 31\u001b[1;33m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mimshow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"Area Above Elbow\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0marea_above_elbow\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 32\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwaitKey\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 33\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdestroyAllWindows\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31merror\u001b[0m: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\highgui\\src\\window.cpp:971: error: (-215:Assertion failed) size.width>0 && size.height>0 in function 'cv::imshow'\n"
]
}
],
"source": [
"import cv2\n",
"import numpy as np\n",
"import mediapipe as mp\n",
"# Load the image\n",
"image = cv2.imread('C:/Users/HP Pavilion/Downloads/test_sign.jpeg')\n",
"\n",
"image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n",
"\n",
"# Initialize Mediapipe solutions\n",
"mp_holistic = mp.solutions.holistic\n",
"\n",
"# Initialize the holistic model\n",
"holistic = mp_holistic.Holistic()\n",
"\n",
"# Process the image with Mediapipe\n",
"results = holistic.process(image_rgb)\n",
"\n",
"# Extract the left elbow and hand landmarks\n",
"left_elbow_landmark = results.left_hand_landmarks.landmark[mp_holistic.HandLandmark.WRIST]\n",
"left_hand_landmark = results.left_hand_landmarks.landmark[mp_holistic.HandLandmark.PINKY_MCP]\n",
"\n",
"# Convert the landmark coordinates to image pixels\n",
"image_height, image_width, _ = image.shape\n",
"left_elbow_px = int(left_elbow_landmark.x * image_width), int(left_elbow_landmark.y * image_height)\n",
"left_hand_px = int(left_hand_landmark.x * image_width), int(left_hand_landmark.y * image_height)\n",
"\n",
"# Calculate the area above the elbow\n",
"area_above_elbow = image[0:left_elbow_px[1], left_elbow_px[0]:left_hand_px[0]]\n",
"\n",
"# Display the result\n",
"cv2.imshow(\"Area Above Elbow\", area_above_elbow)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3ead61b7",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"# This works for any background, but does not hapture the fist area\n",
"import cv2\n",
"import numpy as np\n",
"\n",
"IMG_SIZE = 224 # image size\n",
"\n",
"# image = cv2.imread('C:/Users/HP Pavilion/Downloads/images (1).jpg')\n",
"\n",
"\n",
"image = cv2.imread('D:/RP/project/2023-029/Project/Backend/ML_Models/sign_language_to_text/test_image.png')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"\n",
"frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n",
"frame = cv2.resize(frame, (IMG_SIZE, IMG_SIZE))\n",
"frame = extract_hand_shape(frame)\n",
"frame = np.array([frame], dtype=np.float32) / 255.0\n",
"\n",
"\n",
"\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
This source diff could not be displayed because it is too large. You can view the blob instead.
......@@ -16,6 +16,9 @@
files/*
!files/
*.pyc
*~
*.swp
# Created by https://www.toptal.com/developers/gitignore/api/python
# Edit at https://www.toptal.com/developers/gitignore?templates=python
......
......@@ -4,3 +4,36 @@
2023-05-19 00:32:48,522 - ERROR - Received request at root endpoint.
2023-05-19 23:09:38,565 - INFO - Failed to make predictions. name 'CLASSES' is not defined
2023-05-19 23:09:38,565 - INFO - Failed to make predictions. name 'CLASSES' is not defined
2023-05-24 20:05:37,932 - INFO - Failed to make predictions. OpenCV(4.7.0) D:\a\opencv-python\opencv-python\opencv\modules\imgproc\src\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'
2023-05-24 20:05:37,932 - INFO - Failed to make predictions. OpenCV(4.7.0) D:\a\opencv-python\opencv-python\opencv\modules\imgproc\src\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'
2023-05-24 20:05:37,932 - INFO - Failed to make predictions. OpenCV(4.7.0) D:\a\opencv-python\opencv-python\opencv\modules\imgproc\src\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'
2023-05-24 20:05:37,936 - INFO - Error.
2023-05-24 20:05:37,936 - INFO - Error.
2023-05-24 20:05:37,936 - INFO - Error.
2023-07-12 05:50:25,202 - INFO - Error. 'SignLanguagePredictionService' object has no attribute 'predict_sign_language_video2'
2023-07-12 05:50:25,202 - INFO - Error. 'SignLanguagePredictionService' object has no attribute 'predict_sign_language_video2'
2023-07-12 05:50:25,202 - INFO - Error. 'SignLanguagePredictionService' object has no attribute 'predict_sign_language_video2'
2023-07-12 06:33:48,435 - INFO - Error. SignLanguagePredictionService.predict_sign_language_video_new2() missing 1 required positional argument: 'speed'
2023-07-12 06:33:48,435 - INFO - Error. SignLanguagePredictionService.predict_sign_language_video_new2() missing 1 required positional argument: 'speed'
2023-07-12 06:33:48,435 - INFO - Error. SignLanguagePredictionService.predict_sign_language_video_new2() missing 1 required positional argument: 'speed'
2023-07-12 06:34:27,777 - INFO - Error. SignLanguagePredictionService.predict_sign_language_video_new2() missing 1 required positional argument: 'speed'
2023-07-12 06:34:27,777 - INFO - Error. SignLanguagePredictionService.predict_sign_language_video_new2() missing 1 required positional argument: 'speed'
2023-07-12 06:34:27,777 - INFO - Error. SignLanguagePredictionService.predict_sign_language_video_new2() missing 1 required positional argument: 'speed'
2023-07-12 06:34:33,502 - INFO - Error. SignLanguagePredictionService.predict_sign_language_video_new2() missing 1 required positional argument: 'speed'
2023-07-12 06:34:33,502 - INFO - Error. SignLanguagePredictionService.predict_sign_language_video_new2() missing 1 required positional argument: 'speed'
2023-07-12 06:34:33,502 - INFO - Error. SignLanguagePredictionService.predict_sign_language_video_new2() missing 1 required positional argument: 'speed'
2023-07-12 06:39:07,178 - INFO - Failed to make predictions. local variable 'threshold_percentage' referenced before assignment
2023-07-12 06:39:07,178 - INFO - Failed to make predictions. local variable 'threshold_percentage' referenced before assignment
2023-07-12 06:39:07,178 - INFO - Failed to make predictions. local variable 'threshold_percentage' referenced before assignment
2023-07-12 06:39:07,180 - INFO - Error.
2023-07-12 06:39:07,180 - INFO - Error.
2023-07-12 06:39:07,180 - INFO - Error.
2023-08-03 06:00:41,581 - INFO - Failed to make predictions. [WinError 32] The process cannot access the file because it is being used by another process: 'files/test_video.mp4'
2023-08-03 06:00:41,581 - INFO - Failed to make predictions. [WinError 32] The process cannot access the file because it is being used by another process: 'files/test_video.mp4'
2023-08-03 06:00:41,581 - INFO - Failed to make predictions. [WinError 32] The process cannot access the file because it is being used by another process: 'files/test_video.mp4'
2023-08-03 06:00:41,626 - INFO - Error.
2023-08-03 06:00:41,626 - INFO - Error.
2023-08-03 06:00:41,626 - INFO - Error.
import base64
import os
import cv2
from fastapi import APIRouter, File, HTTPException,UploadFile
import numpy as np
from fastapi import APIRouter, File, HTTPException, Query,UploadFile
from pydantic import BaseModel
import tensorflow as tf
from core.logger import setup_logger
from core import setup_logger
from services.translate_service import SignLanguagePredictionService
from utils import mappings
......@@ -23,12 +20,13 @@ model= None
CLASSES = mappings.classes
NUM_CLASSES = len(mappings.classes) # number of classes
IMG_SIZE = 224 # image size
speed_levels = mappings.speed_levels
# Instantiate the service class
prediction_service = SignLanguagePredictionService(model, CLASSES, mappings)
prediction_service = SignLanguagePredictionService(model, CLASSES, mappings,speed_levels)
@router.post("/upload/video")
@router.post("/upload/video", tags=["Sign Language"])
async def upload_video(video: UploadFile = File(...)):
try:
......@@ -44,7 +42,7 @@ async def upload_video(video: UploadFile = File(...)):
detail="Failed to upload the video"
)
@router.post('/predict-sign-language/image')
@router.post('/predict-sign-language/image', tags=["Sign Language"])
def predict_using_image(image_request: UploadFile = File(...)):
try:
return prediction_service.predict_sign_language(image_request)
......@@ -54,10 +52,21 @@ def predict_using_image(image_request: UploadFile = File(...)):
status_code=500,
detail="Request Failed."
)
@router.post('/predict-sign-language/video')
@router.post('/predict-sign-language/video', tags=["Sign Language"])
def predict_using_video(video_request: UploadFile = File(...)):
try:
return prediction_service.predict_sign_language_video(video_request)
return prediction_service.predict_sign_language_video_new(video_request)
except Exception as e:
logger.info(f"Error. {e}")
raise HTTPException(
status_code=500,
detail="Request Failed."
)
@router.post('/predict-sign-language/video/speed_levels', tags=["Sign Language"])
def predict_using_video(video_request: UploadFile = File(...), speed: int = Query(...)):
try:
return prediction_service.predict_sign_language_video_with_speed_levels(video_request, speed=speed)
except Exception as e:
logger.info(f"Error. {e}")
raise HTTPException(
......
......@@ -7,12 +7,12 @@ def test():
# Your code here
return {"pong"}
@router.get("/users")
@router.get("/test")
def get_users():
# Your code here
return {"message": "Get users endpoint"}
@router.post("/users")
@router.post("/test-api")
def create_user():
# Your code here
return {"message": "Create user endpoint"}
import logging
def setup_logger():
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
# Create a file handler for logging to a file
file_handler = logging.FileHandler('app.log')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
# Create a stream handler for logging to console
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(formatter)
# Add the handlers to the logger
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
return logger
import logging
def setup_logger():
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
# Create a file handler for logging to a file
file_handler = logging.FileHandler('app.log')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
# Create a stream handler for logging to console
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(formatter)
# Add the handlers to the logger
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
return logger
......@@ -2,7 +2,9 @@ from fastapi import FastAPI
from controllers import translate_controler, users_controller
from fastapi.responses import RedirectResponse
from fastapi.middleware.cors import CORSMiddleware
from core import setup_logger
from core.logger import setup_logger
app = FastAPI()
......@@ -17,6 +19,8 @@ app.include_router(translate_controler.router)
origins = [
"http://localhost",
"http://localhost:8080",
"http://localhost:8004",
"http://localhost:3000",
]
app.add_middleware(CORSMiddleware,
......
......@@ -3,22 +3,21 @@ import cv2
import numpy as np
from fastapi import HTTPException, UploadFile
from typing import Dict
from typing import Counter, Dict
import tensorflow as tf
from core.logger import setup_logger
from core import setup_logger
from utils import mappings
logger = setup_logger()
IMG_SIZE = 224 # image size
class SignLanguagePredictionService:
def __init__(self, model, classes, mappings):
def __init__(self, model, classes, mappings,speed_levels):
self.model = model
self.classes = classes
self.mappings = mappings
self.speed_levels = speed_levels
def predict_sign_language(self, image_request: UploadFile) -> Dict[str, str]:
try:
......@@ -62,7 +61,7 @@ class SignLanguagePredictionService:
frame_count = 0
# Loop through the frames of the video
while frame_count < 20:
while frame_count < 50:
success, frame = video.read()
if not success:
break
......@@ -95,3 +94,150 @@ class SignLanguagePredictionService:
status_code=500,
detail="Failed to make predictions"
)
def predict_sign_language_video_new(self, video_request: UploadFile) -> Dict[str, str]:
try:
# Create a temporary file to save the video
video_location = f"files/{video_request.filename}"
with open(video_location, "wb") as file:
file.write(video_request.file.read())
# Read the video using OpenCV
video = cv2.VideoCapture(video_location)
predictions = []
frame_count = 0
# Loop through the frames of the video
while frame_count < 50:
success, frame = video.read()
if not success:
break
# Preprocess the frame
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = cv2.resize(frame, (IMG_SIZE, IMG_SIZE))
frame = extract_hand_shape(frame)
frame = np.array([frame], dtype=np.float32) / 255.0
# Make prediction
prediction = self.model.predict(frame)
class_index = np.argmax(prediction)
class_name = self.classes[class_index]
sinhala_letter = self.mappings.letter_mapping.get(class_name, 'Unknown')
# Store the prediction for the frame
predictions.append(sinhala_letter)
frame_count += 1
video.release()
# Delete the video file
os.remove(video_location)
threshold_percentage = 60
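# Keep only the letters that appear in more than threshold_percentage of the sampled frames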
predictions = get_predicted_percentage(predictions, threshold_percentage)
return {'frame_count': frame_count, 'predictions': predictions }
except Exception as e:
logger.info(f"Failed to make predictions. {e}")
raise HTTPException(
status_code=500,
detail="Failed to make predictions"
)
def predict_sign_language_video_with_speed_levels(self, video_request: UploadFile, speed: int) -> Dict[str, str]:
try:
# Create a temporary file to save the video
video_location = f"files/{video_request.filename}"
with open(video_location, "wb") as file:
file.write(video_request.file.read())
# Read the video using OpenCV
video = cv2.VideoCapture(video_location)
predictions = []
final_predictions = []
frame_count = 0
# Determine the number of frames per sign based on the speed level
frames_per_sign = self.speed_levels.get(speed, 50) # Default to level 1 if speed level is not provided
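# e.g. speed level 3 maps to 30 frames, so every 30 frames are aggregated into one predicted sign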
# Loop through the frames of the video
while frame_count <= video.get(cv2.CAP_PROP_FRAME_COUNT):
success, frame = video.read()
if not success:
break
# TODO Add to Config
if frame_count >= 500:
break
# Preprocess the frame
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = cv2.resize(frame, (IMG_SIZE, IMG_SIZE))
frame = extract_hand_shape(frame)
frame = np.array([frame], dtype=np.float32) / 255.0
# Make prediction
prediction = self.model.predict(frame)
class_index = np.argmax(prediction)
class_name = self.classes[class_index]
sinhala_letter = self.mappings.letter_mapping.get(class_name, 'Unknown')
# Store the prediction for the frame
predictions.append(sinhala_letter)
frame_count += 1
# TODO Add to Config
threshold_percentage = 60
# Check if the required number of frames per sign has been reached
if frame_count % frames_per_sign == 0:
predictions = get_predicted_percentage(predictions, threshold_percentage)
final_predictions = final_predictions+predictions
predictions = []
video.release()
# Delete the video file
os.remove(video_location)
return {'frame_count': frame_count, 'predictions': final_predictions}
except Exception as e:
logger.info(f"Failed to make predictions. {e}")
raise HTTPException(
status_code=500,
detail="Failed to make predictions"
)
def extract_hand_shape(image):
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
_, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = sorted(contours, key=cv2.contourArea, reverse=True)
hand_contour = contours[0]
hand_shape = np.zeros_like(image)
cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)
return hand_shape
def get_predicted_percentage(array, threshold):
counts = Counter(array)
total_elements = len(array)
percentages = {}
for element, count in counts.items():
percentage = (count / total_elements) * 100
percentages[element] = percentage
elements_above_threshold = [element for element, percentage in percentages.items() if percentage > threshold]
return elements_above_threshold
\ No newline at end of file
......@@ -28,3 +28,12 @@ classes =['A',
'T',
'Uh',
'Uhh']
speed_levels = {
1: 50, # 50 frames per sign for level 1
2: 40, # 40 frames per sign for level 2
3: 30, # 30 frames per sign for level 3
4: 20, # 20 frames per sign for level 4
5: 10 # 10 frames per sign for level 5
}
\ No newline at end of file
This source diff could not be displayed because it is too large. You can view the blob instead.
......@@ -22,7 +22,9 @@
"@fullcalendar/timegrid": "^6.1.5",
"@fullcalendar/timeline": "^6.1.5",
"@hello-pangea/dnd": "^16.2.0",
"@material-ui/core": "^4.12.4",
"@mui/base": "^5.0.0-alpha.126",
"@mui/icons-material": "^5.14.6",
"@mui/lab": "^5.0.0-alpha.127",
"@mui/material": "^5.12.1",
"@mui/system": "^5.12.1",
......@@ -54,6 +56,7 @@
"jwt-decode": "^3.1.2",
"lodash": "^4.17.21",
"match-sorter": "^6.3.1",
"mui-file-input": "^3.0.0",
"notistack": "^3.0.1",
"process": "^0.11.10",
"react": "^18.2.0",
......@@ -76,6 +79,7 @@
"react-infinite-scroll-component": "^6.1.0",
"react-intersection-observer": "^9.4.3",
"react-intl": "^6.4.0",
"react-material-file-upload": "^0.0.4",
"react-number-format": "^5.1.4",
"react-organizational-chart": "^2.2.1",
"react-quill": "^2.0.0",
......@@ -92,6 +96,7 @@
"react-table-sticky": "^1.1.3",
"react-timer-hook": "^3.0.5",
"react-to-print": "^2.14.12",
"react-webcam": "^7.1.1",
"react-window": "^1.8.9",
"react-zoom-pan-pinch": "^3.0.7",
"react18-input-otp": "^1.1.3",
......
.videoContainer {
position: relative;
width: 50%;
padding-top: 56.25%; /* 16:9 aspect ratio */
}
.futuristicVideo {
position: absolute;
top: 0;
left: 0;
width: 100%;
height: 100%;
border: none;
outline: none;
background-color: black;
opacity: 0.8;
filter: blur(4px);
transition: opacity 0.5s, filter 0.5s;
}
.futuristicVideo:hover {
opacity: 1;
filter: blur(0);
}
import { useRef, useState } from 'react';
import { Button, Grid } from '@mui/material';
import Webcam from 'react-webcam';
import { PauseCircleOutlined, PlayCircleOutlined } from '@ant-design/icons';
//@ts-ignore
const WebcamStreamCapture = ({ onVideoRecorded }) => {
const webcamRef = useRef(null);
const mediaRecorderRef = useRef(null);
const [capturing, setCapturing] = useState(false);
const [recordedChunks, setRecordedChunks] = useState([]);
const handleStartCaptureClick = () => {
setRecordedChunks([]);
setCapturing(true);
//@ts-ignore
mediaRecorderRef.current = new MediaRecorder(webcamRef.current.stream, {
mimeType: 'video/webm'
});
//@ts-ignore
mediaRecorderRef.current.addEventListener('dataavailable', handleDataAvailable);
//@ts-ignore
mediaRecorderRef.current.start();
};
//@ts-ignore
const handleDataAvailable = ({ data }) => {
if (data.size > 0) {
setRecordedChunks((prev) => prev.concat(data));
}
};
const handleStopCaptureClick = () => {
//@ts-ignore
mediaRecorderRef.current.stop();
setCapturing(false);
};
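// Despite its name, this handler does not save a file: it builds a blob URL from the
// recorded chunks and hands it to the parent component via onVideoRecorded.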
const handleDownload = () => {
if (recordedChunks.length) {
const blob = new Blob(recordedChunks, {
type: 'video/webm' // Use 'video/webm' to match MediaRecorder mimeType
});
const url = URL.createObjectURL(blob);
onVideoRecorded(url); // Pass the blob URL to the parent component
setRecordedChunks([]);
}
};
return (
<Grid container spacing={2}>
<Grid item xs={12}>
<center>
<Webcam audio={false} ref={webcamRef} style={{ width: '100%', maxWidth: '500px' }} />
</center>
</Grid>
<Grid item xs={12}>
<center>
{capturing ? (
<Button onClick={handleStopCaptureClick} startIcon={<PauseCircleOutlined />} color="error" variant="contained">
Stop Capture
</Button>
) : (
<Button onClick={handleStartCaptureClick} startIcon={<PlayCircleOutlined />} color="error" variant="contained">
Start Capture
</Button>
)}
{recordedChunks.length > 0 && (
<Button onClick={handleDownload} variant="contained" sx={{ ml: 1 }}>
Download
</Button>
)}
</center>
</Grid>
<Grid item xs={12}>
{recordedChunks.length > 0 && (
<center>
{/* @ts-ignore */}
<video
src={recordedChunks.length > 0 ? URL.createObjectURL(new Blob(recordedChunks, { type: 'video/webm' })) : undefined}
controls
autoPlay
/>
</center>
)}
</Grid>
</Grid>
);
};
export default WebcamStreamCapture;
// material-ui
// third-party
// project import
import MainCard from 'components/MainCard';
import ScrollX from 'components/ScrollX';
import {
Box,
Button,
ButtonGroup,
Card,
CardContent,
CardHeader,
Container,
Grid,
IconButton,
InputAdornment,
LinearProgress,
Paper,
Slider,
Stack,
TextField,
Typography
} from '@mui/material';
// layouts
// sections
import { useState } from 'react';
import { useSnackbar } from 'notistack';
import { CloudUploadOutlined, CopyOutlined, TranslationOutlined, VideoCameraOutlined } from '@ant-design/icons';
import { MuiFileInput } from 'mui-file-input';
import SignLanguageToTextService from '../../../services/SignLanguageToText.js';
import WebcamStreamCapture from './WebcamStreamCapture';
// assets
......@@ -15,15 +39,365 @@ import ScrollX from 'components/ScrollX';
// ==============================|| Process ||============================== //
const Process = () => {
const [file, setFile] = useState<File | string | null>(null);
const [isUploadFile, setIsUploadFile] = useState<boolean | string | null>(true);
const [videoUrl, setVideoUrl] = useState('');
const [loading, setLoading] = useState(false);
const [value, setValue] = useState('');
const [speed, setSpeed] = useState(0);
const [recordedVideoUrl, setRecordedVideoUrl] = useState(null);
const handleDropSingleFile = (files: any) => {
if (files) {
setFile(
Object.assign(files, {
preview: URL.createObjectURL(files)
})
);
setVideoUrl(URL.createObjectURL(files));
}
};
const checkTranslationTypeForUpload = () => {
if (isUploadFile) {
return 'contained';
} else {
return 'outlined';
}
};
const checkTranslationTypeForRecord = () => {
if (!isUploadFile) {
return 'contained';
} else {
return 'outlined';
}
};
const { enqueueSnackbar } = useSnackbar();
const onCopy = (text: string) => {
if (text) {
navigator.clipboard.writeText(text);
enqueueSnackbar('Copied!', { variant: 'success' });
}
};
const handleChange = (event: React.ChangeEvent<HTMLTextAreaElement>) => {
setValue(event.target.value);
};
// Video Upload
const translateSignLanguageToText = async () => {
if (file) {
setLoading(true);
const formData = new FormData();
//@ts-ignore
formData.append('video_request', file, file.name);
try {
const response = await SignLanguageToTextService.predictSignLanguageVideo(speed, formData);
if (response.status == 200) {
console.log(response.data);
setValue(response.data.predictions.join(''));
} else {
enqueueSnackbar('Something went Wrong!', { variant: 'error' });
}
setLoading(false);
} catch (error) {
console.log(error);
setLoading(false);
enqueueSnackbar('Something went Wrong!', { variant: 'error' });
}
} else {
enqueueSnackbar('Please select a file.', { variant: 'warning' });
}
};
const translateSignLanguageToTextRecord = async () => {
console.log(recordedVideoUrl);
if (recordedVideoUrl) {
setLoading(true);
const formData = new FormData();
//@ts-ignore
formData.append('video_request', recordedVideoUrl, recordedVideoUrl.name);
try {
const response = await SignLanguageToTextService.predictSignLanguageVideo(speed, formData);
if (response.status == 200) {
console.log(response.data);
setValue(response.data.predictions.join(''));
} else {
enqueueSnackbar('Something went Wrong!', { variant: 'error' });
}
setLoading(false);
} catch (error) {
console.log(error);
setLoading(false);
enqueueSnackbar('Something went Wrong!', { variant: 'error' });
}
} else {
enqueueSnackbar('Please record a video.', { variant: 'warning' });
}
};
// Slider label formatter; also keeps the speed state in sync with the slider value
function valuetext(value: number) {
setSpeed(value);
return `${value}`;
}
const handleVideoRecorded = (url: any) => {
setRecordedVideoUrl(url);
};
return (
<>
<MainCard content={false}>
<ScrollX>
{/* content here */}
<Container
sx={{
padding: 2
}}
>
<ButtonGroup disableElevation variant="contained" aria-label="Disabled elevation buttons" sx={{ marginBottom: '10px' }}>
<Button
variant={checkTranslationTypeForUpload()}
startIcon={<CloudUploadOutlined />}
onClick={() => {
setIsUploadFile(true);
}}
>
Upload
</Button>
<Button
variant={checkTranslationTypeForRecord()}
startIcon={<VideoCameraOutlined />}
onClick={() => {
setIsUploadFile(false);
}}
>
Record
</Button>
</ButtonGroup>
{isUploadFile ? (
<Box sx={{ flexGrow: 1 }}>
<Card>
<CardHeader title="Upload a video containing Sign Language" />
<Grid container spacing={2}>
<Grid item xs={12} md={6}>
<Card sx={{ marginBottom: '10px', marginLeft: '10px' }}>
<CardContent>
{/* ! Important */}
{/* @ts-ignore */}
<MuiFileInput value={file} onChange={handleDropSingleFile} inputProps={{ accept: 'video/*' }} />
{/* {file && (
<Paper style={{ padding: '20px' }}>
<Typography variant="h5" align="center" gutterBottom>
Preview
</Typography>
<div style={{ marginTop: '20px', textAlign: 'center' }}>
<video src={videoUrl} width="400" controls />
</div>
</Paper>
)} */}
<Paper style={{ padding: '20px', marginTop: '15px' }}>
<Typography variant="h5" align="center" gutterBottom>
Preview
</Typography>
<div style={{ marginTop: '20px', textAlign: 'center' }}>
{file ? <video src={videoUrl} width="400" controls /> : <p>No Video Selected ...</p>}
</div>
</Paper>
</CardContent>
</Card>
</Grid>
<Grid item xs={12} md={6}>
<Card sx={{ p: 5, minHeight: 300, marginBottom: '10px', marginRight: '10px' }}>
<Box display="grid" gap={5}>
<Stack spacing={2}>
<Grid container spacing={2}>
<Grid item xs={12} md={6}>
<h3>Set Sign Speed </h3>
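{/* Note: the backend's speed_levels mapping (utils mappings) defines levels 1-5; other speed values fall back to the 50-frame default. */}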
<Slider
defaultValue={30}
getAriaValueText={valuetext}
valueLabelDisplay="auto"
step={10}
marks
min={10}
max={110}
/>
<h4>Speed - {speed}</h4>
</Grid>
<Grid item xs={12} md={6} container direction="row" justifyContent="flex-end" alignItems="center">
<Button
variant="contained"
style={{ width: '200px', height: '60px', fontSize: '20px' }}
sx={{
mb: 3
}}
disabled={loading}
onClick={() => {
translateSignLanguageToText();
}}
endIcon={<TranslationOutlined />}
>
Translate
</Button>
</Grid>
</Grid>
{loading ? (
<Card>
<CardContent>
<LinearProgress />
<center>
<Typography variant="h5" component="div" sx={{ marginTop: 2 }}>
Loading...
</Typography>
</center>
</CardContent>
</Card>
) : (
<div>
<Typography variant="overline" sx={{ color: 'text.secondary' }}>
Translated Text
</Typography>
<TextField
fullWidth
value={value}
onChange={handleChange}
InputProps={{
endAdornment: (
<InputAdornment position="end">
<IconButton onClick={() => onCopy(value)}>
<CopyOutlined />
</IconButton>
</InputAdornment>
)
}}
/>
</div>
)}
</Stack>
</Box>
</Card>
</Grid>
</Grid>
</Card>
</Box>
) : (
// Video Capture
<Box sx={{ flexGrow: 1 }}>
<Card>
<CardHeader title="Capture a video containing Sign Language" />
<Grid container spacing={2}>
{/* Paste Here */}
<Grid item xs={12} md={6}>
<Card sx={{ marginBottom: '10px', marginLeft: '10px' }}>
<CardContent>
<WebcamStreamCapture onVideoRecorded={handleVideoRecorded} />
</CardContent>
{recordedVideoUrl && (
<div>
<h2>Recorded Video</h2>
<video src={recordedVideoUrl} controls autoPlay />
</div>
)}
</Card>
</Grid>
<Grid item xs={12} md={6}>
<Card sx={{ p: 5, minHeight: 300, marginBottom: '10px', marginRight: '10px' }}>
<Box display="grid" gap={5}>
<Stack spacing={2}>
<Grid container spacing={2}>
<Grid item xs={12} md={6}>
<h3>Set Sign Speed </h3>
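{/* Note: the backend's speed_levels mapping (utils mappings) defines levels 1-5; other speed values fall back to the 50-frame default. */}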
<Slider
defaultValue={30}
getAriaValueText={valuetext}
valueLabelDisplay="auto"
step={10}
marks
min={10}
max={110}
/>
<h4>Speed - {speed}</h4>
</Grid>
<Grid item xs={12} md={6} container direction="row" justifyContent="flex-end" alignItems="center">
<Button
variant="contained"
style={{ width: '200px', height: '60px', fontSize: '20px' }}
sx={{
mb: 3
}}
disabled={loading}
onClick={() => {
translateSignLanguageToTextRecord();
}}
endIcon={<TranslationOutlined />}
>
Translate
</Button>
</Grid>
</Grid>
{loading ? (
<Card>
<CardContent>
<LinearProgress />
<center>
<Typography variant="h5" component="div" sx={{ marginTop: 2 }}>
Loading...
</Typography>
</center>
</CardContent>
</Card>
) : (
<div>
<Typography variant="overline" sx={{ color: 'text.secondary' }}>
Translated Text
</Typography>
<TextField
fullWidth
value={value}
onChange={handleChange}
InputProps={{
endAdornment: (
<InputAdornment position="end">
<IconButton onClick={() => onCopy(value)}>
<CopyOutlined />
</IconButton>
</InputAdornment>
)
}}
/>
</div>
)}
</Stack>
</Box>
</Card>
</Grid>
</Grid>
</Card>
</Box>
)}
</Container>
</ScrollX>
</MainCard>
</>
)
}
);
};
export default Process;
import axios from 'axios';
class SignLanguageToTextService {
predictSignLanguageVideo(speed, data) {
return axios.post(
`http://127.0.0.1:8000/predict-sign-language/video/speed_levels?speed=${speed}`,
data
);
}
}
export default new SignLanguageToTextService();
......@@ -1119,6 +1119,13 @@
dependencies:
regenerator-runtime "^0.13.11"
"@babel/runtime@^7.22.10", "@babel/runtime@^7.4.4", "@babel/runtime@^7.8.3":
version "7.22.11"
resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.22.11.tgz#7a9ba3bbe406ad6f9e8dd4da2ece453eb23a77a4"
integrity sha512-ee7jVNlWN09+KftVOu9n7S8gQzD/Z6hN/I8VBRXW4P1+Xe7kJGXMwu8vds4aGIMHZnNbdpSWCfZZtinytpcAvA==
dependencies:
regenerator-runtime "^0.14.0"
"@babel/template@^7.18.10", "@babel/template@^7.20.7", "@babel/template@^7.3.3":
version "7.20.7"
resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.20.7.tgz#a15090c2839a83b02aa996c0b4994005841fd5a8"
......@@ -1367,6 +1374,11 @@
"@emotion/sheet" "^1.2.1"
"@emotion/utils" "^1.2.0"
"@emotion/hash@^0.8.0":
version "0.8.0"
resolved "https://registry.yarnpkg.com/@emotion/hash/-/hash-0.8.0.tgz#bbbff68978fefdbe68ccb533bc8cbe1d1afb5413"
integrity sha512-kBJtf7PH6aWwZ6fka3zQ0p6SBYzx4fl1LoZXE2RrnYST9Xljm7WfKJrU4g/Xr3Beg72MLrp1AWNUmuYJTL7Cow==
"@emotion/hash@^0.9.0":
version "0.9.0"
resolved "https://registry.yarnpkg.com/@emotion/hash/-/hash-0.9.0.tgz#c5153d50401ee3c027a57a177bc269b16d889cb7"
......@@ -2357,6 +2369,61 @@
resolved "https://registry.yarnpkg.com/@leichtgewicht/ip-codec/-/ip-codec-2.0.4.tgz#b2ac626d6cb9c8718ab459166d4bb405b8ffa78b"
integrity sha512-Hcv+nVC0kZnQ3tD9GVu5xSMR4VVYOteQIr/hwFPVEvPdlXqgGEuRjiheChHgdM+JyqdgNcmzZOX/tnl0JOiI7A==
"@material-ui/core@^4.12.4":
version "4.12.4"
resolved "https://registry.yarnpkg.com/@material-ui/core/-/core-4.12.4.tgz#4ac17488e8fcaf55eb6a7f5efb2a131e10138a73"
integrity sha512-tr7xekNlM9LjA6pagJmL8QCgZXaubWUwkJnoYcMKd4gw/t4XiyvnTkjdGrUVicyB2BsdaAv1tvow45bPM4sSwQ==
dependencies:
"@babel/runtime" "^7.4.4"
"@material-ui/styles" "^4.11.5"
"@material-ui/system" "^4.12.2"
"@material-ui/types" "5.1.0"
"@material-ui/utils" "^4.11.3"
"@types/react-transition-group" "^4.2.0"
clsx "^1.0.4"
hoist-non-react-statics "^3.3.2"
popper.js "1.16.1-lts"
prop-types "^15.7.2"
react-is "^16.8.0 || ^17.0.0"
react-transition-group "^4.4.0"
"@material-ui/styles@^4.11.5":
version "4.11.5"
resolved "https://registry.yarnpkg.com/@material-ui/styles/-/styles-4.11.5.tgz#19f84457df3aafd956ac863dbe156b1d88e2bbfb"
integrity sha512-o/41ot5JJiUsIETME9wVLAJrmIWL3j0R0Bj2kCOLbSfqEkKf0fmaPt+5vtblUh5eXr2S+J/8J3DaCb10+CzPGA==
dependencies:
"@babel/runtime" "^7.4.4"
"@emotion/hash" "^0.8.0"
"@material-ui/types" "5.1.0"
"@material-ui/utils" "^4.11.3"
clsx "^1.0.4"
csstype "^2.5.2"
hoist-non-react-statics "^3.3.2"
jss "^10.5.1"
jss-plugin-camel-case "^10.5.1"
jss-plugin-default-unit "^10.5.1"
jss-plugin-global "^10.5.1"
jss-plugin-nested "^10.5.1"
jss-plugin-props-sort "^10.5.1"
jss-plugin-rule-value-function "^10.5.1"
jss-plugin-vendor-prefixer "^10.5.1"
prop-types "^15.7.2"
"@material-ui/system@^4.12.2":
version "4.12.2"
resolved "https://registry.yarnpkg.com/@material-ui/system/-/system-4.12.2.tgz#f5c389adf3fce4146edd489bf4082d461d86aa8b"
integrity sha512-6CSKu2MtmiJgcCGf6nBQpM8fLkuB9F55EKfbdTC80NND5wpTmKzwdhLYLH3zL4cLlK0gVaaltW7/wMuyTnN0Lw==
dependencies:
"@babel/runtime" "^7.4.4"
"@material-ui/utils" "^4.11.3"
csstype "^2.5.2"
prop-types "^15.7.2"
"@material-ui/types@5.1.0":
version "5.1.0"
resolved "https://registry.yarnpkg.com/@material-ui/types/-/types-5.1.0.tgz#efa1c7a0b0eaa4c7c87ac0390445f0f88b0d88f2"
integrity sha512-7cqRjrY50b8QzRSYyhSpx4WRw2YuO0KKIGQEVk5J8uoz2BanawykgZGoWEqKm7pVIbzFDN0SpPcVV4IhOFkl8A==
"@material-ui/types@^4.0.0":
version "4.1.1"
resolved "https://registry.yarnpkg.com/@material-ui/types/-/types-4.1.1.tgz#b65e002d926089970a3271213a3ad7a21b17f02b"
......@@ -2364,6 +2431,15 @@
dependencies:
"@types/react" "*"
"@material-ui/utils@^4.11.3":
version "4.11.3"
resolved "https://registry.yarnpkg.com/@material-ui/utils/-/utils-4.11.3.tgz#232bd86c4ea81dab714f21edad70b7fdf0253942"
integrity sha512-ZuQPV4rBK/V1j2dIkSSEcH5uT6AaHuKWFfotADHsC0wVL1NLd2WkFCm4ZZbX33iO4ydl6V0GPngKm8HZQ2oujg==
dependencies:
"@babel/runtime" "^7.4.4"
prop-types "^15.7.2"
react-is "^16.8.0 || ^17.0.0"
"@messageformat/core@^3.0.1":
version "3.1.0"
resolved "https://registry.yarnpkg.com/@messageformat/core/-/core-3.1.0.tgz#d4d2f5c3555228a6b5980b122a02b53dfc6458bd"
......@@ -2419,6 +2495,13 @@
resolved "https://registry.yarnpkg.com/@mui/core-downloads-tracker/-/core-downloads-tracker-5.12.1.tgz#f828554889e6ab7345395626bb46e561c113435e"
integrity sha512-rNiQYHtkXljcvCEnhWrJzie1ifff5O98j3uW7ZlchFgD8HWxEcz/QoxZvo+sCKC9aayAgxi9RsVn2VjCyp5CrA==
"@mui/icons-material@^5.14.6":
version "5.14.6"
resolved "https://registry.yarnpkg.com/@mui/icons-material/-/icons-material-5.14.6.tgz#0efdcba2c30d6b22e6ead787b67247da173bd11a"
integrity sha512-7Cujy7lRGTj2T3SvY9C9ZOTFDtrXJogeNnRcU/ODyNoxwskMNPFOcc15F+98MAdJenBVLJPYu+vPP6DUvEpNrA==
dependencies:
"@babel/runtime" "^7.22.10"
"@mui/lab@^5.0.0-alpha.127":
version "5.0.0-alpha.127"
resolved "https://registry.yarnpkg.com/@mui/lab/-/lab-5.0.0-alpha.127.tgz#158f59cb0ab993be840cbf79ab643ca32fd9a5d8"
......@@ -3610,6 +3693,13 @@
dependencies:
"@types/react" "*"
"@types/react-transition-group@^4.2.0":
version "4.4.6"
resolved "https://registry.yarnpkg.com/@types/react-transition-group/-/react-transition-group-4.4.6.tgz#18187bcda5281f8e10dfc48f0943e2fdf4f75e2e"
integrity sha512-VnCdSxfcm08KjsJVQcfBmhEQAPnLB8G08hAxn39azX1qYBQ/5RVQuoHuKIcfKOdncuaUvEpFKFzEvbtIMsfVew==
dependencies:
"@types/react" "*"
"@types/react-transition-group@^4.4.5":
version "4.4.5"
resolved "https://registry.yarnpkg.com/@types/react-transition-group/-/react-transition-group-4.4.5.tgz#aae20dcf773c5aa275d5b9f7cdbca638abc5e416"
......@@ -4975,7 +5065,7 @@ clone@^2.1.1, clone@^2.1.2:
resolved "https://registry.yarnpkg.com/clone/-/clone-2.1.2.tgz#1b7f4b9f591f1e8f83670401600345a02887435f"
integrity sha512-3Pe/CF1Nn94hyhIYpjtiLhdCoEoz0DqQ+988E9gmeEdQZlojxnOb74wctFyuwWQHzqyf9X7C7MG8juUpqBJT8w==
clsx@^1.1.0, clsx@^1.1.1, clsx@^1.2.1:
clsx@^1.0.4, clsx@^1.1.0, clsx@^1.1.1, clsx@^1.2.1:
version "1.2.1"
resolved "https://registry.yarnpkg.com/clsx/-/clsx-1.2.1.tgz#0ddc4a20a549b59c93a4116bb26f5294ca17dc12"
integrity sha512-EcR6r5a8bj6pu3ycsa/E/cKVGuTgZJZdsyUYHOksG/UHIiKfjxzRxYJpyVBwYaQeOvghal9fcc4PidlgzugAQg==
......@@ -5411,6 +5501,14 @@ css-tree@~2.2.0:
mdn-data "2.0.28"
source-map-js "^1.0.1"
css-vendor@^2.0.8:
version "2.0.8"
resolved "https://registry.yarnpkg.com/css-vendor/-/css-vendor-2.0.8.tgz#e47f91d3bd3117d49180a3c935e62e3d9f7f449d"
integrity sha512-x9Aq0XTInxrkuFeHKbYC7zWY8ai7qJ04Kxd9MnvbC1uO5DagxoHQjm4JvG+vCdXOoFtCjbL2XSZfxmoYa9uQVQ==
dependencies:
"@babel/runtime" "^7.8.3"
is-in-browser "^1.0.2"
css-what@^3.2.1:
version "3.4.2"
resolved "https://registry.yarnpkg.com/css-what/-/css-what-3.4.2.tgz#ea7026fcb01777edbde52124e21f327e7ae950e4"
......@@ -5521,6 +5619,11 @@ cssstyle@^2.3.0:
dependencies:
cssom "~0.3.6"
csstype@^2.5.2:
version "2.6.21"
resolved "https://registry.yarnpkg.com/csstype/-/csstype-2.6.21.tgz#2efb85b7cc55c80017c66a5ad7cbd931fda3a90e"
integrity sha512-Z1PhmomIfypOpoMjRQB70jfvy/wxT50qW08YXO5lMIJkrdq4yOTR+AW7FqutScmB9NkLwxo+jU+kZLbofZZq/w==
csstype@^3.0.2, csstype@^3.1.2:
version "3.1.2"
resolved "https://registry.yarnpkg.com/csstype/-/csstype-3.1.2.tgz#1d4bf9d572f11c14031f0436e1c10bc1f571f50b"
......@@ -6657,6 +6760,13 @@ file-loader@^6.2.0:
loader-utils "^2.0.0"
schema-utils "^3.0.0"
file-selector@^0.4.0:
version "0.4.0"
resolved "https://registry.yarnpkg.com/file-selector/-/file-selector-0.4.0.tgz#59ec4f27aa5baf0841e9c6385c8386bef4d18b17"
integrity sha512-iACCiXeMYOvZqlF1kTiYINzgepRBymz1wwjiuup9u9nayhb6g4fSwiyJ/6adli+EPwrWtpgQAh2PoS7HukEGEg==
dependencies:
tslib "^2.0.3"
file-selector@^0.6.0:
version "0.6.0"
resolved "https://registry.yarnpkg.com/file-selector/-/file-selector-0.6.0.tgz#fa0a8d9007b829504db4d07dd4de0310b65287dc"
......@@ -7394,6 +7504,11 @@ hyphen@^1.6.4:
resolved "https://registry.yarnpkg.com/hyphen/-/hyphen-1.6.5.tgz#956a4c929c111441dc798ad88de3c941e1b383d3"
integrity sha512-MZbhHutRaHCUxjvJBYqL51Ntjbq16LemuJr2u+LpKd3UwyNHZsZAKh5uD+KmdAHtWpteupOqQTTezVGR/al43w==
hyphenate-style-name@^1.0.3:
version "1.0.4"
resolved "https://registry.yarnpkg.com/hyphenate-style-name/-/hyphenate-style-name-1.0.4.tgz#691879af8e220aea5750e8827db4ef62a54e361d"
integrity sha512-ygGZLjmXfPHj+ZWh6LwbC37l43MhfztxetbFCoYTM2VjkIUpeHgSNn7QIyVFj7YQ1Wl9Cbw5sholVJPzWvC2MQ==
iconv-lite@0.4.24:
version "0.4.24"
resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.24.tgz#2022b4b25fbddc21d2f524974a474aafe733908b"
......@@ -7673,6 +7788,11 @@ is-hexadecimal@^1.0.0:
resolved "https://registry.yarnpkg.com/is-hexadecimal/-/is-hexadecimal-1.0.4.tgz#cc35c97588da4bd49a8eedd6bc4082d44dcb23a7"
integrity sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw==
is-in-browser@^1.0.2, is-in-browser@^1.1.3:
version "1.1.3"
resolved "https://registry.yarnpkg.com/is-in-browser/-/is-in-browser-1.1.3.tgz#56ff4db683a078c6082eb95dad7dc62e1d04f835"
integrity sha512-FeXIBgG/CPGd/WUxuEyvgGTEfwiG9Z4EKGxjNMRqviiIIfsmgrpnHLffEDdwUHqNva1VEW91o3xBT/m8Elgl9g==
is-map@^2.0.1, is-map@^2.0.2:
version "2.0.2"
resolved "https://registry.yarnpkg.com/is-map/-/is-map-2.0.2.tgz#00922db8c9bf73e81b7a335827bc2a43f2b91127"
......@@ -8572,6 +8692,76 @@ jsonwebtoken@^9.0.0:
ms "^2.1.1"
semver "^7.3.8"
jss-plugin-camel-case@^10.5.1:
version "10.10.0"
resolved "https://registry.yarnpkg.com/jss-plugin-camel-case/-/jss-plugin-camel-case-10.10.0.tgz#27ea159bab67eb4837fa0260204eb7925d4daa1c"
integrity sha512-z+HETfj5IYgFxh1wJnUAU8jByI48ED+v0fuTuhKrPR+pRBYS2EDwbusU8aFOpCdYhtRc9zhN+PJ7iNE8pAWyPw==
dependencies:
"@babel/runtime" "^7.3.1"
hyphenate-style-name "^1.0.3"
jss "10.10.0"
jss-plugin-default-unit@^10.5.1:
version "10.10.0"
resolved "https://registry.yarnpkg.com/jss-plugin-default-unit/-/jss-plugin-default-unit-10.10.0.tgz#db3925cf6a07f8e1dd459549d9c8aadff9804293"
integrity sha512-SvpajxIECi4JDUbGLefvNckmI+c2VWmP43qnEy/0eiwzRUsafg5DVSIWSzZe4d2vFX1u9nRDP46WCFV/PXVBGQ==
dependencies:
"@babel/runtime" "^7.3.1"
jss "10.10.0"
jss-plugin-global@^10.5.1:
version "10.10.0"
resolved "https://registry.yarnpkg.com/jss-plugin-global/-/jss-plugin-global-10.10.0.tgz#1c55d3c35821fab67a538a38918292fc9c567efd"
integrity sha512-icXEYbMufiNuWfuazLeN+BNJO16Ge88OcXU5ZDC2vLqElmMybA31Wi7lZ3lf+vgufRocvPj8443irhYRgWxP+A==
dependencies:
"@babel/runtime" "^7.3.1"
jss "10.10.0"
jss-plugin-nested@^10.5.1:
version "10.10.0"
resolved "https://registry.yarnpkg.com/jss-plugin-nested/-/jss-plugin-nested-10.10.0.tgz#db872ed8925688806e77f1fc87f6e62264513219"
integrity sha512-9R4JHxxGgiZhurDo3q7LdIiDEgtA1bTGzAbhSPyIOWb7ZubrjQe8acwhEQ6OEKydzpl8XHMtTnEwHXCARLYqYA==
dependencies:
"@babel/runtime" "^7.3.1"
jss "10.10.0"
tiny-warning "^1.0.2"
jss-plugin-props-sort@^10.5.1:
version "10.10.0"
resolved "https://registry.yarnpkg.com/jss-plugin-props-sort/-/jss-plugin-props-sort-10.10.0.tgz#67f4dd4c70830c126f4ec49b4b37ccddb680a5d7"
integrity sha512-5VNJvQJbnq/vRfje6uZLe/FyaOpzP/IH1LP+0fr88QamVrGJa0hpRRyAa0ea4U/3LcorJfBFVyC4yN2QC73lJg==
dependencies:
"@babel/runtime" "^7.3.1"
jss "10.10.0"
jss-plugin-rule-value-function@^10.5.1:
version "10.10.0"
resolved "https://registry.yarnpkg.com/jss-plugin-rule-value-function/-/jss-plugin-rule-value-function-10.10.0.tgz#7d99e3229e78a3712f78ba50ab342e881d26a24b"
integrity sha512-uEFJFgaCtkXeIPgki8ICw3Y7VMkL9GEan6SqmT9tqpwM+/t+hxfMUdU4wQ0MtOiMNWhwnckBV0IebrKcZM9C0g==
dependencies:
"@babel/runtime" "^7.3.1"
jss "10.10.0"
tiny-warning "^1.0.2"
jss-plugin-vendor-prefixer@^10.5.1:
version "10.10.0"
resolved "https://registry.yarnpkg.com/jss-plugin-vendor-prefixer/-/jss-plugin-vendor-prefixer-10.10.0.tgz#c01428ef5a89f2b128ec0af87a314d0c767931c7"
integrity sha512-UY/41WumgjW8r1qMCO8l1ARg7NHnfRVWRhZ2E2m0DMYsr2DD91qIXLyNhiX83hHswR7Wm4D+oDYNC1zWCJWtqg==
dependencies:
"@babel/runtime" "^7.3.1"
css-vendor "^2.0.8"
jss "10.10.0"
jss@10.10.0, jss@^10.5.1:
version "10.10.0"
resolved "https://registry.yarnpkg.com/jss/-/jss-10.10.0.tgz#a75cc85b0108c7ac8c7b7d296c520a3e4fbc6ccc"
integrity sha512-cqsOTS7jqPsPMjtKYDUpdFC0AbhYFLTcuGRqymgmdJIeQ8cH7+AgX7YSgQy79wXloZq2VvATYxUOUQEvS1V/Zw==
dependencies:
"@babel/runtime" "^7.3.1"
csstype "^3.0.2"
is-in-browser "^1.1.3"
tiny-warning "^1.0.2"
"jsx-ast-utils@^2.4.1 || ^3.0.0", jsx-ast-utils@^3.3.3:
version "3.3.3"
resolved "https://registry.yarnpkg.com/jsx-ast-utils/-/jsx-ast-utils-3.3.3.tgz#76b3e6e6cece5c69d49a5792c3d01bd1a0cdc7ea"
......@@ -9051,6 +9241,13 @@ ms@2.1.3, ms@^2.1.1:
resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2"
integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==
mui-file-input@^3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/mui-file-input/-/mui-file-input-3.0.0.tgz#c4a96273071b7c2ec0808f873822747335396b65"
integrity sha512-Nin+6Xq+X7MW3iNzYuTExx552uWrnXtF30ZzUig/zJQP5vVRB7yj4iro8ETKeQ5nrYM2zdvlHyPIoD2cAsGsmQ==
dependencies:
pretty-bytes "^6.1.1"
multicast-dns@^7.2.5:
version "7.2.5"
resolved "https://registry.yarnpkg.com/multicast-dns/-/multicast-dns-7.2.5.tgz#77eb46057f4d7adbd16d9290fa7299f6fa64cced"
......@@ -9557,6 +9754,11 @@ pkg-up@^3.1.0:
dependencies:
find-up "^3.0.0"
popper.js@1.16.1-lts:
version "1.16.1-lts"
resolved "https://registry.yarnpkg.com/popper.js/-/popper.js-1.16.1-lts.tgz#cf6847b807da3799d80ee3d6d2f90df8a3f50b05"
integrity sha512-Kjw8nKRl1m+VrSFCoVGPph93W/qrSO7ZkqPpTf7F4bk/sqcfWK019dWBUpE/fBOsOQY1dks/Bmcbfn1heM/IsA==
postcss-attribute-case-insensitive@^5.0.2:
version "5.0.2"
resolved "https://registry.yarnpkg.com/postcss-attribute-case-insensitive/-/postcss-attribute-case-insensitive-5.0.2.tgz#03d761b24afc04c09e757e92ff53716ae8ea2741"
......@@ -10165,6 +10367,11 @@ pretty-bytes@^5.3.0, pretty-bytes@^5.4.1:
resolved "https://registry.yarnpkg.com/pretty-bytes/-/pretty-bytes-5.6.0.tgz#356256f643804773c82f64723fe78c92c62beaeb"
integrity sha512-FFw039TmrBqFK8ma/7OL3sDz/VytdtJr044/QUJtH0wK9lb9jLq9tJyIxUwtQJHwar2BqtiA4iCWSwo9JLkzFg==
pretty-bytes@^6.1.1:
version "6.1.1"
resolved "https://registry.yarnpkg.com/pretty-bytes/-/pretty-bytes-6.1.1.tgz#38cd6bb46f47afbf667c202cfc754bffd2016a3b"
integrity sha512-mQUvGU6aUFQ+rNvTIAcZuWGRT9a6f6Yrg9bHs4ImKF+HZCEK+plBvnAZYSIQztknZF2qnzNtr6F8s0+IuptdlQ==
pretty-error@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/pretty-error/-/pretty-error-4.0.0.tgz#90a703f46dd7234adb46d0f84823e9d1cb8f10d6"
......@@ -10602,6 +10809,15 @@ react-draggable@^4.4.5:
clsx "^1.1.1"
prop-types "^15.8.1"
react-dropzone@^11.4.2:
version "11.7.1"
resolved "https://registry.yarnpkg.com/react-dropzone/-/react-dropzone-11.7.1.tgz#3851bb75b26af0bf1b17ce1449fd980e643b9356"
integrity sha512-zxCMwhfPy1olUEbw3FLNPLhAm/HnaYH5aELIEglRbqabizKAdHs0h+WuyOpmA+v1JXn0++fpQDdNfUagWt5hJQ==
dependencies:
attr-accept "^2.2.2"
file-selector "^0.4.0"
prop-types "^15.8.1"
react-dropzone@^14.2.3:
version "14.2.3"
resolved "https://registry.yarnpkg.com/react-dropzone/-/react-dropzone-14.2.3.tgz#0acab68308fda2d54d1273a1e626264e13d4e84b"
......@@ -10676,7 +10892,7 @@ react-is@^16.12.0, react-is@^16.13.1, react-is@^16.7.0:
resolved "https://registry.yarnpkg.com/react-is/-/react-is-16.13.1.tgz#789729a4dc36de2999dc156dd6c1d9c18cea56a4"
integrity sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==
react-is@^17.0.1:
"react-is@^16.8.0 || ^17.0.0", react-is@^17.0.1:
version "17.0.2"
resolved "https://registry.yarnpkg.com/react-is/-/react-is-17.0.2.tgz#e691d4a8e9c789365655539ab372762b0efb54f0"
integrity sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==
......@@ -10686,6 +10902,13 @@ react-is@^18.0.0, react-is@^18.2.0:
resolved "https://registry.yarnpkg.com/react-is/-/react-is-18.2.0.tgz#199431eeaaa2e09f86427efbb4f1473edb47609b"
integrity sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==
react-material-file-upload@^0.0.4:
version "0.0.4"
resolved "https://registry.yarnpkg.com/react-material-file-upload/-/react-material-file-upload-0.0.4.tgz#c829723076d1da52ddc360827f170acc8c321406"
integrity sha512-gXRPpOc3hZdrNiVR4SptGCGrSjOVQkkxnSsOU4++937qcmp4W3N8n/s0LPIc8Pd3T4McK0XaWegGAUPEO5riaA==
dependencies:
react-dropzone "^11.4.2"
react-number-format@^5.1.4:
version "5.1.4"
resolved "https://registry.yarnpkg.com/react-number-format/-/react-number-format-5.1.4.tgz#23057d94a4f1b08e12ee41328e86be929b60a791"
......@@ -10883,7 +11106,7 @@ react-to-print@^2.14.12:
dependencies:
prop-types "^15.8.1"
react-transition-group@^4.4.5:
react-transition-group@^4.4.0, react-transition-group@^4.4.5:
version "4.4.5"
resolved "https://registry.yarnpkg.com/react-transition-group/-/react-transition-group-4.4.5.tgz#e53d4e3f3344da8521489fbef8f2581d42becdd1"
integrity sha512-pZcd1MCJoiKiBR2NRxeCRg13uCXbydPnmB4EOeRrY7480qNWO8IIgQG6zlDkm6uRMsURXPuKq0GWtiM59a5Q6g==
......@@ -10893,6 +11116,11 @@ react-transition-group@^4.4.5:
loose-envify "^1.4.0"
prop-types "^15.6.2"
react-webcam@^7.1.1:
version "7.1.1"
resolved "https://registry.yarnpkg.com/react-webcam/-/react-webcam-7.1.1.tgz#e6290b192cde0d2a1039051a019a18e998d7fb39"
integrity sha512-2W5WN8wmEv8ZlxvyAlOxVuw6new8Bi7+KSPqoq5oa7z1KSKZ72ucaKqCFRtHSuFjZ5sh5ioS9lp4BGwnaZ6lDg==
react-window@^1.8.9:
version "1.8.9"
resolved "https://registry.yarnpkg.com/react-window/-/react-window-1.8.9.tgz#24bc346be73d0468cdf91998aac94e32bc7fa6a8"
......@@ -11017,6 +11245,11 @@ regenerator-runtime@^0.13.11, regenerator-runtime@^0.13.9:
resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz#f6dca3e7ceec20590d07ada785636a90cdca17f9"
integrity sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg==
regenerator-runtime@^0.14.0:
version "0.14.0"
resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.14.0.tgz#5e19d68eb12d486f797e15a3c6a918f7cec5eb45"
integrity sha512-srw17NI0TUWHuGa5CFGGmhfNIeja30WMBfbslPNhf6JrqQlLN5gcrvig1oqPxiVaXb0oW0XRKtH6Nngs5lKCIA==
regenerator-transform@^0.15.1:
version "0.15.1"
resolved "https://registry.yarnpkg.com/regenerator-transform/-/regenerator-transform-0.15.1.tgz#f6c4e99fc1b4591f780db2586328e4d9a9d8dc56"
......
......@@ -42,11 +42,13 @@
"amazon-cognito-identity-js": "^5.2.11",
"apexcharts": "^3.36.0",
"autosuggest-highlight": "^3.3.4",
"axios": "^1.1.2",
"axios": "^1.4.0",
"change-case": "^4.1.2",
"date-fns": "^2.29.3",
"firebase": "^9.12.1",
"form-data": "^4.0.0",
"framer-motion": "^7.5.3",
"fs": "^0.0.1-security",
"highlight.js": "^11.6.0",
"i18next": "^21.10.0",
"i18next-browser-languagedetector": "^6.1.8",
......
......@@ -3,7 +3,7 @@ import { PATH_DASHBOARD } from './routes/paths';
// API
// ----------------------------------------------------------------------
export const BACKEND_URL = 'http://127.0.0.1:8000/';
export const HOST_API_KEY = process.env.HOST_API_KEY || '';
export const FIREBASE_API = {
......
......@@ -21,6 +21,7 @@ import {
Paper,
CircularProgress,
LinearProgress,
Slider,
} from '@mui/material';
// layouts
import MainLayout from '../layouts/main';
......@@ -41,6 +42,9 @@ import { useSnackbar } from 'notistack';
import useCopyToClipboard from 'src/hooks/useCopyToClipboard';
import Iconify from 'src/components/iconify/Iconify';
import dynamic from 'next/dynamic';
import SignLanguageToTextService from 'src/services/SignLanguageToText.js';
import { Block } from 'src/sections/_examples/Block';
const useReactMediaRecorder = () =>
// eslint-disable-next-line react-hooks/rules-of-hooks
......@@ -57,7 +61,9 @@ export default function AboutPage() {
const [isUploadFile, setIsUploadFile] = useState<boolean | string | null>(true);
const [videoUrl, setVideoUrl] = useState('');
const [loading, setLoading] = useState(false);
const [value, setValue] = useState('ආආආආආආආආආආආආආආආආ');
const [value, setValue] = useState('');
const [speed, setSpeed] = useState(0);
const [recordedVideoUrl, setRecordedVideoUrl] = useState(null);
const handleDropSingleFile = useCallback(async (acceptedFiles: File[]) => {
const file = acceptedFiles[0];
......@@ -105,6 +111,68 @@ export default function AboutPage() {
// Video Upload
const translateSignLanguageToText = async () => {
if (file) {
setLoading(true);
const formData = new FormData();
formData.append('video_request', file, file.name);
try {
const response = await SignLanguageToTextService.predictSignLanguageVideo(speed, formData);
if (response.status === 200) {
console.log(response.data);
setValue(response.data.predictions);
} else {
enqueueSnackbar('Something went wrong!', { variant: 'error' });
}
setLoading(false);
} catch (error) {
console.log(error);
setLoading(false);
enqueueSnackbar('Something went wrong!', { variant: 'error' });
}
} else {
enqueueSnackbar('Please select a file.', { variant: 'warning' });
}
};
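// Note (added sketch, not part of the original change): the handler above assumes the backend
// returns an object whose `predictions` field can be rendered directly in the output text field.
// A minimal TypeScript shape for that assumption might look like:
// interface SignTranslationResponse {
//   predictions: string; // hypothetical; the exact type is defined by the backend
// }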
const translateSignLanguageToTextRecord = async () => {
console.log(recordedVideoUrl);
if (recordedVideoUrl) {
setLoading(true);
const formData = new FormData();
formData.append('video_request', recordedVideoUrl, recordedVideoUrl.name);
try {
const response = await SignLanguageToTextService.predictSignLanguageVideo(speed, formData);
if (response.status === 200) {
console.log(response.data);
setValue(response.data.predictions);
} else {
enqueueSnackbar('Something went wrong!', { variant: 'error' });
}
setLoading(false);
} catch (error) {
console.log(error);
setLoading(false);
enqueueSnackbar('Something went wrong!', { variant: 'error' });
}
} else {
enqueueSnackbar('Please record a video.', { variant: 'warning' });
}
};
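// Note (added sketch, not part of the original change): `recordedVideoUrl` is a blob: URL string
// produced by URL.createObjectURL, so appending it to FormData does not send the video bytes.
// One hedged way to obtain a real Blob before appending (file name chosen arbitrarily):
// const recordedBlob = await fetch(recordedVideoUrl).then((res) => res.blob());
// formData.append('video_request', recordedBlob, 'recorded-sign-video.webm');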
// Reads the Slider's current value (via getAriaValueText) and stores it as the selected sign speed.
function valuetext(value: number) {
setSpeed(value);
return `${value}`;
}
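// Note (added sketch, not part of the original change): updating state inside getAriaValueText
// works because MUI calls it with the current value, but the more conventional pattern is a
// controlled Slider with an onChange handler, e.g.:
// <Slider value={speed} min={10} max={110} step={10} marks valueLabelDisplay="auto"
//   onChange={(_event, newValue) => setSpeed(newValue as number)} />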
const handleVideoRecorded = (url) => {
setRecordedVideoUrl(url);
};
return (
<>
<Head>
......@@ -144,6 +212,7 @@ export default function AboutPage() {
file={file}
onDrop={handleDropSingleFile}
onDelete={() => setFile(null)}
multiple={true}
/>
{file && (
......@@ -163,6 +232,45 @@ export default function AboutPage() {
<Card sx={{ p: 5, minHeight: 300 }}>
<Box display="grid" gap={5}>
<Stack spacing={2}>
<Grid container spacing={2}>
<Grid item xs={12} md={6}>
<h3>Set Sign Speed </h3>
<Slider
defaultValue={30}
getAriaValueText={valuetext}
valueLabelDisplay="auto"
step={10}
marks
min={10}
max={110}
/>
<h4>Speed - {speed}</h4>
</Grid>
<Grid
item
xs={12}
md={6}
container
direction="row"
justifyContent="flex-end"
alignItems="center"
>
<Button
variant="contained"
style={{ width: '200px', height: '60px', fontSize: '20px' }}
sx={{
mb: 3,
}}
disabled={loading}
onClick={() => {
translateSignLanguageToText();
}}
>
Translate
</Button>
</Grid>
</Grid>
{loading ? (
<Card>
<CardContent>
......@@ -202,20 +310,6 @@ export default function AboutPage() {
</Box>
</Card>
</Grid>
<Grid item xs={12} md={12}>
<center>
<Button
variant="contained"
style={{ width: '200px', height: '60px', fontSize: '20px' }}
sx={{
mb: 3,
}}
disabled={loading}
>
Translate
</Button>
</center>
</Grid>
</Grid>
</Card>
</Box>
......@@ -224,19 +318,65 @@ export default function AboutPage() {
<Box sx={{ flexGrow: 1 }}>
<Card>
<CardHeader title="Upload a video containing Sign Language" />
<CardHeader title="Capture a video containing Sign Language" />
<Grid container spacing={2}>
<Grid item xs={12} md={6}>
<Card>
<CardContent>
<WebcamStreamCapture />
<WebcamStreamCapture onVideoRecorded={handleVideoRecorded} />
</CardContent>
{recordedVideoUrl && (
<div>
<h2>Recorded Video</h2>
<video src={recordedVideoUrl} controls autoPlay />
</div>
)}
</Card>
</Grid>
<Grid item xs={12} md={6}>
<Card sx={{ p: 5, minHeight: 300 }}>
<Box display="grid" gap={5}>
<Stack spacing={2}>
<Grid container spacing={2}>
<Grid item xs={12} md={6}>
<h3>Set Sign Speed </h3>
<Slider
defaultValue={30}
getAriaValueText={valuetext}
valueLabelDisplay="auto"
step={10}
marks
min={10}
max={110}
/>
<h4>Speed - {speed}</h4>
</Grid>
<Grid
item
xs={12}
md={6}
container
direction="row"
justifyContent="flex-end"
alignItems="center"
>
<Button
variant="contained"
style={{ width: '200px', height: '60px', fontSize: '20px' }}
sx={{
mb: 3,
}}
disabled={loading}
onClick={() => {
translateSignLanguageToTextRecord();
}}
>
Translate
</Button>
</Grid>
</Grid>
{loading ? (
<Card>
<CardContent>
......@@ -276,20 +416,6 @@ export default function AboutPage() {
</Box>
</Card>
</Grid>
<Grid item xs={12} md={12}>
<center>
<Button
variant="contained"
style={{ width: '200px', height: '60px', fontSize: '20px' }}
sx={{
mb: 3,
}}
disabled={loading}
>
Translate
</Button>
</center>
</Grid>
</Grid>
</Card>
</Box>
......
import React, { useEffect, useState } from 'react';
import Webcam from 'react-webcam';
import { Box, Button, Container, Grid, Stack } from '@mui/material';
import React, { useEffect, useRef, useState } from 'react';
import { Button, Grid } from '@mui/material';
import StopIcon from '@mui/icons-material/Stop';
import RadioButtonCheckedIcon from '@mui/icons-material/RadioButtonChecked';
import styles from './WebcamStreamCapture.module.css';
const WebcamStreamCapture = () => {
const webcamRef = React.useRef(null);
const mediaRecorderRef = React.useRef(null);
const [capturing, setCapturing] = React.useState(false);
const [recordedChunks, setRecordedChunks] = React.useState([]);
const [mediaBlobUrl, setMediaBlobUrl] = React.useState([]);
import Webcam from 'react-webcam';
const WebcamStreamCapture = ({ onVideoRecorded }) => {
const webcamRef = useRef(null);
const mediaRecorderRef = useRef(null);
const [capturing, setCapturing] = useState(false);
const [recordedChunks, setRecordedChunks] = useState([]);
const handleStartCaptureClick = React.useCallback(() => {
const handleStartCaptureClick = () => {
setRecordedChunks([]);
setMediaBlobUrl([]);
setCapturing(true);
mediaRecorderRef.current = new MediaRecorder(webcamRef.current.stream, {
......@@ -21,63 +19,31 @@ const WebcamStreamCapture = () => {
});
mediaRecorderRef.current.addEventListener('dataavailable', handleDataAvailable);
mediaRecorderRef.current.start();
}, [webcamRef, setCapturing, mediaRecorderRef]);
};
const handleDataAvailable = React.useCallback(
({ data }) => {
const handleDataAvailable = ({ data }) => {
if (data.size > 0) {
setRecordedChunks((prev) => prev.concat(data));
}
},
[setRecordedChunks]
);
};
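// Note (added, not part of the original change): 'dataavailable' fires when the recorder stops
// (or on each timeslice passed to start()), so the chunks collected here are only complete after
// handleStopCaptureClick has run.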
const handleStopCaptureClick = React.useCallback(async () => {
const handleStopCaptureClick = () => {
mediaRecorderRef.current.stop();
const blob = new Blob(recordedChunks, {
type: 'video/mp4',
});
const url = await URL.createObjectURL(blob);
console.log(url);
await setMediaBlobUrl(url);
setCapturing(false);
}, [mediaRecorderRef, webcamRef, setCapturing]);
};
const handleDownload = React.useCallback(() => {
const handleDownload = () => {
if (recordedChunks.length) {
const blob = new Blob(recordedChunks, {
type: 'video/mp4',
type: 'video/webm', // Use 'video/webm' to match MediaRecorder mimeType
});
const url = URL.createObjectURL(blob);
const a = document.createElement('a');
document.body.appendChild(a);
a.style = 'display: none';
a.href = url;
a.download = 'user-recording.webm';
a.click();
window.URL.revokeObjectURL(url);
onVideoRecorded(url); // Pass the blob URL to the parent component
setRecordedChunks([]);
}
}, [recordedChunks]);
// Styles for camera
const [width, setWidth] = useState(window.innerWidth);
const handleResize = () => {
setWidth(window.innerWidth);
};
useEffect(() => {
window.addEventListener('resize', handleResize);
return () => window.removeEventListener('resize', handleResize);
}, []);
return (
<>
<Grid container spacing={2}>
<Grid item xs={12}>
<center>
......@@ -106,13 +72,7 @@ const WebcamStreamCapture = () => {
</Button>
)}
{recordedChunks.length > 0 && (
<Button
onClick={handleDownload}
variant="contained"
sx={{
ml: 1,
}}
>
<Button onClick={handleDownload} variant="contained" sx={{ ml: 1 }}>
Download
</Button>
)}
......@@ -121,12 +81,19 @@ const WebcamStreamCapture = () => {
<Grid item xs={12}>
{recordedChunks.length > 0 && (
<center>
<video src={mediaBlobUrl} controls autoPlay />
<video
src={
recordedChunks.length > 0
? URL.createObjectURL(new Blob(recordedChunks, { type: 'video/webm' }))
: null
}
controls
autoPlay
/>
</center>
)}
</Grid>
</Grid>
</>
);
};
......
import axios from 'axios';
class SignLanguageToTextService {
predictSignLanguageVideo(speed, data) {
return axios.post(
`http://127.0.0.1:8000/predict-sign-language/video/speed_levels?speed=${speed}`,
data
);
}
}
export default new SignLanguageToTextService();
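// Sketch (not part of the original change): the base URL here duplicates the BACKEND_URL constant
// defined in the frontend config shown earlier; assuming that constant is exported from a config
// module (the import path below is hypothetical), the request could be built from it instead:
// import { BACKEND_URL } from '../config';
// return axios.post(`${BACKEND_URL}predict-sign-language/video/speed_levels?speed=${speed}`, data);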