Commit f8c53ceb authored by janithgamage1.ed

Merge branch 'master' into IT20005276

parents 6366744b 68684f39
models/*
!models/
\ No newline at end of file
!models/
*.h5
\ No newline at end of file
{
"cells": [
{
"cell_type": "code",
"execution_count": 4,
"id": "4e8edf74",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "e11e6436",
"metadata": {},
"outputs": [],
"source": [
"image = cv2.imread('D:/RP/project/2023-029/Project/Backend\\ML_Models/sign_language_to_text/scene00001.png')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"hand_shape = extract_hand_shape(image)\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "157e18d5",
"metadata": {},
"outputs": [],
"source": [
"# Convert the image to grayscale\n",
"gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n",
"\n",
"# Apply Gaussian blur to reduce noise\n",
"blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
"\n",
"# Apply adaptive thresholding to segment the hand from the background\n",
"_, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
"\n",
"# Find contours of the hand\n",
"contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
"\n",
"# Sort contours by area in descending order\n",
"contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
"\n",
"# Extract the largest contour (hand region)\n",
"hand_contour = contours[0]\n",
"\n",
"# Create a black image of the same size as the input image\n",
"hand_shape = np.zeros_like(image)\n",
"\n",
"# Draw the hand contour on the black image\n",
"cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "93d1ae9d",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import numpy as np\n",
"\n",
"\n",
"IMG_SIZE = 224\n",
"\n",
"frame = cv2.imread('D:/RP/project/2023-029/Project/Backend/ML_Models/sign_language_to_text/test_image.png')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n",
"frame = cv2.resize(frame, (IMG_SIZE, IMG_SIZE))\n",
"# frame = extract_hand_shape(frame)\n",
"# frame = np.array([frame], dtype=np.float32) / 255.0\n",
"\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', frame)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "e7e05125",
"metadata": {},
"outputs": [
{
"ename": "error",
"evalue": "OpenCV(4.7.0) D:/a/opencv-python/opencv-python/opencv/modules/highgui/src/precomp.hpp:155: error: (-215:Assertion failed) src_depth != CV_16F && src_depth != CV_32S in function 'convertToShow'\n",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31merror\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m~\\AppData\\Local\\Temp\\ipykernel_19904\\1480809324.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 31\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 32\u001b[0m \u001b[1;31m# Display the hand shape with white background\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 33\u001b[1;33m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mimshow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'Hand Shape with White Background'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhand_shape_with_white_bg\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 34\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwaitKey\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 35\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdestroyAllWindows\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31merror\u001b[0m: OpenCV(4.7.0) D:/a/opencv-python/opencv-python/opencv/modules/highgui/src/precomp.hpp:155: error: (-215:Assertion failed) src_depth != CV_16F && src_depth != CV_32S in function 'convertToShow'\n"
]
}
],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
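The last cell above records a `convertToShow` assertion for a variable named `hand_shape_with_white_bg`, but its source is empty in the notebook. That assertion typically fires when an integer-typed (CV_32S) or half-float array reaches `cv2.imshow`. The sketch below is a hypothetical reconstruction of what that cell appears to attempt, reusing `extract_hand_shape` from the earlier cell and keeping everything in `uint8` so the display call accepts it; the paths and variable names are assumptions, not the author's code.

```python
import cv2
import numpy as np

# Hypothetical reconstruction: place the extracted hand silhouette on a white background.
image = cv2.imread('scene00001.png')   # assumed sample frame; adjust the path as needed
mask = extract_hand_shape(image)       # filled hand silhouette from the earlier cell (uint8)

# Build a white canvas with the same shape/dtype, then keep the mask wherever it is non-zero.
white_bg = np.full_like(image, 255)
hand_shape_with_white_bg = np.where(mask > 0, mask, white_bg).astype(np.uint8)

# Keeping the array as uint8 avoids the CV_16F/CV_32S assertion raised by convertToShow.
cv2.imshow('Hand Shape with White Background', hand_shape_with_white_bg)
cv2.waitKey(0)
cv2.destroyAllWindows()
```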
{
"cells": [
{
"cell_type": "code",
"execution_count": 17,
"id": "4e8edf74",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": 55,
"id": "e11e6436",
"metadata": {},
"outputs": [],
"source": [
"# works well with white background buth have some issues eith other backgrounds \n",
"\n",
"# image = cv2.imread('C:/Users/HP Pavilion/Downloads/images (1).jpg')\n",
"image = cv2.imread('D:/RP/project/2023-029/Project/Backend\\ML_Models/sign_language_to_text/scene00001.png')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"hand_shape = extract_hand_shape(image)\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": 57,
"id": "157e18d5",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 19,
"id": "e7e05125",
"metadata": {},
"outputs": [],
"source": [
"# This works for any background, but does not hapture the fist area\n",
"import cv2\n",
"import numpy as np\n",
"# image = cv2.imread('D:/RP/project/2023-029/Project/Backend\\ML_Models/sign_language_to_text/scene00001.png')\n",
"image = cv2.imread('C:/Users/HP Pavilion/Downloads/images (1).jpg')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" thresholded = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"hand_shape = extract_hand_shape(image)\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "markdown",
"id": "ae0a7cb5",
"metadata": {},
"source": [
"### Capture Hand"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "1c04daef",
"metadata": {},
"outputs": [
{
"ename": "error",
"evalue": "OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\highgui\\src\\window.cpp:971: error: (-215:Assertion failed) size.width>0 && size.height>0 in function 'cv::imshow'\n",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31merror\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m~\\AppData\\Local\\Temp\\ipykernel_3024\\2889311055.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 29\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 30\u001b[0m \u001b[1;31m# Display the result\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 31\u001b[1;33m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mimshow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"Area Above Elbow\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0marea_above_elbow\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 32\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwaitKey\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 33\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdestroyAllWindows\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31merror\u001b[0m: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\highgui\\src\\window.cpp:971: error: (-215:Assertion failed) size.width>0 && size.height>0 in function 'cv::imshow'\n"
]
}
],
"source": [
"import cv2\n",
"import numpy as np\n",
"import mediapipe as mp\n",
"# Load the image\n",
"image = cv2.imread('C:/Users/HP Pavilion/Downloads/test_sign.jpeg')\n",
"\n",
"image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n",
"\n",
"# Initialize Mediapipe solutions\n",
"mp_holistic = mp.solutions.holistic\n",
"\n",
"# Initialize the holistic model\n",
"holistic = mp_holistic.Holistic()\n",
"\n",
"# Process the image with Mediapipe\n",
"results = holistic.process(image_rgb)\n",
"\n",
"# Extract the left elbow and hand landmarks\n",
"left_elbow_landmark = results.left_hand_landmarks.landmark[mp_holistic.HandLandmark.WRIST]\n",
"left_hand_landmark = results.left_hand_landmarks.landmark[mp_holistic.HandLandmark.PINKY_MCP]\n",
"\n",
"# Convert the landmark coordinates to image pixels\n",
"image_height, image_width, _ = image.shape\n",
"left_elbow_px = int(left_elbow_landmark.x * image_width), int(left_elbow_landmark.y * image_height)\n",
"left_hand_px = int(left_hand_landmark.x * image_width), int(left_hand_landmark.y * image_height)\n",
"\n",
"# Calculate the area above the elbow\n",
"area_above_elbow = image[0:left_elbow_px[1], left_elbow_px[0]:left_hand_px[0]]\n",
"\n",
"# Display the result\n",
"cv2.imshow(\"Area Above Elbow\", area_above_elbow)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3ead61b7",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"# This works for any background, but does not hapture the fist area\n",
"import cv2\n",
"import numpy as np\n",
"\n",
"IMG_SIZE = 224 # image size\n",
"\n",
"# image = cv2.imread('C:/Users/HP Pavilion/Downloads/images (1).jpg')\n",
"\n",
"\n",
"image = cv2.imread('D:/RP/project/2023-029/Project/Backend/ML_Models/sign_language_to_text/test_image.png')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"\n",
"frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n",
"frame = cv2.resize(frame, (IMG_SIZE, IMG_SIZE))\n",
"frame = extract_hand_shape(frame)\n",
"frame = np.array([frame], dtype=np.float32) / 255.0\n",
"\n",
"\n",
"\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
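The "Capture Hand" cell above fails inside `cv2.imshow` because the `area_above_elbow` slice came out with zero width or height, which happens when MediaPipe detects no left hand or when the two x coordinates are not ordered. A minimal defensive sketch, assuming the same landmark choices and image path as the original cell:

```python
import cv2
import mediapipe as mp

mp_holistic = mp.solutions.holistic

image = cv2.imread('C:/Users/HP Pavilion/Downloads/test_sign.jpeg')  # path from the original cell
if image is None:
    raise FileNotFoundError('Could not read the test image')
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

with mp_holistic.Holistic(static_image_mode=True) as holistic:
    results = holistic.process(image_rgb)

if results.left_hand_landmarks is None:
    print('No left hand detected; nothing to crop.')
else:
    h, w, _ = image.shape
    wrist = results.left_hand_landmarks.landmark[mp_holistic.HandLandmark.WRIST]
    pinky = results.left_hand_landmarks.landmark[mp_holistic.HandLandmark.PINKY_MCP]
    # Order the x coordinates so the slice cannot be empty by construction.
    x1, x2 = sorted((int(wrist.x * w), int(pinky.x * w)))
    y2 = int(wrist.y * h)
    area_above_elbow = image[0:y2, x1:x2]
    if area_above_elbow.size == 0:
        print('Crop region is empty; skipping display.')
    else:
        cv2.imshow('Area Above Elbow', area_above_elbow)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
```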
{
"cells": [
{
"cell_type": "code",
"execution_count": 4,
"id": "4e8edf74",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "e11e6436",
"metadata": {},
"outputs": [],
"source": [
"image = cv2.imread('D:/RP/project/2023-029/Project/Backend\\ML_Models/sign_language_to_text/scene00001.png')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"hand_shape = extract_hand_shape(image)\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "157e18d5",
"metadata": {},
"outputs": [],
"source": [
"# Convert the image to grayscale\n",
"gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n",
"\n",
"# Apply Gaussian blur to reduce noise\n",
"blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
"\n",
"# Apply adaptive thresholding to segment the hand from the background\n",
"_, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
"\n",
"# Find contours of the hand\n",
"contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
"\n",
"# Sort contours by area in descending order\n",
"contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
"\n",
"# Extract the largest contour (hand region)\n",
"hand_contour = contours[0]\n",
"\n",
"# Create a black image of the same size as the input image\n",
"hand_shape = np.zeros_like(image)\n",
"\n",
"# Draw the hand contour on the black image\n",
"cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "93d1ae9d",
"metadata": {},
"outputs": [
{
"ename": "error",
"evalue": "OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\highgui\\src\\window_w32.cpp:124: error: (-215:Assertion failed) bmi && width >= 0 && height >= 0 && (bpp == 8 || bpp == 24 || bpp == 32) in function 'FillBitmapInfo'\n",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31merror\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m~\\AppData\\Local\\Temp\\ipykernel_16572\\1170496581.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 25\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 26\u001b[0m \u001b[1;31m# Display the hand shape\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 27\u001b[1;33m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mimshow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'Hand Shape'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mframe\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 28\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwaitKey\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 29\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdestroyAllWindows\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31merror\u001b[0m: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\highgui\\src\\window_w32.cpp:124: error: (-215:Assertion failed) bmi && width >= 0 && height >= 0 && (bpp == 8 || bpp == 24 || bpp == 32) in function 'FillBitmapInfo'\n"
]
}
],
"source": [
"import cv2\n",
"import numpy as np\n",
"\n",
"\n",
"IMG_SIZE = 224\n",
"\n",
"frame = cv2.imread('D:/RP/project/2023-029/Project/Backend/ML_Models/sign_language_to_text/test_image.png')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n",
"frame = cv2.resize(frame, (IMG_SIZE, IMG_SIZE))\n",
"frame = extract_hand_shape(frame)\n",
"frame = np.array([frame], dtype=np.float32) / 255.0\n",
"\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', frame)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
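The recorded `FillBitmapInfo` error in the last cell comes from passing the batched array (shape `(1, 224, 224, 3)`, float32) to `cv2.imshow`, which only accepts a single 2-D or 3-D image. One way to keep the preprocessing unchanged but still preview the result is to display the first element of the batch; `imshow` maps 32-bit float values in `[0, 1]` to the 0–255 display range. A minimal sketch under that assumption (file path assumed, `extract_hand_shape` taken from the cells above):

```python
import cv2
import numpy as np

IMG_SIZE = 224
frame = cv2.imread('test_image.png')                  # assumed sample frame; adjust the path
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = cv2.resize(frame, (IMG_SIZE, IMG_SIZE))
frame = extract_hand_shape(frame)                     # helper defined in the cells above
frame = np.array([frame], dtype=np.float32) / 255.0   # shape (1, 224, 224, 3) - model input

# Display the single image inside the batch, not the 4-D array itself.
cv2.imshow('Hand Shape', frame[0])
cv2.waitKey(0)
cv2.destroyAllWindows()
```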
{
"cells": [
{
"cell_type": "code",
"execution_count": 17,
"id": "4e8edf74",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": 55,
"id": "e11e6436",
"metadata": {},
"outputs": [],
"source": [
"# works well with white background buth have some issues eith other backgrounds \n",
"\n",
"# image = cv2.imread('C:/Users/HP Pavilion/Downloads/images (1).jpg')\n",
"image = cv2.imread('D:/RP/project/2023-029/Project/Backend\\ML_Models/sign_language_to_text/scene00001.png')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"hand_shape = extract_hand_shape(image)\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": 57,
"id": "157e18d5",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 19,
"id": "e7e05125",
"metadata": {},
"outputs": [],
"source": [
"# This works for any background, but does not hapture the fist area\n",
"import cv2\n",
"import numpy as np\n",
"# image = cv2.imread('D:/RP/project/2023-029/Project/Backend\\ML_Models/sign_language_to_text/scene00001.png')\n",
"image = cv2.imread('C:/Users/HP Pavilion/Downloads/images (1).jpg')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" thresholded = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"hand_shape = extract_hand_shape(image)\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "markdown",
"id": "ae0a7cb5",
"metadata": {},
"source": [
"### Capture Hand"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "1c04daef",
"metadata": {},
"outputs": [
{
"ename": "error",
"evalue": "OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\highgui\\src\\window.cpp:971: error: (-215:Assertion failed) size.width>0 && size.height>0 in function 'cv::imshow'\n",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31merror\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m~\\AppData\\Local\\Temp\\ipykernel_3024\\2889311055.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 29\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 30\u001b[0m \u001b[1;31m# Display the result\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 31\u001b[1;33m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mimshow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"Area Above Elbow\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0marea_above_elbow\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 32\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwaitKey\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 33\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdestroyAllWindows\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31merror\u001b[0m: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\highgui\\src\\window.cpp:971: error: (-215:Assertion failed) size.width>0 && size.height>0 in function 'cv::imshow'\n"
]
}
],
"source": [
"import cv2\n",
"import numpy as np\n",
"import mediapipe as mp\n",
"# Load the image\n",
"image = cv2.imread('C:/Users/HP Pavilion/Downloads/test_sign.jpeg')\n",
"\n",
"image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n",
"\n",
"# Initialize Mediapipe solutions\n",
"mp_holistic = mp.solutions.holistic\n",
"\n",
"# Initialize the holistic model\n",
"holistic = mp_holistic.Holistic()\n",
"\n",
"# Process the image with Mediapipe\n",
"results = holistic.process(image_rgb)\n",
"\n",
"# Extract the left elbow and hand landmarks\n",
"left_elbow_landmark = results.left_hand_landmarks.landmark[mp_holistic.HandLandmark.WRIST]\n",
"left_hand_landmark = results.left_hand_landmarks.landmark[mp_holistic.HandLandmark.PINKY_MCP]\n",
"\n",
"# Convert the landmark coordinates to image pixels\n",
"image_height, image_width, _ = image.shape\n",
"left_elbow_px = int(left_elbow_landmark.x * image_width), int(left_elbow_landmark.y * image_height)\n",
"left_hand_px = int(left_hand_landmark.x * image_width), int(left_hand_landmark.y * image_height)\n",
"\n",
"# Calculate the area above the elbow\n",
"area_above_elbow = image[0:left_elbow_px[1], left_elbow_px[0]:left_hand_px[0]]\n",
"\n",
"# Display the result\n",
"cv2.imshow(\"Area Above Elbow\", area_above_elbow)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3ead61b7",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"# This works for any background, but does not hapture the fist area\n",
"import cv2\n",
"import numpy as np\n",
"\n",
"IMG_SIZE = 224 # image size\n",
"\n",
"# image = cv2.imread('C:/Users/HP Pavilion/Downloads/images (1).jpg')\n",
"\n",
"\n",
"image = cv2.imread('D:/RP/project/2023-029/Project/Backend/ML_Models/sign_language_to_text/test_image.png')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"\n",
"frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n",
"frame = cv2.resize(frame, (IMG_SIZE, IMG_SIZE))\n",
"frame = extract_hand_shape(frame)\n",
"frame = np.array([frame], dtype=np.float32) / 255.0\n",
"\n",
"\n",
"\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
This source diff could not be displayed because it is too large. You can view the blob instead.
......@@ -16,6 +16,9 @@
files/*
!files/
*.pyc
*~
*.swp
# Created by https://www.toptal.com/developers/gitignore/api/python
# Edit at https://www.toptal.com/developers/gitignore?templates=python
......
......@@ -4,3 +4,36 @@
2023-05-19 00:32:48,522 - ERROR - Received request at root endpoint.
2023-05-19 23:09:38,565 - INFO - Failed to make predictions. name 'CLASSES' is not defined
2023-05-19 23:09:38,565 - INFO - Failed to make predictions. name 'CLASSES' is not defined
2023-05-24 20:05:37,932 - INFO - Failed to make predictions. OpenCV(4.7.0) D:\a\opencv-python\opencv-python\opencv\modules\imgproc\src\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'
2023-05-24 20:05:37,932 - INFO - Failed to make predictions. OpenCV(4.7.0) D:\a\opencv-python\opencv-python\opencv\modules\imgproc\src\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'
2023-05-24 20:05:37,932 - INFO - Failed to make predictions. OpenCV(4.7.0) D:\a\opencv-python\opencv-python\opencv\modules\imgproc\src\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'
2023-05-24 20:05:37,936 - INFO - Error.
2023-05-24 20:05:37,936 - INFO - Error.
2023-05-24 20:05:37,936 - INFO - Error.
2023-07-12 05:50:25,202 - INFO - Error. 'SignLanguagePredictionService' object has no attribute 'predict_sign_language_video2'
2023-07-12 05:50:25,202 - INFO - Error. 'SignLanguagePredictionService' object has no attribute 'predict_sign_language_video2'
2023-07-12 05:50:25,202 - INFO - Error. 'SignLanguagePredictionService' object has no attribute 'predict_sign_language_video2'
2023-07-12 06:33:48,435 - INFO - Error. SignLanguagePredictionService.predict_sign_language_video_new2() missing 1 required positional argument: 'speed'
2023-07-12 06:33:48,435 - INFO - Error. SignLanguagePredictionService.predict_sign_language_video_new2() missing 1 required positional argument: 'speed'
2023-07-12 06:33:48,435 - INFO - Error. SignLanguagePredictionService.predict_sign_language_video_new2() missing 1 required positional argument: 'speed'
2023-07-12 06:34:27,777 - INFO - Error. SignLanguagePredictionService.predict_sign_language_video_new2() missing 1 required positional argument: 'speed'
2023-07-12 06:34:27,777 - INFO - Error. SignLanguagePredictionService.predict_sign_language_video_new2() missing 1 required positional argument: 'speed'
2023-07-12 06:34:27,777 - INFO - Error. SignLanguagePredictionService.predict_sign_language_video_new2() missing 1 required positional argument: 'speed'
2023-07-12 06:34:33,502 - INFO - Error. SignLanguagePredictionService.predict_sign_language_video_new2() missing 1 required positional argument: 'speed'
2023-07-12 06:34:33,502 - INFO - Error. SignLanguagePredictionService.predict_sign_language_video_new2() missing 1 required positional argument: 'speed'
2023-07-12 06:34:33,502 - INFO - Error. SignLanguagePredictionService.predict_sign_language_video_new2() missing 1 required positional argument: 'speed'
2023-07-12 06:39:07,178 - INFO - Failed to make predictions. local variable 'threshold_percentage' referenced before assignment
2023-07-12 06:39:07,178 - INFO - Failed to make predictions. local variable 'threshold_percentage' referenced before assignment
2023-07-12 06:39:07,178 - INFO - Failed to make predictions. local variable 'threshold_percentage' referenced before assignment
2023-07-12 06:39:07,180 - INFO - Error.
2023-07-12 06:39:07,180 - INFO - Error.
2023-07-12 06:39:07,180 - INFO - Error.
2023-08-03 06:00:41,581 - INFO - Failed to make predictions. [WinError 32] The process cannot access the file because it is being used by another process: 'files/test_video.mp4'
2023-08-03 06:00:41,581 - INFO - Failed to make predictions. [WinError 32] The process cannot access the file because it is being used by another process: 'files/test_video.mp4'
2023-08-03 06:00:41,581 - INFO - Failed to make predictions. [WinError 32] The process cannot access the file because it is being used by another process: 'files/test_video.mp4'
2023-08-03 06:00:41,626 - INFO - Error.
2023-08-03 06:00:41,626 - INFO - Error.
2023-08-03 06:00:41,626 - INFO - Error.
import base64
import os
import cv2
from fastapi import APIRouter, File, HTTPException,UploadFile
import numpy as np
from fastapi import APIRouter, File, HTTPException, Query,UploadFile
from pydantic import BaseModel
import tensorflow as tf
from core.logger import setup_logger
from core import setup_logger
from services.translate_service import SignLanguagePredictionService
from utils import mappings
......@@ -23,12 +20,13 @@ model= None
CLASSES = mappings.classes
NUM_CLASSES = len(mappings.classes) # number of classes
IMG_SIZE = 224 # image size
speed_levels = mappings.speed_levels
# Instantiate the service class
prediction_service = SignLanguagePredictionService(model, CLASSES, mappings)
prediction_service = SignLanguagePredictionService(model, CLASSES, mappings,speed_levels)
@router.post("/upload/video")
@router.post("/upload/video", tags=["Sign Language"])
async def upload_video(video: UploadFile = File(...)):
try:
......@@ -44,7 +42,7 @@ async def upload_video(video: UploadFile = File(...)):
detail="Failed to upload the video"
)
@router.post('/predict-sign-language/image')
@router.post('/predict-sign-language/image', tags=["Sign Language"])
def predict_using_image(image_request: UploadFile = File(...)):
try:
return prediction_service.predict_sign_language(image_request)
......@@ -54,10 +52,21 @@ def predict_using_image(image_request: UploadFile = File(...)):
status_code=500,
detail="Request Failed."
)
@router.post('/predict-sign-language/video')
@router.post('/predict-sign-language/video', tags=["Sign Language"])
def predict_using_video(video_request: UploadFile = File(...)):
try:
return prediction_service.predict_sign_language_video(video_request)
return prediction_service.predict_sign_language_video_new(video_request)
except Exception as e:
logger.info(f"Error. {e}")
raise HTTPException(
status_code=500,
detail="Request Failed."
)
@router.post('/predict-sign-language/video/speed_levels', tags=["Sign Language"])
def predict_using_video(video_request: UploadFile = File(...), speed: int = Query(...)):
try:
return prediction_service.predict_sign_language_video_with_speed_levels(video_request, speed=speed)
except Exception as e:
logger.info(f"Error. {e}")
raise HTTPException(
......
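For reference, the new `/predict-sign-language/video/speed_levels` route takes the video as a multipart field named `video_request` and the speed level as a query parameter. A minimal client sketch using `requests`; the base URL and file path are assumptions matching the frontend config and the log file elsewhere in this diff:

```python
import requests

BASE_URL = "http://127.0.0.1:8000"     # assumed local dev server, as in the frontend service
VIDEO_PATH = "files/test_video.mp4"    # assumed sample clip

with open(VIDEO_PATH, "rb") as f:
    response = requests.post(
        f"{BASE_URL}/predict-sign-language/video/speed_levels",
        params={"speed": 3},           # 3 -> 30 frames per sign (see mappings.speed_levels)
        files={"video_request": ("test_video.mp4", f, "video/mp4")},
    )

response.raise_for_status()
result = response.json()
print(result["frame_count"], result["predictions"])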
......@@ -7,12 +7,12 @@ def test():
# Your code here
return {"pong"}
@router.get("/users")
@router.get("/test")
def get_users():
# Your code here
return {"message": "Get users endpoint"}
@router.post("/users")
@router.post("/test-api")
def create_user():
# Your code here
return {"message": "Create user endpoint"}
import logging
def setup_logger():
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
# Create a file handler for logging to a file
file_handler = logging.FileHandler('app.log')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
# Create a stream handler for logging to console
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(formatter)
# Add the handlers to the logger
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
return logger
import logging
def setup_logger():
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
# Create a file handler for logging to a file
file_handler = logging.FileHandler('app.log')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
# Create a stream handler for logging to console
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(formatter)
# Add the handlers to the logger
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
return logger
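The log excerpt earlier in this diff shows every message three times, which is the usual symptom of `setup_logger()` attaching a fresh pair of handlers to the same logger on each call (the controller and the service both call it). A hedged sketch of a guard, keeping the existing handlers and format, would look like this:

```python
import logging


def setup_logger():
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)

    # Only attach handlers once; repeated calls would otherwise duplicate every record.
    if not logger.handlers:
        formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')

        file_handler = logging.FileHandler('app.log')
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)

        stream_handler = logging.StreamHandler()
        stream_handler.setLevel(logging.INFO)
        stream_handler.setFormatter(formatter)

        logger.addHandler(file_handler)
        logger.addHandler(stream_handler)

    return logger
```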
......@@ -2,7 +2,9 @@ from fastapi import FastAPI
from controllers import translate_controler, users_controller
from fastapi.responses import RedirectResponse
from fastapi.middleware.cors import CORSMiddleware
from core import setup_logger
from core.logger import setup_logger
app = FastAPI()
......@@ -17,6 +19,8 @@ app.include_router(translate_controler.router)
origins = [
"http://localhost",
"http://localhost:8080",
"http://localhost:8004",
"http://localhost:3000",
]
app.add_middleware(CORSMiddleware,
......
......@@ -3,22 +3,21 @@ import cv2
import numpy as np
from fastapi import HTTPException, UploadFile
from typing import Dict
from typing import Counter, Dict
import tensorflow as tf
from core.logger import setup_logger
from core import setup_logger
from utils import mappings
logger = setup_logger()
IMG_SIZE = 224 # image size
class SignLanguagePredictionService:
def __init__(self, model, classes, mappings):
def __init__(self, model, classes, mappings,speed_levels):
self.model = model
self.classes = classes
self.mappings = mappings
self.speed_levels = speed_levels
def predict_sign_language(self, image_request: UploadFile) -> Dict[str, str]:
try:
......@@ -62,7 +61,7 @@ class SignLanguagePredictionService:
frame_count = 0
# Loop through the frames of the video
while frame_count < 20:
while frame_count < 50:
success, frame = video.read()
if not success:
break
......@@ -95,3 +94,150 @@ class SignLanguagePredictionService:
status_code=500,
detail="Failed to make predictions"
)
def predict_sign_language_video_new(self, video_request: UploadFile) -> Dict[str, str]:
try:
# Create a temporary file to save the video
video_location = f"files/{video_request.filename}"
with open(video_location, "wb") as file:
file.write(video_request.file.read())
# Read the video using OpenCV
video = cv2.VideoCapture(video_location)
predictions = []
frame_count = 0
# Loop through the frames of the video
while frame_count < 50:
success, frame = video.read()
if not success:
break
# Preprocess the frame
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = cv2.resize(frame, (IMG_SIZE, IMG_SIZE))
frame = extract_hand_shape(frame)
frame = np.array([frame], dtype=np.float32) / 255.0
# Make prediction
prediction = self.model.predict(frame)
class_index = np.argmax(prediction)
class_name = self.classes[class_index]
sinhala_letter = self.mappings.letter_mapping.get(class_name, 'Unknown')
# Store the prediction for the frame
predictions.append(sinhala_letter)
frame_count += 1
video.release()
# Delete the video file
os.remove(video_location)
threshold_percentage = 60
predictions = get_predicted_percentage(predictions, threshold_percentage)
return {'frame_count': frame_count, 'predictions': predictions }
except Exception as e:
logger.info(f"Failed to make predictions. {e}")
raise HTTPException(
status_code=500,
detail="Failed to make predictions"
)
def predict_sign_language_video_with_speed_levels(self, video_request: UploadFile, speed: int) -> Dict[str, str]:
try:
# Create a temporary file to save the video
video_location = f"files/{video_request.filename}"
with open(video_location, "wb") as file:
file.write(video_request.file.read())
# Read the video using OpenCV
video = cv2.VideoCapture(video_location)
predictions = []
final_predictions = []
frame_count = 0
# Determine the number of frames per sign based on the speed level
frames_per_sign = self.speed_levels.get(speed, 50) # Default to level 1 if speed level is not provided
# Loop through the frames of the video
while frame_count <= video.get(cv2.CAP_PROP_FRAME_COUNT):
success, frame = video.read()
if not success:
break
# TODO Add to Config
if frame_count >= 500:
break
# Preprocess the frame
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = cv2.resize(frame, (IMG_SIZE, IMG_SIZE))
frame = extract_hand_shape(frame)
frame = np.array([frame], dtype=np.float32) / 255.0
# Make prediction
prediction = self.model.predict(frame)
class_index = np.argmax(prediction)
class_name = self.classes[class_index]
sinhala_letter = self.mappings.letter_mapping.get(class_name, 'Unknown')
# Store the prediction for the frame
predictions.append(sinhala_letter)
frame_count += 1
# TODO Add to Config
threshold_percentage = 60
# Check if the required number of frames per sign has been reached
if frame_count % frames_per_sign == 0:
predictions = get_predicted_percentage(predictions, threshold_percentage)
final_predictions = final_predictions+predictions
predictions = []
video.release()
# Delete the video file
os.remove(video_location)
return {'frame_count': frame_count, 'predictions': final_predictions}
except Exception as e:
logger.info(f"Failed to make predictions. {e}")
raise HTTPException(
status_code=500,
detail="Failed to make predictions"
)
def extract_hand_shape(image):
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
_, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = sorted(contours, key=cv2.contourArea, reverse=True)
hand_contour = contours[0]
hand_shape = np.zeros_like(image)
cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)
return hand_shape
def get_predicted_percentage(array, threshold):
counts = Counter(array)
total_elements = len(array)
percentages = {}
for element, count in counts.items():
percentage = (count / total_elements) * 100
percentages[element] = percentage
elements_above_threshold = [element for element, percentage in percentages.items() if percentage > threshold]
return elements_above_threshold
\ No newline at end of file
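To make the thresholding behaviour concrete: `get_predicted_percentage` keeps only the letters that account for more than `threshold` percent of the frames in a chunk. A small worked example below is a standalone copy of the same logic with made-up predictions, not a call into the service itself:

```python
from collections import Counter


def get_predicted_percentage(array, threshold):
    # Same logic as the helper in translate_service.py.
    counts = Counter(array)
    total = len(array)
    percentages = {letter: (count / total) * 100 for letter, count in counts.items()}
    return [letter for letter, pct in percentages.items() if pct > threshold]


# 50 frames: 35 predicted as 'A' (70%), 10 as 'Ah' (20%), 5 as 'Unknown' (10%).
frame_predictions = ['A'] * 35 + ['Ah'] * 10 + ['Unknown'] * 5

print(get_predicted_percentage(frame_predictions, 60))   # ['A'] - only letters above 60% survive
```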
......@@ -27,4 +27,13 @@ classes =['A',
'Ohh',
'T',
'Uh',
'Uhh']
\ No newline at end of file
'Uhh']
speed_levels = {
1: 50, # 50 frames per sign for level 1
2: 40, # 40 frames per sign for level 2
3: 30, # 30 frames per sign for level 3
4: 20, # 20 frames per sign for level 4
5: 10 # 10 frames per sign for level 5
}
\ No newline at end of file
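The `speed_levels` table drives the chunking in `predict_sign_language_video_with_speed_levels`: the chosen level sets how many consecutive frames are grouped into one sign before the threshold filter runs. A quick sketch of the lookup and the resulting chunk count; the 500-frame cap mirrors the TODO guard in the service:

```python
speed_levels = {1: 50, 2: 40, 3: 30, 4: 20, 5: 10}

speed = 3
frames_per_sign = speed_levels.get(speed, 50)   # defaults to level 1 (50 frames) if the level is unknown
max_frames = 500                                # hard cap currently used in the service

print(frames_per_sign)                  # 30
print(max_frames // frames_per_sign)    # up to 16 sign chunks for a capped video
```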
......@@ -7,12 +7,12 @@ Welcome to the FitsPro ERP Platform repository! This project aims to revolutioni
#### update packages
```
source-folder > yarn
source-folder > yarn
```
#### Start the project
#### Start the project
```
```
source-folder > yarn start
```
......
This diff is collapsed.
......@@ -22,7 +22,9 @@
"@fullcalendar/timegrid": "^6.1.5",
"@fullcalendar/timeline": "^6.1.5",
"@hello-pangea/dnd": "^16.2.0",
"@material-ui/core": "^4.12.4",
"@mui/base": "^5.0.0-alpha.126",
"@mui/icons-material": "^5.14.6",
"@mui/lab": "^5.0.0-alpha.127",
"@mui/material": "^5.12.1",
"@mui/system": "^5.12.1",
......@@ -54,6 +56,7 @@
"jwt-decode": "^3.1.2",
"lodash": "^4.17.21",
"match-sorter": "^6.3.1",
"mui-file-input": "^3.0.0",
"notistack": "^3.0.1",
"process": "^0.11.10",
"react": "^18.2.0",
......@@ -76,6 +79,7 @@
"react-infinite-scroll-component": "^6.1.0",
"react-intersection-observer": "^9.4.3",
"react-intl": "^6.4.0",
"react-material-file-upload": "^0.0.4",
"react-number-format": "^5.1.4",
"react-organizational-chart": "^2.2.1",
"react-quill": "^2.0.0",
......@@ -92,6 +96,7 @@
"react-table-sticky": "^1.1.3",
"react-timer-hook": "^3.0.5",
"react-to-print": "^2.14.12",
"react-webcam": "^7.1.1",
"react-window": "^1.8.9",
"react-zoom-pan-pinch": "^3.0.7",
"react18-input-otp": "^1.1.3",
......@@ -177,4 +182,4 @@
"react-error-overlay": "6.0.11",
"typescript": "^5.0.4"
}
}
\ No newline at end of file
}
.videoContainer {
position: relative;
width: 50%;
padding-top: 56.25%; /* 16:9 aspect ratio */
}
.futuristicVideo {
position: absolute;
top: 0;
left: 0;
width: 100%;
height: 100%;
border: none;
outline: none;
background-color: black;
opacity: 0.8;
filter: blur(4px);
transition: opacity 0.5s, filter 0.5s;
}
.futuristicVideo:hover {
opacity: 1;
filter: blur(0);
}
import { useRef, useState } from 'react';
import { Button, Grid } from '@mui/material';
import Webcam from 'react-webcam';
import { PauseCircleOutlined, PlayCircleOutlined } from '@ant-design/icons';
//@ts-ignore
const WebcamStreamCapture = ({ onVideoRecorded }) => {
const webcamRef = useRef(null);
const mediaRecorderRef = useRef(null);
const [capturing, setCapturing] = useState(false);
const [recordedChunks, setRecordedChunks] = useState([]);
const handleStartCaptureClick = () => {
setRecordedChunks([]);
setCapturing(true);
//@ts-ignore
mediaRecorderRef.current = new MediaRecorder(webcamRef.current.stream, {
mimeType: 'video/webm'
});
//@ts-ignore
mediaRecorderRef.current.addEventListener('dataavailable', handleDataAvailable);
//@ts-ignore
mediaRecorderRef.current.start();
};
//@ts-ignore
const handleDataAvailable = ({ data }) => {
if (data.size > 0) {
setRecordedChunks((prev) => prev.concat(data));
}
};
const handleStopCaptureClick = () => {
//@ts-ignore
mediaRecorderRef.current.stop();
setCapturing(false);
};
const handleDownload = () => {
if (recordedChunks.length) {
const blob = new Blob(recordedChunks, {
type: 'video/webm' // Use 'video/webm' to match MediaRecorder mimeType
});
const url = URL.createObjectURL(blob);
onVideoRecorded(url); // Pass the blob URL to the parent component
setRecordedChunks([]);
}
};
return (
<Grid container spacing={2}>
<Grid item xs={12}>
<center>
<Webcam audio={false} ref={webcamRef} style={{ width: '100%', maxWidth: '500px' }} />
</center>
</Grid>
<Grid item xs={12}>
<center>
{capturing ? (
<Button onClick={handleStopCaptureClick} startIcon={<PauseCircleOutlined />} color="error" variant="contained">
Stop Capture
</Button>
) : (
<Button onClick={handleStartCaptureClick} startIcon={<PlayCircleOutlined />} color="error" variant="contained">
Start Capture
</Button>
)}
{recordedChunks.length > 0 && (
<Button onClick={handleDownload} variant="contained" sx={{ ml: 1 }}>
Download
</Button>
)}
</center>
</Grid>
<Grid item xs={12}>
{recordedChunks.length > 0 && (
<center>
{/* @ts-ignore */}
<video
src={recordedChunks.length > 0 ? URL.createObjectURL(new Blob(recordedChunks, { type: 'video/webm' })) : undefined}
controls
autoPlay
/>
</center>
)}
</Grid>
</Grid>
);
};
export default WebcamStreamCapture;
import axios from 'axios';
class SignLanguageToTextService {
predictSignLanguageVideo(speed, data) {
return axios.post(
`http://127.0.0.1:8000/predict-sign-language/video/speed_levels?speed=${speed}`,
data
);
}
}
export default new SignLanguageToTextService();
This diff is collapsed.
......@@ -42,11 +42,13 @@
"amazon-cognito-identity-js": "^5.2.11",
"apexcharts": "^3.36.0",
"autosuggest-highlight": "^3.3.4",
"axios": "^1.1.2",
"axios": "^1.4.0",
"change-case": "^4.1.2",
"date-fns": "^2.29.3",
"firebase": "^9.12.1",
"form-data": "^4.0.0",
"framer-motion": "^7.5.3",
"fs": "^0.0.1-security",
"highlight.js": "^11.6.0",
"i18next": "^21.10.0",
"i18next-browser-languagedetector": "^6.1.8",
......
......@@ -3,7 +3,7 @@ import { PATH_DASHBOARD } from './routes/paths';
// API
// ----------------------------------------------------------------------
export const BACKEND_URL = 'http://127.0.0.1:8000/';
export const HOST_API_KEY = process.env.HOST_API_KEY || '';
export const FIREBASE_API = {
......
......@@ -21,6 +21,7 @@ import {
Paper,
CircularProgress,
LinearProgress,
Slider,
} from '@mui/material';
// layouts
import MainLayout from '../layouts/main';
......@@ -41,6 +42,9 @@ import { useSnackbar } from 'notistack';
import useCopyToClipboard from 'src/hooks/useCopyToClipboard';
import Iconify from 'src/components/iconify/Iconify';
import dynamic from 'next/dynamic';
import SignLanguageToTextService from 'src/services/SignLanguageToText.js';
import { Block } from 'src/sections/_examples/Block';
const useReactMediaRecorder = () =>
// eslint-disable-next-line react-hooks/rules-of-hooks
......@@ -57,7 +61,9 @@ export default function AboutPage() {
const [isUploadFile, setIsUploadFile] = useState<boolean | string | null>(true);
const [videoUrl, setVideoUrl] = useState('');
const [loading, setLoading] = useState(false);
const [value, setValue] = useState('ආආආආආආආආආආආආආආආආ');
const [value, setValue] = useState('');
const [speed, setSpeed] = useState(0);
const [recordedVideoUrl, setRecordedVideoUrl] = useState(null);
const handleDropSingleFile = useCallback(async (acceptedFiles: File[]) => {
const file = acceptedFiles[0];
......@@ -105,6 +111,68 @@ export default function AboutPage() {
// Video Upload
const translateSignLanguageToText = async () => {
if (file) {
setLoading(true);
const formData = new FormData();
formData.append('video_request', file, file.name);
try {
const response = await SignLanguageToTextService.predictSignLanguageVideo(speed, formData);
if (response.status == 200) {
console.log(response.data);
setValue(response.data.predictions);
} else {
enqueueSnackbar('Something went Wrong!', { variant: 'error' });
}
setLoading(false);
} catch (error) {
console.log(error);
setLoading(false);
enqueueSnackbar('Something went Wrong!', { variant: 'error' });
}
} else {
enqueueSnackbar('Please select a file.', { variant: 'warning' });
}
};
const translateSignLanguageToTextRecord = async () => {
console.log('TEST TES ');
console.log(recordedVideoUrl);
if (recordedVideoUrl) {
setLoading(true);
const formData = new FormData();
formData.append('video_request', recordedVideoUrl, recordedVideoUrl.name);
try {
const response = await SignLanguageToTextService.predictSignLanguageVideo(speed, formData);
if (response.status == 200) {
console.log(response.data);
setValue(response.data.predictions);
} else {
enqueueSnackbar('Something went Wrong!', { variant: 'error' });
}
setLoading(false);
} catch (error) {
console.log(error);
setLoading(false);
enqueueSnackbar('Something went Wrong!', { variant: 'error' });
}
} else {
enqueueSnackbar('Please record a video.', { variant: 'warning' });
}
};
function valuetext(value: number) {
setSpeed(value);
return `${value}`;
}
const handleVideoRecorded = (url) => {
setRecordedVideoUrl(url);
};
return (
<>
<Head>
......@@ -144,6 +212,7 @@ export default function AboutPage() {
file={file}
onDrop={handleDropSingleFile}
onDelete={() => setFile(null)}
multiple={true}
/>
{file && (
......@@ -163,6 +232,45 @@ export default function AboutPage() {
<Card sx={{ p: 5, minHeight: 300 }}>
<Box display="grid" gap={5}>
<Stack spacing={2}>
<Grid container spacing={2}>
<Grid item xs={12} md={6}>
<h3>Set Sign Speed </h3>
<Slider
defaultValue={30}
getAriaValueText={valuetext}
valueLabelDisplay="auto"
step={10}
marks
min={10}
max={110}
/>
<h4>Speed - {speed}</h4>
</Grid>
<Grid
item
xs={12}
md={6}
container
direction="row"
justifyContent="flex-end"
alignItems="center"
>
<Button
variant="contained"
style={{ width: '200px', height: '60px', fontSize: '20px' }}
sx={{
mb: 3,
}}
disabled={loading}
onClick={() => {
translateSignLanguageToText();
}}
>
Translate
</Button>
</Grid>
</Grid>
{loading ? (
<Card>
<CardContent>
......@@ -202,20 +310,6 @@ export default function AboutPage() {
</Box>
</Card>
</Grid>
<Grid item xs={12} md={12}>
<center>
<Button
variant="contained"
style={{ width: '200px', height: '60px', fontSize: '20px' }}
sx={{
mb: 3,
}}
disabled={loading}
>
Translate
</Button>
</center>
</Grid>
</Grid>
</Card>
</Box>
......@@ -224,19 +318,65 @@ export default function AboutPage() {
<Box sx={{ flexGrow: 1 }}>
<Card>
<CardHeader title="Upload a video containing Sign Language" />
<CardHeader title="Capture a video containing Sign Language" />
<Grid container spacing={2}>
<Grid item xs={12} md={6}>
<Card>
<CardContent>
<WebcamStreamCapture />
<WebcamStreamCapture onVideoRecorded={handleVideoRecorded} />
</CardContent>
{recordedVideoUrl && (
<div>
<h2>Recorded Video</h2>
<video src={recordedVideoUrl} controls autoPlay />
</div>
)}
</Card>
</Grid>
<Grid item xs={12} md={6}>
<Card sx={{ p: 5, minHeight: 300 }}>
<Box display="grid" gap={5}>
<Stack spacing={2}>
<Grid container spacing={2}>
<Grid item xs={12} md={6}>
<h3>Set Sign Speed </h3>
<Slider
defaultValue={30}
getAriaValueText={valuetext}
valueLabelDisplay="auto"
step={10}
marks
min={10}
max={110}
/>
<h4>Speed - {speed}</h4>
</Grid>
<Grid
item
xs={12}
md={6}
container
direction="row"
justifyContent="flex-end"
alignItems="center"
>
<Button
variant="contained"
style={{ width: '200px', height: '60px', fontSize: '20px' }}
sx={{
mb: 3,
}}
disabled={loading}
onClick={() => {
translateSignLanguageToTextRecord();
}}
>
Translate
</Button>
</Grid>
</Grid>
{loading ? (
<Card>
<CardContent>
......@@ -276,20 +416,6 @@ export default function AboutPage() {
</Box>
</Card>
</Grid>
<Grid item xs={12} md={12}>
<center>
<Button
variant="contained"
style={{ width: '200px', height: '60px', fontSize: '20px' }}
sx={{
mb: 3,
}}
disabled={loading}
>
Translate
</Button>
</center>
</Grid>
</Grid>
</Card>
</Box>
......
import React, { useEffect, useState } from 'react';
import Webcam from 'react-webcam';
import { Box, Button, Container, Grid, Stack } from '@mui/material';
import React, { useEffect, useRef, useState } from 'react';
import { Button, Grid } from '@mui/material';
import StopIcon from '@mui/icons-material/Stop';
import RadioButtonCheckedIcon from '@mui/icons-material/RadioButtonChecked';
import styles from './WebcamStreamCapture.module.css';
const WebcamStreamCapture = () => {
const webcamRef = React.useRef(null);
const mediaRecorderRef = React.useRef(null);
const [capturing, setCapturing] = React.useState(false);
const [recordedChunks, setRecordedChunks] = React.useState([]);
const [mediaBlobUrl, setMediaBlobUrl] = React.useState([]);
import Webcam from 'react-webcam';
const handleStartCaptureClick = React.useCallback(() => {
const WebcamStreamCapture = ({ onVideoRecorded }) => {
const webcamRef = useRef(null);
const mediaRecorderRef = useRef(null);
const [capturing, setCapturing] = useState(false);
const [recordedChunks, setRecordedChunks] = useState([]);
const handleStartCaptureClick = () => {
setRecordedChunks([]);
setMediaBlobUrl([]);
setCapturing(true);
mediaRecorderRef.current = new MediaRecorder(webcamRef.current.stream, {
......@@ -21,112 +19,81 @@ const WebcamStreamCapture = () => {
});
mediaRecorderRef.current.addEventListener('dataavailable', handleDataAvailable);
mediaRecorderRef.current.start();
}, [webcamRef, setCapturing, mediaRecorderRef]);
};
const handleDataAvailable = React.useCallback(
({ data }) => {
if (data.size > 0) {
setRecordedChunks((prev) => prev.concat(data));
}
},
[setRecordedChunks]
);
const handleDataAvailable = ({ data }) => {
if (data.size > 0) {
setRecordedChunks((prev) => prev.concat(data));
}
};
const handleStopCaptureClick = React.useCallback(async () => {
const handleStopCaptureClick = () => {
mediaRecorderRef.current.stop();
const blob = new Blob(recordedChunks, {
type: 'video/mp4',
});
const url = await URL.createObjectURL(blob);
console.log(url);
await setMediaBlobUrl(url);
setCapturing(false);
}, [mediaRecorderRef, webcamRef, setCapturing]);
};
const handleDownload = React.useCallback(() => {
const handleDownload = () => {
if (recordedChunks.length) {
const blob = new Blob(recordedChunks, {
type: 'video/mp4',
type: 'video/webm', // Use 'video/webm' to match MediaRecorder mimeType
});
const url = URL.createObjectURL(blob);
const a = document.createElement('a');
document.body.appendChild(a);
a.style = 'display: none';
a.href = url;
a.download = 'user-recording.mp4';
a.click();
window.URL.revokeObjectURL(url);
onVideoRecorded(url); // Pass the blob URL to the parent component
setRecordedChunks([]);
}
}, [recordedChunks]);
// Styles for camera
const [width, setWidth] = useState(window.innerWidth);
const handleResize = () => {
setWidth(window.innerWidth);
};
useEffect(() => {
window.addEventListener('resize', handleResize);
return () => window.removeEventListener('resize', handleResize);
}, []);
return (
<>
<Grid container spacing={2}>
<Grid item xs={12}>
<center>
<Webcam audio={false} ref={webcamRef} style={{ width: '100%', maxWidth: '500px' }} />
</center>
</Grid>
<Grid item xs={12}>
<center>
{capturing ? (
<Button
onClick={handleStopCaptureClick}
startIcon={<StopIcon />}
color="error"
variant="contained"
>
Stop Capture
</Button>
) : (
<Button
onClick={handleStartCaptureClick}
startIcon={<RadioButtonCheckedIcon />}
color="error"
variant="contained"
>
Start Capture
</Button>
)}
{recordedChunks.length > 0 && (
<Button
onClick={handleDownload}
variant="contained"
sx={{
ml: 1,
}}
>
Download
</Button>
)}
</center>
</Grid>
<Grid item xs={12}>
<Grid container spacing={2}>
<Grid item xs={12}>
<center>
<Webcam audio={false} ref={webcamRef} style={{ width: '100%', maxWidth: '500px' }} />
</center>
</Grid>
<Grid item xs={12}>
<center>
{capturing ? (
<Button
onClick={handleStopCaptureClick}
startIcon={<StopIcon />}
color="error"
variant="contained"
>
Stop Capture
</Button>
) : (
<Button
onClick={handleStartCaptureClick}
startIcon={<RadioButtonCheckedIcon />}
color="error"
variant="contained"
>
Start Capture
</Button>
)}
{recordedChunks.length > 0 && (
<center>
<video src={mediaBlobUrl} controls autoPlay />
</center>
<Button onClick={handleDownload} variant="contained" sx={{ ml: 1 }}>
Download
</Button>
)}
</Grid>
</center>
</Grid>
<Grid item xs={12}>
{recordedChunks.length > 0 && (
<center>
<video
src={
recordedChunks.length > 0
? URL.createObjectURL(new Blob(recordedChunks, { type: 'video/webm' }))
: null
}
controls
autoPlay
/>
</center>
)}
</Grid>
</>
</Grid>
);
};
......
import axios from 'axios';
class SignLanguageToTextService {
predictSignLanguageVideo(speed, data) {
return axios.post(
`http://127.0.0.1:8000/predict-sign-language/video/speed_levels?speed=${speed}`,
data
);
}
}
export default new SignLanguageToTextService();