Commit 14bf66ef authored by Paranagama R.P.S.D.

fix : Preprocessing changes & model changes

parent edfa7401
{
"cells": [
{
"cell_type": "code",
"execution_count": 4,
"id": "4e8edf74",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "e11e6436",
"metadata": {},
"outputs": [],
"source": [
"image = cv2.imread('D:/RP/project/2023-029/Project/Backend\\ML_Models/sign_language_to_text/scene00001.png')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"hand_shape = extract_hand_shape(image)\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "157e18d5",
"metadata": {},
"outputs": [],
"source": [
"# Convert the image to grayscale\n",
"gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n",
"\n",
"# Apply Gaussian blur to reduce noise\n",
"blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
"\n",
"# Apply adaptive thresholding to segment the hand from the background\n",
"_, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
"\n",
"# Find contours of the hand\n",
"contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
"\n",
"# Sort contours by area in descending order\n",
"contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
"\n",
"# Extract the largest contour (hand region)\n",
"hand_contour = contours[0]\n",
"\n",
"# Create a black image of the same size as the input image\n",
"hand_shape = np.zeros_like(image)\n",
"\n",
"# Draw the hand contour on the black image\n",
"cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "e7e05125",
"metadata": {},
"outputs": [
{
"ename": "error",
"evalue": "OpenCV(4.7.0) D:/a/opencv-python/opencv-python/opencv/modules/highgui/src/precomp.hpp:155: error: (-215:Assertion failed) src_depth != CV_16F && src_depth != CV_32S in function 'convertToShow'\n",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31merror\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m~\\AppData\\Local\\Temp\\ipykernel_19904\\1480809324.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 31\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 32\u001b[0m \u001b[1;31m# Display the hand shape with white background\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 33\u001b[1;33m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mimshow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'Hand Shape with White Background'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhand_shape_with_white_bg\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 34\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwaitKey\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 35\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdestroyAllWindows\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31merror\u001b[0m: OpenCV(4.7.0) D:/a/opencv-python/opencv-python/opencv/modules/highgui/src/precomp.hpp:155: error: (-215:Assertion failed) src_depth != CV_16F && src_depth != CV_32S in function 'convertToShow'\n"
]
}
],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "93d1ae9d",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
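The notebook above ends in a convertToShow assertion from cv2.imshow, which fires when the array handed to it has a depth imshow cannot display (CV_16F or CV_32S). Below is a minimal standalone sketch of the same Otsu-based pipeline with the guards the cells are missing; the input path is a placeholder, and BGR2GRAY replaces RGB2GRAY because cv2.imread returns BGR:

import cv2
import numpy as np

def extract_hand_shape(image):
    # cv2.imread returns BGR, so BGR2GRAY is the matching conversion
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    # Otsu chooses the threshold itself; the 0 passed here is ignored
    _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        return None  # blank frame: nothing to segment
    hand_contour = max(contours, key=cv2.contourArea)  # largest contour = hand
    hand_shape = np.zeros_like(image)
    cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)
    # imshow only displays 8-bit (or float) images; force uint8 to avoid
    # the convertToShow assertion seen in the failing cell
    return hand_shape.astype(np.uint8)

image = cv2.imread("scene00001.png")  # placeholder path
if image is None:
    raise FileNotFoundError("could not read the input image")

hand_shape = extract_hand_shape(image)
if hand_shape is not None:
    cv2.imshow("Hand Shape", hand_shape)
    cv2.waitKey(0)
    cv2.destroyAllWindows()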
{
"cells": [
{
"cell_type": "code",
"execution_count": 17,
"id": "4e8edf74",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": 55,
"id": "e11e6436",
"metadata": {},
"outputs": [],
"source": [
"# works well with white background buth have some issues eith other backgrounds \n",
"\n",
"# image = cv2.imread('C:/Users/HP Pavilion/Downloads/images (1).jpg')\n",
"image = cv2.imread('D:/RP/project/2023-029/Project/Backend\\ML_Models/sign_language_to_text/scene00001.png')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"hand_shape = extract_hand_shape(image)\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": 57,
"id": "157e18d5",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 62,
"id": "e7e05125",
"metadata": {},
"outputs": [],
"source": [
"# This works for any background, but does not hapture the fist area\n",
"import cv2\n",
"import numpy as np\n",
"# image = cv2.imread('D:/RP/project/2023-029/Project/Backend\\ML_Models/sign_language_to_text/scene00001.png')\n",
"image = cv2.imread('C:/Users/HP Pavilion/Downloads/images (1).jpg')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" thresholded = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"hand_shape = extract_hand_shape(image)\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "markdown",
"id": "02a9f711",
"metadata": {},
"source": [
"### Capture Hand"
]
},
{
"cell_type": "code",
"execution_count": 66,
"id": "7aa4a681",
"metadata": {},
"outputs": [
{
"ename": "error",
"evalue": "OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\highgui\\src\\window.cpp:971: error: (-215:Assertion failed) size.width>0 && size.height>0 in function 'cv::imshow'\n",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31merror\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m~\\AppData\\Local\\Temp\\ipykernel_16084\\2889311055.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 29\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 30\u001b[0m \u001b[1;31m# Display the result\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 31\u001b[1;33m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mimshow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"Area Above Elbow\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0marea_above_elbow\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 32\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwaitKey\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 33\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdestroyAllWindows\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31merror\u001b[0m: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\highgui\\src\\window.cpp:971: error: (-215:Assertion failed) size.width>0 && size.height>0 in function 'cv::imshow'\n"
]
}
],
"source": [
"import cv2\n",
"import numpy as np\n",
"import mediapipe as mp\n",
"# Load the image\n",
"# image = cv2.imread('C:/Users/HP Pavilion/Downloads/test_sign.jpeg')\n",
"image = cv2.imread('D:/RP/project/2023-029/Project/Backend\\ML_Models/sign_language_to_text/scene00001.png')\n",
"image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n",
"\n",
"# Initialize Mediapipe solutions\n",
"mp_holistic = mp.solutions.holistic\n",
"\n",
"# Initialize the holistic model\n",
"holistic = mp_holistic.Holistic()\n",
"\n",
"# Process the image with Mediapipe\n",
"results = holistic.process(image_rgb)\n",
"\n",
"# Extract the left elbow and hand landmarks\n",
"left_elbow_landmark = results.left_hand_landmarks.landmark[mp_holistic.HandLandmark.WRIST]\n",
"left_hand_landmark = results.left_hand_landmarks.landmark[mp_holistic.HandLandmark.PINKY_MCP]\n",
"\n",
"# Convert the landmark coordinates to image pixels\n",
"image_height, image_width, _ = image.shape\n",
"left_elbow_px = int(left_elbow_landmark.x * image_width), int(left_elbow_landmark.y * image_height)\n",
"left_hand_px = int(left_hand_landmark.x * image_width), int(left_hand_landmark.y * image_height)\n",
"\n",
"# Calculate the area above the elbow\n",
"area_above_elbow = image[0:left_elbow_px[1], left_elbow_px[0]:left_hand_px[0]]\n",
"\n",
"# Display the result\n",
"cv2.imshow(\"Area Above Elbow\", area_above_elbow)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
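The failing cell in this notebook hits cv2.imshow's size.width>0 assertion, which means the slice area_above_elbow came out empty; that happens whenever left_hand_landmarks is None or the two x coordinates arrive in the wrong order. Note also that, despite the variable names, WRIST and PINKY_MCP are hand landmarks; an actual elbow would come from results.pose_landmarks (mp_holistic.PoseLandmark.LEFT_ELBOW). A hedged sketch with both failure modes guarded; the path and the static_image_mode choice are assumptions:

import cv2
import mediapipe as mp

mp_holistic = mp.solutions.holistic

image = cv2.imread("scene00001.png")  # placeholder path
if image is None:
    raise FileNotFoundError("could not read the input image")
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

# static_image_mode=True tells Mediapipe this is a single photo, not a stream
with mp_holistic.Holistic(static_image_mode=True) as holistic:
    results = holistic.process(image_rgb)

if results.left_hand_landmarks is None:
    raise ValueError("no left hand detected in the image")

h, w, _ = image.shape
wrist = results.left_hand_landmarks.landmark[mp_holistic.HandLandmark.WRIST]
pinky = results.left_hand_landmarks.landmark[mp_holistic.HandLandmark.PINKY_MCP]
x1, x2 = sorted((int(wrist.x * w), int(pinky.x * w)))  # order the x-range
y2 = int(wrist.y * h)

crop = image[0:y2, x1:x2]
if crop.size == 0:
    raise ValueError("empty crop; landmarks coincide or sit at the image edge")

cv2.imshow("Area Above Elbow", crop)
cv2.waitKey(0)
cv2.destroyAllWindows()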
{
"cells": [
{
"cell_type": "code",
"execution_count": 17,
"id": "4e8edf74",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": 55,
"id": "e11e6436",
"metadata": {},
"outputs": [],
"source": [
"# works well with white background buth have some issues eith other backgrounds \n",
"\n",
"# image = cv2.imread('C:/Users/HP Pavilion/Downloads/images (1).jpg')\n",
"image = cv2.imread('D:/RP/project/2023-029/Project/Backend\\ML_Models/sign_language_to_text/scene00001.png')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"hand_shape = extract_hand_shape(image)\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": 57,
"id": "157e18d5",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 62,
"id": "e7e05125",
"metadata": {},
"outputs": [],
"source": [
"# This works for any background, but does not hapture the fist area\n",
"import cv2\n",
"import numpy as np\n",
"# image = cv2.imread('D:/RP/project/2023-029/Project/Backend\\ML_Models/sign_language_to_text/scene00001.png')\n",
"image = cv2.imread('C:/Users/HP Pavilion/Downloads/images (1).jpg')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" thresholded = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"hand_shape = extract_hand_shape(image)\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "markdown",
"id": "ae0a7cb5",
"metadata": {},
"source": [
"### Capture Hand"
]
},
{
"cell_type": "code",
"execution_count": 68,
"id": "1c04daef",
"metadata": {},
"outputs": [
{
"ename": "error",
"evalue": "OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\highgui\\src\\window.cpp:971: error: (-215:Assertion failed) size.width>0 && size.height>0 in function 'cv::imshow'\n",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31merror\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m~\\AppData\\Local\\Temp\\ipykernel_16084\\10235205.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 29\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 30\u001b[0m \u001b[1;31m# Display the result\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 31\u001b[1;33m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mimshow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"Area Above Elbow\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0marea_above_elbow\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 32\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwaitKey\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 33\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdestroyAllWindows\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31merror\u001b[0m: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\highgui\\src\\window.cpp:971: error: (-215:Assertion failed) size.width>0 && size.height>0 in function 'cv::imshow'\n"
]
}
],
"source": [
"import cv2\n",
"import numpy as np\n",
"import mediapipe as mp\n",
"# Load the image\n",
"image = cv2.imread('C:/Users/HP Pavilion/Downloads/test_sign.jpeg')\n",
"\n",
"image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n",
"\n",
"# Initialize Mediapipe solutions\n",
"mp_holistic = mp.solutions.holistic\n",
"\n",
"# Initialize the holistic model\n",
"holistic = mp_holistic.Holistic()\n",
"\n",
"# Process the image with Mediapipe\n",
"results = holistic.process(image_rgb)\n",
"\n",
"# Extract the left elbow and hand landmarks\n",
"left_elbow_landmark = results.left_hand_landmarks.landmark[mp_holistic.HandLandmark.WRIST]\n",
"left_hand_landmark = results.left_hand_landmarks.landmark[mp_holistic.HandLandmark.PINKY_MCP]\n",
"\n",
"# Convert the landmark coordinates to image pixels\n",
"image_height, image_width, _ = image.shape\n",
"left_elbow_px = int(left_elbow_landmark.x * image_width), int(left_elbow_landmark.y * image_height)\n",
"left_hand_px = int(left_hand_landmark.x * image_width), int(left_hand_landmark.y * image_height)\n",
"\n",
"# Calculate the area above the elbow\n",
"area_above_elbow = image[0:left_elbow_px[1], left_elbow_px[0]:left_hand_px[0]]\n",
"\n",
"# Display the result\n",
"cv2.imshow(\"Area Above Elbow\", area_above_elbow)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
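The comment in the adaptive-threshold cell notes that the mask misses the fist interior: ADAPTIVE_THRESH_MEAN_C responds to local edges, so the flat center of a fist comes out hollow. One standard remedy, offered as an assumption rather than anything in this commit, is a morphological close on the mask before the contour step:

import cv2
import numpy as np

def adaptive_hand_mask(gray):
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    thresholded = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                        cv2.THRESH_BINARY, 11, 2)
    # Close small holes so the fist interior is filled before findContours;
    # the 7x7 kernel size is a tunable guess, not a value from the notebooks
    kernel = np.ones((7, 7), np.uint8)
    return cv2.morphologyEx(thresholded, cv2.MORPH_CLOSE, kernel)

Drawing the largest contour with thickness=cv2.FILLED, as the notebooks already do, then fills whatever the closed contour encloses.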
This source diff could not be displayed because it is too large.
@@ -4,3 +4,12 @@
2023-05-19 00:32:48,522 - ERROR - Received request at root endpoint.
2023-05-19 23:09:38,565 - INFO - Failed to make predictions. name 'CLASSES' is not defined
2023-05-19 23:09:38,565 - INFO - Failed to make predictions. name 'CLASSES' is not defined
2023-05-24 20:05:37,932 - INFO - Failed to make predictions. OpenCV(4.7.0) D:\a\opencv-python\opencv-python\opencv\modules\imgproc\src\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'
2023-05-24 20:05:37,932 - INFO - Failed to make predictions. OpenCV(4.7.0) D:\a\opencv-python\opencv-python\opencv\modules\imgproc\src\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'
2023-05-24 20:05:37,932 - INFO - Failed to make predictions. OpenCV(4.7.0) D:\a\opencv-python\opencv-python\opencv\modules\imgproc\src\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'
2023-05-24 20:05:37,936 - INFO - Error.
2023-05-24 20:05:37,936 - INFO - Error.
2023-05-24 20:05:37,936 - INFO - Error.
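The repeated cvtColor !_src.empty() entries in this log are the signature of an empty frame reaching cv2.cvtColor: VideoCapture.read() returns (False, None) once the stream ends or a frame fails to decode. A minimal sketch of the guard, with a placeholder path:

import cv2

video = cv2.VideoCapture("upload.mp4")  # placeholder path
while True:
    success, frame = video.read()
    if not success or frame is None:
        break  # end of stream or decode failure; never reaches cvtColor
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
video.release()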
@@ -24,7 +24,7 @@ IMG_SIZE = 224 # image size
 prediction_service = SignLanguagePredictionService(model, CLASSES, mappings)
-@router.post("/upload/video")
+@router.post("/upload/video", tags=["Sign Language"])
 async def upload_video(video: UploadFile = File(...)):
     try:
@@ -40,7 +40,7 @@ async def upload_video(video: UploadFile = File(...)):
             detail="Failed to upload the video"
         )
-@router.post('/predict-sign-language/image')
+@router.post('/predict-sign-language/image', tags=["Sign Language"])
 def predict_using_image(image_request: UploadFile = File(...)):
     try:
         return prediction_service.predict_sign_language(image_request)
@@ -50,10 +50,10 @@ def predict_using_image(image_request: UploadFile = File(...)):
             status_code=500,
             detail="Request Failed."
         )
-@router.post('/predict-sign-language/video')
+@router.post('/predict-sign-language/video', tags=["Sign Language"])
 def predict_using_video(video_request: UploadFile = File(...)):
     try:
-        return prediction_service.predict_sign_language_video(video_request)
+        return prediction_service.predict_sign_language_video2(video_request)
     except Exception as e:
         logger.info(f"Error. {e}")
         raise HTTPException(
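The only functional changes in these route hunks are the tags=["Sign Language"] keyword, which groups the endpoints under one heading in the Swagger UI FastAPI generates at /docs, and the switch to predict_sign_language_video2. As a side note, the same grouping can be declared once at the router level; a small sketch, with the handler body elided:

from fastapi import APIRouter, File, UploadFile

# Router-level tags apply to every route registered on this router,
# equivalent to repeating tags=["Sign Language"] in each decorator
router = APIRouter(tags=["Sign Language"])

@router.post("/predict-sign-language/image")
def predict_using_image(image_request: UploadFile = File(...)):
    ...  # prediction logic elided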
@@ -7,12 +7,12 @@ def test():
     # Your code here
     return {"pong"}
-@router.get("/users")
+@router.get("/test")
 def get_users():
     # Your code here
     return {"message": "Get users endpoint"}
-@router.post("/users")
+@router.post("/test-api")
 def create_user():
     # Your code here
     return {"message": "Create user endpoint"}
@@ -60,7 +60,7 @@ class SignLanguagePredictionService:
             frame_count = 0
             # Loop through the frames of the video
-            while frame_count < 20:
+            while frame_count < 50:
                 success, frame = video.read()
                 if not success:
                     break
@@ -93,3 +93,64 @@ class SignLanguagePredictionService:
                 status_code=500,
                 detail="Failed to make predictions"
             )
+
+    def predict_sign_language_video(self, video_request: UploadFile) -> Dict[str, str]:
+        try:
+            # Create a temporary file to save the video
+            video_location = f"files/{video_request.filename}"
+            with open(video_location, "wb") as file:
+                file.write(video_request.file.read())
+
+            # Read the video using OpenCV
+            video = cv2.VideoCapture(video_location)
+            predictions = []
+            frame_count = 0
+
+            # Loop through the frames of the video
+            while frame_count < 50:
+                success, frame = video.read()
+                if not success:
+                    break
+
+                # Preprocess the frame
+                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+                frame = cv2.resize(frame, (IMG_SIZE, IMG_SIZE))
+                frame = extract_hand_shape(frame)
+                frame = np.array([frame], dtype=np.float32) / 255.0
+
+                # Make prediction
+                prediction = self.model.predict(frame)
+                class_index = np.argmax(prediction)
+                class_name = self.classes[class_index]
+                sinhala_letter = self.mappings.letter_mapping.get(class_name, 'Unknown')
+
+                # Store the prediction for the frame
+                predictions.append(sinhala_letter)
+                frame_count += 1
+
+            video.release()
+
+            # Delete the video file
+            os.remove(video_location)
+
+            return {'frame_count': frame_count, 'predictions': predictions}
+        except Exception as e:
+            logger.info(f"Failed to make predictions. {e}")
+            raise HTTPException(
+                status_code=500,
+                detail="Failed to make predictions"
+            )
+
+def extract_hand_shape(image):
+    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
+    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
+    _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
+    contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+    contours = sorted(contours, key=cv2.contourArea, reverse=True)
+    hand_contour = contours[0]
+    hand_shape = np.zeros_like(image)
+    cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)
+    return hand_shape
\ No newline at end of file
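For reference, a hedged client-side sketch of exercising the video route added above; the host, port, and file name are placeholders, while the multipart field name must match the FastAPI parameter video_request:

import requests

url = "http://localhost:8000/predict-sign-language/video"  # placeholder host/port
with open("sample_sign.mp4", "rb") as f:  # placeholder video file
    response = requests.post(url, files={"video_request": ("sample_sign.mp4", f, "video/mp4")})
# The service returns {'frame_count': ..., 'predictions': [...]} on success
print(response.json())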