Commit 9d1e08e0 authored by Paranagama R.P.S.D.'s avatar Paranagama R.P.S.D.

fix: Recorded video display error

parent 4832b594
......@@ -76,6 +76,44 @@
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "93d1ae9d",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import numpy as np\n",
"\n",
"IMG_SIZE = 224  # model input size (pixels per side)\n",
"\n",
"frame = cv2.imread('D:/RP/project/2023-029/Project/Backend/ML_Models/sign_language_to_text/test_image.png')\n",
"# cv2.imread returns None (no exception) on a bad path; fail early with a clear message\n",
"assert frame is not None, 'Could not read test_image.png - check the path'\n",
"\n",
"def extract_hand_shape(image):\n",
"    \"\"\"Return a filled white mask of the largest contour in `image` (assumed to be the hand).\"\"\"\n",
"    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
"    blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
"    # Otsu chooses the threshold automatically, adapting to lighting\n",
"    _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
"    contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
"    if not contours:\n",
"        raise ValueError('No contours found - cannot extract hand shape')\n",
"    contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
"    hand_contour = contours[0]  # largest contour by area\n",
"    hand_shape = np.zeros_like(image)\n",
"    cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
"    return hand_shape\n",
"\n",
"frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n",
"frame = cv2.resize(frame, (IMG_SIZE, IMG_SIZE))\n",
"# frame = extract_hand_shape(frame)\n",
"# frame = np.array([frame], dtype=np.float32) / 255.0\n",
"\n",
"# NOTE(review): frame is RGB at this point but cv2.imshow expects BGR, so the\n",
"# preview window shows channel-swapped colours - harmless for shape debugging.\n",
"cv2.imshow('Hand Shape', frame)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": 4,
......@@ -95,14 +133,6 @@
}
],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "93d1ae9d",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
......
......@@ -52,7 +52,7 @@
},
{
"cell_type": "code",
"execution_count": 62,
"execution_count": 19,
"id": "e7e05125",
"metadata": {},
"outputs": [],
......@@ -83,7 +83,7 @@
},
{
"cell_type": "markdown",
"id": "02a9f711",
"id": "ae0a7cb5",
"metadata": {},
"source": [
"### Capture Hand"
......@@ -91,8 +91,8 @@
},
{
"cell_type": "code",
"execution_count": 66,
"id": "7aa4a681",
"execution_count": 2,
"id": "1c04daef",
"metadata": {},
"outputs": [
{
......@@ -102,7 +102,7 @@
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31merror\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m~\\AppData\\Local\\Temp\\ipykernel_16084\\2889311055.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 29\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 30\u001b[0m \u001b[1;31m# Display the result\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 31\u001b[1;33m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mimshow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"Area Above Elbow\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0marea_above_elbow\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 32\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwaitKey\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 33\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdestroyAllWindows\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\AppData\\Local\\Temp\\ipykernel_3024\\2889311055.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 29\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 30\u001b[0m \u001b[1;31m# Display the result\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 31\u001b[1;33m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mimshow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"Area Above Elbow\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0marea_above_elbow\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 32\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwaitKey\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 33\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdestroyAllWindows\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31merror\u001b[0m: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\highgui\\src\\window.cpp:971: error: (-215:Assertion failed) size.width>0 && size.height>0 in function 'cv::imshow'\n"
]
}
......@@ -112,8 +112,8 @@
"import numpy as np\n",
"import mediapipe as mp\n",
"# Load the image\n",
"# image = cv2.imread('C:/Users/HP Pavilion/Downloads/test_sign.jpeg')\n",
"image = cv2.imread('D:/RP/project/2023-029/Project/Backend\\ML_Models/sign_language_to_text/scene00001.png')\n",
"image = cv2.imread('C:/Users/HP Pavilion/Downloads/test_sign.jpeg')\n",
"\n",
"image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n",
"\n",
"# Initialize Mediapipe solutions\n",
......@@ -142,6 +142,52 @@
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3ead61b7",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"# This works for any background, but does not capture the fist area\n",
"import cv2\n",
"import numpy as np\n",
"\n",
"IMG_SIZE = 224  # model input size (pixels per side)\n",
"\n",
"# image = cv2.imread('C:/Users/HP Pavilion/Downloads/images (1).jpg')\n",
"image = cv2.imread('D:/RP/project/2023-029/Project/Backend/ML_Models/sign_language_to_text/test_image.png')\n",
"# cv2.imread returns None (no exception) on a bad path; fail early with a clear message\n",
"assert image is not None, 'Could not read test_image.png - check the path'\n",
"\n",
"def extract_hand_shape(image):\n",
"    \"\"\"Return a filled white mask of the largest contour in `image` (assumed to be the hand).\"\"\"\n",
"    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
"    blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
"    # Otsu chooses the threshold automatically, adapting to lighting\n",
"    _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
"    contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
"    if not contours:\n",
"        raise ValueError('No contours found - cannot extract hand shape')\n",
"    contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
"    hand_contour = contours[0]  # largest contour by area\n",
"    hand_shape = np.zeros_like(image)\n",
"    cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
"    return hand_shape\n",
"\n",
"frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n",
"frame = cv2.resize(frame, (IMG_SIZE, IMG_SIZE))\n",
"# Keep the uint8 mask in its own name: it is what cv2.imshow can render.\n",
"# (Previously `hand_shape` was undefined at this scope -> NameError, and the\n",
"# 4-D float batch below is not displayable by imshow.)\n",
"hand_shape = extract_hand_shape(frame)\n",
"frame = np.array([hand_shape], dtype=np.float32) / 255.0  # batched, normalised model input\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
}
],
"metadata": {
......
......@@ -78,31 +78,53 @@
},
{
"cell_type": "code",
"execution_count": 4,
"id": "e7e05125",
"execution_count": 1,
"id": "93d1ae9d",
"metadata": {},
"outputs": [
{
"ename": "error",
"evalue": "OpenCV(4.7.0) D:/a/opencv-python/opencv-python/opencv/modules/highgui/src/precomp.hpp:155: error: (-215:Assertion failed) src_depth != CV_16F && src_depth != CV_32S in function 'convertToShow'\n",
"evalue": "OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\highgui\\src\\window_w32.cpp:124: error: (-215:Assertion failed) bmi && width >= 0 && height >= 0 && (bpp == 8 || bpp == 24 || bpp == 32) in function 'FillBitmapInfo'\n",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31merror\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m~\\AppData\\Local\\Temp\\ipykernel_19904\\1480809324.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 31\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 32\u001b[0m \u001b[1;31m# Display the hand shape with white background\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 33\u001b[1;33m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mimshow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'Hand Shape with White Background'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhand_shape_with_white_bg\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 34\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwaitKey\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 35\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdestroyAllWindows\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31merror\u001b[0m: OpenCV(4.7.0) D:/a/opencv-python/opencv-python/opencv/modules/highgui/src/precomp.hpp:155: error: (-215:Assertion failed) src_depth != CV_16F && src_depth != CV_32S in function 'convertToShow'\n"
"\u001b[1;32m~\\AppData\\Local\\Temp\\ipykernel_16572\\1170496581.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 25\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 26\u001b[0m \u001b[1;31m# Display the hand shape\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 27\u001b[1;33m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mimshow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'Hand Shape'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mframe\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 28\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwaitKey\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 29\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdestroyAllWindows\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31merror\u001b[0m: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\highgui\\src\\window_w32.cpp:124: error: (-215:Assertion failed) bmi && width >= 0 && height >= 0 && (bpp == 8 || bpp == 24 || bpp == 32) in function 'FillBitmapInfo'\n"
]
}
],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "93d1ae9d",
"metadata": {},
"outputs": [],
"source": []
"source": [
"import cv2\n",
"import numpy as np\n",
"\n",
"\n",
"IMG_SIZE = 224\n",
"\n",
"frame = cv2.imread('D:/RP/project/2023-029/Project/Backend/ML_Models/sign_language_to_text/test_image.png')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n",
"frame = cv2.resize(frame, (IMG_SIZE, IMG_SIZE))\n",
"frame = extract_hand_shape(frame)\n",
"frame = np.array([frame], dtype=np.float32) / 255.0\n",
"\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', frame)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
}
],
"metadata": {
......
......@@ -52,7 +52,7 @@
},
{
"cell_type": "code",
"execution_count": 62,
"execution_count": 19,
"id": "e7e05125",
"metadata": {},
"outputs": [],
......@@ -91,7 +91,7 @@
},
{
"cell_type": "code",
"execution_count": 68,
"execution_count": 2,
"id": "1c04daef",
"metadata": {},
"outputs": [
......@@ -102,7 +102,7 @@
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31merror\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m~\\AppData\\Local\\Temp\\ipykernel_16084\\10235205.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 29\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 30\u001b[0m \u001b[1;31m# Display the result\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 31\u001b[1;33m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mimshow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"Area Above Elbow\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0marea_above_elbow\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 32\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwaitKey\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 33\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdestroyAllWindows\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\AppData\\Local\\Temp\\ipykernel_3024\\2889311055.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 29\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 30\u001b[0m \u001b[1;31m# Display the result\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 31\u001b[1;33m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mimshow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"Area Above Elbow\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0marea_above_elbow\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 32\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwaitKey\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 33\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdestroyAllWindows\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31merror\u001b[0m: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\highgui\\src\\window.cpp:971: error: (-215:Assertion failed) size.width>0 && size.height>0 in function 'cv::imshow'\n"
]
}
......@@ -142,6 +142,52 @@
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3ead61b7",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"# This works for any background, but does not capture the fist area\n",
"import cv2\n",
"import numpy as np\n",
"\n",
"IMG_SIZE = 224 # image size\n",
"\n",
"# image = cv2.imread('C:/Users/HP Pavilion/Downloads/images (1).jpg')\n",
"\n",
"\n",
"image = cv2.imread('D:/RP/project/2023-029/Project/Backend/ML_Models/sign_language_to_text/test_image.png')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"\n",
"frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n",
"frame = cv2.resize(frame, (IMG_SIZE, IMG_SIZE))\n",
"frame = extract_hand_shape(frame)\n",
"frame = np.array([frame], dtype=np.float32) / 255.0\n",
"\n",
"\n",
"\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
}
],
"metadata": {
......
......@@ -174,8 +174,8 @@ class SignLanguagePredictionService:
if not success:
break
# TODO Add to COnfig
if frame_count >= 250:
# TODO Add to Config
if frame_count >= 500:
break
# Preprocess the frame
......@@ -195,7 +195,7 @@ class SignLanguagePredictionService:
predictions.append(sinhala_letter)
frame_count += 1
# TODO Add to COnfig
# TODO Add to Config
threshold_percentage = 60
# Check if the required number of frames per sign has been reached
......
......@@ -63,6 +63,7 @@ export default function AboutPage() {
const [loading, setLoading] = useState(false);
const [value, setValue] = useState('');
const [speed, setSpeed] = useState(0);
const [recordedVideoUrl, setRecordedVideoUrl] = useState(null);
const handleDropSingleFile = useCallback(async (acceptedFiles: File[]) => {
const file = acceptedFiles[0];
......@@ -114,7 +115,8 @@ export default function AboutPage() {
if (file) {
setLoading(true);
const formData = new FormData();
formData.append('video_request', file, 'test_video.mp4');
formData.append('video_request', file, file.name);
try {
const response = await SignLanguageToTextService.predictSignLanguageVideo(speed, formData);
......@@ -135,11 +137,42 @@ export default function AboutPage() {
}
};
const translateSignLanguageToTextRecord = async () => {
console.log('TEST TES ');
console.log(recordedVideoUrl);
if (recordedVideoUrl) {
setLoading(true);
const formData = new FormData();
formData.append('video_request', recordedVideoUrl, recordedVideoUrl.name);
try {
const response = await SignLanguageToTextService.predictSignLanguageVideo(speed, formData);
if (response.status == 200) {
console.log(response.data);
setValue(response.data.predictions);
} else {
enqueueSnackbar('Something went Wrong!', { variant: 'error' });
}
setLoading(false);
} catch (error) {
console.log(error);
setLoading(false);
enqueueSnackbar('Something went Wrong!', { variant: 'error' });
}
} else {
enqueueSnackbar('Please record a video.', { variant: 'warning' });
}
};
function valuetext(value: number) {
setSpeed(value);
return `$${value}°C`;
}
const handleVideoRecorded = (url) => {
setRecordedVideoUrl(url);
};
return (
<>
<Head>
......@@ -290,8 +323,15 @@ export default function AboutPage() {
<Grid item xs={12} md={6}>
<Card>
<CardContent>
<WebcamStreamCapture />
<WebcamStreamCapture onVideoRecorded={handleVideoRecorded} />
</CardContent>
{recordedVideoUrl && (
<div>
<h2>Recorded Video</h2>
<video src={recordedVideoUrl} controls autoPlay />
</div>
)}
</Card>
</Grid>
<Grid item xs={12} md={6}>
......@@ -330,7 +370,7 @@ export default function AboutPage() {
}}
disabled={loading}
onClick={() => {
translateSignLanguageToText();
translateSignLanguageToTextRecord();
}}
>
Translate
......
import React, { useEffect, useState } from 'react';
import Webcam from 'react-webcam';
import { Box, Button, Container, Grid, Stack } from '@mui/material';
import React, { useEffect, useRef, useState } from 'react';
import { Button, Grid } from '@mui/material';
import StopIcon from '@mui/icons-material/Stop';
import RadioButtonCheckedIcon from '@mui/icons-material/RadioButtonChecked';
import styles from './WebcamStreamCapture.module.css';
const WebcamStreamCapture = () => {
const webcamRef = React.useRef(null);
const mediaRecorderRef = React.useRef(null);
const [capturing, setCapturing] = React.useState(false);
const [recordedChunks, setRecordedChunks] = React.useState([]);
const [mediaBlobUrl, setMediaBlobUrl] = React.useState([]);
import Webcam from 'react-webcam';
const handleStartCaptureClick = React.useCallback(() => {
const WebcamStreamCapture = ({ onVideoRecorded }) => {
const webcamRef = useRef(null);
const mediaRecorderRef = useRef(null);
const [capturing, setCapturing] = useState(false);
const [recordedChunks, setRecordedChunks] = useState([]);
const handleStartCaptureClick = () => {
setRecordedChunks([]);
setMediaBlobUrl([]);
setCapturing(true);
mediaRecorderRef.current = new MediaRecorder(webcamRef.current.stream, {
......@@ -21,112 +19,81 @@ const WebcamStreamCapture = () => {
});
mediaRecorderRef.current.addEventListener('dataavailable', handleDataAvailable);
mediaRecorderRef.current.start();
}, [webcamRef, setCapturing, mediaRecorderRef]);
};
const handleDataAvailable = React.useCallback(
({ data }) => {
if (data.size > 0) {
setRecordedChunks((prev) => prev.concat(data));
}
},
[setRecordedChunks]
);
const handleDataAvailable = ({ data }) => {
if (data.size > 0) {
setRecordedChunks((prev) => prev.concat(data));
}
};
const handleStopCaptureClick = React.useCallback(async () => {
const handleStopCaptureClick = () => {
mediaRecorderRef.current.stop();
const blob = new Blob(recordedChunks, {
type: 'video/mp4',
});
const url = await URL.createObjectURL(blob);
console.log(url);
await setMediaBlobUrl(url);
setCapturing(false);
}, [mediaRecorderRef, webcamRef, setCapturing]);
};
const handleDownload = React.useCallback(() => {
const handleDownload = () => {
if (recordedChunks.length) {
const blob = new Blob(recordedChunks, {
type: 'video/mp4',
type: 'video/webm', // Use 'video/webm' to match MediaRecorder mimeType
});
const url = URL.createObjectURL(blob);
const a = document.createElement('a');
document.body.appendChild(a);
a.style = 'display: none';
a.href = url;
a.download = 'user-recording.mp4';
a.click();
window.URL.revokeObjectURL(url);
onVideoRecorded(url); // Pass the blob URL to the parent component
setRecordedChunks([]);
}
}, [recordedChunks]);
// Styles for camera
const [width, setWidth] = useState(window.innerWidth);
const handleResize = () => {
setWidth(window.innerWidth);
};
useEffect(() => {
window.addEventListener('resize', handleResize);
return () => window.removeEventListener('resize', handleResize);
}, []);
return (
<>
<Grid container spacing={2}>
<Grid item xs={12}>
<center>
<Webcam audio={false} ref={webcamRef} style={{ width: '100%', maxWidth: '500px' }} />
</center>
</Grid>
<Grid item xs={12}>
<center>
{capturing ? (
<Button
onClick={handleStopCaptureClick}
startIcon={<StopIcon />}
color="error"
variant="contained"
>
Stop Capture
</Button>
) : (
<Button
onClick={handleStartCaptureClick}
startIcon={<RadioButtonCheckedIcon />}
color="error"
variant="contained"
>
Start Capture
</Button>
)}
{recordedChunks.length > 0 && (
<Button
onClick={handleDownload}
variant="contained"
sx={{
ml: 1,
}}
>
Download
</Button>
)}
</center>
</Grid>
<Grid item xs={12}>
<Grid container spacing={2}>
<Grid item xs={12}>
<center>
<Webcam audio={false} ref={webcamRef} style={{ width: '100%', maxWidth: '500px' }} />
</center>
</Grid>
<Grid item xs={12}>
<center>
{capturing ? (
<Button
onClick={handleStopCaptureClick}
startIcon={<StopIcon />}
color="error"
variant="contained"
>
Stop Capture
</Button>
) : (
<Button
onClick={handleStartCaptureClick}
startIcon={<RadioButtonCheckedIcon />}
color="error"
variant="contained"
>
Start Capture
</Button>
)}
{recordedChunks.length > 0 && (
<center>
<video src={mediaBlobUrl} controls autoPlay />
</center>
<Button onClick={handleDownload} variant="contained" sx={{ ml: 1 }}>
Download
</Button>
)}
</Grid>
</center>
</Grid>
<Grid item xs={12}>
{recordedChunks.length > 0 && (
<center>
<video
src={
recordedChunks.length > 0
? URL.createObjectURL(new Blob(recordedChunks, { type: 'video/webm' }))
: null
}
controls
autoPlay
/>
</center>
)}
</Grid>
</>
</Grid>
);
};
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment