fix: update

parent 4562a5f5
{
  "cSpell.words": [
    "formik",
    "Janith",
    "leaderboard",
    "SLIIT"
......
models/*
!models/
\ No newline at end of file
*.h5
\ No newline at end of file
{
"cells": [
{
"cell_type": "code",
"execution_count": 4,
"id": "4e8edf74",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "e11e6436",
"metadata": {},
"outputs": [],
"source": [
"image = cv2.imread('D:/RP/project/2023-029/Project/Backend\\ML_Models/sign_language_to_text/scene00001.png')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"hand_shape = extract_hand_shape(image)\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "157e18d5",
"metadata": {},
"outputs": [],
"source": [
"# Convert the image to grayscale\n",
"gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n",
"\n",
"# Apply Gaussian blur to reduce noise\n",
"blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
"\n",
"# Apply adaptive thresholding to segment the hand from the background\n",
"_, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
"\n",
"# Find contours of the hand\n",
"contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
"\n",
"# Sort contours by area in descending order\n",
"contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
"\n",
"# Extract the largest contour (hand region)\n",
"hand_contour = contours[0]\n",
"\n",
"# Create a black image of the same size as the input image\n",
"hand_shape = np.zeros_like(image)\n",
"\n",
"# Draw the hand contour on the black image\n",
"cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "93d1ae9d",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import numpy as np\n",
"\n",
"\n",
"IMG_SIZE = 224\n",
"\n",
"frame = cv2.imread('D:/RP/project/2023-029/Project/Backend/ML_Models/sign_language_to_text/test_image.png')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n",
"frame = cv2.resize(frame, (IMG_SIZE, IMG_SIZE))\n",
"# frame = extract_hand_shape(frame)\n",
"# frame = np.array([frame], dtype=np.float32) / 255.0\n",
"\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', frame)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "e7e05125",
"metadata": {},
"outputs": [
{
"ename": "error",
"evalue": "OpenCV(4.7.0) D:/a/opencv-python/opencv-python/opencv/modules/highgui/src/precomp.hpp:155: error: (-215:Assertion failed) src_depth != CV_16F && src_depth != CV_32S in function 'convertToShow'\n",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31merror\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m~\\AppData\\Local\\Temp\\ipykernel_19904\\1480809324.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 31\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 32\u001b[0m \u001b[1;31m# Display the hand shape with white background\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 33\u001b[1;33m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mimshow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'Hand Shape with White Background'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhand_shape_with_white_bg\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 34\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwaitKey\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 35\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdestroyAllWindows\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31merror\u001b[0m: OpenCV(4.7.0) D:/a/opencv-python/opencv-python/opencv/modules/highgui/src/precomp.hpp:155: error: (-215:Assertion failed) src_depth != CV_16F && src_depth != CV_32S in function 'convertToShow'\n"
]
}
],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
{
"cells": [
{
"cell_type": "code",
"execution_count": 17,
"id": "4e8edf74",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": 55,
"id": "e11e6436",
"metadata": {},
"outputs": [],
"source": [
"# works well with white background buth have some issues eith other backgrounds \n",
"\n",
"# image = cv2.imread('C:/Users/HP Pavilion/Downloads/images (1).jpg')\n",
"image = cv2.imread('D:/RP/project/2023-029/Project/Backend\\ML_Models/sign_language_to_text/scene00001.png')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"hand_shape = extract_hand_shape(image)\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": 57,
"id": "157e18d5",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 19,
"id": "e7e05125",
"metadata": {},
"outputs": [],
"source": [
"# This works for any background, but does not hapture the fist area\n",
"import cv2\n",
"import numpy as np\n",
"# image = cv2.imread('D:/RP/project/2023-029/Project/Backend\\ML_Models/sign_language_to_text/scene00001.png')\n",
"image = cv2.imread('C:/Users/HP Pavilion/Downloads/images (1).jpg')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" thresholded = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"hand_shape = extract_hand_shape(image)\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "markdown",
"id": "ae0a7cb5",
"metadata": {},
"source": [
"### Capture Hand"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "1c04daef",
"metadata": {},
"outputs": [
{
"ename": "error",
"evalue": "OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\highgui\\src\\window.cpp:971: error: (-215:Assertion failed) size.width>0 && size.height>0 in function 'cv::imshow'\n",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31merror\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m~\\AppData\\Local\\Temp\\ipykernel_3024\\2889311055.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 29\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 30\u001b[0m \u001b[1;31m# Display the result\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 31\u001b[1;33m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mimshow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"Area Above Elbow\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0marea_above_elbow\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 32\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwaitKey\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 33\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdestroyAllWindows\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31merror\u001b[0m: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\highgui\\src\\window.cpp:971: error: (-215:Assertion failed) size.width>0 && size.height>0 in function 'cv::imshow'\n"
]
}
],
"source": [
"import cv2\n",
"import numpy as np\n",
"import mediapipe as mp\n",
"# Load the image\n",
"image = cv2.imread('C:/Users/HP Pavilion/Downloads/test_sign.jpeg')\n",
"\n",
"image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n",
"\n",
"# Initialize Mediapipe solutions\n",
"mp_holistic = mp.solutions.holistic\n",
"\n",
"# Initialize the holistic model\n",
"holistic = mp_holistic.Holistic()\n",
"\n",
"# Process the image with Mediapipe\n",
"results = holistic.process(image_rgb)\n",
"\n",
"# Extract the left elbow and hand landmarks\n",
"left_elbow_landmark = results.left_hand_landmarks.landmark[mp_holistic.HandLandmark.WRIST]\n",
"left_hand_landmark = results.left_hand_landmarks.landmark[mp_holistic.HandLandmark.PINKY_MCP]\n",
"\n",
"# Convert the landmark coordinates to image pixels\n",
"image_height, image_width, _ = image.shape\n",
"left_elbow_px = int(left_elbow_landmark.x * image_width), int(left_elbow_landmark.y * image_height)\n",
"left_hand_px = int(left_hand_landmark.x * image_width), int(left_hand_landmark.y * image_height)\n",
"\n",
"# Calculate the area above the elbow\n",
"area_above_elbow = image[0:left_elbow_px[1], left_elbow_px[0]:left_hand_px[0]]\n",
"\n",
"# Display the result\n",
"cv2.imshow(\"Area Above Elbow\", area_above_elbow)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3ead61b7",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"# This works for any background, but does not hapture the fist area\n",
"import cv2\n",
"import numpy as np\n",
"\n",
"IMG_SIZE = 224 # image size\n",
"\n",
"# image = cv2.imread('C:/Users/HP Pavilion/Downloads/images (1).jpg')\n",
"\n",
"\n",
"image = cv2.imread('D:/RP/project/2023-029/Project/Backend/ML_Models/sign_language_to_text/test_image.png')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"\n",
"frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n",
"frame = cv2.resize(frame, (IMG_SIZE, IMG_SIZE))\n",
"frame = extract_hand_shape(frame)\n",
"frame = np.array([frame], dtype=np.float32) / 255.0\n",
"\n",
"\n",
"\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
{
"cells": [
{
"cell_type": "code",
"execution_count": 4,
"id": "4e8edf74",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "e11e6436",
"metadata": {},
"outputs": [],
"source": [
"image = cv2.imread('D:/RP/project/2023-029/Project/Backend\\ML_Models/sign_language_to_text/scene00001.png')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"hand_shape = extract_hand_shape(image)\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "157e18d5",
"metadata": {},
"outputs": [],
"source": [
"# Convert the image to grayscale\n",
"gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n",
"\n",
"# Apply Gaussian blur to reduce noise\n",
"blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
"\n",
"# Apply adaptive thresholding to segment the hand from the background\n",
"_, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
"\n",
"# Find contours of the hand\n",
"contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
"\n",
"# Sort contours by area in descending order\n",
"contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
"\n",
"# Extract the largest contour (hand region)\n",
"hand_contour = contours[0]\n",
"\n",
"# Create a black image of the same size as the input image\n",
"hand_shape = np.zeros_like(image)\n",
"\n",
"# Draw the hand contour on the black image\n",
"cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "93d1ae9d",
"metadata": {},
"outputs": [
{
"ename": "error",
"evalue": "OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\highgui\\src\\window_w32.cpp:124: error: (-215:Assertion failed) bmi && width >= 0 && height >= 0 && (bpp == 8 || bpp == 24 || bpp == 32) in function 'FillBitmapInfo'\n",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31merror\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m~\\AppData\\Local\\Temp\\ipykernel_16572\\1170496581.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 25\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 26\u001b[0m \u001b[1;31m# Display the hand shape\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 27\u001b[1;33m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mimshow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'Hand Shape'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mframe\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 28\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwaitKey\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 29\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdestroyAllWindows\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31merror\u001b[0m: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\highgui\\src\\window_w32.cpp:124: error: (-215:Assertion failed) bmi && width >= 0 && height >= 0 && (bpp == 8 || bpp == 24 || bpp == 32) in function 'FillBitmapInfo'\n"
]
}
],
"source": [
"import cv2\n",
"import numpy as np\n",
"\n",
"\n",
"IMG_SIZE = 224\n",
"\n",
"frame = cv2.imread('D:/RP/project/2023-029/Project/Backend/ML_Models/sign_language_to_text/test_image.png')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n",
"frame = cv2.resize(frame, (IMG_SIZE, IMG_SIZE))\n",
"frame = extract_hand_shape(frame)\n",
"frame = np.array([frame], dtype=np.float32) / 255.0\n",
"\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', frame)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
{
"cells": [
{
"cell_type": "code",
"execution_count": 17,
"id": "4e8edf74",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": 55,
"id": "e11e6436",
"metadata": {},
"outputs": [],
"source": [
"# works well with white background buth have some issues eith other backgrounds \n",
"\n",
"# image = cv2.imread('C:/Users/HP Pavilion/Downloads/images (1).jpg')\n",
"image = cv2.imread('D:/RP/project/2023-029/Project/Backend\\ML_Models/sign_language_to_text/scene00001.png')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"hand_shape = extract_hand_shape(image)\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": 57,
"id": "157e18d5",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 19,
"id": "e7e05125",
"metadata": {},
"outputs": [],
"source": [
"# This works for any background, but does not hapture the fist area\n",
"import cv2\n",
"import numpy as np\n",
"# image = cv2.imread('D:/RP/project/2023-029/Project/Backend\\ML_Models/sign_language_to_text/scene00001.png')\n",
"image = cv2.imread('C:/Users/HP Pavilion/Downloads/images (1).jpg')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" thresholded = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"hand_shape = extract_hand_shape(image)\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "markdown",
"id": "ae0a7cb5",
"metadata": {},
"source": [
"### Capture Hand"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "1c04daef",
"metadata": {},
"outputs": [
{
"ename": "error",
"evalue": "OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\highgui\\src\\window.cpp:971: error: (-215:Assertion failed) size.width>0 && size.height>0 in function 'cv::imshow'\n",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31merror\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m~\\AppData\\Local\\Temp\\ipykernel_3024\\2889311055.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 29\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 30\u001b[0m \u001b[1;31m# Display the result\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 31\u001b[1;33m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mimshow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"Area Above Elbow\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0marea_above_elbow\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 32\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwaitKey\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 33\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdestroyAllWindows\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31merror\u001b[0m: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\highgui\\src\\window.cpp:971: error: (-215:Assertion failed) size.width>0 && size.height>0 in function 'cv::imshow'\n"
]
}
],
"source": [
"import cv2\n",
"import numpy as np\n",
"import mediapipe as mp\n",
"# Load the image\n",
"image = cv2.imread('C:/Users/HP Pavilion/Downloads/test_sign.jpeg')\n",
"\n",
"image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n",
"\n",
"# Initialize Mediapipe solutions\n",
"mp_holistic = mp.solutions.holistic\n",
"\n",
"# Initialize the holistic model\n",
"holistic = mp_holistic.Holistic()\n",
"\n",
"# Process the image with Mediapipe\n",
"results = holistic.process(image_rgb)\n",
"\n",
"# Extract the left elbow and hand landmarks\n",
"left_elbow_landmark = results.left_hand_landmarks.landmark[mp_holistic.HandLandmark.WRIST]\n",
"left_hand_landmark = results.left_hand_landmarks.landmark[mp_holistic.HandLandmark.PINKY_MCP]\n",
"\n",
"# Convert the landmark coordinates to image pixels\n",
"image_height, image_width, _ = image.shape\n",
"left_elbow_px = int(left_elbow_landmark.x * image_width), int(left_elbow_landmark.y * image_height)\n",
"left_hand_px = int(left_hand_landmark.x * image_width), int(left_hand_landmark.y * image_height)\n",
"\n",
"# Calculate the area above the elbow\n",
"area_above_elbow = image[0:left_elbow_px[1], left_elbow_px[0]:left_hand_px[0]]\n",
"\n",
"# Display the result\n",
"cv2.imshow(\"Area Above Elbow\", area_above_elbow)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3ead61b7",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"# This works for any background, but does not hapture the fist area\n",
"import cv2\n",
"import numpy as np\n",
"\n",
"IMG_SIZE = 224 # image size\n",
"\n",
"# image = cv2.imread('C:/Users/HP Pavilion/Downloads/images (1).jpg')\n",
"\n",
"\n",
"image = cv2.imread('D:/RP/project/2023-029/Project/Backend/ML_Models/sign_language_to_text/test_image.png')\n",
"\n",
"def extract_hand_shape(image):\n",
" gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n",
" blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n",
" _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
" contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
" contours = sorted(contours, key=cv2.contourArea, reverse=True)\n",
" hand_contour = contours[0]\n",
" hand_shape = np.zeros_like(image)\n",
" cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)\n",
" return hand_shape\n",
"\n",
"\n",
"frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n",
"frame = cv2.resize(frame, (IMG_SIZE, IMG_SIZE))\n",
"frame = extract_hand_shape(frame)\n",
"frame = np.array([frame], dtype=np.float32) / 255.0\n",
"\n",
"\n",
"\n",
"\n",
"# Display the hand shape\n",
"cv2.imshow('Hand Shape', hand_shape)\n",
"cv2.waitKey(0)\n",
"cv2.destroyAllWindows()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
import Curriculum from '../models/curriculum.model.js';
+import Tutorial from '../models/tutorial.model.js';

export const getAllCurriculums = async (req, res) => {
    try {
-        const curriculums = await Curriculum.find();
+        const curriculums = await Curriculum.find().populate("tutorials");
        res.status(200).json(curriculums);
    } catch (error) {
        res.status(500).json({ message: error.message });
    }
-}
+};

export const getCurriculumById = async (req, res) => {
    const { id } = req.params;
    try {
-        const curriculum = await Curriculum.findById(id);
+        const curriculum = await Curriculum.findById(id).populate("tutorials");
+        if (!curriculum) {
+            return res.status(404).json({ message: 'Curriculum not found' });
+        }
        res.status(200).json(curriculum);
    } catch (error) {
-        res.status(404).json({ message: 'Curriculum not found' });
+        res.status(500).json({ message: error.message });
    }
-}
+};

export const createCurriculum = async (req, res) => {
    const curriculumData = req.body;
    try {
        const newCurriculum = new Curriculum(curriculumData);
+        // Calculate total tutorial mark
+        let totalTutorialMark = 0;
+        for (const tutorialId of curriculumData.tutorials) {
+            const tutorial = await Tutorial.findById(tutorialId);
+            totalTutorialMark += tutorial.tutorialMarks;
+        }
+        newCurriculum.curriculumMark = totalTutorialMark;
        await newCurriculum.save();
        res.status(201).json(newCurriculum);
    } catch (error) {
        res.status(400).json({ message: error.message });
    }
}

export const updateCurriculum = async (req, res) => {
    const { id } = req.params;
    const updatedCurriculum = req.body;
    try {
+        const curriculum = await Curriculum.findById(id);
+        // Calculate total tutorial mark
+        let totalTutorialMark = 0;
+        for (const tutorialId of updatedCurriculum.tutorials) {
+            const tutorial = await Tutorial.findById(tutorialId);
+            totalTutorialMark += tutorial.tutorialMarks;
+        }
+        updatedCurriculum.curriculumMark = totalTutorialMark;
        const result = await Curriculum.findByIdAndUpdate(id, updatedCurriculum, { new: true });
        res.status(200).json(result);
    } catch (error) {
......
import { exec } from "child_process";
import multer from "multer"
export const marksCalculator = async (req, res) => {
const imageData = req.file.buffer.toString('base64');
const targetClass = req.body.class;
const { curriculumIndex, tutorialIndex } = req.params;
const status = "";
// console.log(curriculumIndex, tutorialIndex);
try {
if (curriculumIndex == 1 && tutorialIndex == 1) {
// Run Python script to perform prediction
const pythonProcess = exec('python prediction_config/C1T1/predict.py', (error, stdout, stderr) => {
if (error) {
console.error(error);
return res.status(500).json({ error: 'An error occurred' });
}
const [predicted_class_name, confidence] = stdout.trim().split(',');
const parsedConfidence = parseFloat(confidence).toFixed(2);
let status = "";
if (predicted_class_name === targetClass && parsedConfidence > 85) {
status = "pass";
} else {
status = "fail";
}
res.status(200).json({
code: "01",
result: {
predicted_class_name,
confidence: parsedConfidence,
status
}
});
});
pythonProcess.stdin.write(`${imageData}\n${targetClass}`);
pythonProcess.stdin.end();
} else {
return res.status(400).json({ code: "02", message: "Curriculum Index or Tutorial Index Invalid" })
}
} catch (error) {
res.status(500).json({ code: "00", message: "Something went wrong" })
}
}
\ No newline at end of file
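A minimal sketch, not part of the commit, of how a client might exercise the marks-calculator endpoint above. It assumes the Express app from index.js is running locally on its default port 5000 and that a test image named sample_sign.png exists; the multipart field names follow upload.single('image') and req.body.class in the controller, and the class value "Five" is one of the labels in predict.py's class_mapping.

import requests

# Assumed host/port; the route is mounted at /rest_node/marks-calculator in index.js
url = "http://localhost:5000/rest_node/marks-calculator/curriculum/1/tutorial/1"

with open("sample_sign.png", "rb") as f:  # hypothetical test image
    response = requests.post(url, files={"image": f}, data={"class": "Five"})

print(response.status_code)
# Expected shape on success: { "code": "01", "result": { predicted_class_name, confidence, status } }
print(response.json())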
...@@ -21,6 +21,11 @@ export const getTutorialById = async (req, res) => {
export const createTutorial = async (req, res) => {
    const tutorialData = req.body;

+    // Calculate total tutorial marks based on task item marks
+    const totalTaskMarks = tutorialData.taskItems.reduce((total, task) => total + (task.taskItemMark || 0), 0);
+    tutorialData.tutorialMarks = totalTaskMarks;

    try {
        const newTutorial = new Tutorial(tutorialData);
        await newTutorial.save();
...@@ -32,9 +37,14 @@ export const createTutorial = async (req, res) => {
export const updateTutorial = async (req, res) => {
    const { id } = req.params;
-    const updatedTutorial = req.body;
+    const updatedTutorialData = req.body;

+    // Calculate total tutorial marks based on updated task item marks
+    const totalTaskMarks = updatedTutorialData.taskItems.reduce((total, task) => total + (task.taskItemMark || 0), 0);
+    updatedTutorialData.tutorialMarks = totalTaskMarks;

    try {
-        const result = await Tutorial.findByIdAndUpdate(id, updatedTutorial, { new: true });
+        const result = await Tutorial.findByIdAndUpdate(id, updatedTutorialData, { new: true });
        res.status(200).json(result);
    } catch (error) {
        res.status(404).json({ message: 'Tutorial not found' });
......
import UserProgress from "../models/userProgress.model.js";
// Logic to subscribe to a curriculum for a user
export const subscribeCurriculum = async (req, res) => {
const { userId, curriculum } = req.body;
try {
let userProgress = await UserProgress.findOne({ userId });
if (!userProgress) {
// If there is no user progress record for this user, create a new one
userProgress = new UserProgress({
userId,
curriculums: [curriculum], // Add the curriculum to the curriculums array
});
} else {
// If there is an existing user progress record, check if the curriculum already exists
const existingCurriculumIndex = userProgress.curriculums.findIndex(c => c.curriculumCode === curriculum.curriculumCode);
if (existingCurriculumIndex !== -1) {
// If the curriculum exists, update it
userProgress.curriculums[existingCurriculumIndex] = curriculum;
} else {
// If the curriculum doesn't exist, add it to the array
userProgress.curriculums.push(curriculum);
}
}
// Update the totalCurriculumsMarks based on curriculum marks
const totalCurriculumsMarks = userProgress.curriculums.reduce((total, curriculum) => total + curriculum.curriculumMark, 0);
userProgress.totalCurriculumsMarks = totalCurriculumsMarks;
// Save the user progress record
await userProgress.save();
res.status(200).json({ message: 'Curriculum subscription successful' });
} catch (error) {
console.error(error);
res.status(500).json({ error: 'Internal server error' });
}
};
// Get user's curriculum subscriptions and progress
export const getUserProgress = async (req, res) => {
    const userId = req.params.userId;
    try {
-        const userProgress = await UserProgress.findOne({ userId }).populate('curriculumId tutorialProgress.tutorialId');
-        res.status(200).json(userProgress);
+        const userProgress = await UserProgress.findOne({ userId });
+
+        if (!userProgress) {
+            return res.status(404).json({ message: 'User progress not found' });
+        }
+
+        res.status(200).json({ userProgress });
    } catch (error) {
-        res.status(500).json({ message: error.message });
-    }
-}
+        console.error(error);
+        res.status(500).json({ error: 'Internal server error' });
+    }
+};

-export const updateUserProgress = async (req, res) => {
-    const userId = req.params.userId;
-    const { curriculumId, tutorialId, completed, marks } = req.body;
-    try {
-        let userProgress = await UserProgress.findOne({ userId });
-        if (!userProgress) {
-            userProgress = new UserProgress({ userId });
-        }
-        const curriculumProgress = userProgress.curriculumId.find(prog => prog.curriculumId.equals(curriculumId));
-        if (!curriculumProgress) {
-            userProgress.curriculumId.push({ curriculumId });
-        }
-        const tutorialProgress = curriculumProgress.tutorialProgress.find(prog => prog.tutorialId.equals(tutorialId));
-        if (!tutorialProgress) {
-            curriculumProgress.tutorialProgress.push({ tutorialId, completed });
-        } else {
-            tutorialProgress.completed = completed;
-        }
-        userProgress.marks = marks;
-        await userProgress.save();
-        res.status(200).json(userProgress);
-    } catch (error) {
-        res.status(500).json({ message: error.message });
-    }
-}
+// Update task item progress and calculate overall progress
+export const updateTaskItemProgress = async (req, res) => {
+    const { userId, curriculumCode, tutorialCode, taskItemTitle, taskItemMarkUser, taskItemSpentTime } = req.body;
+    try {
+        const userProgress = await UserProgress.findOne({ userId });
+        if (!userProgress) {
+            return res.status(404).json({ message: 'User progress not found' });
+        }
+
+        // Find the curriculum, tutorial, and task item to update
+        const curriculumIndex = userProgress.curriculums.findIndex(c => c.curriculumCode === curriculumCode);
+        if (curriculumIndex === -1) {
+            return res.status(404).json({ message: 'Curriculum not found' });
+        }
+
+        const tutorialIndex = userProgress.curriculums[curriculumIndex].tutorials.findIndex(t => t.tutorialCode === tutorialCode);
+        if (tutorialIndex === -1) {
+            return res.status(404).json({ message: 'Tutorial not found' });
+        }
+
+        const taskItemIndex = userProgress.curriculums[curriculumIndex].tutorials[tutorialIndex].taskItems.findIndex(item => item.title === taskItemTitle);
+        if (taskItemIndex === -1) {
+            return res.status(404).json({ message: 'Task item not found' });
+        }
+
+        // Update task item progress
+        userProgress.curriculums[curriculumIndex].tutorials[tutorialIndex].taskItems[taskItemIndex].taskItemMarkUser = taskItemMarkUser;
+        userProgress.curriculums[curriculumIndex].tutorials[tutorialIndex].taskItems[taskItemIndex].taskItemSpentTime = taskItemSpentTime;
+
+        // Calculate total task marks and spent time for the tutorial
+        const tutorial = userProgress.curriculums[curriculumIndex].tutorials[tutorialIndex];
+        tutorial.tutorialMarkUser = tutorial.taskItems.reduce((total, item) => total + item.taskItemMarkUser, 0);
+        tutorial.tutorialSpentTime = tutorial.taskItems.reduce((total, item) => total + item.taskItemSpentTime, 0);
+
+        // Calculate total tutorial marks and spent time for the curriculum
+        const curriculum = userProgress.curriculums[curriculumIndex];
+        curriculum.curriculumMarkUser = curriculum.tutorials.reduce((total, t) => total + t.tutorialMarkUser, 0);
+        curriculum.curriculumSpentTime = curriculum.tutorials.reduce((total, t) => total + t.tutorialSpentTime, 0);
+
+        // Calculate total curriculum marks and spent time for the user
+        userProgress.totalCurriculumsMarks = userProgress.curriculums.reduce((total, c) => total + c.curriculumMarkUser, 0);
+        userProgress.totalCurriculumSpentTime = userProgress.curriculums.reduce((total, c) => total + c.curriculumSpentTime, 0);

+        // Save the updated user progress
+        await userProgress.save();
+
+        res.status(200).json({ message: 'Task item progress updated successfully' });
+    } catch (error) {
+        console.error(error);
+        res.status(500).json({ error: 'Internal server error' });
+    }
+};
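A minimal sketch, not part of the commit, of a request against the updateTaskItemProgress handler above. It assumes the server runs locally on the default port 5000 with the route PUT /rest_node/user-progress/update-task-item-progress from the routes file; the userId, codes, and task item title below are placeholders, not values from the repository.

import requests

payload = {
    "userId": "64a0f0c2e4b0a1b2c3d4e5f6",        # hypothetical Mongo ObjectId
    "curriculumCode": "01",
    "tutorialCode": "01",
    "taskItemTitle": "Sign the number Five",      # hypothetical task item title
    "taskItemMarkUser": 8,
    "taskItemSpentTime": 120,                     # seconds spent, per the schema's numeric field
}

resp = requests.put(
    "http://localhost:5000/rest_node/user-progress/update-task-item-progress",
    json=payload,
)
print(resp.status_code, resp.json())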
...@@ -84,24 +84,18 @@ const curriculums = [
        },
        {
            "tutorialCode": "02",
-           "tutorialTitle": "Learn the Basics of Sign Language",
+           "tutorialTitle": "Everyday Vocabulary in Sign Language",
            "tutorialImage": "https://drive.google.com/uc?export=view&id=1YACBlu7X-O7-DKv5DoW3AM9kgfT7Yhdc",
-           "tutorialContent": "Introduce the concept of sign language and its importance.\nTeach basic greetings and expressions, such as hello, goodbye, thank you, and sorry.\nProvide visual demonstrations and practice exercises for learners to practice these basic signs."
+           "tutorialContent": "Teach signs for everyday objects and activities, such as eat, drink, sleep, book, pen, etc.\nIntroduce signs for common words used in daily life.\nProvide visual demonstrations and interactive exercises for learners to practice using these signs."
        },
        {
            "tutorialCode": "03",
            "tutorialTitle": "Family Signs in Sign Language",
            "tutorialImage": "https://drive.google.com/uc?export=view&id=1YACBlu7X-O7-DKv5DoW3AM9kgfT7Yhdc",
            "tutorialContent": "Teach signs for family members, such as mother, father, sister, brother, etc.\nIntroduce signs for common family-related words, such as family, love, and home.\nProvide visual demonstrations and practice exercises for learners to practice these family signs."
        },
        {
            "tutorialCode": "04",
-           "tutorialTitle": "Everyday Vocabulary in Sign Language",
-           "tutorialImage": "https://drive.google.com/uc?export=view&id=1YACBlu7X-O7-DKv5DoW3AM9kgfT7Yhdc",
-           "tutorialContent": "Teach signs for everyday objects and activities, such as eat, drink, sleep, book, pen, etc.\nIntroduce signs for common words used in daily life.\nProvide visual demonstrations and interactive exercises for learners to practice using these signs."
-       },
-       {
-           "tutorialCode": "05",
            "tutorialTitle": "Basic Conversational Phrases in Sign Language",
            "tutorialImage": "https://drive.google.com/uc?export=view&id=1YACBlu7X-O7-DKv5DoW3AM9kgfT7Yhdc",
            "tutorialContent": "Teach simple conversational phrases, such as \"What is your name?\" or \"How are you?\"\nIntroduce signs for common question words and phrases.\nProvide visual demonstrations and practice exercises for learners to practice these conversational phrases."
......
import mongoose from "mongoose";

-const tutorialSchema = new mongoose.Schema({
-    tutorialCode: String,
-    tutorialTitle: String,
-    tutorialImage: String,
-    // Additional fields for tutorial content
-});
-
-const curriculumSchema = new mongoose.Schema({
-    curriculumCode: String,
-    curriculumLevel: String,
-    curriculumName: String,
-    curriculumImage: String,
-    tutorials: [tutorialSchema], // Embed tutorials as subdocuments
-    // Additional fields for curriculum details
-    status: {
-        type: Number,
-        default: 1, // Default status as active (1)
-    },
+const commonFields = {
    createdBy: String,
    updatedBy: String,
    createdAt: {
...@@ -28,6 +11,31 @@ const curriculumSchema = new mongoose.Schema({
        type: Date,
        default: new Date(),
    },
+};
+
+const curriculumSchema = new mongoose.Schema({
+    curriculumCode: {
+        type: String,
+        unique: true, // Ensures unique values for curriculumCode
+    },
+    curriculumLevel: Number,
+    curriculumTitle: String,
+    curriculumDescription: String,
+    curriculumImage: String,
+    curriculumMark: {
+        type: Number,
+        default: 0
+    },
+    tutorials: [{
+        type: mongoose.Schema.Types.ObjectId,
+        ref: "Tutorial",
+    }],
+    // Additional fields for curriculum details
+    status: {
+        type: Number,
+        default: 1, // Default status as active (1)
+    },
+    ...commonFields
});

const Curriculum = mongoose.model("Curriculum", curriculumSchema);
......
import mongoose from "mongoose";

+const commonFields = {
+    createdBy: String,
+    updatedBy: String,
+    createdAt: {
+        type: Date,
+        default: new Date(),
+    },
+    updatedAt: {
+        type: Date,
+        default: new Date(),
+    },
+};
+
const taskItemSchema = new mongoose.Schema({
    title: String,
    description: String,
    howToDo: String,
    referenceImage: String,
    referenceVideo: String,
+    taskItemMark : {
+        type: Number,
+        default: 0
+    }
    // Additional fields for task items
});

const tutorialSchema = new mongoose.Schema({
-    tutorialCode: String,
+    tutorialCode: {
+        type: String,
+        unique: true, // Ensures unique values for tutorialCode
+    },
    tutorialTitle: String,
+    tutorialDescription: String,
    tutorialImage: String,
+    tutorialMarks: {
+        type: Number,
+        default: 0
+    },
    taskItems: [taskItemSchema], // Embed task items as subdocuments
    // Additional fields for tutorial details
    status: {
        type: Number,
        default: 1, // Default status as active (1)
    },
-    createdBy: String,
-    updatedBy: String,
-    createdAt: {
-        type: Date,
-        default: new Date(),
-    },
-    updatedAt: {
-        type: Date,
-        default: new Date(),
-    },
+    ...commonFields
});

const Tutorial = mongoose.model("Tutorial", tutorialSchema);
......
import mongoose from "mongoose";
const taskItemSchema = new mongoose.Schema({
title: {
type: String,
required: true,
},
description: {
type: String,
required: true,
},
howToDo: [String],
referenceImage: String,
referenceVideo: String,
taskItemMark: {
type: Number,
default: 0
},
taskItemMarkUser: {
type: Number,
default: 0
},
taskItemSpentTime: {
type: Number,
default: 0
},
}, { _id: false });
const tutorialTypeUserProgressSchema = new mongoose.Schema({
tutorialCode: String,
tutorialTitle: String,
tutorialDescription: String,
tutorialImage: String,
tutorialMark: {
type: Number,
default: 0
},
tutorialMarkUser: {
type: Number,
default: 0
},
tutorialSpentTime: {
type: Number,
default: 0
},
taskItems: [taskItemSchema],
}, { _id: false });
const curriculumTypeUserProgressSchema = new mongoose.Schema({
curriculumCode: String,
curriculumLevel: Number,
curriculumTitle: String,
curriculumDescription: String,
curriculumImage: String,
curriculumMark: {
type: Number,
default: 0
},
curriculumMarkUser: {
type: Number,
default: 0
},
curriculumSpentTime: {
type: Number,
default: 0
},
tutorials: [tutorialTypeUserProgressSchema],
}, { _id: false });
const userProgressSchema = new mongoose.Schema({
    userId: {
        type: mongoose.Schema.Types.ObjectId,
        required: true,
-        ref: 'User' // Reference to the User model
-    },
-    curriculumId: {
-        type: mongoose.Schema.Types.ObjectId,
-        required: true,
-        ref: 'Curriculum' // Reference to the Curriculum model
-    },
-    tutorialProgress: [
-        {
-            tutorialId: {
-                type: mongoose.Schema.Types.ObjectId,
-                ref: 'Tutorial' // Reference to the Tutorial model
-            },
-            completed: {
-                type: Boolean,
-                default: false
-            }
-        }
-    ],
-    marks: {
-        type: Number,
-        default: 0
-    }
+        ref: 'User'
+    },
+    curriculums: [curriculumTypeUserProgressSchema],
+    totalCurriculumsMarks: {
+        type: Number,
+        default: 0
+    },
+    totalCurriculumSpentTime: {
+        type: Number,
+        default: 0
+    },
+    status: {
+        type: Number,
+        default: 1,
+    },
+    createdBy: String,
+    createdAt: {
+        type: Date,
+        default: new Date(),
+    },
+    updatedBy: String,
+    updatedAt: {
+        type: Date,
+        default: new Date(),
+    },
});

const UserProgress = mongoose.model("UserProgress", userProgressSchema);
......
...@@ -21,6 +21,8 @@
    "multer": "^1.4.5-lts.1",
    "nodemailer": "^6.9.1",
    "nodemon": "^2.0.22",
    "react-mic": "^12.4.6",
    "torch": "^0.2.7",
    "uuid": "^9.0.0"
  }
}
import sys
import io
import base64
import torch
import torchvision.transforms as transforms
from PIL import Image
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class theCNN(nn.Module):
def __init__(self):
super().__init__()
self.conv01 = nn.Conv2d(
in_channels=1,
out_channels=10,
kernel_size=5,
stride=1,
padding=1
)
self.dropout1 = nn.Dropout(0.2) # Add dropout layer
self.conv02 = nn.Conv2d(
in_channels=10,
out_channels=20,
kernel_size=5,
stride=1,
padding=1
)
self.dropout2 = nn.Dropout(0.2) # Add dropout layer
expectedSize = np.floor((73+2*0-1)/1) + 1
expectedSize = 20*int(expectedSize**2)
self.fc01 = nn.Linear(expectedSize, 50)
self.output = nn.Linear(50, 16)
def forward(self, x):
# convo -> maxpool -> relu
x = F.relu(F.max_pool2d(self.conv01(x), 2))
# convo -> maxpool -> relu
x = F.relu(F.max_pool2d(self.conv02(x), 2))
nUnits = x.shape.numel()/x.shape[0]
x = x.view(-1, int(nUnits))
x = F.relu(self.fc01(x))
return torch.softmax(self.output(x), axis=1)
# Load the mapping dictionary
class_mapping = {
0: 'Eight',
1: 'Eleven',
2: 'Fifty',
3: 'Five',
4: 'Four',
5: 'Fourteen',
6: 'Nine',
7: 'One',
8: 'Seven',
9: 'Six',
10: 'Ten',
11: 'Thirteen',
12: 'Thirty',
13: 'Three',
14: 'Twenty',
15: 'Two'
}
# Load the trained model
model = theCNN() # Instantiate your model class
model.load_state_dict(torch.load(
'D:/SLIIT_Y4_Research_Module/Research/Project/2023-029/Project/Backend/Server_Node/prediction_config/C1T1/trained_model_modified.pth', map_location=torch.device('cpu')))
model.eval()
# Read image data and target class from standard input
input_data = sys.stdin.readlines()
image_data = input_data[0].strip().encode() # Convert to bytes
target_class = input_data[1].strip()
# Preprocess the image
# image_data = base64.b64decode(sys.argv[1])
# image = Image.open(io.BytesIO(image_data))
image = Image.open(io.BytesIO(base64.b64decode(image_data)))
transform = transforms.Compose([
transforms.Resize((300, 300)),
transforms.Grayscale(num_output_channels=1),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5], std=[0.5]),
])
image_tensor = transform(image).unsqueeze(0)
# Predict the class
with torch.no_grad():
outputs = model(image_tensor)
predicted_class = torch.argmax(outputs[0]).item()
confidence = outputs[0][predicted_class].item()
# Get the predicted class name using the mapping dictionary
predicted_class_name = class_mapping[predicted_class]
# Calculate similarity or confidence percentage
# target_class = sys.argv[2].lower() # Class name passed from the API
similarity = 100 * confidence
print(f"{predicted_class_name},{similarity:.2f}")
import express from "express";
import multer from "multer";
import { marksCalculator } from "../controllers/marksCalculator.controller.js";
// Set up storage for uploaded images
const storage = multer.memoryStorage();
const upload = multer({ storage: storage });
const router = express.Router();
router.post('/curriculum/:curriculumIndex/tutorial/:tutorialIndex', upload.single('image'), marksCalculator)
export default router;
\ No newline at end of file
import express from "express";
-import { getUserProgress, updateUserProgress } from "../controllers/userProgress.controller.js";
+import { getUserProgress, subscribeCurriculum, updateTaskItemProgress } from "../controllers/userProgress.controller.js";

const router = express.Router();

+router.post('/subscribe-curriculum', subscribeCurriculum);
router.get('/:userId', getUserProgress);
-router.post('/:userId', updateUserProgress);
+router.put('/update-task-item-progress', updateTaskItemProgress);

export default router;
...@@ -4,10 +4,17 @@ import dotenv from "dotenv";
import express from "express";
import mongoose from "mongoose";
import multer from "multer";

// Set up storage for uploaded images
const storage = multer.memoryStorage();
const upload = multer({ storage: storage });

//import routes
import curriculumRoutes from "./routes/curriculum.routes.js";
import feedbackRoutes from "./routes/feedback.routes.js";
import leaderboardRoutes from "./routes/leaderboard.routes.js";
import marksCalculatorRoutes from "./routes/marksCalculator.routes.js";
import translateRoutes from "./routes/translate.routes.js";
import tutorialRoutes from "./routes/tutorial.routes.js";
import userRoutes from "./routes/user.routes.js";
...@@ -33,6 +40,7 @@ app.use("/rest_node/tutorial", tutorialRoutes);
app.use("/rest_node/user-progress", userProgressRoutes);
app.use("/rest_node/feedback", feedbackRoutes);
app.use("/rest_node/leaderboard", leaderboardRoutes);
app.use("/rest_node/marks-calculator", marksCalculatorRoutes);

const CONNECTION_URL = `mongodb+srv://${process.env.DB_USERNAME}:${process.env.DB_PASSWORD}@researchmanagement-appl.vzhn4.mongodb.net/?retryWrites=true&w=majority`;
const PORT = process.env.PORT || 5000;
......
...@@ -16,6 +16,9 @@
files/*
!files/

*.pyc
*~
*.swp

# Created by https://www.toptal.com/developers/gitignore/api/python
# Edit at https://www.toptal.com/developers/gitignore?templates=python
......
import base64 from fastapi import APIRouter, File, HTTPException, Query,UploadFile
import os
import cv2
from fastapi import APIRouter, File, HTTPException, UploadFile
import numpy as np
from pydantic import BaseModel from pydantic import BaseModel
import tensorflow as tf import tensorflow as tf
from core.logger import setup_logger
from core import setup_logger
from services.translate_service import SignLanguagePredictionService from services.translate_service import SignLanguagePredictionService
from utils import mappings from utils import mappings
...@@ -21,16 +18,17 @@ class ImageRequest(BaseModel): ...@@ -21,16 +18,17 @@ class ImageRequest(BaseModel):
# Load your Keras model # Load your Keras model
# model = tf.keras.models.load_model('../ML_Models/sign_language_to_text/models/sign_language_model.h5') # model = tf.keras.models.load_model('../ML_Models/sign_language_to_text/models/sign_language_model.h5')
model = None model= None
CLASSES = mappings.classes CLASSES = mappings.classes
NUM_CLASSES = len(mappings.classes) # number of classes NUM_CLASSES = len(mappings.classes) # number of classes
IMG_SIZE = 224 # image size IMG_SIZE = 224 # image size
speed_levels = mappings.speed_levels
# Instantiate the service class # Instantiate the service class
prediction_service = SignLanguagePredictionService(model, CLASSES, mappings) prediction_service = SignLanguagePredictionService(model, CLASSES, mappings,speed_levels)
@router.post("/upload/video") @router.post("/upload/video", tags=["Sign Language"])
async def upload_video(video: UploadFile = File(...)): async def upload_video(video: UploadFile = File(...)):
try: try:
file_location = f"files/{video.filename}" file_location = f"files/{video.filename}"
...@@ -40,22 +38,36 @@ async def upload_video(video: UploadFile = File(...)): ...@@ -40,22 +38,36 @@ async def upload_video(video: UploadFile = File(...)):
return {"text": "ආආආආආආආ"} return {"text": "ආආආආආආආ"}
except Exception as e: except Exception as e:
logger.info(f"Failed to upload file. {e}") logger.info(f"Failed to upload file. {e}")
raise HTTPException(status_code=500, detail="Failed to upload the video") raise HTTPException(
status_code=500,
detail="Failed to upload the video"
@router.post("/predict-sign-language/image") )
@router.post('/predict-sign-language/image', tags=["Sign Language"])
def predict_using_image(image_request: UploadFile = File(...)): def predict_using_image(image_request: UploadFile = File(...)):
try: try:
return prediction_service.predict_sign_language(image_request) return prediction_service.predict_sign_language(image_request)
except Exception as e: except Exception as e:
logger.info(f"Error. {e}") logger.info(f"Error. {e}")
raise HTTPException(status_code=500, detail="Request Failed.") raise HTTPException(
status_code=500,
detail="Request Failed."
@router.post("/predict-sign-language/video") )
@router.post('/predict-sign-language/video', tags=["Sign Language"])
def predict_using_video(video_request: UploadFile = File(...)): def predict_using_video(video_request: UploadFile = File(...)):
try: try:
return prediction_service.predict_sign_language_video(video_request) return prediction_service.predict_sign_language_video_new(video_request)
except Exception as e:
logger.info(f"Error. {e}")
raise HTTPException(
status_code=500,
detail="Request Failed."
)
@router.post('/predict-sign-language/video/speed_levels', tags=["Sign Language"])
def predict_using_video(video_request: UploadFile = File(...), speed: int = Query(...)):
try:
return prediction_service.predict_sign_language_video_with_speed_levels(video_request, speed=speed)
except Exception as e: except Exception as e:
logger.info(f"Error. {e}") logger.info(f"Error. {e}")
raise HTTPException(status_code=500, detail="Request Failed.") raise HTTPException(status_code=500, detail="Request Failed.")
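For reference, a client could exercise the new speed-level endpoint roughly as sketched below. This is only an illustrative sketch: the host and port (`http://localhost:8000`), the sample file name, and the absence of a router prefix are assumptions, not something the diff specifies; only the path `/predict-sign-language/video/speed_levels`, the multipart field name `video_request`, and the `speed` query parameter come from the endpoint definition above.

```python
import requests

# Hypothetical local server; adjust host, port, and file path to your setup.
url = "http://localhost:8000/predict-sign-language/video/speed_levels"

with open("sample_sign.mp4", "rb") as f:
    # 'speed' selects how many frames make up one sign (see speed_levels in utils/mappings.py)
    response = requests.post(url, params={"speed": 3}, files={"video_request": f})

print(response.status_code)
print(response.json())  # e.g. {'frame_count': ..., 'predictions': [...]}
```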
...@@ -7,12 +7,12 @@ def test(): ...@@ -7,12 +7,12 @@ def test():
# Your code here # Your code here
return {"pong"} return {"pong"}
@router.get("/users") @router.get("/test")
def get_users(): def get_users():
# Your code here # Your code here
return {"message": "Get users endpoint"} return {"message": "Get users endpoint"}
@router.post("/users") @router.post("/test-api")
def create_user(): def create_user():
# Your code here # Your code here
return {"message": "Create user endpoint"} return {"message": "Create user endpoint"}
import logging

def setup_logger():
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    # Create a file handler for logging to a file
    file_handler = logging.FileHandler('app.log')
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(formatter)
    # Create a stream handler for logging to console
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.INFO)
    stream_handler.setFormatter(formatter)
    # Add the handlers to the logger
    logger.addHandler(file_handler)
    logger.addHandler(stream_handler)
    return logger
import logging

def setup_logger():
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    # Create a file handler for logging to a file
    file_handler = logging.FileHandler('app.log')
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(formatter)
    # Create a stream handler for logging to console
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.INFO)
    stream_handler.setFormatter(formatter)
    # Add the handlers to the logger
    logger.addHandler(file_handler)
    logger.addHandler(stream_handler)
    return logger
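A minimal usage sketch for the logger above. The same `setup_logger` body appears twice in the diff, presumably in two modules, since the backend imports it both as `from core import setup_logger` and `from core.logger import setup_logger`. One caveat worth noting: `logging.getLogger(__name__)` returns the same logger object on repeated calls within a module, so calling `setup_logger()` more than once attaches duplicate handlers and each message is then emitted multiple times.

```python
from core.logger import setup_logger

logger = setup_logger()
logger.info("Server started")   # goes to the console (INFO) and app.log (DEBUG)
logger.debug("Verbose detail")  # goes to app.log only

# Calling setup_logger() again would add a second pair of handlers to the
# same underlying logger, so every message would start appearing twice.
```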
from controllers import (translate_controler, users_controller, # from core import setup_logger
video_to_sign_language_controller)
from core import setup_logger
from fastapi import FastAPI from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware from fastapi.middleware.cors import CORSMiddleware
from pymongo.mongo_client import MongoClient from pymongo.mongo_client import MongoClient
from controllers import (translate_controler, users_controller, video_to_sign_language_controller)
# from controllers import translate_controler, users_controller
# from core.logger import setup_logger
app = FastAPI() app = FastAPI()
...@@ -31,7 +33,7 @@ async def shutdown_db_client(): ...@@ -31,7 +33,7 @@ async def shutdown_db_client():
app.mongodb_client.close() app.mongodb_client.close()
logger = setup_logger() # logger = setup_logger()
app.include_router(users_controller.router) app.include_router(users_controller.router)
app.include_router(translate_controler.router) app.include_router(translate_controler.router)
...@@ -41,8 +43,9 @@ app.include_router(video_to_sign_language_controller.router) ...@@ -41,8 +43,9 @@ app.include_router(video_to_sign_language_controller.router)
origins = [ origins = [
"http://localhost", "http://localhost",
"http://localhost:8080", "http://localhost:8080",
"http://127.0.0.1:8000", "http://127.0.0.1:8000",
"http://localhost:3000" "http://localhost:8004",
"http://localhost:3000",
] ]
app.add_middleware( app.add_middleware(
......
...@@ -3,22 +3,21 @@ import cv2 ...@@ -3,22 +3,21 @@ import cv2
import numpy as np import numpy as np
from fastapi import HTTPException, UploadFile from fastapi import HTTPException, UploadFile
from typing import Dict from typing import Counter, Dict
import tensorflow as tf from core.logger import setup_logger
from core import setup_logger
from utils import mappings
logger = setup_logger() logger = setup_logger()
IMG_SIZE = 224 # image size IMG_SIZE = 224 # image size
class SignLanguagePredictionService: class SignLanguagePredictionService:
def __init__(self, model, classes, mappings): def __init__(self, model, classes, mappings,speed_levels):
self.model = model self.model = model
self.classes = classes self.classes = classes
self.mappings = mappings self.mappings = mappings
self.speed_levels = speed_levels
def predict_sign_language(self, image_request: UploadFile) -> Dict[str, str]: def predict_sign_language(self, image_request: UploadFile) -> Dict[str, str]:
try: try:
...@@ -62,7 +61,7 @@ class SignLanguagePredictionService: ...@@ -62,7 +61,7 @@ class SignLanguagePredictionService:
frame_count = 0 frame_count = 0
# Loop through the frames of the video # Loop through the frames of the video
while frame_count < 20: while frame_count < 50:
success, frame = video.read() success, frame = video.read()
if not success: if not success:
break break
...@@ -95,3 +94,150 @@ class SignLanguagePredictionService: ...@@ -95,3 +94,150 @@ class SignLanguagePredictionService:
status_code=500, status_code=500,
detail="Failed to make predictions" detail="Failed to make predictions"
) )
    def predict_sign_language_video_new(self, video_request: UploadFile) -> Dict[str, str]:
        try:
            # Create a temporary file to save the video
            video_location = f"files/{video_request.filename}"
            with open(video_location, "wb") as file:
                file.write(video_request.file.read())
            # Read the video using OpenCV
            video = cv2.VideoCapture(video_location)
            predictions = []
            frame_count = 0
            # Loop through the frames of the video
            while frame_count < 50:
                success, frame = video.read()
                if not success:
                    break
                # Preprocess the frame
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                frame = cv2.resize(frame, (IMG_SIZE, IMG_SIZE))
                frame = extract_hand_shape(frame)
                frame = np.array([frame], dtype=np.float32) / 255.0
                # Make prediction
                prediction = self.model.predict(frame)
                class_index = np.argmax(prediction)
                class_name = self.classes[class_index]
                sinhala_letter = self.mappings.letter_mapping.get(class_name, 'Unknown')
                # Store the prediction for the frame
                predictions.append(sinhala_letter)
                frame_count += 1
            video.release()
            # Delete the video file
            os.remove(video_location)
            threshold_percentage = 60
            predictions = get_predicted_percentage(predictions, threshold_percentage)
            return {'frame_count': frame_count, 'predictions': predictions}
        except Exception as e:
            logger.info(f"Failed to make predictions. {e}")
            raise HTTPException(
                status_code=500,
                detail="Failed to make predictions"
            )
    def predict_sign_language_video_with_speed_levels(self, video_request: UploadFile, speed: int) -> Dict[str, str]:
        try:
            # Create a temporary file to save the video
            video_location = f"files/{video_request.filename}"
            with open(video_location, "wb") as file:
                file.write(video_request.file.read())
            # Read the video using OpenCV
            video = cv2.VideoCapture(video_location)
            predictions = []
            final_predictions = []
            frame_count = 0
            # Determine the number of frames per sign based on the speed level
            frames_per_sign = self.speed_levels.get(speed, 50)  # Default to level 1 if speed level is not provided
            # Loop through the frames of the video
            while frame_count <= video.get(cv2.CAP_PROP_FRAME_COUNT):
                success, frame = video.read()
                if not success:
                    break
                # TODO Add to Config
                if frame_count >= 500:
                    break
                # Preprocess the frame
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                frame = cv2.resize(frame, (IMG_SIZE, IMG_SIZE))
                frame = extract_hand_shape(frame)
                frame = np.array([frame], dtype=np.float32) / 255.0
                # Make prediction
                prediction = self.model.predict(frame)
                class_index = np.argmax(prediction)
                class_name = self.classes[class_index]
                sinhala_letter = self.mappings.letter_mapping.get(class_name, 'Unknown')
                # Store the prediction for the frame
                predictions.append(sinhala_letter)
                frame_count += 1
                # TODO Add to Config
                threshold_percentage = 60
                # Check if the required number of frames per sign has been reached
                if frame_count % frames_per_sign == 0:
                    predictions = get_predicted_percentage(predictions, threshold_percentage)
                    final_predictions = final_predictions + predictions
                    predictions = []
            video.release()
            # Delete the video file
            os.remove(video_location)
            return {'frame_count': frame_count, 'predictions': final_predictions}
        except Exception as e:
            logger.info(f"Failed to make predictions. {e}")
            raise HTTPException(
                status_code=500,
                detail="Failed to make predictions"
            )
def extract_hand_shape(image):
    # Grayscale + Gaussian blur, then Otsu's threshold to separate the hand from the background
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # Take the largest external contour as the hand (raises IndexError if no contour is found)
    contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)
    hand_contour = contours[0]
    # Draw the filled hand silhouette on a black canvas the same size as the input
    hand_shape = np.zeros_like(image)
    cv2.drawContours(hand_shape, [hand_contour], 0, (255, 255, 255), thickness=cv2.FILLED)
    return hand_shape
def get_predicted_percentage(array, threshold):
    # Return the letters that appear in more than `threshold` percent of the frame-level predictions
    counts = Counter(array)
    total_elements = len(array)
    percentages = {}
    for element, count in counts.items():
        percentage = (count / total_elements) * 100
        percentages[element] = percentage
    elements_above_threshold = [element for element, percentage in percentages.items() if percentage > threshold]
    return elements_above_threshold
\ No newline at end of file
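A quick illustrative run of `get_predicted_percentage` with made-up per-frame predictions (the letters and frame counts below are arbitrary examples, not project data). With a 60% threshold, only a letter predicted in more than 60% of the frames in a chunk survives, so at most one letter is returned per chunk.

```python
from collections import Counter

# Restated here only so the example is self-contained; matches the function above.
def get_predicted_percentage(array, threshold):
    counts = Counter(array)
    total = len(array)
    percentages = {element: (count / total) * 100 for element, count in counts.items()}
    return [element for element, pct in percentages.items() if pct > threshold]

# 10 hypothetical frame-level predictions for one chunk
frames = ['A', 'A', 'A', 'A', 'A', 'A', 'A', 'Uh', 'Uh', 'T']
print(get_predicted_percentage(frames, 60))  # ['A']  -> 7/10 = 70% > 60%
print(get_predicted_percentage(frames, 80))  # []     -> nothing exceeds 80%
```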
...@@ -27,4 +27,13 @@ classes =['A', ...@@ -27,4 +27,13 @@ classes =['A',
'Ohh', 'Ohh',
'T', 'T',
'Uh', 'Uh',
'Uhh'] 'Uhh']
\ No newline at end of file
speed_levels = {
    1: 50,  # 50 frames per sign for level 1 (slowest)
    2: 40,  # 40 frames per sign for level 2
    3: 30,  # 30 frames per sign for level 3
    4: 20,  # 20 frames per sign for level 4
    5: 10   # 10 frames per sign for level 5 (fastest)
}
\ No newline at end of file
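To make the mapping concrete, here is a small, hypothetical walk-through of how a video would be chunked at a given speed level. The clip length and speed value are made-up numbers chosen only for illustration; the chunking itself mirrors the `frame_count % frames_per_sign` check in `predict_sign_language_video_with_speed_levels`.

```python
speed_levels = {1: 50, 2: 40, 3: 30, 4: 20, 5: 10}

def frames_per_sign_for(speed: int) -> int:
    # Unknown speed levels fall back to 50 frames per sign, as in the service
    return speed_levels.get(speed, 50)

# Example: a 90-frame clip at speed level 3 (30 frames per sign)
# is evaluated as three 30-frame chunks, each voted on separately.
total_frames = 90
chunk = frames_per_sign_for(3)
print([(start, start + chunk) for start in range(0, total_frames, chunk)])
# [(0, 30), (30, 60), (60, 90)]
```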
...@@ -7,12 +7,12 @@ Welcome to the FitsPro ERP Platform repository! This project aims to revolutioni ...@@ -7,12 +7,12 @@ Welcome to the FitsPro ERP Platform repository! This project aims to revolutioni
#### update packages #### update packages
``` ```
source-folder > yarn source-folder > yarn
``` ```
#### Start the project #### Start the project
``` ```
source-folder > yarn start source-folder > yarn start
``` ```
......
...@@ -22,7 +22,9 @@ ...@@ -22,7 +22,9 @@
"@fullcalendar/timegrid": "^6.1.5", "@fullcalendar/timegrid": "^6.1.5",
"@fullcalendar/timeline": "^6.1.5", "@fullcalendar/timeline": "^6.1.5",
"@hello-pangea/dnd": "^16.2.0", "@hello-pangea/dnd": "^16.2.0",
"@material-ui/core": "^4.12.4",
"@mui/base": "^5.0.0-alpha.126", "@mui/base": "^5.0.0-alpha.126",
"@mui/icons-material": "^5.14.6",
"@mui/lab": "^5.0.0-alpha.127", "@mui/lab": "^5.0.0-alpha.127",
"@mui/material": "^5.12.1", "@mui/material": "^5.12.1",
"@mui/system": "^5.12.1", "@mui/system": "^5.12.1",
...@@ -77,6 +79,7 @@ ...@@ -77,6 +79,7 @@
"react-infinite-scroll-component": "^6.1.0", "react-infinite-scroll-component": "^6.1.0",
"react-intersection-observer": "^9.4.3", "react-intersection-observer": "^9.4.3",
"react-intl": "^6.4.0", "react-intl": "^6.4.0",
"react-material-file-upload": "^0.0.4",
"react-number-format": "^5.1.4", "react-number-format": "^5.1.4",
"react-organizational-chart": "^2.2.1", "react-organizational-chart": "^2.2.1",
"react-quill": "^2.0.0", "react-quill": "^2.0.0",
...@@ -93,6 +96,7 @@ ...@@ -93,6 +96,7 @@
"react-table-sticky": "^1.1.3", "react-table-sticky": "^1.1.3",
"react-timer-hook": "^3.0.5", "react-timer-hook": "^3.0.5",
"react-to-print": "^2.14.12", "react-to-print": "^2.14.12",
"react-webcam": "^7.1.1",
"react-window": "^1.8.9", "react-window": "^1.8.9",
"react-zoom-pan-pinch": "^3.0.7", "react-zoom-pan-pinch": "^3.0.7",
"react18-input-otp": "^1.1.3", "react18-input-otp": "^1.1.3",
......
export interface CurriculumLevelsType {
id: number
code: string
description: string
}
// ==============================|| DATA - CURRICULUM LEVELS ||============================== //
const curriculumLevels: readonly CurriculumLevelsType[] = [
{ id: 1, code: "Level 1", description: "Preliminary" },
{ id: 2, code: "Level 2", description: "Intermediate" },
{ id: 3, code: "Level 3", description: "Advance" },
];
export default curriculumLevels;
export interface StatusType {
id: number
code: string
description: string
}
// ==============================|| DATA - Status ||============================== //
const status: readonly StatusType[] = [
{ id: 1, code: "Active", description: "Active" },
{ id: 2, code: "New", description: "New" },
{ id: 3, code: "Pending", description: "Pending" },
{ id: 4, code: "Hold", description: "Hold" },
{ id: 5, code: "Rejected", description: "Rejected" },
];
export default status;
import { taskItemType } from "types/taskItem";
export const taskItems: taskItemType[] = [
{
"_id": "1",
"title": "Learn Number One",
"description": "In this lesson, you will learn how to sign the number one. Understanding this basic sign is crucial for counting and expressing singular items or concepts in sign language. Practice the hand shape, movement, and facial expression associated with the sign to improve your fluency.",
"howToDo": [
"- Extend your index finger straight up.",
"- Keep the rest of your fingers closed.",
"- Hold your hand in front of your chest."
],
"referenceImage": "https://drive.google.com/uc?export=view&id=17sHGfW9zip8xAwbRtUihzxkseKq-Qn7q",
"referenceVideo": "https://example.com/number_seven_video.mp4",
"taskItemMark": 10
},
{
"_id": "2",
"title": "Learn Number Two",
"description": "In this lesson, you will learn how to sign the number two. Mastering this sign is essential for counting and expressing pairs or dual concepts in sign language. Pay attention to the hand shape, movement, and facial expression involved in signing the number two to enhance your signing skills.",
"howToDo": [
"- Extend your index and middle fingers straight up.",
"- Keep the rest of your fingers closed.",
"- Hold your hand in front of your chest."],
"referenceImage": "https://drive.google.com/uc?export=view&id=1Nm-we15Rrr04ovRC1sr-ZVMNHbnALRm2",
"referenceVideo": "https://example.com/number_seven_video.mp4",
"taskItemMark": 10
},
{
"_id": "3",
"title": "Learn Number Three",
"description": "In this lesson, you will learn how to sign the number three in sign language. Mastering this sign is essential for expressing the quantity or position of three items. Pay attention to the hand shape, movement, and facial expression involved in signing the number three to enhance your signing skills.",
"howToDo": [
"- Extend your index, middle, and ring fingers straight up.",
"- Keep the rest of your fingers closed.",
"- Hold your hand in front of your chest."
],
"referenceImage": "https://drive.google.com/uc?export=view&id=1mKY8D1utbqm8flnNM7DZRKtuPQDXqFSm",
"referenceVideo": "https://example.com/number_seven_video.mp4",
"taskItemMark": 10
},
{
"_id": "4",
"title": "Learn Number Four",
"description": "In this lesson, you will learn how to sign the number four in sign language. This sign is commonly used for counting or indicating the quantity or position of four items. Focus on the hand shape, movement, and expression to accurately convey the number four in sign language.",
"howToDo": [
"- Extend your thumb, index, middle, and ring fingers straight up.",
"- Keep your pinky finger folded.",
"- Hold your hand in front of your chest."
],
"referenceImage": "https://drive.google.com/uc?export=view&id=1VtbHehsxbOC04fVR6_Ca6HDv6csH17SJ",
"referenceVideo": "https://example.com/number_seven_video.mp4",
"taskItemMark": 10
},
{
"_id": "5",
"title": "Learn Number Five",
"description": "In this lesson, you will learn how to sign the number five in sign language. Mastering this sign is crucial for counting, expressing quantities, or representing the concept of five. Practice the hand shape, movement, and facial expression to effectively communicate the number five in sign language.",
"howToDo": [
"- Extend all your fingers straight up.",
"- Keep your thumb resting on the side of your palm.",
"- Hold your hand in front of your chest."
],
"referenceImage": "https://drive.google.com/uc?export=view&id=1G6qx4dhHTKUPvNag0R3qlDJugNOgcTqM",
"referenceVideo": "https://example.com/number_seven_video.mp4",
"taskItemMark": 10
},
{
"_id": "6",
"title": "Learn Number Six",
"description": "In this lesson, you will learn how to sign the number six in sign language. This sign is commonly used for counting, indicating quantities, or representing the concept of six. Pay attention to the hand shape, movement, and expression involved in signing the number six to enhance your signing proficiency.",
"howToDo": [
"- Extend your thumb and pinky finger straight up.",
"- Keep the rest of your fingers closed.",
"- Hold your hand in front of your chest."
],
"referenceImage": "https://drive.google.com/uc?export=view&id=1Q2TbcPTx8KuLp4v2mPL_7GHO0l52DZXw",
"referenceVideo": "https://example.com/number_seven_video.mp4",
"taskItemMark": 10
},
{
"_id": "7",
"title": "Learn Number Seven",
"description": "In this lesson, you will learn how to sign the number seven in sign language. Mastering this sign is essential for counting, expressing quantities, or representing the concept of seven. Focus on the hand shape, movement, and facial expression to accurately convey the number seven in sign language.",
"howToDo": [
"- Extend your index, middle, and ring fingers straight up.",
"- Keep your thumb, pinky, and pinky finger folded.",
"- Hold your hand in front of your chest."
],
"referenceImage": "https://drive.google.com/uc?export=view&id=1Jwt1TqXO1L7t3JFqQYzKL4S4GCuqULJ1",
"referenceVideo": "https://example.com/number_seven_video.mp4",
"taskItemMark": 10
},
{
"_id": "8",
"title": "Learn Number Eight",
"description": "In this lesson, you will learn how to sign the number eight in sign language. This sign is commonly used for counting, indicating quantities, or representing the concept of eight. Practice the hand shape, movement, and expression to effectively communicate the number eight in sign language.",
"howToDo": [
"- Extend all your fingers straight up.",
"- Cross your index and middle fingers over your ring and pinky fingers.",
"- Hold your hand in front of your chest."
],
"referenceImage": "https://drive.google.com/uc?export=view&id=1zSf4hmqIUCcfebgoD6DTWmJ3NxY36LJL",
"referenceVideo": "https://example.com/number_seven_video.mp4",
"taskItemMark": 10
},
{
"_id": "9",
"title": "Learn Number Nine",
"description": "In this lesson, you will learn how to sign the number nine in sign language. Mastering this sign is crucial for counting, expressing quantities, or representing the concept of nine. Pay attention to the hand shape, movement, and expression involved in signing the number nine to enhance your signing proficiency.",
"howToDo": [
"- Extend your thumb and all your fingers straight up.",
"- Keep your pinky finger folded.",
"- Hold your hand in front of your chest."
],
"referenceImage": "https://drive.google.com/uc?export=view&id=16WB1Ifhq_ozKOO-ehfIqVRB6oC3B5STw",
"referenceVideo": "https://example.com/number_seven_video.mp4",
"taskItemMark": 10
},
{
"_id": "10",
"title": "Learn Number Ten",
"description": "In this lesson, you will learn how to sign the number ten in sign language. This sign is commonly used for counting, indicating quantities, or representing the concept of ten. Focus on the hand shape, movement, and facial expression to accurately convey the number ten in sign language.",
"howToDo": [
"- Extend your thumb, index, and middle fingers straight up.",
"- Keep the rest of your fingers closed.",
"- Hold your hand in front of your chest."
],
"referenceImage": "https://drive.google.com/uc?export=view&id=11tCYFYjdVrr5LIFGyzOrIUg8bTURATZh",
"referenceVideo": "https://example.com/number_seven_video.mp4",
"taskItemMark": 10
}
]
\ No newline at end of file
// import { useMemo } from 'react'; import { useMemo } from 'react';
// material-ui // material-ui
import { Theme } from '@mui/material/styles';
import { Box, useMediaQuery } from '@mui/material'; import { Box, useMediaQuery } from '@mui/material';
import { Theme } from '@mui/material/styles';
// project import // project import
import Search from './Search'; import Localization from './Localization';
import Message from './Message'; import Message from './Message';
import Profile from './Profile';
// import Localization from './Localization';
import Notification from './Notification'; import Notification from './Notification';
// import Customization from './Customization'; import Profile from './Profile';
import Search from './Search';
import Customization from './Customization';
import MobileSection from './MobileSection'; import MobileSection from './MobileSection';
// import MegaMenuSection from './MegaMenuSection'; // import MegaMenuSection from './MegaMenuSection';
...@@ -23,13 +23,12 @@ import { MenuOrientation } from 'types/config'; ...@@ -23,13 +23,12 @@ import { MenuOrientation } from 'types/config';
// ==============================|| HEADER - CONTENT ||============================== // // ==============================|| HEADER - CONTENT ||============================== //
const HeaderContent = () => { const HeaderContent = () => {
// const { i18n, menuOrientation } = useConfig(); const { i18n, menuOrientation } = useConfig();
const { menuOrientation } = useConfig();
const downLG = useMediaQuery((theme: Theme) => theme.breakpoints.down('lg')); const downLG = useMediaQuery((theme: Theme) => theme.breakpoints.down('lg'));
// eslint-disable-next-line react-hooks/exhaustive-deps // eslint-disable-next-line react-hooks/exhaustive-deps
// const localization = useMemo(() => <Localization />, [i18n]); const localization = useMemo(() => <Localization />, [i18n]);
// const megaMenu = useMemo(() => <MegaMenuSection />, []); // const megaMenu = useMemo(() => <MegaMenuSection />, []);
...@@ -38,12 +37,12 @@ const HeaderContent = () => { ...@@ -38,12 +37,12 @@ const HeaderContent = () => {
{menuOrientation === MenuOrientation.HORIZONTAL && !downLG && <DrawerHeader open={true} />} {menuOrientation === MenuOrientation.HORIZONTAL && !downLG && <DrawerHeader open={true} />}
{!downLG && <Search />} {!downLG && <Search />}
{/* {!downLG && megaMenu} */} {/* {!downLG && megaMenu} */}
{/* {!downLG && localization} */} {!downLG && localization}
{downLG && <Box sx={{ width: '100%', ml: 1 }} />} {downLG && <Box sx={{ width: '100%', ml: 1 }} />}
<Notification /> <Notification />
<Message /> <Message />
{/* <Customization /> */} <Customization />
{!downLG && <Profile />} {!downLG && <Profile />}
{downLG && <MobileSection />} {downLG && <MobileSection />}
</> </>
......
...@@ -3,18 +3,20 @@ import { FormattedMessage } from 'react-intl'; ...@@ -3,18 +3,20 @@ import { FormattedMessage } from 'react-intl';
// assets // assets
import { import {
AudioOutlined,
BookOutlined, BookOutlined,
CarryOutOutlined, CarryOutOutlined,
CreditCardOutlined, CreditCardOutlined,
FastForwardOutlined,
FileTextOutlined, FileTextOutlined,
LoginOutlined, LoginOutlined,
RedditOutlined,
RocketOutlined, RocketOutlined,
ShoppingOutlined, ShoppingOutlined,
TeamOutlined, TeamOutlined,
TranslationOutlined, TranslationOutlined,
UserOutlined, UserOutlined,
FastForwardOutlined, VideoCameraOutlined,
RedditOutlined
} from '@ant-design/icons'; } from '@ant-design/icons';
// type // type
...@@ -33,7 +35,9 @@ const icons = { ...@@ -33,7 +35,9 @@ const icons = {
BookOutlined, BookOutlined,
TranslationOutlined, TranslationOutlined,
FastForwardOutlined, FastForwardOutlined,
RedditOutlined RedditOutlined,
AudioOutlined,
VideoCameraOutlined
}; };
// ==============================|| MENU ITEMS - SUPPORT ||============================== // // ==============================|| MENU ITEMS - SUPPORT ||============================== //
...@@ -92,6 +96,32 @@ const application: NavItemType = { ...@@ -92,6 +96,32 @@ const application: NavItemType = {
} }
] ]
}, },
{
id: 'emotion-detection',
title: <FormattedMessage id="emotion-detection" />,
type: 'collapse',
icon: icons.RedditOutlined,
children: [
{
id: 'audio-detection',
title: <FormattedMessage id="audio-detection" />,
type: 'item',
icon: icons.AudioOutlined,
url: '/emotion-detection/audio-detection',
},
{
id: 'video-detection',
title: <FormattedMessage id="video-detection" />,
type: 'item',
icon: icons.VideoCameraOutlined,
url: '/emotion-detection/video-detection',
},
]
},
{ {
id: 'learning-management', id: 'learning-management',
title: <FormattedMessage id="learning-management" />, title: <FormattedMessage id="learning-management" />,
...@@ -123,6 +153,12 @@ const application: NavItemType = { ...@@ -123,6 +153,12 @@ const application: NavItemType = {
type: 'item', type: 'item',
url: '/learning-management/curriculums-subscribed', url: '/learning-management/curriculums-subscribed',
}, },
{
id: 'learning-curriculums-subscribed-tutorial',
title: <FormattedMessage id="learning-curriculums-subscribed-tutorial" />,
type: 'item',
url: '/learning-management/curriculums-subscribed-tutorial',
},
{ {
id: 'learning-lead-board', id: 'learning-lead-board',
title: <FormattedMessage id="learning-lead-board" />, title: <FormattedMessage id="learning-lead-board" />,
......
import React, { useState } from 'react';
import MainCard from 'components/MainCard';
import ScrollX from 'components/ScrollX';
import Grid from '@mui/material/Grid';
import Button from '@mui/material/Button';
import UploadOutlined from '@ant-design/icons/lib/icons/UploadOutlined';
import AudioOutlined from '@ant-design/icons/lib/icons/AudioOutlined';
const List = () => {
const [audioBlob, setAudioBlob] = useState<Blob | undefined>(undefined);
const [mediaRecorder, setMediaRecorder] = useState<MediaRecorder | undefined>(undefined);
const [isRecording, setIsRecording] = useState<boolean>(false);
const [audioUrl, setAudioUrl] = useState<string | undefined>(undefined);
const handleRecordStart = async () => {
// Clear the uploaded audio state when recording starts
setAudioUrl(undefined);
try {
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
const recorder = new MediaRecorder(stream);
recorder.ondataavailable = (event) => {
if (event.data.size > 0) {
setAudioBlob(new Blob([event.data], { type: 'audio/wav' }));
}
};
recorder.onstop = () => {
stream.getTracks().forEach(track => track.stop());
};
setMediaRecorder(recorder);
recorder.start();
setIsRecording(true);
} catch (error) {
console.error('Error starting recording:', error);
}
};
const handleRecordStop = () => {
if (mediaRecorder && isRecording) {
mediaRecorder.stop();
setIsRecording(false);
}
};
const handleUpload = (event: React.ChangeEvent<HTMLInputElement>) => {
const files = event.target.files;
if (files && files[0] && files[0].type.startsWith('audio/')) {
const audioObjectUrl = URL.createObjectURL(files[0]);
setAudioUrl(audioObjectUrl);
} else {
// Handle case where uploaded file is not an audio file
}
};
return (
<MainCard content={false}>
<ScrollX>
<Grid container spacing={2}>
<Grid item xs={12} md={6}>
<h2>Upload or Record</h2>
<MainCard>
<div style={{ textAlign: 'center' }}>
<input
type="file"
accept="audio/*"
onChange={handleUpload}
style={{ display: 'none' }}
id="audio-upload"
/>
<label htmlFor="audio-upload">
<Button
variant="contained"
color="primary"
component="span"
startIcon={<UploadOutlined />}
>
Upload
</Button>
</label>
<Button
variant="contained"
color="primary"
startIcon={<AudioOutlined />}
onClick={isRecording ? handleRecordStop : handleRecordStart}
>
{isRecording ? 'Stop Recording' : 'Record'}
</Button>
{audioBlob && (
<audio controls>
<source src={URL.createObjectURL(audioBlob)} type="audio/wav" />
Your browser does not support the audio element.
</audio>
)}
{audioUrl && (
<audio controls>
<source src={audioUrl} type="audio/mpeg" />
Your browser does not support the audio element.
</audio>
)}
</div>
</MainCard>
</Grid>
<Grid item xs={12} md={6}>
<h2>3D Avatar</h2>
<MainCard>
{/* Content of the second card */}
{/* You can put your 3D avatar components here */}
</MainCard>
</Grid>
</Grid>
</ScrollX>
</MainCard>
);
};
export default List;
import React, { useState, useRef } from 'react';
import MainCard from 'components/MainCard';
import ScrollX from 'components/ScrollX';
import Grid from '@mui/material/Grid';
import Button from '@mui/material/Button';
import VideoCameraOutlined from '@ant-design/icons/lib/icons/VideoCameraOutlined';
import UploadOutlined from '@ant-design/icons/lib/icons/UploadOutlined';
// import WebcamOutlinedIcon from '@mui/icons-material/WebcamOutlined';
const List = () => {
const [selectedFile, setSelectedFile] = useState<File | null>(null);
const [isLive, setIsLive] = useState(false);
const fileInputRef = useRef<HTMLInputElement | null>(null);
const videoRef = useRef<HTMLVideoElement | null>(null);
const handleFileChange = (event: React.ChangeEvent<HTMLInputElement>) => {
if (event.target.files && event.target.files[0]) {
setSelectedFile(event.target.files[0]);
}
};
const handleUploadButtonClick = () => {
if (fileInputRef.current) {
fileInputRef.current.click();
}
};
const handleLiveButtonClick = async () => {
try {
const stream = await navigator.mediaDevices.getUserMedia({ video: true });
if (videoRef.current) {
videoRef.current.srcObject = stream;
videoRef.current.play(); // Start playing the live camera feed
}
setIsLive(true);
} catch (error) {
console.error('Error accessing camera:', error);
}
};
return (
<MainCard content={false}>
<ScrollX>
<Grid container spacing={2}>
<Grid item xs={12} md={6}>
<h2>Upload or Live</h2>
<MainCard>
<div style={{ textAlign: 'center' }}>
<input
type="file"
accept="video/*"
ref={fileInputRef}
onChange={handleFileChange}
style={{ display: 'none' }}
/>
{isLive ? (
<video
ref={videoRef}
autoPlay
playsInline
width="100%"
/>
) : (
selectedFile && (
<video
controls
width="100%"
src={URL.createObjectURL(selectedFile)}
/>
)
)}
<Button
variant="contained"
color="primary"
startIcon={<UploadOutlined />}
onClick={handleUploadButtonClick}
>
Upload
</Button>
<Button
variant="contained"
color="primary"
startIcon={<VideoCameraOutlined />}
onClick={handleLiveButtonClick}
>
Live
</Button>
</div>
</MainCard>
</Grid>
<Grid item xs={12} md={6}>
<h2>3D Avatar</h2>
<MainCard>
{/* Content of the second card */}
{/* You can put your 3D avatar components here */}
</MainCard>
</Grid>
</Grid>
</ScrollX>
</MainCard>
);
};
export default List;
import { useEffect, useState } from 'react';
// material-ui // material-ui
import {
// third-party Accordion, AccordionDetails, AccordionSummary,
Box,
FormControl,
Grid,
MenuItem,
Pagination,
Select,
SelectChangeEvent,
Slide,
Stack,
Theme,
Typography,
useMediaQuery
} from '@mui/material';
import { useTheme } from '@mui/material/styles';
// project import // project import
import MainCard from 'components/MainCard'; import usePagination from 'hooks/usePagination';
import ScrollX from 'components/ScrollX'; import { GlobalFilter } from 'utils/react-table';
// types
// assets // assets
import { BookOutlined } from '@ant-design/icons';
//types import { userProgress } from 'data/userProgress';
import CurriculumSection from 'sections/learning-management/learning-curriculums-subscribed/CurriculumSection';
import EmptyCurriculumCard from 'sections/learning-management/learning-curriculums-subscribed/skeleton/EmptyCurriculumCard';
import { curriculumTypeUserProgress, userProgressType } from 'types/userProgress';
// ==============================|| List ||============================== // // ==============================|| List ||============================== //
const allColumns = [
{
id: 1,
header: 'Default'
},
{
id: 2,
header: 'Title'
}
];
const List = () => { const List = () => {
const theme = useTheme();
const [data, setData] = useState<userProgressType["curriculums"]>([])
const matchDownSM = useMediaQuery((theme: Theme) => theme.breakpoints.down('sm'));
const [curriculumsData, setCurriculumsData] = useState<userProgressType["curriculums"]>([]);
const [sortBy, setSortBy] = useState('Default');
const [globalFilter, setGlobalFilter] = useState('');
const [page, setPage] = useState(1);
const handleChange = (event: SelectChangeEvent) => {
setSortBy(event.target.value as string);
};
// search
useEffect(() => {
const newData = data?.filter((value: any) => {
if (globalFilter) {
return value.curriculumTitle.toLowerCase().includes(globalFilter.toLowerCase());
} else {
return value;
}
});
setCurriculumsData(newData);
}, [globalFilter, data]);
const PER_PAGE = 6;
const count = Math.ceil(curriculumsData?.length! / PER_PAGE);
const _DATA = usePagination(curriculumsData, PER_PAGE);
const handleChangePage = (e: any, p: any) => {
setPage(p);
_DATA.jump(p);
};
useEffect(() => {
setData(userProgress.curriculums)
}, [])
const [expanded, setExpanded] = useState<string | false>('panel0');
const handleAccordionChange = (panel: string) => (event: React.SyntheticEvent, newExpanded: boolean) => {
setExpanded(newExpanded ? panel : false);
};
return ( return (
<MainCard content={false}> <>
<ScrollX> <Box sx={{ position: 'relative', marginBottom: 3 }}>
</ScrollX> <Stack direction="row" alignItems="center">
</MainCard> <Stack
direction={matchDownSM ? 'column' : 'row'}
sx={{ width: '100%' }}
spacing={1}
justifyContent="space-between"
alignItems="center"
>
{/* @ts-ignore */}
<GlobalFilter preGlobalFilteredRows={data} globalFilter={globalFilter} setGlobalFilter={setGlobalFilter} />
<Stack direction={matchDownSM ? 'column' : 'row'} alignItems="center" spacing={1}>
<FormControl sx={{ m: 1, minWidth: 120 }}>
<Select
value={sortBy}
onChange={handleChange}
displayEmpty
inputProps={{ 'aria-label': 'Without label' }}
renderValue={(selected) => {
if (!selected) {
return <Typography variant="subtitle1">Sort By</Typography>;
}
return <Typography variant="subtitle2">Sort by ({sortBy})</Typography>;
}}
>
{allColumns.map((column) => {
return (
<MenuItem key={column.id} value={column.header}>
{column.header}
</MenuItem>
);
})}
</Select>
</FormControl>
</Stack>
</Stack>
</Stack>
</Box>
<Grid container spacing={3}>
{curriculumsData?.length! > 0 ? (
_DATA
.currentData()
.sort(function (a: any, b: any) {
if (sortBy === 'Title') return a.curriculumTitle.localeCompare(b.curriculumTitle);
return a;
})
.map((curriculum: curriculumTypeUserProgress, index: number) => (
<Slide key={index} direction="up" in={true} timeout={50}>
<Grid item xs={12} sm={12} lg={12}>
<Box
sx={{
'& .MuiAccordion-root': {
borderColor: theme.palette.divider,
'& .MuiAccordionSummary-root': {
bgcolor: 'transparent',
flexDirection: 'row',
'&:focus-visible': {
bgcolor: 'primary.lighter'
}
},
'& .MuiAccordionDetails-root': {
borderColor: theme.palette.divider
},
'& .Mui-expanded': {
color: theme.palette.primary.main
}
}
}}
>
<Accordion expanded={expanded === `panel${index}`} onChange={handleAccordionChange(`panel${index}`)}>
<AccordionSummary aria-controls={`panel${index}d-content`} id={`panel${index}d-header`}>
<Stack direction="row" spacing={1.5} alignItems="center">
<BookOutlined style={{ fontSize: '26px' }} />
<Typography variant="h4">{curriculum.curriculumTitle}</Typography>
</Stack>
</AccordionSummary>
<AccordionDetails>
<CurriculumSection curriculum={curriculum} />
</AccordionDetails>
</Accordion>
</Box>
</Grid>
</Slide>
))
) : (
<EmptyCurriculumCard title={'The system has no curriculums yet.'} />
)}
</Grid>
<Stack spacing={2} sx={{ p: 2.5 }} alignItems="flex-end">
<Pagination
count={count}
size="medium"
page={page}
showFirstButton
showLastButton
variant="combined"
color="primary"
onChange={handleChangePage}
/>
</Stack>
</>
) )
} }
......
import { taskItemTypeUserProgress } from "types/userProgress";
export interface selectedItemContentProps extends taskItemTypeUserProgress {
userId: string
curriculumCode: string
tutorialCode: string
}
export interface selectedCommonDataProps {
userId: string
curriculumCode: string
tutorialCode: string
title: string
}
\ No newline at end of file
import { useEffect, useState } from 'react';
// material-ui // material-ui
import {
// third-party Box,
FormControl,
Grid,
MenuItem,
Pagination,
Select,
SelectChangeEvent,
Slide,
Stack,
Theme,
Typography,
useMediaQuery
} from '@mui/material';
// project import // project import
import MainCard from 'components/MainCard';
import ScrollX from 'components/ScrollX';
import usePagination from 'hooks/usePagination';
import CurriculumCard from 'sections/learning-management/learning-curriculums/CurriculumCard';
import EmptyCurriculumCard from 'sections/learning-management/learning-curriculums/skeleton/EmptyCurriculumCard';
import { curriculumType } from 'types/curriculum';
import { GlobalFilter } from 'utils/react-table';
// assets // types
//types // assets
import { curriculums } from 'data/curriculums';
// ==============================|| List ||============================== // // ==============================|| List ||============================== //
const allColumns = [
{
id: 1,
header: 'Default'
},
{
id: 2,
header: 'Title'
}
];
const List = () => { const List = () => {
const [data, setData] = useState<curriculumType[]>([])
const matchDownSM = useMediaQuery((theme: Theme) => theme.breakpoints.down('sm'));
const [curriculumsData, setCurriculumsData] = useState<curriculumType[]>([]);
const [sortBy, setSortBy] = useState('Default');
const [globalFilter, setGlobalFilter] = useState('');
const [page, setPage] = useState(1);
const handleChange = (event: SelectChangeEvent) => {
setSortBy(event.target.value as string);
};
// search
useEffect(() => {
const newData = data.filter((value: any) => {
if (globalFilter) {
return value.curriculumTitle.toLowerCase().includes(globalFilter.toLowerCase());
} else {
return value;
}
});
setCurriculumsData(newData);
}, [globalFilter, data]);
const PER_PAGE = 6;
const count = Math.ceil(curriculumsData.length / PER_PAGE);
const _DATA = usePagination(curriculumsData, PER_PAGE);
const handleChangePage = (e: any, p: any) => {
setPage(p);
_DATA.jump(p);
};
useEffect(() => {
setData(curriculums)
}, [])
return ( return (
<MainCard content={false}> <>
<ScrollX> <Box sx={{ position: 'relative', marginBottom: 3 }}>
</ScrollX> <Stack direction="row" alignItems="center">
</MainCard> <Stack
direction={matchDownSM ? 'column' : 'row'}
sx={{ width: '100%' }}
spacing={1}
justifyContent="space-between"
alignItems="center"
>
{/* @ts-ignore */}
<GlobalFilter preGlobalFilteredRows={data} globalFilter={globalFilter} setGlobalFilter={setGlobalFilter} />
<Stack direction={matchDownSM ? 'column' : 'row'} alignItems="center" spacing={1}>
<FormControl sx={{ m: 1, minWidth: 120 }}>
<Select
value={sortBy}
onChange={handleChange}
displayEmpty
inputProps={{ 'aria-label': 'Without label' }}
renderValue={(selected) => {
if (!selected) {
return <Typography variant="subtitle1">Sort By</Typography>;
}
return <Typography variant="subtitle2">Sort by ({sortBy})</Typography>;
}}
>
{allColumns.map((column) => {
return (
<MenuItem key={column.id} value={column.header}>
{column.header}
</MenuItem>
);
})}
</Select>
</FormControl>
</Stack>
</Stack>
</Stack>
</Box>
<Grid container spacing={3}>
{curriculumsData.length > 0 ? (
_DATA
.currentData()
.sort(function (a: any, b: any) {
if (sortBy === 'Title') return a.curriculumTitle.localeCompare(b.curriculumTitle);
return a;
})
.map((curriculum: curriculumType, index: number) => (
<Slide key={index} direction="up" in={true} timeout={50}>
<Grid item xs={12} sm={6} lg={4}>
<CurriculumCard curriculum={curriculum} />
</Grid>
</Slide>
))
) : (
<EmptyCurriculumCard title={'The system has no curriculums yet.'} />
)}
</Grid>
<Stack spacing={2} sx={{ p: 2.5 }} alignItems="flex-end">
<Pagination
count={count}
size="medium"
page={page}
showFirstButton
showLastButton
variant="combined"
color="primary"
onChange={handleChangePage}
/>
</Stack>
</>
) )
} }
......
export interface dataProps {
_id: number | string | undefined;
curriculumCode: string;
curriculumLevel: number;
curriculumName: string;
curriculumDescription: string;
curriculumImage: string;
tutorials?: tutorialItemProps[];
status?: number;
createdBy?: string;
updatedBy?: string;
createdAt?: Date;
updatedAt?: Date;
}
export interface tutorialItemProps {
_id: number | string | undefined;
tutorialCode: string;
tutorialTitle: string;
tutorialDescription: string;
tutorialImage: string;
status?: number;
createdBy?: string;
updatedBy?: string;
createdAt?: Date;
updatedAt?: Date;
taskItems?: taskItemProps[]
}
export interface taskItemProps {
_id: number | string | undefined;
title: string;
description: string;
howToDo: string;
referenceImage: string;
referenceVideo: string;
}
\ No newline at end of file
.videoContainer {
position: relative;
width: 50%;
padding-top: 56.25%; /* 16:9 aspect ratio */
}
.futuristicVideo {
position: absolute;
top: 0;
left: 0;
width: 100%;
height: 100%;
border: none;
outline: none;
background-color: black;
opacity: 0.8;
filter: blur(4px);
transition: opacity 0.5s, filter 0.5s;
}
.futuristicVideo:hover {
opacity: 1;
filter: blur(0);
}