Commit 39909344 authored by Gamage B.G.J's avatar Gamage B.G.J

Merge branch 'dev' into 'master'

Dev

See merge request !5
parents b67a974b 57a9efd8
models/*
!models/
DataSet/Sn_sign_language_dataset/
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "f47e929b",
"metadata": {},
"outputs": [],
"source": [
"import nest_asyncio\n",
"import asyncio\n",
"import torch.nn.functional as F\n",
"import torch.nn as nn\n",
"import torchvision.transforms as transforms\n",
"import numpy as np\n",
"import io\n",
"import uvicorn\n",
"from fastapi import FastAPI, UploadFile\n",
"from PIL import Image\n",
"import torch"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "13b1d58b",
"metadata": {},
"outputs": [],
"source": [
"nest_asyncio.apply()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "35f4adc0",
"metadata": {},
"outputs": [],
"source": [
"app = FastAPI()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "a5aba4be",
"metadata": {},
"outputs": [],
"source": [
"transform = transforms.Compose([\n",
" transforms.Resize((300, 300)),\n",
" transforms.Grayscale(num_output_channels=1),\n",
" transforms.ToTensor(),\n",
" transforms.Normalize(mean=(0.5), std=(0.5))\n",
"])"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "11ec2fae",
"metadata": {},
"outputs": [],
"source": [
"class theCNN(nn.Module):\n",
" def __init__(self):\n",
" super(theCNN, self).__init__()\n",
" \n",
" self.conv01 = nn.Conv2d(\n",
" in_channels=1,\n",
" out_channels=10,\n",
" kernel_size=5,\n",
" stride=1,\n",
" padding=1\n",
" )\n",
" \n",
" self.conv02 = nn.Conv2d(\n",
" in_channels=10,\n",
" out_channels=20,\n",
" kernel_size=5,\n",
" stride=1,\n",
" padding=1\n",
" )\n",
" \n",
" expectedSize = int(np.floor((73 + 2 * 0 - 1) / 1) + 1)\n",
" expectedSize = 20 * int(expectedSize ** 2)\n",
" \n",
" self.fc01 = nn.Linear(expectedSize, 50)\n",
" self.output = nn.Linear(50, 16)\n",
"\n",
" def forward(self, x):\n",
" x = F.relu(F.max_pool2d(self.conv01(x), 2))\n",
" x = F.relu(F.max_pool2d(self.conv02(x), 2))\n",
" nUnits = x.shape.numel() / x.shape[0]\n",
" x = x.view(-1, int(nUnits))\n",
" x = F.relu(self.fc01(x))\n",
" return torch.softmax(self.output(x), axis=1)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "353a4725",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"theCNN(\n",
" (conv01): Conv2d(1, 10, kernel_size=(5, 5), stride=(1, 1), padding=(1, 1))\n",
" (conv02): Conv2d(10, 20, kernel_size=(5, 5), stride=(1, 1), padding=(1, 1))\n",
" (fc01): Linear(in_features=106580, out_features=50, bias=True)\n",
" (output): Linear(in_features=50, out_features=16, bias=True)\n",
")"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"model = theCNN()\n",
"model.load_state_dict(torch.load(\"model.pth\"))\n",
"model.eval()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "bb87b7f0",
"metadata": {},
"outputs": [],
"source": [
"@app.post(\"/score\")\n",
"async def calculate_score(image_file: UploadFile):\n",
" image = Image.open(io.BytesIO(await image_file.read())).convert(\"L\")\n",
" image = transform(image).unsqueeze(0)\n",
"\n",
" with torch.no_grad():\n",
" output = model(image)\n",
"\n",
" probabilities = torch.softmax(output, dim=1)[0]\n",
" predicted_class = torch.argmax(probabilities).item()\n",
"\n",
" # Get the actual number corresponding to the hand sign\n",
" actual_number = get_actual_number_from_image(image)\n",
" \n",
" print(actual_number)\n",
"\n",
" # Compare predicted class with actual number and calculate correctness percentage\n",
" correct = int(predicted_class + 1 == actual_number)\n",
" print(correct)\n",
" correctness_percentage = correct / 1.0 * 100.0\n",
"\n",
" return {\"predicted_class\": predicted_class, \"correctness_percentage\": correctness_percentage}"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "05646e93",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import numpy as np\n",
"\n",
"def get_actual_number_from_image(image):\n",
" # Convert the image to numpy array\n",
" image_array = np.array(image)\n",
"\n",
" # Apply image processing techniques to detect and recognize digits\n",
" # Example steps: thresholding, contour detection, character segmentation, digit recognition\n",
"\n",
" # Apply thresholding\n",
" _, binary_image = cv2.threshold(image_array, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n",
"\n",
" # Find contours\n",
" contours, _ = cv2.findContours(binary_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
"\n",
" # Sort contours based on their x-coordinate\n",
" contours = sorted(contours, key=lambda cnt: cv2.boundingRect(cnt)[0])\n",
"\n",
" # Initialize the recognized digit sequence\n",
" digit_sequence = \"\"\n",
"\n",
" # Iterate over the contours and recognize digits\n",
" for contour in contours:\n",
" # Get the bounding box of the contour\n",
" x, y, w, h = cv2.boundingRect(contour)\n",
"\n",
" # Crop the digit region from the image\n",
" digit_image = binary_image[y:y + h, x:x + w]\n",
"\n",
" # Resize the digit image to a fixed size (e.g., 28x28)\n",
" resized_digit_image = cv2.resize(digit_image, (28, 28))\n",
"\n",
" # Preprocess the resized digit image (e.g., normalize pixel values)\n",
" preprocessed_digit_image = resized_digit_image / 255.0\n",
"\n",
" # Flatten the preprocessed digit image\n",
" flattened_digit_image = preprocessed_digit_image.flatten()\n",
"\n",
" # Pass the flattened digit image to your digit recognition model\n",
" # to get the predicted digit (e.g., using a separate model or the same model you used for training)\n",
"\n",
" # Here, let's assume you have a function `predict_digit` that takes the flattened digit image\n",
" # and returns the predicted digit as an integer\n",
" predicted_digit = predict_digit(flattened_digit_image)\n",
"\n",
" # Add the predicted digit to the digit sequence\n",
" digit_sequence += str(predicted_digit)\n",
"\n",
" # Convert the digit sequence to an integer\n",
" actual_number = int(digit_sequence)\n",
"\n",
" return actual_number\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ee993fc1",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO: Started server process [36312]\n",
"INFO: Waiting for application startup.\n",
"INFO: Application startup complete.\n",
"INFO: Uvicorn running on http://127.0.0.1:8001 (Press CTRL+C to quit)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"5\n",
"0\n",
"INFO: 127.0.0.1:60625 - \"POST /score HTTP/1.1\" 200 OK\n"
]
}
],
"source": [
"if __name__ == \"__main__\":\n",
" loop = asyncio.get_event_loop()\n",
" loop.create_task(uvicorn.run(app, host=\"127.0.0.1\", port=8001))\n",
" loop.run_forever()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "34c5efea",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
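A note on the notebook above: `get_actual_number_from_image` calls `predict_digit`, which is never defined anywhere in this merge request, so any request that reaches that code path raises a `NameError`. Below is a minimal sketch of the missing helper; the `DigitNet` architecture and the `digit_model.pth` file name are hypothetical placeholders, not part of this change:

import torch
import torch.nn as nn

# Hypothetical stand-in for the digit recognizer the comments describe.
class DigitNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(28 * 28, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        return self.fc2(torch.relu(self.fc1(x)))

digit_model = DigitNet()
# digit_model.load_state_dict(torch.load("digit_model.pth"))  # assumed weights file
digit_model.eval()

def predict_digit(flattened_digit_image):
    # get_actual_number_from_image passes a flat numpy array of 784 floats in [0, 1]
    x = torch.tensor(flattened_digit_image, dtype=torch.float32).unsqueeze(0)
    with torch.no_grad():
        logits = digit_model(x)
    return int(torch.argmax(logits, dim=1).item())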
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "c898b57c",
"metadata": {},
"outputs": [],
"source": [
"pip install python-multipart"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "a99b4156",
"metadata": {},
"outputs": [],
"source": [
"import nest_asyncio\n",
"import asyncio\n",
"import torch.nn.functional as F\n",
"\n",
"# Apply the patch\n",
"nest_asyncio.apply()"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "01b6e57c",
"metadata": {},
"outputs": [],
"source": [
"from fastapi import FastAPI, UploadFile\n",
"from PIL import Image\n",
"import torch\n",
"import torchvision.transforms as transforms\n",
"import numpy as np\n",
"import io\n",
"import torch.nn as nn\n",
"import asyncio\n",
"import uvicorn "
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "2e561f13",
"metadata": {},
"outputs": [],
"source": [
"app = FastAPI()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "1e9e4208",
"metadata": {},
"outputs": [],
"source": [
"transform = transforms.Compose([\n",
" transforms.Resize((300, 300)),\n",
" transforms.Grayscale(num_output_channels=1),\n",
" transforms.ToTensor(),\n",
" transforms.Normalize(mean=(0.5), std=(0.5))\n",
"])"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "4d86d2b8",
"metadata": {},
"outputs": [],
"source": [
"class theCNN(nn.Module):\n",
" def __init__(self):\n",
" super(theCNN, self).__init__()\n",
" \n",
" self.conv01 = nn.Conv2d(\n",
" in_channels=1,\n",
" out_channels=10,\n",
" kernel_size=5,\n",
" stride=1,\n",
" padding=1\n",
" )\n",
" \n",
" self.conv02 = nn.Conv2d(\n",
" in_channels=10,\n",
" out_channels=20,\n",
" kernel_size=5,\n",
" stride=1,\n",
" padding=1\n",
" )\n",
" \n",
" expectedSize = int(np.floor((73 + 2 * 0 - 1) / 1) + 1)\n",
" expectedSize = 20 * int(expectedSize ** 2)\n",
" \n",
" self.fc01 = nn.Linear(expectedSize, 50)\n",
" self.output = nn.Linear(50, 16)\n",
"\n",
" def forward(self, x):\n",
" x = F.relu(F.max_pool2d(self.conv01(x), 2))\n",
" x = F.relu(F.max_pool2d(self.conv02(x), 2))\n",
" nUnits = x.shape.numel() / x.shape[0]\n",
" x = x.view(-1, int(nUnits))\n",
" x = F.relu(self.fc01(x))\n",
" return torch.softmax(self.output(x), axis=1)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "9379bf73",
"metadata": {},
"outputs": [
{
"ename": "TypeError",
"evalue": "Expected state_dict to be dict-like, got <class '__main__.theCNN'>.",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mTypeError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[1;32mIn[6], line 2\u001b[0m\n\u001b[0;32m 1\u001b[0m model \u001b[38;5;241m=\u001b[39m theCNN()\n\u001b[1;32m----> 2\u001b[0m \u001b[43mmodel\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mload_state_dict\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtorch\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mload\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mmodel1.pth\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 3\u001b[0m model\u001b[38;5;241m.\u001b[39meval()\n",
"File \u001b[1;32m~\\AppData\\Roaming\\Python\\Python310\\site-packages\\torch\\nn\\modules\\module.py:1994\u001b[0m, in \u001b[0;36mModule.load_state_dict\u001b[1;34m(self, state_dict, strict)\u001b[0m\n\u001b[0;32m 1971\u001b[0m \u001b[38;5;124mr\u001b[39m\u001b[38;5;124;03m\"\"\"Copies parameters and buffers from :attr:`state_dict` into\u001b[39;00m\n\u001b[0;32m 1972\u001b[0m \u001b[38;5;124;03mthis module and its descendants. If :attr:`strict` is ``True``, then\u001b[39;00m\n\u001b[0;32m 1973\u001b[0m \u001b[38;5;124;03mthe keys of :attr:`state_dict` must exactly match the keys returned\u001b[39;00m\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 1991\u001b[0m \u001b[38;5;124;03m ``RuntimeError``.\u001b[39;00m\n\u001b[0;32m 1992\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[0;32m 1993\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(state_dict, Mapping):\n\u001b[1;32m-> 1994\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mExpected state_dict to be dict-like, got \u001b[39m\u001b[38;5;132;01m{}\u001b[39;00m\u001b[38;5;124m.\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;241m.\u001b[39mformat(\u001b[38;5;28mtype\u001b[39m(state_dict)))\n\u001b[0;32m 1996\u001b[0m missing_keys: List[\u001b[38;5;28mstr\u001b[39m] \u001b[38;5;241m=\u001b[39m []\n\u001b[0;32m 1997\u001b[0m unexpected_keys: List[\u001b[38;5;28mstr\u001b[39m] \u001b[38;5;241m=\u001b[39m []\n",
"\u001b[1;31mTypeError\u001b[0m: Expected state_dict to be dict-like, got <class '__main__.theCNN'>."
]
}
],
"source": [
"model = theCNN()\n",
"model.load_state_dict(torch.load(\"model.pth\"))\n",
"model.eval()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "18b869d3",
"metadata": {},
"outputs": [],
"source": [
"@app.post(\"/score\")\n",
"async def calculate_score(image_file: UploadFile):\n",
" image = Image.open(io.BytesIO(await image_file.read())).convert(\"L\")\n",
" image = transform(image).unsqueeze(0)\n",
"\n",
" with torch.no_grad():\n",
" output = model(image)\n",
"\n",
" probabilities = torch.softmax(output, dim=1)[0]\n",
" similarity_scores = probabilities.numpy()\n",
"\n",
" return {\"similarity_scores\": similarity_scores.tolist()}"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4a99a8b6",
"metadata": {},
"outputs": [],
"source": [
"@app.get(\"/\")\n",
"async def hello_world(): \n",
"\n",
" return {\"Hello World\"}"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7b30f5c6",
"metadata": {},
"outputs": [],
"source": [
"if __name__ == \"__main__\":\n",
" loop = asyncio.get_event_loop()\n",
" loop.create_task(uvicorn.run(app, host=\"127.0.0.1\", port=8001))\n",
" loop.run_forever()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3e2e07d8",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
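The `TypeError` recorded in the notebook above means the checkpoint file holds a whole pickled `theCNN` instance rather than a parameter dictionary, i.e. it was written with `torch.save(model, ...)` instead of `torch.save(model.state_dict(), ...)`. A sketch of both loading conventions, assuming the `theCNN` class from the cell above is in scope:

import torch

# Option 1: the checkpoint is a whole pickled module (what the traceback implies) --
# load it directly instead of feeding it to load_state_dict():
model = torch.load("model1.pth")
model.eval()

# Option 2 (the usual convention): re-save only the parameters once, then every
# consumer can instantiate the class and load the state dict:
torch.save(model.state_dict(), "model.pth")

model = theCNN()
model.load_state_dict(torch.load("model.pth"))
model.eval()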
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "60c58fd2",
"metadata": {},
"outputs": [],
"source": [
"import nest_asyncio\n",
"import asyncio\n",
"import torch.nn.functional as F\n",
"\n",
"# Apply the patch\n",
"nest_asyncio.apply()"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "c1b7af91",
"metadata": {},
"outputs": [],
"source": [
"from fastapi import FastAPI, UploadFile\n",
"from PIL import Image\n",
"import torch\n",
"import torchvision.transforms as transforms\n",
"import numpy as np\n",
"import io\n",
"import torch.nn as nn\n",
"import asyncio\n",
"import uvicorn \n",
"from io import BytesIO"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "7f1cd8a4",
"metadata": {},
"outputs": [],
"source": [
"app = FastAPI()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "2035acf5",
"metadata": {},
"outputs": [],
"source": [
"transform = transforms.Compose([\n",
" transforms.Resize((300, 300)),\n",
" transforms.Grayscale(num_output_channels=1),\n",
" transforms.ToTensor(),\n",
" transforms.Normalize(mean=(0.5), std=(0.5))\n",
"])"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "2cfca47d",
"metadata": {},
"outputs": [],
"source": [
"class theCNN(nn.Module):\n",
" def __init__(self):\n",
" super(theCNN, self).__init__()\n",
" \n",
" self.conv01 = nn.Conv2d(\n",
" in_channels=1,\n",
" out_channels=10,\n",
" kernel_size=5,\n",
" stride=1,\n",
" padding=1\n",
" )\n",
" \n",
" self.conv02 = nn.Conv2d(\n",
" in_channels=10,\n",
" out_channels=20,\n",
" kernel_size=5,\n",
" stride=1,\n",
" padding=1\n",
" )\n",
" \n",
" expectedSize = int(np.floor((73 + 2 * 0 - 1) / 1) + 1)\n",
" expectedSize = 20 * int(expectedSize ** 2)\n",
" \n",
" self.fc01 = nn.Linear(expectedSize, 50)\n",
" self.output = nn.Linear(50, 16)\n",
"\n",
" def forward(self, x):\n",
" x = F.relu(F.max_pool2d(self.conv01(x), 2))\n",
" x = F.relu(F.max_pool2d(self.conv02(x), 2))\n",
" nUnits = x.shape.numel() / x.shape[0]\n",
" x = x.view(-1, int(nUnits))\n",
" x = F.relu(self.fc01(x))\n",
" return torch.softmax(self.output(x), axis=1)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "2786c4df",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"theCNN(\n",
" (conv01): Conv2d(1, 10, kernel_size=(5, 5), stride=(1, 1), padding=(1, 1))\n",
" (conv02): Conv2d(10, 20, kernel_size=(5, 5), stride=(1, 1), padding=(1, 1))\n",
" (fc01): Linear(in_features=106580, out_features=50, bias=True)\n",
" (output): Linear(in_features=50, out_features=16, bias=True)\n",
")"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"model = theCNN()\n",
"model.load_state_dict(torch.load(\"model.pth\"))\n",
"model.eval()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "30569105",
"metadata": {},
"outputs": [],
"source": [
"# @app.post(\"/predict-similarity\")\n",
"# async def predict_similarity(image: UploadFile):\n",
"# image_bytes = await image.read()\n",
"# img = Image.open(BytesIO(image_bytes))\n",
"# img = transform(img).unsqueeze(0)\n",
"# output = model(img)\n",
"# similarity_score = torch.max(output).item() * 100 # Get the maximum predicted probability as the similarity score\n",
"# return {\"similarity_score\": similarity_score}\n",
"\n",
"\n",
"@app.post(\"/predict-similarity\")\n",
"async def predict_similarity(sign: str, image: UploadFile):\n",
" image_bytes = await image.read()\n",
" img = Image.open(BytesIO(image_bytes))\n",
" img = transform(img).unsqueeze(0)\n",
" output = model(img)\n",
" similarity_score = torch.max(output).item() * 100 # Get the maximum predicted probability as the similarity score\n",
" return {\"sign\": sign, \"similarity_score\": similarity_score}"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "5c19e31c",
"metadata": {},
"outputs": [],
"source": [
"@app.get(\"/\")\n",
"async def hello_world(): \n",
"\n",
" return {\"Hello World\"}"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bea39dc9",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO: Started server process [36440]\n",
"INFO: Waiting for application startup.\n",
"INFO: Application startup complete.\n",
"INFO: Uvicorn running on http://127.0.0.1:8001 (Press CTRL+C to quit)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"INFO: 127.0.0.1:63408 - \"POST /predict-similarity HTTP/1.1\" 422 Unprocessable Entity\n",
"INFO: 127.0.0.1:63440 - \"POST /predict-similarity HTTP/1.1\" 422 Unprocessable Entity\n",
"INFO: 127.0.0.1:63461 - \"POST /predict-similarity HTTP/1.1\" 422 Unprocessable Entity\n",
"INFO: 127.0.0.1:63484 - \"POST /predict-similarity HTTP/1.1\" 422 Unprocessable Entity\n"
]
}
],
"source": [
"if __name__ == \"__main__\":\n",
" loop = asyncio.get_event_loop()\n",
" loop.create_task(uvicorn.run(app, host=\"127.0.0.1\", port=8001))\n",
" loop.run_forever()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "52ede8b7",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "3d7bb1db",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
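The stream of 422 responses in the last cell follows from the endpoint's signature: `sign: str` is read from the query string, while `image: UploadFile` requires a `multipart/form-data` body (hence the `python-multipart` install in the companion notebook). A request that sends `sign` in the body, or the image as anything other than a form file, fails validation before the handler runs. A client sketch that satisfies both; the file name and sign value are placeholders:

import requests

with open("sign_sample.jpg", "rb") as f:  # placeholder image path
    resp = requests.post(
        "http://127.0.0.1:8001/predict-similarity",
        params={"sign": "one"},  # plain str parameter -> query string
        files={"image": ("sign_sample.jpg", f, "image/jpeg")},  # UploadFile -> multipart field
    )
print(resp.status_code, resp.json())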
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "49b49c05",
"metadata": {},
"outputs": [],
"source": [
"import nest_asyncio\n",
"import asyncio\n",
"import torch.nn.functional as F\n",
"import torch.nn as nn\n",
"import torchvision.transforms as transforms\n",
"import numpy as np\n",
"import io\n",
"import uvicorn\n",
"from fastapi import FastAPI, UploadFile\n",
"from PIL import Image\n",
"import torch"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3f0148a4",
"metadata": {},
"outputs": [],
"source": [
"nest_asyncio.apply()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "26e5f198",
"metadata": {},
"outputs": [],
"source": [
"app = FastAPI()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d4359663",
"metadata": {},
"outputs": [],
"source": [
"transform = transforms.Compose([\n",
" transforms.Resize((300, 300)),\n",
" transforms.Grayscale(num_output_channels=1),\n",
" transforms.ToTensor(),\n",
" transforms.Normalize(mean=(0.5), std=(0.5))\n",
"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "308a25d3",
"metadata": {},
"outputs": [],
"source": [
"class theCNN(nn.Module):\n",
" def __init__(self):\n",
" super(theCNN, self).__init__()\n",
" \n",
" self.conv01 = nn.Conv2d(\n",
" in_channels=1,\n",
" out_channels=10,\n",
" kernel_size=5,\n",
" stride=1,\n",
" padding=1\n",
" )\n",
" \n",
" self.conv02 = nn.Conv2d(\n",
" in_channels=10,\n",
" out_channels=20,\n",
" kernel_size=5,\n",
" stride=1,\n",
" padding=1\n",
" )\n",
" \n",
" expectedSize = int(np.floor((73 + 2 * 0 - 1) / 1) + 1)\n",
" expectedSize = 20 * int(expectedSize ** 2)\n",
" \n",
" self.fc01 = nn.Linear(expectedSize, 50)\n",
" self.output = nn.Linear(50, 16)\n",
"\n",
" def forward(self, x):\n",
" x = F.relu(F.max_pool2d(self.conv01(x), 2))\n",
" x = F.relu(F.max_pool2d(self.conv02(x), 2))\n",
" nUnits = x.shape.numel() / x.shape[0]\n",
" x = x.view(-1, int(nUnits))\n",
" x = F.relu(self.fc01(x))\n",
" return torch.softmax(self.output(x), axis=1)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4e3544d2",
"metadata": {},
"outputs": [],
"source": [
"model = theCNN()\n",
"model.load_state_dict(torch.load(\"model.pth\"))\n",
"model.eval()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d521a8ca",
"metadata": {},
"outputs": [],
"source": [
"@app.post(\"/score\")\n",
"async def calculate_score(image_file: UploadFile):\n",
" image = Image.open(io.BytesIO(await image_file.read())).convert(\"L\")\n",
" image = transform(image).unsqueeze(0)\n",
"\n",
" with torch.no_grad():\n",
" output = model(image)\n",
"\n",
" probabilities = torch.softmax(output, dim=1)[0]\n",
" predicted_class = torch.argmax(probabilities).item()\n",
"\n",
" # Get the actual number corresponding to the hand sign\n",
" actual_number = get_actual_number_from_image(image)\n",
" \n",
" print(actual_number)\n",
"\n",
" # Compare predicted class with actual number and calculate correctness percentage\n",
" correct = int(predicted_class + 1 == actual_number)\n",
" print(correct)\n",
" correctness_percentage = correct / 1.0 * 100.0\n",
"\n",
" return {\"predicted_class\": predicted_class, \"correctness_percentage\": correctness_percentage}"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "030c5fbc",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import numpy as np\n",
"\n",
"def get_actual_number_from_image(image):\n",
" # Convert the image to numpy array\n",
" image_array = np.array(image)\n",
"\n",
" # Apply image processing techniques to detect and recognize digits\n",
" # Example steps: thresholding, contour detection, character segmentation, digit recognition\n",
"\n",
" # Apply thresholding\n",
" _, binary_image = cv2.threshold(image_array, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n",
"\n",
" # Find contours\n",
" contours, _ = cv2.findContours(binary_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
"\n",
" # Sort contours based on their x-coordinate\n",
" contours = sorted(contours, key=lambda cnt: cv2.boundingRect(cnt)[0])\n",
"\n",
" # Initialize the recognized digit sequence\n",
" digit_sequence = \"\"\n",
"\n",
" # Iterate over the contours and recognize digits\n",
" for contour in contours:\n",
" # Get the bounding box of the contour\n",
" x, y, w, h = cv2.boundingRect(contour)\n",
"\n",
" # Crop the digit region from the image\n",
" digit_image = binary_image[y:y + h, x:x + w]\n",
"\n",
" # Resize the digit image to a fixed size (e.g., 28x28)\n",
" resized_digit_image = cv2.resize(digit_image, (28, 28))\n",
"\n",
" # Preprocess the resized digit image (e.g., normalize pixel values)\n",
" preprocessed_digit_image = resized_digit_image / 255.0\n",
"\n",
" # Flatten the preprocessed digit image\n",
" flattened_digit_image = preprocessed_digit_image.flatten()\n",
"\n",
" # Pass the flattened digit image to your digit recognition model\n",
" # to get the predicted digit (e.g., using a separate model or the same model you used for training)\n",
"\n",
" # Here, let's assume you have a function `predict_digit` that takes the flattened digit image\n",
" # and returns the predicted digit as an integer\n",
" predicted_digit = predict_digit(flattened_digit_image)\n",
"\n",
" # Add the predicted digit to the digit sequence\n",
" digit_sequence += str(predicted_digit)\n",
"\n",
" # Convert the digit sequence to an integer\n",
" actual_number = int(digit_sequence)\n",
"\n",
" return actual_number\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5035c79a",
"metadata": {},
"outputs": [],
"source": [
"if __name__ == \"__main__\":\n",
" loop = asyncio.get_event_loop()\n",
" loop.create_task(uvicorn.run(app, host=\"127.0.0.1\", port=8001))\n",
" loop.run_forever()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c0449757",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
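The `/score` endpoint in the notebook above takes a single `UploadFile` named `image_file`, so the request must be multipart form data with exactly that field name. A client sketch, with a placeholder file path:

import requests

with open("hand_sign.jpg", "rb") as f:  # placeholder image path
    resp = requests.post(
        "http://127.0.0.1:8001/score",
        files={"image_file": ("hand_sign.jpg", f, "image/jpeg")},
    )
print(resp.json())  # {"predicted_class": ..., "correctness_percentage": ...}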
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "c898b57c",
"metadata": {},
"outputs": [],
"source": [
"pip install python-multipart"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a99b4156",
"metadata": {},
"outputs": [],
"source": [
"import nest_asyncio\n",
"import asyncio\n",
"import torch.nn.functional as F\n",
"\n",
"# Apply the patch\n",
"nest_asyncio.apply()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "01b6e57c",
"metadata": {},
"outputs": [],
"source": [
"from fastapi import FastAPI, UploadFile\n",
"from PIL import Image\n",
"import torch\n",
"import torchvision.transforms as transforms\n",
"import numpy as np\n",
"import io\n",
"import torch.nn as nn\n",
"import asyncio\n",
"import uvicorn "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2e561f13",
"metadata": {},
"outputs": [],
"source": [
"app = FastAPI()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1e9e4208",
"metadata": {},
"outputs": [],
"source": [
"transform = transforms.Compose([\n",
" transforms.Resize((300, 300)),\n",
" transforms.Grayscale(num_output_channels=1),\n",
" transforms.ToTensor(),\n",
" transforms.Normalize(mean=(0.5), std=(0.5))\n",
"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4d86d2b8",
"metadata": {},
"outputs": [],
"source": [
"class theCNN(nn.Module):\n",
" def __init__(self):\n",
" super(theCNN, self).__init__()\n",
" \n",
" self.conv01 = nn.Conv2d(\n",
" in_channels=1,\n",
" out_channels=10,\n",
" kernel_size=5,\n",
" stride=1,\n",
" padding=1\n",
" )\n",
" \n",
" self.conv02 = nn.Conv2d(\n",
" in_channels=10,\n",
" out_channels=20,\n",
" kernel_size=5,\n",
" stride=1,\n",
" padding=1\n",
" )\n",
" \n",
" expectedSize = int(np.floor((73 + 2 * 0 - 1) / 1) + 1)\n",
" expectedSize = 20 * int(expectedSize ** 2)\n",
" \n",
" self.fc01 = nn.Linear(expectedSize, 50)\n",
" self.output = nn.Linear(50, 16)\n",
"\n",
" def forward(self, x):\n",
" x = F.relu(F.max_pool2d(self.conv01(x), 2))\n",
" x = F.relu(F.max_pool2d(self.conv02(x), 2))\n",
" nUnits = x.shape.numel() / x.shape[0]\n",
" x = x.view(-1, int(nUnits))\n",
" x = F.relu(self.fc01(x))\n",
" return torch.softmax(self.output(x), axis=1)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9379bf73",
"metadata": {},
"outputs": [],
"source": [
"model = theCNN()\n",
"model.load_state_dict(torch.load(\"model.pth\"))\n",
"model.eval()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "18b869d3",
"metadata": {},
"outputs": [],
"source": [
"@app.post(\"/score\")\n",
"async def calculate_score(image_file: UploadFile):\n",
" image = Image.open(io.BytesIO(await image_file.read())).convert(\"L\")\n",
" image = transform(image).unsqueeze(0)\n",
"\n",
" with torch.no_grad():\n",
" output = model(image)\n",
"\n",
" probabilities = torch.softmax(output, dim=1)[0]\n",
" similarity_scores = probabilities.numpy()\n",
"\n",
" return {\"similarity_scores\": similarity_scores.tolist()}"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4a99a8b6",
"metadata": {},
"outputs": [],
"source": [
"@app.get(\"/\")\n",
"async def hello_world(): \n",
"\n",
" return {\"Hello World\"}"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7b30f5c6",
"metadata": {},
"outputs": [],
"source": [
"if __name__ == \"__main__\":\n",
" loop = asyncio.get_event_loop()\n",
" loop.create_task(uvicorn.run(app, host=\"127.0.0.1\", port=8001))\n",
" loop.run_forever()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3e2e07d8",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "3d735e52",
"metadata": {},
"outputs": [],
"source": [
"import nest_asyncio\n",
"import asyncio\n",
"import torch.nn.functional as F\n",
"\n",
"# Apply the patch\n",
"nest_asyncio.apply()"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "db756418",
"metadata": {},
"outputs": [],
"source": [
"from fastapi import FastAPI, UploadFile\n",
"from PIL import Image\n",
"import torch\n",
"import torchvision.transforms as transforms\n",
"import numpy as np\n",
"import io\n",
"import torch.nn as nn\n",
"import asyncio\n",
"import uvicorn \n",
"from io import BytesIO"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "fc83d1b8",
"metadata": {},
"outputs": [],
"source": [
"app = FastAPI()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "63dbfc01",
"metadata": {},
"outputs": [],
"source": [
"transform = transforms.Compose([\n",
" transforms.Resize((300, 300)),\n",
" transforms.Grayscale(num_output_channels=1),\n",
" transforms.ToTensor(),\n",
" transforms.Normalize(mean=(0.5), std=(0.5))\n",
"])"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "f7e5cdba",
"metadata": {},
"outputs": [],
"source": [
"class theCNN(nn.Module):\n",
" def __init__(self):\n",
" super(theCNN, self).__init__()\n",
" \n",
" self.conv01 = nn.Conv2d(\n",
" in_channels=1,\n",
" out_channels=10,\n",
" kernel_size=5,\n",
" stride=1,\n",
" padding=1\n",
" )\n",
" \n",
" self.conv02 = nn.Conv2d(\n",
" in_channels=10,\n",
" out_channels=20,\n",
" kernel_size=5,\n",
" stride=1,\n",
" padding=1\n",
" )\n",
" \n",
" expectedSize = int(np.floor((73 + 2 * 0 - 1) / 1) + 1)\n",
" expectedSize = 20 * int(expectedSize ** 2)\n",
" \n",
" self.fc01 = nn.Linear(expectedSize, 50)\n",
" self.output = nn.Linear(50, 16)\n",
"\n",
" def forward(self, x):\n",
" x = F.relu(F.max_pool2d(self.conv01(x), 2))\n",
" x = F.relu(F.max_pool2d(self.conv02(x), 2))\n",
" nUnits = x.shape.numel() / x.shape[0]\n",
" x = x.view(-1, int(nUnits))\n",
" x = F.relu(self.fc01(x))\n",
" return torch.softmax(self.output(x), axis=1)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "d86a9515",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"theCNN(\n",
" (conv01): Conv2d(1, 10, kernel_size=(5, 5), stride=(1, 1), padding=(1, 1))\n",
" (conv02): Conv2d(10, 20, kernel_size=(5, 5), stride=(1, 1), padding=(1, 1))\n",
" (fc01): Linear(in_features=106580, out_features=50, bias=True)\n",
" (output): Linear(in_features=50, out_features=16, bias=True)\n",
")"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"model = theCNN()\n",
"model.load_state_dict(torch.load(\"model.pth\"))\n",
"model.eval()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "4b527135",
"metadata": {},
"outputs": [],
"source": [
"# @app.post(\"/predict-similarity\")\n",
"# async def predict_similarity(image: UploadFile):\n",
"# image_bytes = await image.read()\n",
"# img = Image.open(BytesIO(image_bytes))\n",
"# img = transform(img).unsqueeze(0)\n",
"# output = model(img)\n",
"# similarity_score = torch.max(output).item() * 100 # Get the maximum predicted probability as the similarity score\n",
"# return {\"similarity_score\": similarity_score}\n",
"\n",
"\n",
"@app.post(\"/predict-similarity\")\n",
"async def predict_similarity(sign: str, image: UploadFile):\n",
" image_bytes = await image.read()\n",
" img = Image.open(BytesIO(image_bytes))\n",
" img = transform(img).unsqueeze(0)\n",
" output = model(img)\n",
" similarity_score = torch.max(output).item() * 100 # Get the maximum predicted probability as the similarity score\n",
" return {\"sign\": sign, \"similarity_score\": similarity_score}"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "df08611e",
"metadata": {},
"outputs": [],
"source": [
"@app.get(\"/\")\n",
"async def hello_world(): \n",
"\n",
" return {\"Hello World\"}"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "84b9601b",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO: Started server process [36440]\n",
"INFO: Waiting for application startup.\n",
"INFO: Application startup complete.\n",
"INFO: Uvicorn running on http://127.0.0.1:8001 (Press CTRL+C to quit)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"INFO: 127.0.0.1:63408 - \"POST /predict-similarity HTTP/1.1\" 422 Unprocessable Entity\n",
"INFO: 127.0.0.1:63440 - \"POST /predict-similarity HTTP/1.1\" 422 Unprocessable Entity\n",
"INFO: 127.0.0.1:63461 - \"POST /predict-similarity HTTP/1.1\" 422 Unprocessable Entity\n",
"INFO: 127.0.0.1:63484 - \"POST /predict-similarity HTTP/1.1\" 422 Unprocessable Entity\n",
"INFO: 127.0.0.1:63502 - \"POST /predict-similarity HTTP/1.1\" 422 Unprocessable Entity\n",
"INFO: 127.0.0.1:63517 - \"POST /predict-similarity HTTP/1.1\" 422 Unprocessable Entity\n",
"INFO: 127.0.0.1:63523 - \"POST /predict-similarity HTTP/1.1\" 422 Unprocessable Entity\n",
"INFO: 127.0.0.1:63536 - \"POST /predict-similarity HTTP/1.1\" 422 Unprocessable Entity\n",
"INFO: 127.0.0.1:63536 - \"POST /predict-similarity HTTP/1.1\" 422 Unprocessable Entity\n",
"INFO: 127.0.0.1:63536 - \"POST /predict-similarity HTTP/1.1\" 422 Unprocessable Entity\n",
"INFO: 127.0.0.1:63546 - \"POST /predict-similarity HTTP/1.1\" 422 Unprocessable Entity\n"
]
}
],
"source": [
"if __name__ == \"__main__\":\n",
" loop = asyncio.get_event_loop()\n",
" loop.create_task(uvicorn.run(app, host=\"127.0.0.1\", port=8001))\n",
" loop.run_forever()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ab358400",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "504cf81d",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -41,7 +41,7 @@ export const signUp = async (req, res) => {
} = req.body;
try {
if (!type) return res.status(400).json({ code: "02", message: "Type Field Required" })
// if (!type) return res.status(400).json({ code: "02", message: "Type Field Required" })
if (!email) return res.status(400).json({ code: "02", message: "Email Field Required" })
if (!userFirstName) return res.status(400).json({ code: "02", message: "User First Name Field Required" })
if (!userLastName) return res.status(400).json({ code: "02", message: "User Last Name Field Required" })
@@ -50,51 +50,74 @@ export const signUp = async (req, res) => {
const existingUser = await User.findOne({ email })
if (existingUser) return res.status(400).json({ code: "02", message: "User already exists" })
if (type === "buyer") {
if (!password) return res.status(400).json({ code: "02", message: "Password Field Required" })
if (password !== confirmPassword) return res.status(400).json({ code: "02", message: "Passwords do not match" })
const hashedPassword = await bcrypt.hash(password, 12)
const userDetails = new User({
email,
password: hashedPassword,
type,
userDetails: {
userQNumber: uuidv4(),
userEmail: email,
userName: `${userFirstName} ${userLastName}`,
userContactNumber,
userAddress: `${userAddressLine1}, ${userAddressLine2}, ${userAddressLine3}`,
userType: type,
}
})
const userResult = await userDetails.save()
const token = jwt.sign({ email: userResult.email, id: userResult._id }, 'test', { expiresIn: "1h" })
res.status(200).json({ code: "01", result: userResult, token })
} else if (type === "trader") {
const userDetails = new User({
email,
type,
userDetails: {
userQNumber: uuidv4(),
userEmail: email,
userName: `${userFirstName} ${userLastName}`,
userContactNumber,
userAddress: `${userAddressLine1}, ${userAddressLine2}, ${userAddressLine3}`,
userType: type,
},
states: 2
})
const userResult = await userDetails.save()
const token = jwt.sign({ email: userResult.email, id: userResult._id }, 'test', { expiresIn: "1h" })
res.status(200).json({ code: "01", result: userResult, token })
}
// if (type === "buyer") {
// if (!password) return res.status(400).json({ code: "02", message: "Password Field Required" })
// if (password !== confirmPassword) return res.status(400).json({ code: "02", message: "Passwords do not match" })
// const hashedPassword = await bcrypt.hash(password, 12)
// const userDetails = new User({
// email,
// password: hashedPassword,
// type,
// userDetails: {
// userQNumber: uuidv4(),
// userEmail: email,
// userName: `${userFirstName} ${userLastName}`,
// userContactNumber,
// userAddress: `${userAddressLine1}, ${userAddressLine2}, ${userAddressLine3}`,
// userType: type,
// }
// })
// const userResult = await userDetails.save()
// const token = jwt.sign({ email: userResult.email, id: userResult._id }, 'test', { expiresIn: "1h" })
// res.status(200).json({ code: "01", result: userResult, token })
// } else if (type === "trader") {
// const userDetails = new User({
// email,
// type,
// userDetails: {
// userQNumber: uuidv4(),
// userEmail: email,
// userName: `${userFirstName} ${userLastName}`,
// userContactNumber,
// userAddress: `${userAddressLine1}, ${userAddressLine2}, ${userAddressLine3}`,
// userType: type,
// },
// states: 2
// })
// const userResult = await userDetails.save()
// const token = jwt.sign({ email: userResult.email, id: userResult._id }, 'test', { expiresIn: "1h" })
// res.status(200).json({ code: "01", result: userResult, token })
// }
if (!password) return res.status(400).json({ code: "02", message: "Password Field Required" })
if (password !== confirmPassword) return res.status(400).json({ code: "02", message: "Passwords do not match" })
const hashedPassword = await bcrypt.hash(password, 12)
const userDetails = new User({
email,
password: hashedPassword,
type,
userDetails: {
userQNumber: uuidv4(),
userEmail: email,
userName: `${userFirstName} ${userLastName}`,
userContactNumber,
userAddress: `${userAddressLine1}, ${userAddressLine2}, ${userAddressLine3}`,
}
})
const userResult = await userDetails.save()
const token = jwt.sign({ email: userResult.email, id: userResult._id }, 'test', { expiresIn: "1h" })
res.status(200).json({ code: "01", result: userResult, token })
} catch (error) {
res.status(500).json({ code: "00", message: "Something went wrong" })
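With the branch-specific blocks commented out, every `type` now goes through the same password, hash, and save flow above. A sketch of the request the new handler expects; the route path and port are assumptions, only the field names come from the handler itself:

import requests

payload = {
    "type": "buyer",  # any non-empty type now passes the remaining checks
    "email": "jane@example.com",
    "userFirstName": "Jane",
    "userLastName": "Doe",
    "userContactNumber": "0771234567",
    "userAddressLine1": "12 Main St",
    "userAddressLine2": "Colombo 03",
    "userAddressLine3": "Sri Lanka",
    "password": "s3cret!",
    "confirmPassword": "s3cret!",
}
resp = requests.post("http://localhost:5000/user/signUp", json=payload)  # assumed route
print(resp.json())  # expected: {"code": "01", "result": {...}, "token": "..."}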
const curriculums = [
{
"curriculumCode": "01",
"curriculumLevel": "Base Level",
"curriculumName": "Learn Sign Language",
"curriculumImage": "https://drive.google.com/uc?export=view&id=1YACBlu7X-O7-DKv5DoW3AM9kgfT7Yhdc",
"tutorials": [
{
"tutorialCode": "01",
"tutorialTitle": "Numbers and Counting in Sign Language",
"tutorialImage": "https://drive.google.com/uc?export=view&id=1YACBlu7X-O7-DKv5DoW3AM9kgfT7Yhdc",
"taskItems": [
{
"title": "Learn Number One",
"description": "Learn how to sign the number one in sign language.",
"howToDo": "- Extend your index finger straight up.\n- Keep the rest of your fingers closed.\n- Hold your hand in front of your chest.",
"referenceImage": "https://example.com/number_one.jpg",
"referenceVideo": "https://example.com/number_one_video.mp4"
},
{
"title": "Learn Number Two",
"description": "Learn how to sign the number two in sign language.",
"howToDo": "- Extend your index and middle fingers straight up.\n- Keep the rest of your fingers closed.\n- Hold your hand in front of your chest.",
"referenceImage": "https://example.com/number_two.jpg",
"referenceVideo": "https://example.com/number_two_video.mp4"
},
{
"title": "Learn Number Three",
"description": "Learn how to sign the number three in sign language.",
"howToDo": "- Extend your index, middle, and ring fingers straight up.\n- Keep the rest of your fingers closed.\n- Hold your hand in front of your chest.",
"referenceImage": "https://example.com/number_three.jpg",
"referenceVideo": "https://example.com/number_three_video.mp4"
},
{
"title": "Learn Number Four",
"description": "Learn how to sign the number four in sign language.",
"howToDo": "- Extend your thumb, index, middle, and ring fingers straight up.\n- Keep your pinky finger folded.\n- Hold your hand in front of your chest.",
"referenceImage": "https://example.com/number_four.jpg",
"referenceVideo": "https://example.com/number_four_video.mp4"
},
{
"title": "Learn Number Five",
"description": "Learn how to sign the number five in sign language.",
"howToDo": "- Extend all your fingers straight up.\n- Keep your thumb resting on the side of your palm.\n- Hold your hand in front of your chest.",
"referenceImage": "https://example.com/number_five.jpg",
"referenceVideo": "https://example.com/number_five_video.mp4"
},
{
"title": "Learn Number Six",
"description": "Learn how to sign the number six in sign language.",
"howToDo": "- Extend your thumb and pinky finger straight up.\n- Keep the rest of your fingers closed.\n- Hold your hand in front of your chest.",
"referenceImage": "https://example.com/number_six.jpg",
"referenceVideo": "https://example.com/number_six_video.mp4"
},
{
"title": "Learn Number Seven",
"description": "Learn how to sign the number seven in sign language.",
"howToDo": "- Extend your index, middle, and ring fingers straight up.\n- Keep your thumb, pinky, and pinky finger folded.\n- Hold your hand in front of your chest.",
"referenceImage": "https://example.com/number_seven.jpg",
"referenceVideo": "https://example.com/number_seven_video.mp4"
},
{
"title": "Learn Number Eight",
"description": "Learn how to sign the number eight in sign language.",
"howToDo": "- Extend all your fingers straight up.\n- Cross your index and middle fingers over your ring and pinky fingers.\n- Hold your hand in front of your chest.",
"referenceImage": "https://example.com/number_eight.jpg",
"referenceVideo": "https://example.com/number_eight_video.mp4"
},
{
"title": "Learn Number Nine",
"description": "Learn how to sign the number nine in sign language.",
"howToDo": "- Extend your thumb and all your fingers straight up.\n- Keep your pinky finger folded.\n- Hold your hand in front of your chest.",
"referenceImage": "https://example.com/number_nine.jpg",
"referenceVideo": "https://example.com/number_nine_video.mp4"
},
{
"title": "Learn Number Ten",
"description": "Learn how to sign the number ten in sign language.",
"howToDo": "- Extend your thumb, index, and middle fingers straight up.\n- Keep the rest of your fingers closed.\n- Hold your hand in front of your chest.",
"referenceImage": "https://example.com/number_ten.jpg",
"referenceVideo": "https://example.com/number_ten_video.mp4"
}
]
},
{
"tutorialCode": "02",
"tutorialTitle": "Learn the Basics of Sign Language",
"tutorialImage": "https://drive.google.com/uc?export=view&id=1YACBlu7X-O7-DKv5DoW3AM9kgfT7Yhdc",
"tutorialContent": "Introduce the concept of sign language and its importance.\nTeach basic greetings and expressions, such as hello, goodbye, thank you, and sorry.\nProvide visual demonstrations and practice exercises for learners to practice these basic signs."
},
{
"tutorialCode": "03",
"tutorialTitle": "Family Signs in Sign Language",
"tutorialImage": "https://drive.google.com/uc?export=view&id=1YACBlu7X-O7-DKv5DoW3AM9kgfT7Yhdc",
"tutorialContent": "Teach signs for family members, such as mother, father, sister, brother, etc.\nIntroduce signs for common family-related words, such as family, love, and home.\nProvide visual demonstrations and practice exercises for learners to practice these family signs."
},
{
"tutorialCode": "04",
"tutorialTitle": "Everyday Vocabulary in Sign Language",
"tutorialImage": "https://drive.google.com/uc?export=view&id=1YACBlu7X-O7-DKv5DoW3AM9kgfT7Yhdc",
"tutorialContent": "Teach signs for everyday objects and activities, such as eat, drink, sleep, book, pen, etc.\nIntroduce signs for common words used in daily life.\nProvide visual demonstrations and interactive exercises for learners to practice using these signs."
},
{
"tutorialCode": "05",
"tutorialTitle": "Basic Conversational Phrases in Sign Language",
"tutorialImage": "https://drive.google.com/uc?export=view&id=1YACBlu7X-O7-DKv5DoW3AM9kgfT7Yhdc",
"tutorialContent": "Teach simple conversational phrases, such as \"What is your name?\" or \"How are you?\"\nIntroduce signs for common question words and phrases.\nProvide visual demonstrations and practice exercises for learners to practice these conversational phrases."
}
]
},
{
"curriculumCode": "02",
"curriculumLevel": "Medium Level",
"curriculumName": "Learn Sign Language",
"curriculumImage": "https://drive.google.com/uc?export=view&id=1YACBlu7X-O7-DKv5DoW3AM9kgfT7Yhdc",
"tutorials": []
},
{
"curriculumCode": "03",
"curriculumLevel": "Advance Level",
"curriculumName": "Learn Sign Language",
"curriculumImage": "https://drive.google.com/uc?export=view&id=1YACBlu7X-O7-DKv5DoW3AM9kgfT7Yhdc",
"tutorials": []
}
]
@@ -36,6 +36,7 @@ const userSchema = mongoose.Schema({
},
userType: {
type: String,
default: "N/A",
required: true
},
},
@@ -5,8 +5,8 @@ import express from "express";
import mongoose from "mongoose";
//import routes
import userRoutes from "./routes/user.routes.js";
import translateRoutes from "./routes/translate.routes.js";
import userRoutes from "./routes/user.routes.js";
dotenv.config();
const app = express();
{
"name": "React Material Minimal UI",
"name": "React Material SignConnect+",
"short_name": "Minimal-UI",
"display": "standalone",
"start_url": "/",
@@ -17,7 +17,7 @@ import MegaMenuCarousel from './MenuCarousel';
// ----------------------------------------------------------------------
const MENU_PAPER_WIDTH = 800;
const PARENT_ITEM_HEIGHT = 40;
const PARENT_ITEM_HEIGHT = 60;
type Props = {
data: MegaMenuItemProps[];
@@ -2,20 +2,18 @@ import { useEffect } from 'react';
// next
import { useRouter } from 'next/router';
// @mui
import { Box, Stack, Drawer, Button } from '@mui/material';
import { Box, Button, Drawer, Stack } from '@mui/material';
// hooks
import useResponsive from '../../../hooks/useResponsive';
// config
import { NAV } from '../../../config';
// components
import Logo from '../../../components/logo';
import Scrollbar from '../../../components/scrollbar';
import { NavSectionVertical } from '../../../components/nav-section';
import Scrollbar from '../../../components/scrollbar';
//
import navConfig from './config';
import NavDocs from './NavDocs';
import NavAccount from './NavAccount';
import { useSettingsContext } from 'src/components/settings';
import NavAccount from './NavAccount';
import navConfig from './config';
// ----------------------------------------------------------------------
@@ -38,12 +36,12 @@ export default function NavVertical({ openNav, onCloseNav }: Props) {
value: 'mini'
}
} as React.ChangeEvent<HTMLInputElement>)
}else {
} else {
onChangeLayout({
target: {
value: 'vertical'
}
} as React.ChangeEvent<HTMLInputElement> )
} as React.ChangeEvent<HTMLInputElement>)
}
}
@@ -75,7 +73,7 @@
}}
>
<Button onClick={onClickHan}>Change Layout</Button>
<Button onClick={onClickHan}>Change Layout</Button>
{/* <Logo /> */}
<NavAccount />
@@ -84,8 +82,7 @@
<NavSectionVertical data={navConfig} />
<Box sx={{ flexGrow: 1 }} />
<NavDocs />
</Scrollbar>
);
// routes
import { PATH_DASHBOARD } from '../../../routes/paths';
// components
import Label from '../../../components/label';
import Iconify from '../../../components/iconify';
import SvgColor from '../../../components/svg-color';
// ----------------------------------------------------------------------
@@ -33,206 +31,40 @@ const ICONS = {
ecommerce: icon('ic_ecommerce'),
analytics: icon('ic_analytics'),
dashboard: icon('ic_dashboard'),
learning: icon('ic_learning')
};
const navConfig = [
// GENERAL
// ----------------------------------------------------------------------
// GENERAL
{
subheader: 'general',
items: [
{ title: 'app', path: PATH_DASHBOARD.general.app, icon: ICONS.dashboard },
{ title: 'ecommerce', path: PATH_DASHBOARD.general.ecommerce, icon: ICONS.ecommerce },
{ title: 'analytics', path: PATH_DASHBOARD.general.analytics, icon: ICONS.analytics },
{ title: 'banking', path: PATH_DASHBOARD.general.banking, icon: ICONS.banking },
{ title: 'booking', path: PATH_DASHBOARD.general.booking, icon: ICONS.booking },
{ title: 'file', path: PATH_DASHBOARD.general.file, icon: ICONS.file },
],
},
// MANAGEMENT
// ----------------------------------------------------------------------
{
subheader: 'management',
items: [
// USER
{
title: 'user',
path: PATH_DASHBOARD.user.root,
icon: ICONS.user,
children: [
{ title: 'profile', path: PATH_DASHBOARD.user.profile },
{ title: 'cards', path: PATH_DASHBOARD.user.cards },
{ title: 'list', path: PATH_DASHBOARD.user.list },
{ title: 'create', path: PATH_DASHBOARD.user.new },
{ title: 'edit', path: PATH_DASHBOARD.user.demoEdit },
{ title: 'account', path: PATH_DASHBOARD.user.account },
],
},
// E-COMMERCE
{ title: 'blank', path: PATH_DASHBOARD.blank, icon: ICONS.blank },
//Spoken language to Sign Language Module items
{
title: 'ecommerce',
path: PATH_DASHBOARD.eCommerce.root,
icon: ICONS.cart,
children: [
{ title: 'shop', path: PATH_DASHBOARD.eCommerce.shop },
{ title: 'product', path: PATH_DASHBOARD.eCommerce.demoView },
{ title: 'list', path: PATH_DASHBOARD.eCommerce.list },
{ title: 'create', path: PATH_DASHBOARD.eCommerce.new },
{ title: 'edit', path: PATH_DASHBOARD.eCommerce.demoEdit },
{ title: 'checkout', path: PATH_DASHBOARD.eCommerce.checkout },
],
},
// INVOICE
{
title: 'invoice',
path: PATH_DASHBOARD.invoice.root,
icon: ICONS.invoice,
title: 'Spoken language Translation Module',
path: PATH_DASHBOARD.spokenLanguageTranslationModule.root,
icon: ICONS.chat,
children: [
{ title: 'list', path: PATH_DASHBOARD.invoice.list },
{ title: 'details', path: PATH_DASHBOARD.invoice.demoView },
{ title: 'create', path: PATH_DASHBOARD.invoice.new },
{ title: 'edit', path: PATH_DASHBOARD.invoice.demoEdit },
{ title: 'Home', path: PATH_DASHBOARD.spokenLanguageTranslationModule.spokenLanguageTranslationHome },
],
},
// BLOG
// Sign Language Learning Module items
{
title: 'blog',
path: PATH_DASHBOARD.blog.root,
icon: ICONS.blog,
title: 'Learning Module',
path: PATH_DASHBOARD.learningModule.root,
icon: ICONS.learning,
children: [
{ title: 'posts', path: PATH_DASHBOARD.blog.posts },
{ title: 'post', path: PATH_DASHBOARD.blog.demoView },
{ title: 'create', path: PATH_DASHBOARD.blog.new },
{ title: 'Curriculum', path: PATH_DASHBOARD.learningModule.curriculumHome },
{ title: 'Question and Answers', path: PATH_DASHBOARD.learningModule.questionAndAnswersHome },
{ title: 'Leaderboard', path: PATH_DASHBOARD.learningModule.leadBoardHome },
{ title: 'Feedback', path: PATH_DASHBOARD.learningModule.feedbackHome },
],
},
{
title: 'File manager',
path: PATH_DASHBOARD.fileManager,
icon: ICONS.folder,
},
],
},
// APP
// ----------------------------------------------------------------------
{
subheader: 'app',
items: [
{
title: 'mail',
path: PATH_DASHBOARD.mail.root,
icon: ICONS.mail,
info: <Label color="error">+32</Label>,
},
{
title: 'chat',
path: PATH_DASHBOARD.chat.root,
icon: ICONS.chat,
},
{
title: 'calendar',
path: PATH_DASHBOARD.calendar,
icon: ICONS.calendar,
},
{
title: 'kanban',
path: PATH_DASHBOARD.kanban,
icon: ICONS.kanban,
},
],
},
// DEMO MENU STATES
{
subheader: 'Other cases',
items: [
{
// default roles : All roles can see this entry.
// roles: ['user'] Only users can see this item.
// roles: ['admin'] Only admin can see this item.
// roles: ['admin', 'manager'] Only admin/manager can see this item.
// Reference from 'src/guards/RoleBasedGuard'.
title: 'item_by_roles',
path: PATH_DASHBOARD.permissionDenied,
icon: ICONS.lock,
roles: ['admin'],
caption: 'only_admin_can_see_this_item',
},
{
title: 'menu_level',
path: '#/dashboard/menu_level',
icon: ICONS.menuItem,
children: [
{
title: 'menu_level_2a',
path: '#/dashboard/menu_level/menu_level_2a',
},
{
title: 'menu_level_2b',
path: '#/dashboard/menu_level/menu_level_2b',
children: [
{
title: 'menu_level_3a',
path: '#/dashboard/menu_level/menu_level_2b/menu_level_3a',
},
{
title: 'menu_level_3b',
path: '#/dashboard/menu_level/menu_level_2b/menu_level_3b',
children: [
{
title: 'menu_level_4a',
path: '#/dashboard/menu_level/menu_level_2b/menu_level_3b/menu_level_4a',
},
{
title: 'menu_level_4b',
path: '#/dashboard/menu_level/menu_level_2b/menu_level_3b/menu_level_4b',
},
],
},
],
},
],
},
{
title: 'item_disabled',
path: '#disabled',
icon: ICONS.disabled,
disabled: true,
},
{
title: 'item_label',
path: '#label',
icon: ICONS.label,
info: (
<Label color="info" startIcon={<Iconify icon="eva:email-fill" />}>
NEW
</Label>
),
},
{
title: 'item_caption',
path: '#caption',
icon: ICONS.menuItem,
caption:
'Quisque malesuada placerat nisl. In hac habitasse platea dictumst. Cras id dui. Pellentesque commodo eros a enim. Morbi mollis tellus ac sapien.',
},
{
title: 'item_external_link',
path: 'https://www.google.com/',
icon: ICONS.external,
},
{
title: 'blank',
path: PATH_DASHBOARD.blank,
icon: ICONS.blank,
},
],
},
];
export default navConfig;
@@ -97,7 +97,7 @@ export default function Footer() {
<Grid item xs={8} md={3}>
<Typography variant="body2" sx={{ pr: { md: 5 } }}>
The starting point for your next project with Minimal UI Kit, built on the newest
The starting point for your next project with SignConnect+ Kit, built on the newest
version of Material-UI ©, ready to be customized to your style.
</Typography>
@@ -21,7 +21,7 @@ export default function Page403() {
return (
<>
<Head>
<title> 403 Forbidden | Minimal UI</title>
<title> 403 Forbidden | SignConnect+</title>
</Head>
<MotionContainer>
@@ -21,7 +21,7 @@ export default function Page404() {
return (
<>
<Head>
<title> 404 Page Not Found | Minimal UI</title>
<title> 404 Page Not Found | SignConnect+</title>
</Head>
<MotionContainer>
@@ -21,7 +21,7 @@ export default function Page500() {
return (
<>
<Head>
<title> 500 Internal Server Error | Minimal UI</title>
<title> 500 Internal Server Error | SignConnect+</title>
</Head>
<MotionContainer>
@@ -45,10 +45,10 @@ export default class MyDocument extends Document {
{/* Meta */}
<meta
name="description"
content="The starting point for your next project with Minimal UI Kit, built on the newest version of Material-UI ©, ready to be customized to your style"
content="The starting point for your next project with SignConnect+ Kit, built on the newest version of Material-UI ©, ready to be customized to your style"
/>
<meta name="keywords" content="react,material,kit,application,dashboard,admin,template" />
<meta name="author" content="Minimal UI Kit" />
<meta name="author" content="SignConnect+ Kit" />
</Head>
<body>
@@ -17,7 +17,7 @@ export default function AboutPage() {
return (
<>
<Head>
<title> About us | Minimal UI</title>
<title> About us | SignConnect+</title>
</Head>
<AboutHero />
@@ -9,7 +9,7 @@ export default function LoginUnprotectedPage() {
return (
<>
<Head>
<title> Login Unprotected | Minimal UI</title>
<title> Login Unprotected | SignConnect+</title>
</Head>
<Login />
@@ -12,7 +12,7 @@ export default function LoginPage() {
return (
<>
<Head>
<title> Login | Minimal UI</title>
<title> Login | SignConnect+</title>
</Head>
<GuestGuard>
@@ -24,7 +24,7 @@ export default function NewPasswordPage() {
return (
<>
<Head>
<title> New Password | Minimal UI</title>
<title> New Password | SignConnect+</title>
</Head>
<SentIcon sx={{ mb: 5, height: 96 }} />
@@ -9,7 +9,7 @@ export default function RegisterUnprotectedPage() {
return (
<>
<Head>
<title> Register Unprotected | Minimal UI</title>
<title> Register Unprotected | SignConnect+</title>
</Head>
<Register />
// next
import Head from 'next/head';
// auth
import GuestGuard from '../../auth/GuestGuard';
// sections
import Register from '../../sections/auth/Register';
@@ -11,12 +10,10 @@ export default function RegisterPage() {
return (
<>
<Head>
<title> Register | Minimal UI</title>
<title> Register | SignConnect+</title>
</Head>
<GuestGuard>
<Register />
</GuestGuard>
<Register />
</>
);
}
@@ -24,7 +24,7 @@ export default function ResetPasswordPage() {
return (
<>
<Head>
<title> Reset Password | Minimal UI</title>
<title> Reset Password | SignConnect+</title>
</Head>
<PasswordIcon sx={{ mb: 5, height: 96 }} />
@@ -24,7 +24,7 @@ export default function VerifyCodePage() {
return (
<>
<Head>
<title> Verify Code | Minimal UI</title>
<title> Verify Code | SignConnect+</title>
</Head>
<EmailInboxIcon sx={{ mb: 5, height: 96 }} />
@@ -27,7 +27,7 @@ export default function ComingSoonPage() {
return (
<>
<Head>
<title> Coming Soon | Minimal UI</title>
<title> Coming Soon | SignConnect+</title>
</Head>
<Typography variant="h3" paragraph>