Commit d82e0eb2 authored by Sumudu-Himasha-Ranaweera

Merge branch 'master' into IT20251000

parents d9ea03b8 39909344
models/*
!models/
DataSet/Sn_sign_language_dataset/
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "f47e929b",
"metadata": {},
"outputs": [],
"source": [
"import nest_asyncio\n",
"import asyncio\n",
"import torch.nn.functional as F\n",
"import torch.nn as nn\n",
"import torchvision.transforms as transforms\n",
"import numpy as np\n",
"import io\n",
"import uvicorn\n",
"from fastapi import FastAPI, UploadFile\n",
"from PIL import Image\n",
"import torch"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "13b1d58b",
"metadata": {},
"outputs": [],
"source": [
"nest_asyncio.apply()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "35f4adc0",
"metadata": {},
"outputs": [],
"source": [
"app = FastAPI()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "a5aba4be",
"metadata": {},
"outputs": [],
"source": [
"transform = transforms.Compose([\n",
" transforms.Resize((300, 300)),\n",
" transforms.Grayscale(num_output_channels=1),\n",
" transforms.ToTensor(),\n",
" transforms.Normalize(mean=(0.5), std=(0.5))\n",
"])"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "11ec2fae",
"metadata": {},
"outputs": [],
"source": [
"class theCNN(nn.Module):\n",
" def __init__(self):\n",
" super(theCNN, self).__init__()\n",
" \n",
" self.conv01 = nn.Conv2d(\n",
" in_channels=1,\n",
" out_channels=10,\n",
" kernel_size=5,\n",
" stride=1,\n",
" padding=1\n",
" )\n",
" \n",
" self.conv02 = nn.Conv2d(\n",
" in_channels=10,\n",
" out_channels=20,\n",
" kernel_size=5,\n",
" stride=1,\n",
" padding=1\n",
" )\n",
" \n",
" expectedSize = int(np.floor((73 + 2 * 0 - 1) / 1) + 1)\n",
" expectedSize = 20 * int(expectedSize ** 2)\n",
" \n",
" self.fc01 = nn.Linear(expectedSize, 50)\n",
" self.output = nn.Linear(50, 16)\n",
"\n",
" def forward(self, x):\n",
" x = F.relu(F.max_pool2d(self.conv01(x), 2))\n",
" x = F.relu(F.max_pool2d(self.conv02(x), 2))\n",
" nUnits = x.shape.numel() / x.shape[0]\n",
" x = x.view(-1, int(nUnits))\n",
" x = F.relu(self.fc01(x))\n",
" return torch.softmax(self.output(x), axis=1)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "353a4725",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"theCNN(\n",
" (conv01): Conv2d(1, 10, kernel_size=(5, 5), stride=(1, 1), padding=(1, 1))\n",
" (conv02): Conv2d(10, 20, kernel_size=(5, 5), stride=(1, 1), padding=(1, 1))\n",
" (fc01): Linear(in_features=106580, out_features=50, bias=True)\n",
" (output): Linear(in_features=50, out_features=16, bias=True)\n",
")"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"model = theCNN()\n",
"model.load_state_dict(torch.load(\"model.pth\"))\n",
"model.eval()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "bb87b7f0",
"metadata": {},
"outputs": [],
"source": [
"@app.post(\"/score\")\n",
"async def calculate_score(image_file: UploadFile):\n",
" image = Image.open(io.BytesIO(await image_file.read())).convert(\"L\")\n",
" image = transform(image).unsqueeze(0)\n",
"\n",
" with torch.no_grad():\n",
" output = model(image)\n",
"\n",
" probabilities = torch.softmax(output, dim=1)[0]\n",
" predicted_class = torch.argmax(probabilities).item()\n",
"\n",
" # Get the actual number corresponding to the hand sign\n",
" actual_number = get_actual_number_from_image(image)\n",
" \n",
" print(actual_number)\n",
"\n",
" # Compare predicted class with actual number and calculate correctness percentage\n",
" correct = int(predicted_class + 1 == actual_number)\n",
" print(correct)\n",
" correctness_percentage = correct / 1.0 * 100.0\n",
"\n",
" return {\"predicted_class\": predicted_class, \"correctness_percentage\": correctness_percentage}"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "05646e93",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import numpy as np\n",
"\n",
"def get_actual_number_from_image(image):\n",
" # Convert the image to numpy array\n",
" image_array = np.array(image)\n",
"\n",
" # Apply image processing techniques to detect and recognize digits\n",
" # Example steps: thresholding, contour detection, character segmentation, digit recognition\n",
"\n",
" # Apply thresholding\n",
" _, binary_image = cv2.threshold(image_array, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n",
"\n",
" # Find contours\n",
" contours, _ = cv2.findContours(binary_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
"\n",
" # Sort contours based on their x-coordinate\n",
" contours = sorted(contours, key=lambda cnt: cv2.boundingRect(cnt)[0])\n",
"\n",
" # Initialize the recognized digit sequence\n",
" digit_sequence = \"\"\n",
"\n",
" # Iterate over the contours and recognize digits\n",
" for contour in contours:\n",
" # Get the bounding box of the contour\n",
" x, y, w, h = cv2.boundingRect(contour)\n",
"\n",
" # Crop the digit region from the image\n",
" digit_image = binary_image[y:y + h, x:x + w]\n",
"\n",
" # Resize the digit image to a fixed size (e.g., 28x28)\n",
" resized_digit_image = cv2.resize(digit_image, (28, 28))\n",
"\n",
" # Preprocess the resized digit image (e.g., normalize pixel values)\n",
" preprocessed_digit_image = resized_digit_image / 255.0\n",
"\n",
" # Flatten the preprocessed digit image\n",
" flattened_digit_image = preprocessed_digit_image.flatten()\n",
"\n",
" # Pass the flattened digit image to your digit recognition model\n",
" # to get the predicted digit (e.g., using a separate model or the same model you used for training)\n",
"\n",
" # Here, let's assume you have a function `predict_digit` that takes the flattened digit image\n",
" # and returns the predicted digit as an integer\n",
" predicted_digit = predict_digit(flattened_digit_image)\n",
"\n",
" # Add the predicted digit to the digit sequence\n",
" digit_sequence += str(predicted_digit)\n",
"\n",
" # Convert the digit sequence to an integer\n",
" actual_number = int(digit_sequence)\n",
"\n",
" return actual_number\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ee993fc1",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO: Started server process [36312]\n",
"INFO: Waiting for application startup.\n",
"INFO: Application startup complete.\n",
"INFO: Uvicorn running on http://127.0.0.1:8001 (Press CTRL+C to quit)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"5\n",
"0\n",
"INFO: 127.0.0.1:60625 - \"POST /score HTTP/1.1\" 200 OK\n"
]
}
],
"source": [
"if __name__ == \"__main__\":\n",
" loop = asyncio.get_event_loop()\n",
" loop.create_task(uvicorn.run(app, host=\"127.0.0.1\", port=8001))\n",
" loop.run_forever()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "34c5efea",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
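
A note on the /score notebook above: its get_actual_number_from_image helper calls a predict_digit function that is never defined, so any request whose image yields a contour would raise a NameError. A minimal sketch of what such a helper could look like; the architecture and the digit_model.pth checkpoint name are assumptions, not part of the original code:

import torch
import torch.nn as nn

# Hypothetical digit classifier consuming the flattened 28x28 crops
# produced in get_actual_number_from_image (784 inputs, 10 digit classes).
digit_model = nn.Sequential(
    nn.Linear(784, 128),
    nn.ReLU(),
    nn.Linear(128, 10),
)
# digit_model.load_state_dict(torch.load("digit_model.pth"))  # assumed checkpoint
digit_model.eval()

def predict_digit(flattened_digit_image):
    # flattened_digit_image: numpy array of shape (784,) with values in [0, 1]
    x = torch.tensor(flattened_digit_image, dtype=torch.float32).unsqueeze(0)
    with torch.no_grad():
        logits = digit_model(x)
    return int(torch.argmax(logits, dim=1).item())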
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "c898b57c",
"metadata": {},
"outputs": [],
"source": [
"pip install python-multipart"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "a99b4156",
"metadata": {},
"outputs": [],
"source": [
"import nest_asyncio\n",
"import asyncio\n",
"import torch.nn.functional as F\n",
"\n",
"# Apply the patch\n",
"nest_asyncio.apply()"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "01b6e57c",
"metadata": {},
"outputs": [],
"source": [
"from fastapi import FastAPI, UploadFile\n",
"from PIL import Image\n",
"import torch\n",
"import torchvision.transforms as transforms\n",
"import numpy as np\n",
"import io\n",
"import torch.nn as nn\n",
"import asyncio\n",
"import uvicorn "
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "2e561f13",
"metadata": {},
"outputs": [],
"source": [
"app = FastAPI()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "1e9e4208",
"metadata": {},
"outputs": [],
"source": [
"transform = transforms.Compose([\n",
" transforms.Resize((300, 300)),\n",
" transforms.Grayscale(num_output_channels=1),\n",
" transforms.ToTensor(),\n",
" transforms.Normalize(mean=(0.5), std=(0.5))\n",
"])"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "4d86d2b8",
"metadata": {},
"outputs": [],
"source": [
"class theCNN(nn.Module):\n",
" def __init__(self):\n",
" super(theCNN, self).__init__()\n",
" \n",
" self.conv01 = nn.Conv2d(\n",
" in_channels=1,\n",
" out_channels=10,\n",
" kernel_size=5,\n",
" stride=1,\n",
" padding=1\n",
" )\n",
" \n",
" self.conv02 = nn.Conv2d(\n",
" in_channels=10,\n",
" out_channels=20,\n",
" kernel_size=5,\n",
" stride=1,\n",
" padding=1\n",
" )\n",
" \n",
" expectedSize = int(np.floor((73 + 2 * 0 - 1) / 1) + 1)\n",
" expectedSize = 20 * int(expectedSize ** 2)\n",
" \n",
" self.fc01 = nn.Linear(expectedSize, 50)\n",
" self.output = nn.Linear(50, 16)\n",
"\n",
" def forward(self, x):\n",
" x = F.relu(F.max_pool2d(self.conv01(x), 2))\n",
" x = F.relu(F.max_pool2d(self.conv02(x), 2))\n",
" nUnits = x.shape.numel() / x.shape[0]\n",
" x = x.view(-1, int(nUnits))\n",
" x = F.relu(self.fc01(x))\n",
" return torch.softmax(self.output(x), axis=1)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "9379bf73",
"metadata": {},
"outputs": [
{
"ename": "TypeError",
"evalue": "Expected state_dict to be dict-like, got <class '__main__.theCNN'>.",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mTypeError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[1;32mIn[6], line 2\u001b[0m\n\u001b[0;32m 1\u001b[0m model \u001b[38;5;241m=\u001b[39m theCNN()\n\u001b[1;32m----> 2\u001b[0m \u001b[43mmodel\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mload_state_dict\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtorch\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mload\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mmodel1.pth\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 3\u001b[0m model\u001b[38;5;241m.\u001b[39meval()\n",
"File \u001b[1;32m~\\AppData\\Roaming\\Python\\Python310\\site-packages\\torch\\nn\\modules\\module.py:1994\u001b[0m, in \u001b[0;36mModule.load_state_dict\u001b[1;34m(self, state_dict, strict)\u001b[0m\n\u001b[0;32m 1971\u001b[0m \u001b[38;5;124mr\u001b[39m\u001b[38;5;124;03m\"\"\"Copies parameters and buffers from :attr:`state_dict` into\u001b[39;00m\n\u001b[0;32m 1972\u001b[0m \u001b[38;5;124;03mthis module and its descendants. If :attr:`strict` is ``True``, then\u001b[39;00m\n\u001b[0;32m 1973\u001b[0m \u001b[38;5;124;03mthe keys of :attr:`state_dict` must exactly match the keys returned\u001b[39;00m\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 1991\u001b[0m \u001b[38;5;124;03m ``RuntimeError``.\u001b[39;00m\n\u001b[0;32m 1992\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[0;32m 1993\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(state_dict, Mapping):\n\u001b[1;32m-> 1994\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mExpected state_dict to be dict-like, got \u001b[39m\u001b[38;5;132;01m{}\u001b[39;00m\u001b[38;5;124m.\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;241m.\u001b[39mformat(\u001b[38;5;28mtype\u001b[39m(state_dict)))\n\u001b[0;32m 1996\u001b[0m missing_keys: List[\u001b[38;5;28mstr\u001b[39m] \u001b[38;5;241m=\u001b[39m []\n\u001b[0;32m 1997\u001b[0m unexpected_keys: List[\u001b[38;5;28mstr\u001b[39m] \u001b[38;5;241m=\u001b[39m []\n",
"\u001b[1;31mTypeError\u001b[0m: Expected state_dict to be dict-like, got <class '__main__.theCNN'>."
]
}
],
"source": [
"model = theCNN()\n",
"model.load_state_dict(torch.load(\"model.pth\"))\n",
"model.eval()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "18b869d3",
"metadata": {},
"outputs": [],
"source": [
"@app.post(\"/score\")\n",
"async def calculate_score(image_file: UploadFile):\n",
" image = Image.open(io.BytesIO(await image_file.read())).convert(\"L\")\n",
" image = transform(image).unsqueeze(0)\n",
"\n",
" with torch.no_grad():\n",
" output = model(image)\n",
"\n",
" probabilities = torch.softmax(output, dim=1)[0]\n",
" similarity_scores = probabilities.numpy()\n",
"\n",
" return {\"similarity_scores\": similarity_scores.tolist()}"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4a99a8b6",
"metadata": {},
"outputs": [],
"source": [
"@app.get(\"/\")\n",
"async def hello_world(): \n",
"\n",
" return {\"Hello World\"}"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7b30f5c6",
"metadata": {},
"outputs": [],
"source": [
"if __name__ == \"__main__\":\n",
" loop = asyncio.get_event_loop()\n",
" loop.create_task(uvicorn.run(app, host=\"127.0.0.1\", port=8001))\n",
" loop.run_forever()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3e2e07d8",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
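
The TypeError recorded in this notebook ("Expected state_dict to be dict-like, got <class '__main__.theCNN'>") indicates the checkpoint was written with torch.save(model), pickling the whole module, so torch.load() already returns a theCNN instance rather than a state_dict. A hedged loading sketch that accepts either format (the file name follows the cell above):

import torch

checkpoint = torch.load("model.pth", map_location="cpu")
if isinstance(checkpoint, dict):
    # checkpoint is a state_dict: instantiate the class, then load weights
    model = theCNN()
    model.load_state_dict(checkpoint)
else:
    # checkpoint is the fully pickled module saved with torch.save(model)
    model = checkpoint
model.eval()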
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "60c58fd2",
"metadata": {},
"outputs": [],
"source": [
"import nest_asyncio\n",
"import asyncio\n",
"import torch.nn.functional as F\n",
"\n",
"# Apply the patch\n",
"nest_asyncio.apply()"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "c1b7af91",
"metadata": {},
"outputs": [],
"source": [
"from fastapi import FastAPI, UploadFile\n",
"from PIL import Image\n",
"import torch\n",
"import torchvision.transforms as transforms\n",
"import numpy as np\n",
"import io\n",
"import torch.nn as nn\n",
"import asyncio\n",
"import uvicorn \n",
"from io import BytesIO"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "7f1cd8a4",
"metadata": {},
"outputs": [],
"source": [
"app = FastAPI()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "2035acf5",
"metadata": {},
"outputs": [],
"source": [
"transform = transforms.Compose([\n",
" transforms.Resize((300, 300)),\n",
" transforms.Grayscale(num_output_channels=1),\n",
" transforms.ToTensor(),\n",
" transforms.Normalize(mean=(0.5), std=(0.5))\n",
"])"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "2cfca47d",
"metadata": {},
"outputs": [],
"source": [
"class theCNN(nn.Module):\n",
" def __init__(self):\n",
" super(theCNN, self).__init__()\n",
" \n",
" self.conv01 = nn.Conv2d(\n",
" in_channels=1,\n",
" out_channels=10,\n",
" kernel_size=5,\n",
" stride=1,\n",
" padding=1\n",
" )\n",
" \n",
" self.conv02 = nn.Conv2d(\n",
" in_channels=10,\n",
" out_channels=20,\n",
" kernel_size=5,\n",
" stride=1,\n",
" padding=1\n",
" )\n",
" \n",
" expectedSize = int(np.floor((73 + 2 * 0 - 1) / 1) + 1)\n",
" expectedSize = 20 * int(expectedSize ** 2)\n",
" \n",
" self.fc01 = nn.Linear(expectedSize, 50)\n",
" self.output = nn.Linear(50, 16)\n",
"\n",
" def forward(self, x):\n",
" x = F.relu(F.max_pool2d(self.conv01(x), 2))\n",
" x = F.relu(F.max_pool2d(self.conv02(x), 2))\n",
" nUnits = x.shape.numel() / x.shape[0]\n",
" x = x.view(-1, int(nUnits))\n",
" x = F.relu(self.fc01(x))\n",
" return torch.softmax(self.output(x), axis=1)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "2786c4df",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"theCNN(\n",
" (conv01): Conv2d(1, 10, kernel_size=(5, 5), stride=(1, 1), padding=(1, 1))\n",
" (conv02): Conv2d(10, 20, kernel_size=(5, 5), stride=(1, 1), padding=(1, 1))\n",
" (fc01): Linear(in_features=106580, out_features=50, bias=True)\n",
" (output): Linear(in_features=50, out_features=16, bias=True)\n",
")"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"model = theCNN()\n",
"model.load_state_dict(torch.load(\"model.pth\"))\n",
"model.eval()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "30569105",
"metadata": {},
"outputs": [],
"source": [
"# @app.post(\"/predict-similarity\")\n",
"# async def predict_similarity(image: UploadFile):\n",
"# image_bytes = await image.read()\n",
"# img = Image.open(BytesIO(image_bytes))\n",
"# img = transform(img).unsqueeze(0)\n",
"# output = model(img)\n",
"# similarity_score = torch.max(output).item() * 100 # Get the maximum predicted probability as the similarity score\n",
"# return {\"similarity_score\": similarity_score}\n",
"\n",
"\n",
"@app.post(\"/predict-similarity\")\n",
"async def predict_similarity(sign: str, image: UploadFile):\n",
" image_bytes = await image.read()\n",
" img = Image.open(BytesIO(image_bytes))\n",
" img = transform(img).unsqueeze(0)\n",
" output = model(img)\n",
" similarity_score = torch.max(output).item() * 100 # Get the maximum predicted probability as the similarity score\n",
" return {\"sign\": sign, \"similarity_score\": similarity_score}"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "5c19e31c",
"metadata": {},
"outputs": [],
"source": [
"@app.get(\"/\")\n",
"async def hello_world(): \n",
"\n",
" return {\"Hello World\"}"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bea39dc9",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO: Started server process [36440]\n",
"INFO: Waiting for application startup.\n",
"INFO: Application startup complete.\n",
"INFO: Uvicorn running on http://127.0.0.1:8001 (Press CTRL+C to quit)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"INFO: 127.0.0.1:63408 - \"POST /predict-similarity HTTP/1.1\" 422 Unprocessable Entity\n",
"INFO: 127.0.0.1:63440 - \"POST /predict-similarity HTTP/1.1\" 422 Unprocessable Entity\n",
"INFO: 127.0.0.1:63461 - \"POST /predict-similarity HTTP/1.1\" 422 Unprocessable Entity\n",
"INFO: 127.0.0.1:63484 - \"POST /predict-similarity HTTP/1.1\" 422 Unprocessable Entity\n"
]
}
],
"source": [
"if __name__ == \"__main__\":\n",
" loop = asyncio.get_event_loop()\n",
" loop.create_task(uvicorn.run(app, host=\"127.0.0.1\", port=8001))\n",
" loop.run_forever()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "52ede8b7",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "3d7bb1db",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
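
The string of 422 responses in the last cell's output is what FastAPI returns when a request does not match the handler signature: in predict_similarity(sign: str, image: UploadFile), sign is a plain str and is therefore read from the query string, while image must arrive as a multipart/form-data file field (hence the python-multipart install). A hedged client sketch that satisfies both; the sign value and file name are assumptions:

import requests

with open("sample_sign.png", "rb") as f:
    resp = requests.post(
        "http://127.0.0.1:8001/predict-similarity",
        params={"sign": "A"},  # query parameter, because sign is a plain str
        files={"image": ("sample_sign.png", f, "image/png")},  # multipart file
    )
print(resp.status_code, resp.json())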
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "49b49c05",
"metadata": {},
"outputs": [],
"source": [
"import nest_asyncio\n",
"import asyncio\n",
"import torch.nn.functional as F\n",
"import torch.nn as nn\n",
"import torchvision.transforms as transforms\n",
"import numpy as np\n",
"import io\n",
"import uvicorn\n",
"from fastapi import FastAPI, UploadFile\n",
"from PIL import Image\n",
"import torch"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3f0148a4",
"metadata": {},
"outputs": [],
"source": [
"nest_asyncio.apply()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "26e5f198",
"metadata": {},
"outputs": [],
"source": [
"app = FastAPI()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d4359663",
"metadata": {},
"outputs": [],
"source": [
"transform = transforms.Compose([\n",
" transforms.Resize((300, 300)),\n",
" transforms.Grayscale(num_output_channels=1),\n",
" transforms.ToTensor(),\n",
" transforms.Normalize(mean=(0.5), std=(0.5))\n",
"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "308a25d3",
"metadata": {},
"outputs": [],
"source": [
"class theCNN(nn.Module):\n",
" def __init__(self):\n",
" super(theCNN, self).__init__()\n",
" \n",
" self.conv01 = nn.Conv2d(\n",
" in_channels=1,\n",
" out_channels=10,\n",
" kernel_size=5,\n",
" stride=1,\n",
" padding=1\n",
" )\n",
" \n",
" self.conv02 = nn.Conv2d(\n",
" in_channels=10,\n",
" out_channels=20,\n",
" kernel_size=5,\n",
" stride=1,\n",
" padding=1\n",
" )\n",
" \n",
" expectedSize = int(np.floor((73 + 2 * 0 - 1) / 1) + 1)\n",
" expectedSize = 20 * int(expectedSize ** 2)\n",
" \n",
" self.fc01 = nn.Linear(expectedSize, 50)\n",
" self.output = nn.Linear(50, 16)\n",
"\n",
" def forward(self, x):\n",
" x = F.relu(F.max_pool2d(self.conv01(x), 2))\n",
" x = F.relu(F.max_pool2d(self.conv02(x), 2))\n",
" nUnits = x.shape.numel() / x.shape[0]\n",
" x = x.view(-1, int(nUnits))\n",
" x = F.relu(self.fc01(x))\n",
" return torch.softmax(self.output(x), axis=1)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4e3544d2",
"metadata": {},
"outputs": [],
"source": [
"model = theCNN()\n",
"model.load_state_dict(torch.load(\"model.pth\"))\n",
"model.eval()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d521a8ca",
"metadata": {},
"outputs": [],
"source": [
"@app.post(\"/score\")\n",
"async def calculate_score(image_file: UploadFile):\n",
" image = Image.open(io.BytesIO(await image_file.read())).convert(\"L\")\n",
" image = transform(image).unsqueeze(0)\n",
"\n",
" with torch.no_grad():\n",
" output = model(image)\n",
"\n",
" probabilities = torch.softmax(output, dim=1)[0]\n",
" predicted_class = torch.argmax(probabilities).item()\n",
"\n",
" # Get the actual number corresponding to the hand sign\n",
" actual_number = get_actual_number_from_image(image)\n",
" \n",
" print(actual_number)\n",
"\n",
" # Compare predicted class with actual number and calculate correctness percentage\n",
" correct = int(predicted_class + 1 == actual_number)\n",
" print(correct)\n",
" correctness_percentage = correct / 1.0 * 100.0\n",
"\n",
" return {\"predicted_class\": predicted_class, \"correctness_percentage\": correctness_percentage}"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "030c5fbc",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import numpy as np\n",
"\n",
"def get_actual_number_from_image(image):\n",
" # Convert the image to numpy array\n",
" image_array = np.array(image)\n",
"\n",
" # Apply image processing techniques to detect and recognize digits\n",
" # Example steps: thresholding, contour detection, character segmentation, digit recognition\n",
"\n",
" # Apply thresholding\n",
" _, binary_image = cv2.threshold(image_array, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n",
"\n",
" # Find contours\n",
" contours, _ = cv2.findContours(binary_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n",
"\n",
" # Sort contours based on their x-coordinate\n",
" contours = sorted(contours, key=lambda cnt: cv2.boundingRect(cnt)[0])\n",
"\n",
" # Initialize the recognized digit sequence\n",
" digit_sequence = \"\"\n",
"\n",
" # Iterate over the contours and recognize digits\n",
" for contour in contours:\n",
" # Get the bounding box of the contour\n",
" x, y, w, h = cv2.boundingRect(contour)\n",
"\n",
" # Crop the digit region from the image\n",
" digit_image = binary_image[y:y + h, x:x + w]\n",
"\n",
" # Resize the digit image to a fixed size (e.g., 28x28)\n",
" resized_digit_image = cv2.resize(digit_image, (28, 28))\n",
"\n",
" # Preprocess the resized digit image (e.g., normalize pixel values)\n",
" preprocessed_digit_image = resized_digit_image / 255.0\n",
"\n",
" # Flatten the preprocessed digit image\n",
" flattened_digit_image = preprocessed_digit_image.flatten()\n",
"\n",
" # Pass the flattened digit image to your digit recognition model\n",
" # to get the predicted digit (e.g., using a separate model or the same model you used for training)\n",
"\n",
" # Here, let's assume you have a function `predict_digit` that takes the flattened digit image\n",
" # and returns the predicted digit as an integer\n",
" predicted_digit = predict_digit(flattened_digit_image)\n",
"\n",
" # Add the predicted digit to the digit sequence\n",
" digit_sequence += str(predicted_digit)\n",
"\n",
" # Convert the digit sequence to an integer\n",
" actual_number = int(digit_sequence)\n",
"\n",
" return actual_number\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5035c79a",
"metadata": {},
"outputs": [],
"source": [
"if __name__ == \"__main__\":\n",
" loop = asyncio.get_event_loop()\n",
" loop.create_task(uvicorn.run(app, host=\"127.0.0.1\", port=8001))\n",
" loop.run_forever()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c0449757",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
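
The serving cells in these notebooks originally wrapped uvicorn.run() in loop.create_task(); that only appears to work because uvicorn.run() blocks forever, and it was changed above to a plain call. For finer control inside an already-running notebook event loop, uvicorn's Config/Server API exposes serve() as a real coroutine; a sketch:

import asyncio
import uvicorn

# serve() is a coroutine, so it can be driven on the notebook's loop
# (nest_asyncio.apply() makes the nested run_until_complete legal).
config = uvicorn.Config(app, host="127.0.0.1", port=8001, log_level="info")
server = uvicorn.Server(config)

loop = asyncio.get_event_loop()
loop.run_until_complete(server.serve())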
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "c898b57c",
"metadata": {},
"outputs": [],
"source": [
"pip install python-multipart"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a99b4156",
"metadata": {},
"outputs": [],
"source": [
"import nest_asyncio\n",
"import asyncio\n",
"import torch.nn.functional as F\n",
"\n",
"# Apply the patch\n",
"nest_asyncio.apply()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "01b6e57c",
"metadata": {},
"outputs": [],
"source": [
"from fastapi import FastAPI, UploadFile\n",
"from PIL import Image\n",
"import torch\n",
"import torchvision.transforms as transforms\n",
"import numpy as np\n",
"import io\n",
"import torch.nn as nn\n",
"import asyncio\n",
"import uvicorn "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2e561f13",
"metadata": {},
"outputs": [],
"source": [
"app = FastAPI()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1e9e4208",
"metadata": {},
"outputs": [],
"source": [
"transform = transforms.Compose([\n",
" transforms.Resize((300, 300)),\n",
" transforms.Grayscale(num_output_channels=1),\n",
" transforms.ToTensor(),\n",
" transforms.Normalize(mean=(0.5), std=(0.5))\n",
"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4d86d2b8",
"metadata": {},
"outputs": [],
"source": [
"class theCNN(nn.Module):\n",
" def __init__(self):\n",
" super(theCNN, self).__init__()\n",
" \n",
" self.conv01 = nn.Conv2d(\n",
" in_channels=1,\n",
" out_channels=10,\n",
" kernel_size=5,\n",
" stride=1,\n",
" padding=1\n",
" )\n",
" \n",
" self.conv02 = nn.Conv2d(\n",
" in_channels=10,\n",
" out_channels=20,\n",
" kernel_size=5,\n",
" stride=1,\n",
" padding=1\n",
" )\n",
" \n",
" expectedSize = int(np.floor((73 + 2 * 0 - 1) / 1) + 1)\n",
" expectedSize = 20 * int(expectedSize ** 2)\n",
" \n",
" self.fc01 = nn.Linear(expectedSize, 50)\n",
" self.output = nn.Linear(50, 16)\n",
"\n",
" def forward(self, x):\n",
" x = F.relu(F.max_pool2d(self.conv01(x), 2))\n",
" x = F.relu(F.max_pool2d(self.conv02(x), 2))\n",
" nUnits = x.shape.numel() / x.shape[0]\n",
" x = x.view(-1, int(nUnits))\n",
" x = F.relu(self.fc01(x))\n",
" return torch.softmax(self.output(x), axis=1)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9379bf73",
"metadata": {},
"outputs": [],
"source": [
"model = theCNN()\n",
"model.load_state_dict(torch.load(\"model.pth\"))\n",
"model.eval()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "18b869d3",
"metadata": {},
"outputs": [],
"source": [
"@app.post(\"/score\")\n",
"async def calculate_score(image_file: UploadFile):\n",
" image = Image.open(io.BytesIO(await image_file.read())).convert(\"L\")\n",
" image = transform(image).unsqueeze(0)\n",
"\n",
" with torch.no_grad():\n",
" output = model(image)\n",
"\n",
" probabilities = torch.softmax(output, dim=1)[0]\n",
" similarity_scores = probabilities.numpy()\n",
"\n",
" return {\"similarity_scores\": similarity_scores.tolist()}"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4a99a8b6",
"metadata": {},
"outputs": [],
"source": [
"@app.get(\"/\")\n",
"async def hello_world(): \n",
"\n",
" return {\"Hello World\"}"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7b30f5c6",
"metadata": {},
"outputs": [],
"source": [
"if __name__ == \"__main__\":\n",
" loop = asyncio.get_event_loop()\n",
" loop.create_task(uvicorn.run(app, host=\"127.0.0.1\", port=8001))\n",
" loop.run_forever()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3e2e07d8",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
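
One design wrinkle shared by all of these notebooks: theCNN.forward() already ends in torch.softmax, and the handlers were applying softmax a second time (corrected above to use the model output directly). Softmaxing twice squashes the distribution toward uniform and deflates every confidence score. The conventional alternative is to return raw logits from forward() and normalize once at the call site; a sketch:

import torch
import torch.nn.functional as F

# In theCNN.forward(), return logits instead of probabilities:
#     return self.output(x)
# Then normalize exactly once where probabilities are needed:
with torch.no_grad():
    logits = model(image)  # image: (1, 1, 300, 300) tensor from `transform`
probabilities = F.softmax(logits, dim=1)[0]
predicted_class = int(torch.argmax(probabilities).item())

Training code would then pair the logits with nn.CrossEntropyLoss, which applies log-softmax internally.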
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "3d735e52",
"metadata": {},
"outputs": [],
"source": [
"import nest_asyncio\n",
"import asyncio\n",
"import torch.nn.functional as F\n",
"\n",
"# Apply the patch\n",
"nest_asyncio.apply()"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "db756418",
"metadata": {},
"outputs": [],
"source": [
"from fastapi import FastAPI, UploadFile\n",
"from PIL import Image\n",
"import torch\n",
"import torchvision.transforms as transforms\n",
"import numpy as np\n",
"import io\n",
"import torch.nn as nn\n",
"import asyncio\n",
"import uvicorn \n",
"from io import BytesIO"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "fc83d1b8",
"metadata": {},
"outputs": [],
"source": [
"app = FastAPI()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "63dbfc01",
"metadata": {},
"outputs": [],
"source": [
"transform = transforms.Compose([\n",
" transforms.Resize((300, 300)),\n",
" transforms.Grayscale(num_output_channels=1),\n",
" transforms.ToTensor(),\n",
" transforms.Normalize(mean=(0.5), std=(0.5))\n",
"])"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "f7e5cdba",
"metadata": {},
"outputs": [],
"source": [
"class theCNN(nn.Module):\n",
" def __init__(self):\n",
" super(theCNN, self).__init__()\n",
" \n",
" self.conv01 = nn.Conv2d(\n",
" in_channels=1,\n",
" out_channels=10,\n",
" kernel_size=5,\n",
" stride=1,\n",
" padding=1\n",
" )\n",
" \n",
" self.conv02 = nn.Conv2d(\n",
" in_channels=10,\n",
" out_channels=20,\n",
" kernel_size=5,\n",
" stride=1,\n",
" padding=1\n",
" )\n",
" \n",
" expectedSize = int(np.floor((73 + 2 * 0 - 1) / 1) + 1)\n",
" expectedSize = 20 * int(expectedSize ** 2)\n",
" \n",
" self.fc01 = nn.Linear(expectedSize, 50)\n",
" self.output = nn.Linear(50, 16)\n",
"\n",
" def forward(self, x):\n",
" x = F.relu(F.max_pool2d(self.conv01(x), 2))\n",
" x = F.relu(F.max_pool2d(self.conv02(x), 2))\n",
" nUnits = x.shape.numel() / x.shape[0]\n",
" x = x.view(-1, int(nUnits))\n",
" x = F.relu(self.fc01(x))\n",
" return torch.softmax(self.output(x), axis=1)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "d86a9515",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"theCNN(\n",
" (conv01): Conv2d(1, 10, kernel_size=(5, 5), stride=(1, 1), padding=(1, 1))\n",
" (conv02): Conv2d(10, 20, kernel_size=(5, 5), stride=(1, 1), padding=(1, 1))\n",
" (fc01): Linear(in_features=106580, out_features=50, bias=True)\n",
" (output): Linear(in_features=50, out_features=16, bias=True)\n",
")"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"model = theCNN()\n",
"model.load_state_dict(torch.load(\"model.pth\"))\n",
"model.eval()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "4b527135",
"metadata": {},
"outputs": [],
"source": [
"# @app.post(\"/predict-similarity\")\n",
"# async def predict_similarity(image: UploadFile):\n",
"# image_bytes = await image.read()\n",
"# img = Image.open(BytesIO(image_bytes))\n",
"# img = transform(img).unsqueeze(0)\n",
"# output = model(img)\n",
"# similarity_score = torch.max(output).item() * 100 # Get the maximum predicted probability as the similarity score\n",
"# return {\"similarity_score\": similarity_score}\n",
"\n",
"\n",
"@app.post(\"/predict-similarity\")\n",
"async def predict_similarity(sign: str, image: UploadFile):\n",
" image_bytes = await image.read()\n",
" img = Image.open(BytesIO(image_bytes))\n",
" img = transform(img).unsqueeze(0)\n",
" output = model(img)\n",
" similarity_score = torch.max(output).item() * 100 # Get the maximum predicted probability as the similarity score\n",
" return {\"sign\": sign, \"similarity_score\": similarity_score}"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "df08611e",
"metadata": {},
"outputs": [],
"source": [
"@app.get(\"/\")\n",
"async def hello_world(): \n",
"\n",
" return {\"Hello World\"}"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "84b9601b",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO: Started server process [36440]\n",
"INFO: Waiting for application startup.\n",
"INFO: Application startup complete.\n",
"INFO: Uvicorn running on http://127.0.0.1:8001 (Press CTRL+C to quit)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"INFO: 127.0.0.1:63408 - \"POST /predict-similarity HTTP/1.1\" 422 Unprocessable Entity\n",
"INFO: 127.0.0.1:63440 - \"POST /predict-similarity HTTP/1.1\" 422 Unprocessable Entity\n",
"INFO: 127.0.0.1:63461 - \"POST /predict-similarity HTTP/1.1\" 422 Unprocessable Entity\n",
"INFO: 127.0.0.1:63484 - \"POST /predict-similarity HTTP/1.1\" 422 Unprocessable Entity\n",
"INFO: 127.0.0.1:63502 - \"POST /predict-similarity HTTP/1.1\" 422 Unprocessable Entity\n",
"INFO: 127.0.0.1:63517 - \"POST /predict-similarity HTTP/1.1\" 422 Unprocessable Entity\n",
"INFO: 127.0.0.1:63523 - \"POST /predict-similarity HTTP/1.1\" 422 Unprocessable Entity\n",
"INFO: 127.0.0.1:63536 - \"POST /predict-similarity HTTP/1.1\" 422 Unprocessable Entity\n",
"INFO: 127.0.0.1:63536 - \"POST /predict-similarity HTTP/1.1\" 422 Unprocessable Entity\n",
"INFO: 127.0.0.1:63536 - \"POST /predict-similarity HTTP/1.1\" 422 Unprocessable Entity\n",
"INFO: 127.0.0.1:63546 - \"POST /predict-similarity HTTP/1.1\" 422 Unprocessable Entity\n"
]
}
],
"source": [
"if __name__ == \"__main__\":\n",
" loop = asyncio.get_event_loop()\n",
" loop.create_task(uvicorn.run(app, host=\"127.0.0.1\", port=8001))\n",
" loop.run_forever()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ab358400",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "504cf81d",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -2,7 +2,11 @@
"cells": [
{
"cell_type": "code",
<<<<<<< HEAD
"execution_count": 6,
=======
"execution_count": 2,
>>>>>>> master
"id": "ade37944",
"metadata": {},
"outputs": [],
@@ -17,7 +21,11 @@
},
{
"cell_type": "code",
<<<<<<< HEAD
"execution_count": 7,
=======
"execution_count": 3,
>>>>>>> master
"id": "16176bf6",
"metadata": {},
"outputs": [
@@ -40,7 +48,11 @@
" 'Uhh']"
]
},
<<<<<<< HEAD
"execution_count": 7,
=======
"execution_count": 3,
>>>>>>> master
"metadata": {},
"output_type": "execute_result"
}
@@ -57,7 +69,11 @@
},
{
"cell_type": "code",
<<<<<<< HEAD
"execution_count": 8,
=======
"execution_count": 4,
>>>>>>> master
"id": "8f7b1301",
"metadata": {},
"outputs": [],
@@ -83,7 +99,11 @@
},
{
"cell_type": "code",
<<<<<<< HEAD
"execution_count": 9,
=======
"execution_count": 5,
>>>>>>> master
"id": "c9034cbe",
"metadata": {},
"outputs": [],
@@ -113,7 +133,11 @@
},
{
"cell_type": "code",
<<<<<<< HEAD
"execution_count": null,
=======
"execution_count": 6,
>>>>>>> master
"id": "7adb379e",
"metadata": {},
"outputs": [],
@@ -124,7 +148,11 @@
},
{
"cell_type": "code",
<<<<<<< HEAD
"execution_count": null,
=======
"execution_count": 7,
>>>>>>> master
"id": "d44f7806",
"metadata": {},
"outputs": [],
@@ -144,12 +172,57 @@
},
{
"cell_type": "code",
<<<<<<< HEAD
"execution_count": null,
=======
"execution_count": 8,
>>>>>>> master
"id": "ff4f0d06",
"metadata": {
"scrolled": true
},
<<<<<<< HEAD
"outputs": [],
=======
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/10\n",
"4/4 [==============================] - 5s 1s/step - loss: 3.0344 - accuracy: 0.0708 - val_loss: 2.4118 - val_accuracy: 0.1034\n",
"Epoch 2/10\n",
"4/4 [==============================] - 4s 1s/step - loss: 2.3133 - accuracy: 0.3274 - val_loss: 1.6620 - val_accuracy: 0.9310\n",
"Epoch 3/10\n",
"4/4 [==============================] - 5s 1s/step - loss: 1.2560 - accuracy: 0.9558 - val_loss: 0.4894 - val_accuracy: 0.9655\n",
"Epoch 4/10\n",
"4/4 [==============================] - 5s 1s/step - loss: 0.2415 - accuracy: 0.9912 - val_loss: 0.0362 - val_accuracy: 1.0000\n",
"Epoch 5/10\n",
"4/4 [==============================] - 5s 1s/step - loss: 0.0340 - accuracy: 0.9912 - val_loss: 0.0024 - val_accuracy: 1.0000\n",
"Epoch 6/10\n",
"4/4 [==============================] - 5s 1s/step - loss: 0.0021 - accuracy: 1.0000 - val_loss: 0.0127 - val_accuracy: 1.0000\n",
"Epoch 7/10\n",
"4/4 [==============================] - 5s 1s/step - loss: 0.0040 - accuracy: 1.0000 - val_loss: 3.6882e-05 - val_accuracy: 1.0000\n",
"Epoch 8/10\n",
"4/4 [==============================] - 5s 1s/step - loss: 9.9268e-05 - accuracy: 1.0000 - val_loss: 2.7212e-06 - val_accuracy: 1.0000\n",
"Epoch 9/10\n",
"4/4 [==============================] - 5s 1s/step - loss: 5.2195e-05 - accuracy: 1.0000 - val_loss: 6.4126e-07 - val_accuracy: 1.0000\n",
"Epoch 10/10\n",
"4/4 [==============================] - 5s 1s/step - loss: 1.3251e-05 - accuracy: 1.0000 - val_loss: 2.7130e-07 - val_accuracy: 1.0000\n"
]
},
{
"data": {
"text/plain": [
"<keras.callbacks.History at 0x2d653970160>"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
>>>>>>> master
"source": [
"model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n",
"model.fit(train_data, train_labels, epochs=EPOCHS, batch_size=BATCH_SIZE, validation_data=(val_data, val_labels))\n"
@@ -157,18 +230,52 @@
},
{
"cell_type": "code",
<<<<<<< HEAD
"execution_count": null,
"id": "61d6a8d8",
"metadata": {},
"outputs": [],
=======
"execution_count": 9,
"id": "61d6a8d8",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _jit_compiled_convolution_op while saving (showing 3 of 3). These functions will not be directly callable after loading.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"INFO:tensorflow:Assets written to: ./models/model\\assets\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:tensorflow:Assets written to: ./models/model\\assets\n"
]
}
],
>>>>>>> master
"source": [
"model.save('./models/model')"
]
},
{
"cell_type": "code",
<<<<<<< HEAD
"execution_count": null,
"id": "fdc9bfe6",
=======
"execution_count": 10,
"id": "dc610fdb",
>>>>>>> master
"metadata": {},
"outputs": [],
"source": [
@@ -196,10 +303,29 @@
},
{
"cell_type": "code",
<<<<<<< HEAD
"execution_count": null,
"id": "297e3e3c",
"metadata": {},
"outputs": [],
=======
"execution_count": 11,
"id": "6b6d20d2",
"metadata": {},
"outputs": [
{
"ename": "NameError",
"evalue": "name 'test_data' is not defined",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m~\\AppData\\Local\\Temp\\ipykernel_17676\\3131021014.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mpredictions\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mmodel\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpredict\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtest_data\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 2\u001b[0m \u001b[0mpredicted_classes\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0margmax\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mpredictions\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0maxis\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31mNameError\u001b[0m: name 'test_data' is not defined"
]
}
],
>>>>>>> master
"source": [
"predictions = model.predict(test_data)\n",
"predicted_classes = np.argmax(predictions, axis=1)\n",
@@ -211,7 +337,11 @@
{
"cell_type": "code",
"execution_count": null,
<<<<<<< HEAD
"id": "e22211b0",
=======
"id": "2bd77ac5",
>>>>>>> master
"metadata": {},
"outputs": [],
"source": [
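
The NameError captured on the master side of this diff ("name 'test_data' is not defined") suggests the evaluation cell ran before any test split existed. A hedged sketch of producing one with the train_test_split import already present in the notebook; the data/labels variable names and the 80/20 ratio are assumptions:

from sklearn.model_selection import train_test_split

# Assumed arrays: data of shape (N, IMG_SIZE, IMG_SIZE, 3), labels one-hot (N, NUM_CLASSES)
train_data, test_data, train_labels, test_labels = train_test_split(
    data, labels, test_size=0.2, random_state=42
)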
{
"cells": [
{
<<<<<<< HEAD
"cell_type": "markdown",
"id": "91b96b6e",
"metadata": {},
@@ -11,6 +12,10 @@
{
"cell_type": "code",
"execution_count": 6,
=======
"cell_type": "code",
"execution_count": 12,
>>>>>>> master
"id": "ade37944",
"metadata": {},
"outputs": [],
@@ -19,6 +24,7 @@
"import os\n",
"import cv2\n",
"import numpy as np\n",
<<<<<<< HEAD
"from sklearn.model_selection import train_test_split"
]
},
@@ -30,11 +36,19 @@
"### Define Constants\n",
"\n",
"In this section, I define some constants used throughout the code. IMG_SIZE represents the desired size of the input images, BATCH_SIZE determines the number of samples processed in each training batch, and EPOCHS specifies the number of times the model will iterate over the entire dataset during training. CLASSES is a list of class names extracted from the directory structure, and NUM_CLASSES represents the total number of classes in the dataset."
=======
"from sklearn.model_selection import train_test_split\n",
"import mediapipe as mp"
>>>>>>> master
]
},
{
"cell_type": "code",
<<<<<<< HEAD
"execution_count": 7,
=======
"execution_count": 13,
>>>>>>> master
"id": "16176bf6",
"metadata": {},
"outputs": [
@@ -57,7 +71,11 @@
" 'Uhh']"
]
},
<<<<<<< HEAD
"execution_count": 7,
=======
"execution_count": 13,
>>>>>>> master
"metadata": {},
"output_type": "execute_result"
}
@@ -74,7 +92,11 @@
},
{
"cell_type": "code",
<<<<<<< HEAD
"execution_count": 8,
=======
"execution_count": 14,
>>>>>>> master
"id": "8f7b1301",
"metadata": {},
"outputs": [],
@@ -99,6 +121,7 @@
]
},
{
<<<<<<< HEAD
"cell_type": "markdown",
"id": "3d2af75d",
"metadata": {},
@@ -111,6 +134,10 @@
{
"cell_type": "code",
"execution_count": 9,
=======
"cell_type": "code",
"execution_count": 15,
>>>>>>> master
"id": "c9034cbe",
"metadata": {},
"outputs": [],
@@ -140,7 +167,11 @@
},
{
"cell_type": "code",
<<<<<<< HEAD
"execution_count": 10,
=======
"execution_count": 16,
>>>>>>> master
"id": "7adb379e",
"metadata": {},
"outputs": [],
@@ -150,6 +181,7 @@
]
},
{
<<<<<<< HEAD
"cell_type": "markdown",
"id": "34d79d4d",
"metadata": {},
@@ -162,6 +194,10 @@
{
"cell_type": "code",
"execution_count": 11,
=======
"cell_type": "code",
"execution_count": 17,
>>>>>>> master
"id": "d44f7806",
"metadata": {},
"outputs": [],
@@ -180,6 +216,7 @@
]
},
{
<<<<<<< HEAD
"cell_type": "markdown",
"id": "ab7b7e82",
"metadata": {},
@@ -192,6 +229,10 @@
{
"cell_type": "code",
"execution_count": 12,
=======
"cell_type": "code",
"execution_count": 18,
>>>>>>> master
"id": "ff4f0d06",
"metadata": {
"scrolled": true
@@ -201,6 +242,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
<<<<<<< HEAD
"Epoch 1/20\n",
"152/152 [==============================] - 238s 2s/step - loss: 0.7102 - accuracy: 0.8011 - val_loss: 0.1194 - val_accuracy: 0.9703\n",
"Epoch 2/20\n",
@@ -241,15 +283,44 @@
"152/152 [==============================] - 205s 1s/step - loss: 6.9678e-07 - accuracy: 1.0000 - val_loss: 6.0417e-04 - val_accuracy: 1.0000\n",
"Epoch 20/20\n",
"152/152 [==============================] - 204s 1s/step - loss: 5.5925e-07 - accuracy: 1.0000 - val_loss: 6.0406e-04 - val_accuracy: 1.0000\n"
=======
"Epoch 1/10\n",
"152/152 [==============================] - 217s 1s/step - loss: 0.8329 - accuracy: 0.7585 - val_loss: 0.0838 - val_accuracy: 0.9860\n",
"Epoch 2/10\n",
"152/152 [==============================] - 205s 1s/step - loss: 0.0374 - accuracy: 0.9913 - val_loss: 0.0139 - val_accuracy: 0.9942\n",
"Epoch 3/10\n",
"152/152 [==============================] - 212s 1s/step - loss: 0.0022 - accuracy: 0.9998 - val_loss: 0.0106 - val_accuracy: 0.9959\n",
"Epoch 4/10\n",
"152/152 [==============================] - 211s 1s/step - loss: 0.0147 - accuracy: 0.9955 - val_loss: 0.0418 - val_accuracy: 0.9818\n",
"Epoch 5/10\n",
"152/152 [==============================] - 205s 1s/step - loss: 0.0190 - accuracy: 0.9955 - val_loss: 0.0273 - val_accuracy: 0.9917\n",
"Epoch 6/10\n",
"152/152 [==============================] - 205s 1s/step - loss: 0.0142 - accuracy: 0.9967 - val_loss: 0.0509 - val_accuracy: 0.9942\n",
"Epoch 7/10\n",
"152/152 [==============================] - 214s 1s/step - loss: 0.0037 - accuracy: 0.9990 - val_loss: 0.0027 - val_accuracy: 0.9992\n",
"Epoch 8/10\n",
"152/152 [==============================] - 230s 2s/step - loss: 0.0110 - accuracy: 0.9969 - val_loss: 0.0188 - val_accuracy: 0.9967\n",
"Epoch 9/10\n",
"152/152 [==============================] - 220s 1s/step - loss: 1.7629e-04 - accuracy: 1.0000 - val_loss: 0.0190 - val_accuracy: 0.9967\n",
"Epoch 10/10\n",
"152/152 [==============================] - 208s 1s/step - loss: 1.5000e-05 - accuracy: 1.0000 - val_loss: 0.0197 - val_accuracy: 0.9967\n"
>>>>>>> master
]
},
{
"data": {
"text/plain": [
<<<<<<< HEAD
"<keras.callbacks.History at 0x283f3ad5a90>"
]
},
"execution_count": 12,
=======
"<keras.callbacks.History at 0x2d6000ebeb0>"
]
},
"execution_count": 18,
>>>>>>> master
"metadata": {},
"output_type": "execute_result"
}
@@ -261,7 +332,11 @@
},
{
"cell_type": "code",
<<<<<<< HEAD
"execution_count": 13,
=======
"execution_count": 19,
>>>>>>> master
"id": "61d6a8d8",
"metadata": {},
"outputs": [
@@ -288,12 +363,20 @@
}
],
"source": [
<<<<<<< HEAD
"model.save('./models/model') "
=======
"model.save('./models/model')"
>>>>>>> master
]
},
{
"cell_type": "code",
<<<<<<< HEAD
"execution_count": 14,
=======
"execution_count": 20,
>>>>>>> master
"id": "fdc9bfe6",
"metadata": {},
"outputs": [],
@@ -322,7 +405,11 @@
},
{
"cell_type": "code",
<<<<<<< HEAD
"execution_count": 15,
=======
"execution_count": 21,
>>>>>>> master
"id": "297e3e3c",
"metadata": {},
"outputs": [
@@ -330,8 +417,13 @@
"name": "stdout",
"output_type": "stream",
"text": [
<<<<<<< HEAD
"5/5 [==============================] - 2s 299ms/step\n",
"Test Accuracy: 0.9084507042253521\n"
=======
"5/5 [==============================] - 2s 297ms/step\n",
"Test Accuracy: 0.9225352112676056\n"
>>>>>>> master
]
}
],
@@ -345,7 +437,11 @@
},
{
"cell_type": "code",
<<<<<<< HEAD
"execution_count": 16,
=======
"execution_count": 22,
>>>>>>> master
"id": "e22211b0",
"metadata": {},
"outputs": [
@@ -353,7 +449,11 @@
"name": "stdout",
"output_type": "stream",
"text": [
<<<<<<< HEAD
"5/5 [==============================] - 2s 323ms/step\n"
=======
"5/5 [==============================] - 2s 299ms/step\n"
>>>>>>> master
]
}
],
@@ -369,6 +469,29 @@
]
},
{
<<<<<<< HEAD
=======
"cell_type": "code",
"execution_count": null,
"id": "885678c5",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"img = cv2.imread('./scene00548.png')\n",
"img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n",
"img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))\n",
"img = np.array([img], dtype=np.float32) / 255.0\n",
"prediction = model.predict(img)\n",
"class_index = np.argmax(prediction)\n",
"class_name = CLASSES[class_index]\n",
"sinhala_letter = letter_mapping.get(class_name, 'Unknown')\n",
"print(sinhala_letter)"
]
},
{
>>>>>>> master
"cell_type": "markdown",
"id": "69b66fc1",
"metadata": {},
@@ -41,7 +41,11 @@ export const signUp = async (req, res) => {
} = req.body;
try {
<<<<<<< HEAD
if (!type) return res.status(400).json({ code: "02", message: "Type Field Required" })
=======
// if (!type) return res.status(400).json({ code: "02", message: "Type Field Required" })
>>>>>>> master
if (!email) return res.status(400).json({ code: "02", message: "Email Field Required" })
if (!userFirstName) return res.status(400).json({ code: "02", message: "User First Name Field Required" })
if (!userLastName) return res.status(400).json({ code: "02", message: "User Last Name Field Required" })
@@ -50,6 +54,7 @@ export const signUp = async (req, res) => {
const existingUser = await User.findOne({ email })
if (existingUser) return res.status(400).json({ code: "02", message: "User already exists" })
<<<<<<< HEAD
if (type === "buyer") {
if (!password) return res.status(400).json({ code: "02", message: "Password Field Required" })
if (password !== confirmPassword) return res.status(400).json({ code: "02", message: "Passwords do not match" })
@@ -95,6 +100,76 @@ export const signUp = async (req, res) => {
res.status(200).json({ code: "01", result: userResult, token })
}
=======
// if (type === "buyer") {
// if (!password) return res.status(400).json({ code: "02", message: "Password Field Required" })
// if (password !== confirmPassword) return res.status(400).json({ code: "02", message: "Passwords do not match" })
// const hashedPassword = await bcrypt.hash(password, 12)
// const userDetails = new User({
// email,
// password: hashedPassword,
// type,
// userDetails: {
// userQNumber: uuidv4(),
// userEmail: email,
// userName: `${userFirstName} ${userLastName}`,
// userContactNumber,
// userAddress: `${userAddressLine1}, ${userAddressLine2}, ${userAddressLine3}`,
// userType: type,
// }
// })
// const userResult = await userDetails.save()
// const token = jwt.sign({ email: userResult.email, id: userResult._id }, 'test', { expiresIn: "1h" })
// res.status(200).json({ code: "01", result: userResult, token })
// } else if (type === "trader") {
// const userDetails = new User({
// email,
// type,
// userDetails: {
// userQNumber: uuidv4(),
// userEmail: email,
// userName: `${userFirstName} ${userLastName}`,
// userContactNumber,
// userAddress: `${userAddressLine1}, ${userAddressLine2}, ${userAddressLine3}`,
// userType: type,
// },
// states: 2
// })
// const userResult = await userDetails.save()
// const token = jwt.sign({ email: userResult.email, id: userResult._id }, 'test', { expiresIn: "1h" })
// res.status(200).json({ code: "01", result: userResult, token })
// }
if (!password) return res.status(400).json({ code: "02", message: "Password Field Required" })
if (password !== confirmPassword) return res.status(400).json({ code: "02", message: "Passwords do not match" })
const hashedPassword = await bcrypt.hash(password, 12)
const userDetails = new User({
email,
password: hashedPassword,
type,
userDetails: {
userQNumber: uuidv4(),
userEmail: email,
userName: `${userFirstName} ${userLastName}`,
userContactNumber,
userAddress: `${userAddressLine1}, ${userAddressLine2}, ${userAddressLine3}`,
}
})
const userResult = await userDetails.save()
const token = jwt.sign({ email: userResult.email, id: userResult._id }, 'test', { expiresIn: "1h" })
res.status(200).json({ code: "01", result: userResult, token })
>>>>>>> master
} catch (error) {
res.status(500).json({ code: "00", message: "Something went wrong" })
@@ -36,6 +36,10 @@ const userSchema = mongoose.Schema({
},
userType: {
type: String,
<<<<<<< HEAD
=======
default: "N/A",
>>>>>>> master
required: true
},
},

@@ -5,8 +5,13 @@ import express from "express";
import mongoose from "mongoose";
//import routes
<<<<<<< HEAD
import userRoutes from "./routes/user.routes.js";
import translateRoutes from "./routes/translate.routes.js";
=======
import translateRoutes from "./routes/translate.routes.js";
import userRoutes from "./routes/user.routes.js";
>>>>>>> master
dotenv.config();
const app = express();

@@ -16,9 +16,12 @@
files/*
!files/
<<<<<<< HEAD
*.pyc
*~
*.swp
=======
>>>>>>> master
# Created by https://www.toptal.com/developers/gitignore/api/python
# Edit at https://www.toptal.com/developers/gitignore?templates=python

<<<<<<< HEAD
from fastapi import APIRouter, File, HTTPException,UploadFile
from pydantic import BaseModel
import tensorflow as tf
from core.logger import setup_logger
=======
import base64
import os
import cv2
from fastapi import APIRouter, File, HTTPException,UploadFile
import numpy as np
from pydantic import BaseModel
import tensorflow as tf
from core import setup_logger
>>>>>>> master
from services.translate_service import SignLanguagePredictionService
from utils import mappings
import logging
def setup_logger():
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
# Create a file handler for logging to a file
file_handler = logging.FileHandler('app.log')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
# Create a stream handler for logging to console
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(formatter)
# Add the handlers to the logger
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
return logger
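
A short usage sketch for the setup_logger() above, mirroring the HEAD-side import: DEBUG and higher go to app.log, while INFO and higher also reach the console. Note that calling it repeatedly from one process attaches duplicate handlers to the same module-level logger; guarding with logger.hasHandlers() is a common remedy.

from core.logger import setup_logger

logger = setup_logger()
logger.info("translate service started")      # console and app.log
logger.debug("loaded %d label mappings", 16)  # app.log only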