Commit edfa7401 authored by Paranagama R.P.S.D.

feat: Updated notebook

parent 9ab80463
@@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "code",
-"execution_count": 2,
+"execution_count": 6,
"id": "ade37944",
"metadata": {},
"outputs": [],
@@ -17,7 +17,7 @@
},
{
"cell_type": "code",
-"execution_count": 3,
+"execution_count": 7,
"id": "16176bf6",
"metadata": {},
"outputs": [
@@ -40,7 +40,7 @@
" 'Uhh']"
]
},
-"execution_count": 3,
+"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
@@ -57,7 +57,7 @@
},
{
"cell_type": "code",
-"execution_count": 4,
+"execution_count": 8,
"id": "8f7b1301",
"metadata": {},
"outputs": [],
@@ -83,7 +83,7 @@
},
{
"cell_type": "code",
-"execution_count": 5,
+"execution_count": 9,
"id": "c9034cbe",
"metadata": {},
"outputs": [],
@@ -113,7 +113,7 @@
},
{
"cell_type": "code",
-"execution_count": 6,
+"execution_count": null,
"id": "7adb379e",
"metadata": {},
"outputs": [],
@@ -124,7 +124,7 @@
},
{
"cell_type": "code",
-"execution_count": 7,
+"execution_count": null,
"id": "d44f7806",
"metadata": {},
"outputs": [],
@@ -144,49 +144,12 @@
},
{
"cell_type": "code",
-"execution_count": 8,
+"execution_count": null,
"id": "ff4f0d06",
"metadata": {
"scrolled": true
},
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"Epoch 1/10\n",
-"4/4 [==============================] - 5s 1s/step - loss: 3.0344 - accuracy: 0.0708 - val_loss: 2.4118 - val_accuracy: 0.1034\n",
-"Epoch 2/10\n",
-"4/4 [==============================] - 4s 1s/step - loss: 2.3133 - accuracy: 0.3274 - val_loss: 1.6620 - val_accuracy: 0.9310\n",
-"Epoch 3/10\n",
-"4/4 [==============================] - 5s 1s/step - loss: 1.2560 - accuracy: 0.9558 - val_loss: 0.4894 - val_accuracy: 0.9655\n",
-"Epoch 4/10\n",
-"4/4 [==============================] - 5s 1s/step - loss: 0.2415 - accuracy: 0.9912 - val_loss: 0.0362 - val_accuracy: 1.0000\n",
-"Epoch 5/10\n",
-"4/4 [==============================] - 5s 1s/step - loss: 0.0340 - accuracy: 0.9912 - val_loss: 0.0024 - val_accuracy: 1.0000\n",
-"Epoch 6/10\n",
-"4/4 [==============================] - 5s 1s/step - loss: 0.0021 - accuracy: 1.0000 - val_loss: 0.0127 - val_accuracy: 1.0000\n",
-"Epoch 7/10\n",
-"4/4 [==============================] - 5s 1s/step - loss: 0.0040 - accuracy: 1.0000 - val_loss: 3.6882e-05 - val_accuracy: 1.0000\n",
-"Epoch 8/10\n",
-"4/4 [==============================] - 5s 1s/step - loss: 9.9268e-05 - accuracy: 1.0000 - val_loss: 2.7212e-06 - val_accuracy: 1.0000\n",
-"Epoch 9/10\n",
-"4/4 [==============================] - 5s 1s/step - loss: 5.2195e-05 - accuracy: 1.0000 - val_loss: 6.4126e-07 - val_accuracy: 1.0000\n",
-"Epoch 10/10\n",
-"4/4 [==============================] - 5s 1s/step - loss: 1.3251e-05 - accuracy: 1.0000 - val_loss: 2.7130e-07 - val_accuracy: 1.0000\n"
-]
-},
-{
-"data": {
-"text/plain": [
-"<keras.callbacks.History at 0x2d653970160>"
-]
-},
-"execution_count": 8,
-"metadata": {},
-"output_type": "execute_result"
-}
-],
+"outputs": [],
"source": [
"model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n",
"model.fit(train_data, train_labels, epochs=EPOCHS, batch_size=BATCH_SIZE, validation_data=(val_data, val_labels))\n"
@@ -194,40 +157,18 @@
},
{
"cell_type": "code",
-"execution_count": 9,
+"execution_count": null,
"id": "61d6a8d8",
"metadata": {},
-"outputs": [
-{
-"name": "stderr",
-"output_type": "stream",
-"text": [
-"WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _jit_compiled_convolution_op while saving (showing 3 of 3). These functions will not be directly callable after loading.\n"
-]
-},
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"INFO:tensorflow:Assets written to: ./models/model\\assets\n"
-]
-},
-{
-"name": "stderr",
-"output_type": "stream",
-"text": [
-"INFO:tensorflow:Assets written to: ./models/model\\assets\n"
-]
-}
-],
+"outputs": [],
"source": [
"model.save('./models/model')"
]
},
{
"cell_type": "code",
-"execution_count": 10,
-"id": "dc610fdb",
+"execution_count": null,
+"id": "fdc9bfe6",
"metadata": {},
"outputs": [],
"source": [
@@ -255,22 +196,10 @@
},
{
"cell_type": "code",
-"execution_count": 11,
-"id": "6b6d20d2",
+"execution_count": null,
+"id": "297e3e3c",
"metadata": {},
-"outputs": [
-{
-"ename": "NameError",
-"evalue": "name 'test_data' is not defined",
-"output_type": "error",
-"traceback": [
-"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
-"\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)",
-"\u001b[1;32m~\\AppData\\Local\\Temp\\ipykernel_17676\\3131021014.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mpredictions\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mmodel\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpredict\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtest_data\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 2\u001b[0m \u001b[0mpredicted_classes\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0margmax\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mpredictions\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0maxis\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
-"\u001b[1;31mNameError\u001b[0m: name 'test_data' is not defined"
-]
-}
-],
+"outputs": [],
"source": [
"predictions = model.predict(test_data)\n",
"predicted_classes = np.argmax(predictions, axis=1)\n",
@@ -282,7 +211,7 @@
{
"cell_type": "code",
"execution_count": null,
-"id": "2bd77ac5",
+"id": "e22211b0",
"metadata": {},
"outputs": [],
"source": [
...
{
"cells": [
+{
+"cell_type": "markdown",
+"id": "91b96b6e",
+"metadata": {},
+"source": [
+"### Importing Required Libraries\n"
+]
+},
{
"cell_type": "code",
-"execution_count": 12,
+"execution_count": 6,
"id": "ade37944",
"metadata": {},
"outputs": [],
@@ -11,13 +19,22 @@
"import os\n",
"import cv2\n",
"import numpy as np\n",
-"from sklearn.model_selection import train_test_split\n",
-"import mediapipe as mp"
+"from sklearn.model_selection import train_test_split"
+]
+},
+{
+"cell_type": "markdown",
+"id": "a75d6b34",
+"metadata": {},
+"source": [
+"### Define Constants\n",
+"\n",
+"In this section, I define some constants used throughout the code. IMG_SIZE represents the desired size of the input images, BATCH_SIZE determines the number of samples processed in each training batch, and EPOCHS specifies the number of times the model will iterate over the entire dataset during training. CLASSES is a list of class names extracted from the directory structure, and NUM_CLASSES represents the total number of classes in the dataset."
]
},
{
"cell_type": "code",
-"execution_count": 13,
+"execution_count": 7,
"id": "16176bf6",
"metadata": {},
"outputs": [
@@ -40,7 +57,7 @@
" 'Uhh']"
]
},
-"execution_count": 13,
+"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
@@ -57,7 +74,7 @@
},
{
"cell_type": "code",
-"execution_count": 14,
+"execution_count": 8,
"id": "8f7b1301",
"metadata": {},
"outputs": [],
@@ -81,9 +98,19 @@
"}"
]
},
+{
+"cell_type": "markdown",
+"id": "3d2af75d",
+"metadata": {},
+"source": [
+"### Load Dataset\n",
+"\n",
+"In this section, I define a function load_dataset() to load the images and labels from the dataset directory. The function iterates over the classes and images, reads and preprocesses each image, and stores the data and corresponding labels. The dataset path is provided as an argument to the function. After loading the dataset, I split it into training and validation sets using the train_test_split() function from sklearn. The split is done with a test size of 20% and a random state of 42."
+]
+},
{
"cell_type": "code",
-"execution_count": 15,
+"execution_count": 9,
"id": "c9034cbe",
"metadata": {},
"outputs": [],
@@ -113,7 +140,7 @@
},
{
"cell_type": "code",
-"execution_count": 16,
+"execution_count": 10,
"id": "7adb379e",
"metadata": {},
"outputs": [],
@@ -122,9 +149,19 @@
"train_data, val_data, train_labels, val_labels = train_test_split(data, labels, test_size=0.2, random_state=42)\n"
]
},
+{
+"cell_type": "markdown",
+"id": "34d79d4d",
+"metadata": {},
+"source": [
+"### Define the Model Architecture and Compile\n",
+"\n",
+"In this section, I define the model architecture using the Sequential API from TensorFlow's Keras API. The model consists of a series of convolutional and pooling layers, followed by flatten and dense layers. The convolutional layers extract features from the input images, and the dense layers perform classification based on these extracted features. The model is compiled with the Adam optimizer, categorical cross-entropy loss function, and accuracy as the evaluation metric."
+]
+},
{
"cell_type": "code",
-"execution_count": 17,
+"execution_count": 11,
"id": "d44f7806",
"metadata": {},
"outputs": [],
@@ -142,9 +179,19 @@
"])\n"
]
},
+{
+"cell_type": "markdown",
+"id": "ab7b7e82",
+"metadata": {},
+"source": [
+"### Train the Model\n",
+"\n",
+"In this section, I train the model using the fit() function. It takes the training data and labels as inputs and trains the model for the specified number of epochs and batch size. The validation data and labels are provided for evaluation during training. The model learns from the training data to minimize the defined loss function and improve its accuracy."
+]
+},
{
"cell_type": "code",
-"execution_count": 18,
+"execution_count": 12,
"id": "ff4f0d06",
"metadata": {
"scrolled": true
@@ -154,35 +201,55 @@
"name": "stdout",
"output_type": "stream",
"text": [
-"Epoch 1/10\n",
-"152/152 [==============================] - 217s 1s/step - loss: 0.8329 - accuracy: 0.7585 - val_loss: 0.0838 - val_accuracy: 0.9860\n",
-"Epoch 2/10\n",
-"152/152 [==============================] - 205s 1s/step - loss: 0.0374 - accuracy: 0.9913 - val_loss: 0.0139 - val_accuracy: 0.9942\n",
-"Epoch 3/10\n",
-"152/152 [==============================] - 212s 1s/step - loss: 0.0022 - accuracy: 0.9998 - val_loss: 0.0106 - val_accuracy: 0.9959\n",
-"Epoch 4/10\n",
-"152/152 [==============================] - 211s 1s/step - loss: 0.0147 - accuracy: 0.9955 - val_loss: 0.0418 - val_accuracy: 0.9818\n",
-"Epoch 5/10\n",
-"152/152 [==============================] - 205s 1s/step - loss: 0.0190 - accuracy: 0.9955 - val_loss: 0.0273 - val_accuracy: 0.9917\n",
-"Epoch 6/10\n",
-"152/152 [==============================] - 205s 1s/step - loss: 0.0142 - accuracy: 0.9967 - val_loss: 0.0509 - val_accuracy: 0.9942\n",
-"Epoch 7/10\n",
-"152/152 [==============================] - 214s 1s/step - loss: 0.0037 - accuracy: 0.9990 - val_loss: 0.0027 - val_accuracy: 0.9992\n",
-"Epoch 8/10\n",
-"152/152 [==============================] - 230s 2s/step - loss: 0.0110 - accuracy: 0.9969 - val_loss: 0.0188 - val_accuracy: 0.9967\n",
-"Epoch 9/10\n",
-"152/152 [==============================] - 220s 1s/step - loss: 1.7629e-04 - accuracy: 1.0000 - val_loss: 0.0190 - val_accuracy: 0.9967\n",
-"Epoch 10/10\n",
-"152/152 [==============================] - 208s 1s/step - loss: 1.5000e-05 - accuracy: 1.0000 - val_loss: 0.0197 - val_accuracy: 0.9967\n"
+"Epoch 1/20\n",
+"152/152 [==============================] - 238s 2s/step - loss: 0.7102 - accuracy: 0.8011 - val_loss: 0.1194 - val_accuracy: 0.9703\n",
+"Epoch 2/20\n",
+"152/152 [==============================] - 238s 2s/step - loss: 0.0302 - accuracy: 0.9926 - val_loss: 0.0199 - val_accuracy: 0.9934\n",
+"Epoch 3/20\n",
+"152/152 [==============================] - 239s 2s/step - loss: 0.0124 - accuracy: 0.9963 - val_loss: 0.0118 - val_accuracy: 0.9959\n",
+"Epoch 4/20\n",
+"152/152 [==============================] - 229s 2s/step - loss: 0.0066 - accuracy: 0.9971 - val_loss: 0.0149 - val_accuracy: 0.9967\n",
+"Epoch 5/20\n",
+"152/152 [==============================] - 232s 2s/step - loss: 0.0108 - accuracy: 0.9973 - val_loss: 0.0066 - val_accuracy: 0.9967\n",
+"Epoch 6/20\n",
+"152/152 [==============================] - 231s 2s/step - loss: 5.5893e-05 - accuracy: 1.0000 - val_loss: 0.0039 - val_accuracy: 0.9975\n",
+"Epoch 7/20\n",
+"152/152 [==============================] - 232s 2s/step - loss: 2.1583e-05 - accuracy: 1.0000 - val_loss: 0.0033 - val_accuracy: 0.9975\n",
+"Epoch 8/20\n",
+"152/152 [==============================] - 229s 2s/step - loss: 1.3742e-05 - accuracy: 1.0000 - val_loss: 0.0029 - val_accuracy: 0.9983\n",
+"Epoch 9/20\n",
+"152/152 [==============================] - 241s 2s/step - loss: 9.8952e-06 - accuracy: 1.0000 - val_loss: 0.0026 - val_accuracy: 0.9983\n",
+"Epoch 10/20\n",
+"152/152 [==============================] - 248s 2s/step - loss: 7.5536e-06 - accuracy: 1.0000 - val_loss: 0.0024 - val_accuracy: 0.9983\n",
+"Epoch 11/20\n",
+"152/152 [==============================] - 250s 2s/step - loss: 5.9049e-06 - accuracy: 1.0000 - val_loss: 0.0021 - val_accuracy: 0.9983\n",
+"Epoch 12/20\n",
+"152/152 [==============================] - 235s 2s/step - loss: 4.7004e-06 - accuracy: 1.0000 - val_loss: 0.0018 - val_accuracy: 0.9992\n",
+"Epoch 13/20\n",
+"152/152 [==============================] - 235s 2s/step - loss: 3.7410e-06 - accuracy: 1.0000 - val_loss: 0.0016 - val_accuracy: 1.0000\n",
+"Epoch 14/20\n",
+"152/152 [==============================] - 217s 1s/step - loss: 2.8805e-06 - accuracy: 1.0000 - val_loss: 0.0015 - val_accuracy: 1.0000\n",
+"Epoch 15/20\n",
+"152/152 [==============================] - 208s 1s/step - loss: 2.1801e-06 - accuracy: 1.0000 - val_loss: 0.0012 - val_accuracy: 1.0000\n",
+"Epoch 16/20\n",
+"152/152 [==============================] - 206s 1s/step - loss: 1.6329e-06 - accuracy: 1.0000 - val_loss: 9.0677e-04 - val_accuracy: 1.0000\n",
+"Epoch 17/20\n",
+"152/152 [==============================] - 203s 1s/step - loss: 1.2007e-06 - accuracy: 1.0000 - val_loss: 7.3540e-04 - val_accuracy: 1.0000\n",
+"Epoch 18/20\n",
+"152/152 [==============================] - 205s 1s/step - loss: 9.0502e-07 - accuracy: 1.0000 - val_loss: 6.9178e-04 - val_accuracy: 1.0000\n",
+"Epoch 19/20\n",
+"152/152 [==============================] - 205s 1s/step - loss: 6.9678e-07 - accuracy: 1.0000 - val_loss: 6.0417e-04 - val_accuracy: 1.0000\n",
+"Epoch 20/20\n",
+"152/152 [==============================] - 204s 1s/step - loss: 5.5925e-07 - accuracy: 1.0000 - val_loss: 6.0406e-04 - val_accuracy: 1.0000\n"
]
},
{
"data": {
"text/plain": [
-"<keras.callbacks.History at 0x2d6000ebeb0>"
+"<keras.callbacks.History at 0x283f3ad5a90>"
]
},
-"execution_count": 18,
+"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
@@ -194,7 +261,7 @@
},
{
"cell_type": "code",
-"execution_count": 19,
+"execution_count": 13,
"id": "61d6a8d8",
"metadata": {},
"outputs": [
@@ -221,12 +288,12 @@
}
],
"source": [
-"model.save('./models/model')"
+"model.save('./models/model') "
]
},
{
"cell_type": "code",
-"execution_count": 20,
+"execution_count": 14,
"id": "fdc9bfe6",
"metadata": {},
"outputs": [],
@@ -255,7 +322,7 @@
},
{
"cell_type": "code",
-"execution_count": 21,
+"execution_count": 15,
"id": "297e3e3c",
"metadata": {},
"outputs": [
@@ -263,8 +330,8 @@
"name": "stdout",
"output_type": "stream",
"text": [
-"5/5 [==============================] - 2s 297ms/step\n",
-"Test Accuracy: 0.9225352112676056\n"
+"5/5 [==============================] - 2s 299ms/step\n",
+"Test Accuracy: 0.9084507042253521\n"
]
}
],
@@ -278,7 +345,7 @@
},
{
"cell_type": "code",
-"execution_count": 22,
+"execution_count": 16,
"id": "e22211b0",
"metadata": {},
"outputs": [
@@ -286,7 +353,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
-"5/5 [==============================] - 2s 299ms/step\n"
+"5/5 [==============================] - 2s 323ms/step\n"
]
}
],
@@ -301,26 +368,6 @@
"predictions = loaded_model.predict(test_data)"
]
},
-{
-"cell_type": "code",
-"execution_count": null,
-"id": "885678c5",
-"metadata": {
-"scrolled": true
-},
-"outputs": [],
-"source": [
-"img = cv2.imread('./scene00548.png')\n",
-"img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n",
-"img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))\n",
-"img = np.array([img], dtype=np.float32) / 255.0\n",
-"prediction = model.predict(img)\n",
-"class_index = np.argmax(prediction)\n",
-"class_name = CLASSES[class_index]\n",
-"sinhala_letter = letter_mapping.get(class_name, 'Unknown')\n",
-"print(sinhala_letter)"
-]
-},
{
"cell_type": "markdown",
"id": "69b66fc1",
...
@@ -16,6 +16,9 @@
files/*
!files/
+*.pyc
+*~
+*.swp
# Created by https://www.toptal.com/developers/gitignore/api/python
# Edit at https://www.toptal.com/developers/gitignore?templates=python
...
import base64
import os
import cv2
from fastapi import APIRouter, File, HTTPException,UploadFile
import numpy as np
from pydantic import BaseModel
import tensorflow as tf
from core.logger import setup_logger
from core import setup_logger
from services.translate_service import SignLanguagePredictionService
from utils import mappings
...
import logging
def setup_logger():
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
# Create a file handler for logging to a file
file_handler = logging.FileHandler('app.log')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
# Create a stream handler for logging to console
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(formatter)
# Add the handlers to the logger
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
return logger
@@ -2,7 +2,9 @@ from fastapi import FastAPI
from controllers import translate_controler, users_controller
from fastapi.responses import RedirectResponse
from fastapi.middleware.cors import CORSMiddleware
-from core import setup_logger
+from core.logger import setup_logger
app = FastAPI()
...
@@ -5,10 +5,8 @@ import numpy as np
from fastapi import HTTPException, UploadFile
from typing import Dict
-import tensorflow as tf
-from core import setup_logger
-from utils import mappings
+from core.logger import setup_logger
logger = setup_logger()
...