Commit 55d24997 authored by Karunarathna K.M.D.Y.K

Merge branch 'master' into IT19975382

parents fd7c6290 069ed7eb
{"cells":[{"cell_type":"markdown","metadata":{"id":"Y2jOdgL3j-o8"},"source":["# 🟨 Getting the Data"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"lDXyIngTMaXf","colab":{"base_uri":"https://localhost:8080/"},"outputId":"0367ec34-3f2b-4efc-ca93-82ae82f77f33","executionInfo":{"status":"ok","timestamp":1681147605246,"user_tz":-330,"elapsed":61704,"user":{"displayName":"Sanduni Rathnayake","userId":"14630985962646048848"}}},"outputs":[{"output_type":"stream","name":"stdout","text":["Mounted at /content/drive\n"]}],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"t-ZDw43fiy-1"},"outputs":[],"source":["data_location = 'content/drive/MyDrive/research/Okra_diseases.zip'"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"U-MmiQnjF9gG","colab":{"base_uri":"https://localhost:8080/","height":356},"executionInfo":{"status":"error","timestamp":1681147955614,"user_tz":-330,"elapsed":1065,"user":{"displayName":"Sanduni Rathnayake","userId":"14630985962646048848"}},"outputId":"2e08b354-99cc-4ed5-a3c9-3be40dd1fe23"},"outputs":[{"output_type":"error","ename":"FileNotFoundError","evalue":"ignored","traceback":["\u001b[0;31m---------------------------------------------------------------------------\u001b[0m","\u001b[0;31mFileNotFoundError\u001b[0m Traceback (most recent call last)","\u001b[0;32m<ipython-input-5-2a9895ce9af5>\u001b[0m in \u001b[0;36m<cell line: 3>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mzipfile\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0;31m#unzip our data\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 3\u001b[0;31m \u001b[0mzip_ref\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mzipfile\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mZipFile\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata_location\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"r\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 4\u001b[0m \u001b[0mzip_ref\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mextractall\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0mzip_ref\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mclose\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/lib/python3.9/zipfile.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, file, mode, compression, allowZip64, compresslevel, strict_timestamps)\u001b[0m\n\u001b[1;32m 1246\u001b[0m \u001b[0;32mwhile\u001b[0m \u001b[0;32mTrue\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1247\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1248\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfp\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mio\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfile\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfilemode\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1249\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mOSError\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1250\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mfilemode\u001b[0m \u001b[0;32min\u001b[0m 
\u001b[0mmodeDict\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: 'content/drive/MyDrive/research/Okra_diseases.zip'"]}],"source":["import zipfile\n","#unzip our data\n","zip_ref = zipfile.ZipFile(data_location, \"r\")\n","zip_ref.extractall()\n","zip_ref.close()"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"_YdAEKT6GSoX"},"outputs":[],"source":["import os\n","for dirpath , dirnames , filenames in os.walk(\"Okra_diseases\"):\n"," print(f\"There are {len(dirnames)} directories and {len(filenames)} images in '{dirpath}'.\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"gLo3FV6OGVP0","colab":{"base_uri":"https://localhost:8080/"},"outputId":"36736836-f814-4e7e-e879-fd0272e96f27","executionInfo":{"status":"ok","timestamp":1681148090040,"user_tz":-330,"elapsed":630,"user":{"displayName":"Sanduni Rathnayake","userId":"14630985962646048848"}}},"outputs":[{"output_type":"stream","name":"stdout","text":["[]\n"]}],"source":["train_dir = \"Okra_diseases/Training\"\n","test_dir = \"Okra_diseases/Testing\"\n","\n","#lets get the class names\n","import pathlib\n","import numpy as np\n","\n","data_dir = pathlib.Path(train_dir)\n","class_names = np.array(sorted([item.name for item in data_dir.glob('*')]))\n","print(class_names)"]},{"cell_type":"markdown","metadata":{"id":"HvDEsB_fj7_7"},"source":["# 🟨 Data Visualization"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"vVCpvSuKlGG1"},"outputs":[],"source":["import matplotlib.pyplot as plt\n","import matplotlib.image as mpimg\n","import random\n","import os\n","\n","def view_random_image(target_dir , target_class):\n"," target_folder = target_dir+\"/\"+target_class\n"," random_image = random.sample(os.listdir(target_folder) ,1)\n","\n"," img = mpimg.imread(target_folder+\"/\"+random_image[0])\n"," plt.imshow(img)\n"," plt.title(target_class)\n"," plt.axis(\"off\")\n","\n"," print(f\"Image Shape:{img.shape}\") \n","\n"," return img"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":321},"id":"tVx10O9CGXn2","outputId":"ef6b2816-6ea2-4382-d1a2-b7c96edac192","executionInfo":{"status":"error","timestamp":1681148102389,"user_tz":-330,"elapsed":875,"user":{"displayName":"Sanduni Rathnayake","userId":"14630985962646048848"}}},"outputs":[{"output_type":"error","ename":"IndexError","evalue":"ignored","traceback":["\u001b[0;31m---------------------------------------------------------------------------\u001b[0m","\u001b[0;31mIndexError\u001b[0m Traceback (most recent call last)","\u001b[0;32m<ipython-input-9-c8fbca14f51a>\u001b[0m in \u001b[0;36m<cell line: 2>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mrandom\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2\u001b[0m img = view_random_image(target_dir=train_dir , \n\u001b[0;32m----> 3\u001b[0;31m target_class = random.choice(class_names))\n\u001b[0m","\u001b[0;32m/usr/lib/python3.9/random.py\u001b[0m in \u001b[0;36mchoice\u001b[0;34m(self, seq)\u001b[0m\n\u001b[1;32m 344\u001b[0m \u001b[0;34m\"\"\"Choose a random element from a non-empty sequence.\"\"\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 345\u001b[0m \u001b[0;31m# raises IndexError if seq is empty\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 346\u001b[0;31m \u001b[0;32mreturn\u001b[0m 
\u001b[0mseq\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_randbelow\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mseq\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 347\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 348\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mshuffle\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrandom\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;31mIndexError\u001b[0m: index 0 is out of bounds for axis 0 with size 0"]}],"source":["import random\n","img = view_random_image(target_dir=train_dir , \n"," target_class = random.choice(class_names))"]},{"cell_type":"markdown","metadata":{"id":"RFJbEeyUlUgn"},"source":["# 🟨 DATA Preprocessing"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"8uTOAKV80c_S"},"outputs":[],"source":["def to_grayscale_then_rgb(image):\n"," image = tf.image.rgb_to_grayscale(image)\n"," image = tf.image.grayscale_to_rgb(image)\n"," return image"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"oIC5RKBhG-t-"},"outputs":[],"source":["from tensorflow.keras.preprocessing.image import ImageDataGenerator\n","\n","#Creating Image Data Generators for Training Data with augmentation\n","train_data_gen = ImageDataGenerator(rescale=1/255.,\n"," #preprocessing_function=to_grayscale_then_rgb,\n"," rotation_range = 0.2,\n"," shear_range = 0.2,\n"," zoom_range = 0.2,\n"," width_shift_range=0.3,\n"," height_shift_range = 0.3,\n"," horizontal_flip= True)\n","\n","#Create ImageDatagenerator for testing data\n","test_data_gen = ImageDataGenerator(rescale=1/255. 
\n"," #preprocessing_function=to_grayscale_then_rgb\n"," )\n","\n","#Import and Transform/pre process the data\n","train_data_multi = train_data_gen.flow_from_directory(train_dir,\n"," target_size = (448,448),\n"," batch_size = 16,\n"," class_mode = 'categorical',\n"," shuffle = True)\n","test_data_multi = test_data_gen.flow_from_directory(test_dir,\n"," target_size = (448, 448),\n"," batch_size = 16,\n"," class_mode = 'categorical', #This will gives us one-hot encoded data\n"," shuffle = True)"]},{"cell_type":"markdown","source":["#TransferLoearning Models\n"],"metadata":{"id":"z_3CSlVYgcU7"}},{"cell_type":"code","source":["efficientnetb7_url = \"https://tfhub.dev/tensorflow/efficientnet/b7/feature-vector/1\"\n","resnet_v2_50_url = \"https://tfhub.dev/google/imagenet/resnet_v2_50/feature_vector/5\"\n","inception_v3_url = \"https://tfhub.dev/google/tf2-preview/inception_v3/feature_vector/4\"\n","resnet_152_v2_url = \"https://tfhub.dev/google/imagenet/resnet_v2_152/feature_vector/5\"\n","mobileNetv3_url = \"https://tfhub.dev/google/imagenet/mobilenet_v1_025_224/feature_vector/5\"\n"],"metadata":{"id":"UMfVmghAgha4"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["import tensorflow as tf\n","import tensorflow_hub as hub\n","from tensorflow.keras import layers\n","def create_model(model_url , num_classes=7):\n","\n"," feature_extractor_layer = hub.KerasLayer(model_url,\n"," trainable = False, #freeze the already learned patterns \n"," name = \"feature_extraction_layer\",\n"," input_shape = (448, 448,3)) \n"," #Create our own model\n"," model = tf.keras.Sequential([\n"," feature_extractor_layer,\n"," layers.Dense(num_classes , activation=\"softmax\" , name=\"output_layer\")\n"," ])\n","\n"," return model"],"metadata":{"id":"cO9abOkFgnNs"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["# 01 - efficientnetb7_"],"metadata":{"id":"rX3XVcMpgoPE"}},{"cell_type":"code","source":["efficientnetb7_model = create_model(efficientnetb7_url , \n"," num_classes = 2)\n","efficientnetb7_model.compile(loss='categorical_crossentropy',\n"," optimizer = tf.keras.optimizers.Adam(),\n"," metrics=[\"accuracy\"])\n","\n","efficientnetb7_hist = efficientnetb7_model.fit(train_data_multi,\n"," epochs=5,\n"," steps_per_epoch=len(train_data_multi),\n"," validation_data = test_data_multi,\n"," validation_steps = len(test_data_multi)\n"," )"],"metadata":{"id":"HzB5iBCig11l"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["efficientnetb7_model.save('/content/drive/MyDrive/research/private/save_models/Okra_diseases_efficientnetb7.h5')"],"metadata":{"id":"eTHdQekBuSoD"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["# 02 - resnet_v2_50"],"metadata":{"id":"-zsedWS-u5VT"}},{"cell_type":"code","source":["resnet_v2_50_model = create_model(resnet_v2_50_url , \n"," num_classes = 2)\n","resnet_v2_50_model.compile(loss='categorical_crossentropy',\n"," optimizer = tf.keras.optimizers.Adam(),\n"," metrics=[\"accuracy\"])\n","\n","resnet_v2_50_hist = resnet_v2_50_model.fit(train_data_multi,\n"," epochs=1,\n"," steps_per_epoch=len(train_data_multi),\n"," validation_data = test_data_multi,\n"," validation_steps = len(test_data_multi)\n"," 
)"],"metadata":{"id":"lqYx2WVNvAh6"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["resnet_v2_50_model.save('/content/drive/MyDrive/research/private/save_models/Okra_diseases_resnet_v2_50.h5')"],"metadata":{"id":"mc0hnm08x0-k"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["# 03 - inception_v3"],"metadata":{"id":"lteOh74Hxcnr"}},{"cell_type":"code","source":["inception_v3_model = create_model(inception_v3_url , \n"," num_classes = 2)\n","inception_v3_model.compile(loss='categorical_crossentropy',\n"," optimizer = tf.keras.optimizers.Adam(),\n"," metrics=[\"accuracy\"])\n","\n","inception_v3_hist = inception_v3_model.fit(train_data_multi,\n"," epochs=1,\n"," steps_per_epoch=len(train_data_multi),\n"," validation_data = test_data_multi,\n"," validation_steps = len(test_data_multi)\n"," )"],"metadata":{"id":"45K34_uzxf3C"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["inception_v3_model.save('/content/drive/MyDrive/research/private/save_models/Okra_diseases_inception_v3.h5')"],"metadata":{"id":"5SxvrQ6Txlxo"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["# 04 - resnet_152_v2 "],"metadata":{"id":"GbyS-F1NzXLb"}},{"cell_type":"code","source":["resnet_152_v2_model = create_model(resnet_152_v2_url , \n"," num_classes = 2)\n","resnet_152_v2_model.compile(loss='categorical_crossentropy',\n"," optimizer = tf.keras.optimizers.Adam(),\n"," metrics=[\"accuracy\"])\n","resnet_152_v2_hist = resnet_152_v2_model.fit(train_data_multi,\n"," epochs=1,\n"," steps_per_epoch=len(train_data_multi),\n"," validation_data = test_data_multi,\n"," validation_steps = len(test_data_multi)\n"," )"],"metadata":{"id":"v-AOU2_4zbdy"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["resnet_152_v2_model.save('/content/drive/MyDrive/research/private/save_models/Okra_diseases_resnet_152_v2.h5')"],"metadata":{"id":"BjfGnetqzcza"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["# 05 - Mobile Net"],"metadata":{"id":"VL76DAJATZPz"}},{"cell_type":"code","source":["mobileNetv3_model = create_model(mobileNetv3_url , \n"," num_classes = 2)\n","mobileNetv3_model.compile(loss='categorical_crossentropy',\n"," optimizer = tf.keras.optimizers.Adam(),\n"," metrics=[\"accuracy\"])\n","\n","mobileNetv3_hist = mobileNetv3_model.fit(train_data_multi,\n"," epochs=1,\n"," steps_per_epoch=len(train_data_multi),\n"," validation_data = test_data_multi,\n"," validation_steps = len(test_data_multi)\n"," )"],"metadata":{"id":"pZrZ-ekXT4gU"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["#PLOTS"],"metadata":{"id":"dd5VJMBEr-iq"}},{"cell_type":"code","source":["def plot_loss_curve(history):\n"," '''\n"," Return separate loss curves for training and validation metrics\n"," '''\n"," loss = history.history[\"loss\"]\n"," val_loss = history.history[\"val_loss\"]\n","\n"," accuracy = history.history[\"accuracy\"]\n"," val_accuracy = history.history[\"val_accuracy\"]\n","\n"," #get the number of epochs that we run for\n"," epochs = range(len(history.history[\"loss\"]))\n","\n"," #Plot the lost\n"," plt.plot(epochs , loss , label=\"Training Loss\")\n"," plt.plot(epochs , val_loss , label=\"Validation Loss\")\n"," plt.title(\"Loss\")\n"," plt.xlabel(\"Epochs\")\n"," plt.legend()\n","\n"," #Plot the accuracy\n"," plt.figure()\n"," plt.plot(epochs , accuracy , label=\"Training accuracy\")\n"," plt.plot(epochs , val_accuracy , label=\"Validation accuracy\")\n"," plt.title(\"accuracy\")\n"," 
plt.xlabel(\"Epochs\")\n"," plt.legend()\n"],"metadata":{"id":"bb6t1J5NsB5-"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["plot_loss_curve(mobileNetv3_hist)"],"metadata":{"id":"fQ97QRodq06f"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"gx3orjVQMLLt"},"source":["#Make Predictions"]},{"cell_type":"code","source":["#Create a function to improt an image and resize it to be able to used with our model\n","def load_and_prep_image(filename , img_shape=224):\n"," \"\"\"\n"," Reads an image from file name , turns it into a tensor and reshapes it to (img_shape , color_channel\n"," \"\"\"\n"," print('FILe Name type - ', type(filename))\n"," #Read in the image\n"," img = tf.io.read_file(filename)\n"," print('img 1- ', type(img))\n","\n"," #Decode the read file into a tensor\n"," img = tf.image.decode_image(img)\n"," print('img 2- ', type(img))\n","\n"," #Resiz the Image\n"," img = tf.image.resize(img , size=[img_shape , img_shape])\n","\n"," #Re scale the image - get all values between 0 and 255\n"," img = img/255.\n","\n"," return img\n"],"metadata":{"id":"0AiHHV3BhBjK"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["def pred_and_plot(model, filename , class_names = class_names):\n"," \"\"\"\n"," Import an image located at filename , makes a prediction with model,\n"," and plot the image with the prediced class as the title\n"," \"\"\"\n"," #Import the target image and pre-process\n"," img = load_and_prep_image(filename)\n"," print('FILe Name type - ', type(filename))\n","\n"," #Make a Prediction\n"," pred = model.predict(tf.expand_dims(img , axis=0))\n","\n"," #Get the predicted class\n"," pred_class = class_names[np.argmax(tf.round(pred))]\n","\n"," #Plot the image and the predicted class\n"," plt.imshow(img)\n"," plt.title(f\"Prediction :{pred_class}\")\n"," plt.axis(False)\n","\n"," "],"metadata":{"id":"NJWZDQ58hV_T"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["image_path = \"/content/Leaf_Identification/Testing/Corn/RS_Rust 1570.JPG\"\n","pred_and_plot(resnet_model , image_path)"],"metadata":{"id":"OyOk8CB_hbDi"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["image_path2 = \"/content/Leaf_Identification/Testing/Cassava/1616768383552.jpg\"\n","pred_and_plot(resnet_model , image_path2)"],"metadata":{"id":"L0aoGSOX_zkG"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["image_path3 = \"/content/Leaf_Identification/Testing/Beans/angular_leaf_spot_23.jpg\"\n","pred_and_plot(resnet_model , image_path3)"],"metadata":{"id":"13h_bFKo_9SN"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["image_path4 = \"/content/Leaf_Identification/Testing/Okra/20210607_164357.jpg\"\n","pred_and_plot(resnet_model , image_path4)"],"metadata":{"id":"yIR2nFWnAGDo"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["image_path5 = \"/content/Leaf_Identification/Testing/Tomato/0ff228a7-60f4-47f0-b1a3-96969a63105f___GCREC_Bact.Sp 6056.JPG\"\n","pred_and_plot(resnet_model , image_path5)"],"metadata":{"id":"rZBWANZuAUPg"},"execution_count":null,"outputs":[]}],"metadata":{"accelerator":"GPU","colab":{"provenance":[]},"gpuClass":"standard","kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0}
# -*- coding: utf-8 -*-
"""deep_transfer_cnn_classifier.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1hZV9IeMob8g41niThWxjhhyHh37jOY40
"""
class DeepCNNClassifier:
    def __init__(self, dataset_link, model_name):
        self.dataset_link = dataset_link
        self.model_name = model_name
        self.number_of_classes = 2  # default; updated by get_class_names()

    # --------------------------------------------------- SHOULD_CALL_BY_USER ---------------------------------------------------
    def extract_dataset(self):
        import zipfile
        import os
        destination_path = '/content'
        zip_ref = zipfile.ZipFile(self.dataset_link, "r")
        extracted = zip_ref.namelist()
        zip_ref.extractall(destination_path)
        zip_ref.close()
        extracted_file = os.path.join(destination_path, extracted[0])
        print("=========== Dataset Extracted to - {} ===========".format(extracted_file))
        self.extracted_file_path = extracted_file
    # --------------------------------------------------- SHOULD_CALL_BY_USER ---------------------------------------------------
    def set_details(self, train_dir, test_dir, transfer_model_url=None, image_height=224, image_width=224, isGrayScale=False):
        self.train_dir = train_dir
        self.test_dir = test_dir
        self.transfer_model_url = transfer_model_url
        self.image_height = image_height
        self.image_width = image_width
        self.isGrayScale = isGrayScale
        # Grayscale images are converted back to 3-channel RGB by the
        # preprocessing function, and the TF Hub feature extractors expect
        # 3 channels, so the model input always uses 3 colour channels.
        self.num_color_channels = 3
        print("=========== Train/Test Directories Configured ===========")
    # --------------------------------------------------- SHOULD_CALL_BY_USER ---------------------------------------------------
    def show_dataset_summary(self):
        import os
        for dirpath, dirnames, filenames in os.walk(self.extracted_file_path):
            print(f"--> {len(dirnames)} directories AND {len(filenames)} images in '{dirpath}'.")

    # ---------------------------------------------------- WILL_USE_BY_ANOTHER_FUNCTION ----------------------------------------------------
    def get_class_names(self):
        import pathlib
        import numpy as np
        data_dir = pathlib.Path(self.train_dir)
        class_names = np.array(sorted([item.name for item in data_dir.glob('*')]))
        print("=========== Class Names Obtained - {} ===========".format(class_names))
        self.class_names = class_names
        self.number_of_classes = len(class_names)

    # --------------------------------------------------- SHOULD_CALL_BY_USER ---------------------------------------------------
    def view_random_image(self):
        self.get_class_names()
        import matplotlib.pyplot as plt
        import matplotlib.image as mpimg
        import random
        import os
        target_dir = self.train_dir
        target_class = random.choice(self.class_names)
        target_folder = target_dir + "/" + target_class
        random_image = random.sample(os.listdir(target_folder), 1)
        img = mpimg.imread(target_folder + "/" + random_image[0])
        plt.imshow(img)
        plt.title(target_class)
        plt.axis("off")
        print(f"Image Shape: {img.shape}")
    # ---------------------------------------------------- WILL_USE_BY_ANOTHER_FUNCTION ----------------------------------------------------
    def preprocess_dataset(self):
        print("=========== Data Pre-Processing Started ===========")
        from tensorflow.keras.preprocessing.image import ImageDataGenerator
        import tensorflow as tf
        self.get_class_names()  # ensure class_names/number_of_classes are set before branching below
        PRE_PROCESS_FUNC = None
        CLASS_MODE = 'binary'

        def to_grayscale_then_rgb(image):
            image = tf.image.rgb_to_grayscale(image)
            image = tf.image.grayscale_to_rgb(image)
            return image

        if self.isGrayScale:
            print("=========== Training will use GrayScale Images !!!")
            PRE_PROCESS_FUNC = to_grayscale_then_rgb
        if self.number_of_classes > 2:
            CLASS_MODE = 'categorical'
        train_data_gen = ImageDataGenerator(rescale=1/255.,
                                            preprocessing_function=PRE_PROCESS_FUNC,
                                            rotation_range=0.2,  # degrees
                                            shear_range=0.2,
                                            zoom_range=0.2,
                                            width_shift_range=0.3,
                                            height_shift_range=0.3,
                                            horizontal_flip=True)
        test_data_gen = ImageDataGenerator(rescale=1/255.,
                                           preprocessing_function=PRE_PROCESS_FUNC)
        # Import and transform / pre-process the data
        train_data_ = train_data_gen.flow_from_directory(self.train_dir,
                                                         target_size=(self.image_height, self.image_width),
                                                         batch_size=32,
                                                         class_mode=CLASS_MODE,
                                                         shuffle=True)
        test_data_ = test_data_gen.flow_from_directory(self.test_dir,
                                                       target_size=(self.image_height, self.image_width),
                                                       batch_size=32,
                                                       class_mode=CLASS_MODE,  # 'categorical' yields one-hot encoded labels
                                                       shuffle=True)
        print("=========== Data Pre-Processing Completed ===========")
        return (train_data_, test_data_)
    # ---------------------------------------------------- WILL_USE_BY_ANOTHER_FUNCTION ----------------------------------------------------
    def create_model(self):
        import tensorflow as tf
        import tensorflow_hub as hub
        from tensorflow.keras import layers
        # Download the pretrained model and wrap it as a Keras layer
        feature_extractor_layer = hub.KerasLayer(self.transfer_model_url,
                                                 trainable=False,  # freeze the already learned patterns
                                                 name="feature_extraction_layer",
                                                 input_shape=(self.image_height, self.image_width, self.num_color_channels))
        if self.number_of_classes > 2:
            self.activation_func = 'softmax'
            output_units = self.number_of_classes
        else:
            # class_mode='binary' yields scalar 0/1 labels, so a single
            # sigmoid unit is needed to match binary_crossentropy
            self.activation_func = 'sigmoid'
            output_units = 1
        model = tf.keras.Sequential([
            feature_extractor_layer,
            layers.Dense(output_units, activation=self.activation_func, name="output_layer")
        ])
        print("=========== Model is Created ===========")
        return model
    # --------------------------------------------------- SHOULD_CALL_BY_USER ---------------------------------------------------
    def start_training(self, num_epochs=5):
        import tensorflow as tf
        train_data_, test_data_ = self.preprocess_dataset()
        cnn_model = self.create_model()
        self.loss_function = 'binary_crossentropy'
        if self.number_of_classes > 2:
            self.loss_function = 'categorical_crossentropy'
        cnn_model.compile(loss=self.loss_function,
                          optimizer=tf.keras.optimizers.Adam(),
                          metrics=["accuracy"])
        print("=========== Model Compiled Successfully ===========")
        model_hist = cnn_model.fit(train_data_,
                                   epochs=num_epochs,
                                   steps_per_epoch=len(train_data_),
                                   validation_data=test_data_,
                                   validation_steps=len(test_data_))
        print("=========== Model Trained Successfully ===========")
        self.trained_model = cnn_model
        self.model_train_history = model_hist
        return cnn_model
    # --------------------------------------------------- SHOULD_CALL_BY_USER ---------------------------------------------------
    def save_model(self, save_to_path='/content'):
        import os
        model_name = str(self.model_name) + ".h5"
        save_path = os.path.join(save_to_path, model_name)  # honour save_to_path instead of ignoring it
        self.trained_model.save(save_path)
        print("=========== Model Saved to --> {} ===========".format(save_path))
    # --------------------------------------------------- SHOULD_CALL_BY_USER ---------------------------------------------------
    def plot_loss_curves(self):
        import matplotlib.pyplot as plt
        history = self.model_train_history
        loss = history.history["loss"]
        val_loss = history.history["val_loss"]
        accuracy = history.history["accuracy"]
        val_accuracy = history.history["val_accuracy"]
        # Get the number of epochs that we ran for
        epochs = range(len(history.history["loss"]))
        # Plot the loss
        plt.plot(epochs, loss, label="Training Loss")
        plt.plot(epochs, val_loss, label="Validation Loss")
        plt.title("Loss")
        plt.xlabel("Epochs")
        plt.legend()
        # Plot the accuracy
        plt.figure()
        plt.plot(epochs, accuracy, label="Training accuracy")
        plt.plot(epochs, val_accuracy, label="Validation accuracy")
        plt.title("Accuracy")
        plt.xlabel("Epochs")
        plt.legend()
    # ---------------------------------------------------- WILL_USE_BY_ANOTHER_FUNCTION ----------------------------------------------------
    def load_and_prep_image(self, filename):
        import tensorflow as tf
        img = tf.io.read_file(filename)
        img = tf.image.decode_image(img)
        # Resize the image to the model's input size
        img = tf.image.resize(img, size=[self.image_height, self.image_width])
        # Rescale the image - get all pixel values between 0 and 1
        img = img / 255.
        return img
    # --------------------------------------------------- SHOULD_CALL_BY_USER ---------------------------------------------------
    def pred_and_plot(self, filename):
        import tensorflow as tf
        import matplotlib.pyplot as plt
        import numpy as np
        model = self.trained_model
        class_names = self.class_names
        img = self.load_and_prep_image(filename)
        pred = model.predict(tf.expand_dims(img, axis=0))
        if self.number_of_classes > 2:
            pred_class = class_names[np.argmax(pred)]
        else:
            # single sigmoid output: round to 0/1 and index the class names
            pred_class = class_names[int(tf.round(pred)[0][0])]
        # Plot the image and the predicted class
        plt.imshow(img)
        plt.title(f"Prediction: {pred_class}")
        plt.axis(False)
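A minimal end-to-end usage sketch for DeepCNNClassifier, following the SHOULD_CALL_BY_USER order of the methods above. The zip path, directory names, and hub URL here are illustrative placeholders, not values from this commit:

# Hypothetical usage sketch; all paths below are placeholders.
clf = DeepCNNClassifier(dataset_link='/content/drive/MyDrive/dataset.zip',
                        model_name='okra_classifier')
clf.extract_dataset()              # unzips the dataset into /content
clf.show_dataset_summary()         # directory and image counts
clf.set_details(train_dir='/content/Dataset/Training',
                test_dir='/content/Dataset/Testing',
                transfer_model_url='https://tfhub.dev/google/imagenet/resnet_v2_50/feature_vector/5',
                image_height=224, image_width=224)
clf.view_random_image()            # sanity-check one training image
clf.start_training(num_epochs=5)   # preprocess -> build -> compile -> fit
clf.plot_loss_curves()
clf.save_model(save_to_path='/content')
clf.pred_and_plot('/content/Dataset/Testing/ClassA/sample.jpg')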
class ErrorModel {
  static String errorMessage = 'Something went wrong, please try again';
}
import 'package:flutter/gestures.dart';
import 'package:flutter/material.dart';
import 'package:govimithura/constants/user_types.dart';
import 'package:govimithura/models/error_model.dart';
import 'package:govimithura/providers/authentication_provider.dart';
import 'package:govimithura/screens/admin_home.dart';
import 'package:govimithura/screens/forget_password.dart';
import 'package:govimithura/screens/register.dart';
import 'package:govimithura/utils/loading_overlay.dart';
import 'package:govimithura/utils/screen_size.dart';
import 'package:govimithura/utils/utils.dart';
import 'package:govimithura/widgets/utils/buttons/custom_elevated_button.dart';
import 'package:govimithura/widgets/utils/common_widget.dart';
import 'package:govimithura/widgets/utils/text_fields/primary_textfield.dart';
import 'package:provider/provider.dart';
import 'home.dart';
class LoginScreen extends StatefulWidget {
const LoginScreen({super.key});
@override
State<LoginScreen> createState() => _LoginScreenState();
}
class _LoginScreenState extends State<LoginScreen> {
late AuthenticationProvider pAuthentication;
GlobalKey<FormState> loginFormKey = GlobalKey<FormState>();
@override
void initState() {
super.initState();
pAuthentication =
Provider.of<AuthenticationProvider>(context, listen: false);
}
@override
Widget build(BuildContext context) {
return Scaffold(
body: SingleChildScrollView(
child: SizedBox(
height: ScreenSize.height,
width: ScreenSize.width,
child: Form(
key: loginFormKey,
child: Column(
mainAxisSize: MainAxisSize.max,
mainAxisAlignment: MainAxisAlignment.center,
children: [
Padding(
padding: const EdgeInsets.symmetric(horizontal: 20),
child: Column(
children: [
const LogoText(),
spacingWidget(30, SpaceDirection.vertical),
Text(
'Welcome again to Govimithura',
style: Theme.of(context).textTheme.titleLarge,
),
spacingWidget(20, SpaceDirection.vertical),
PrimaryTextField(
hintText: "Enter your email",
validator: (value) {
if (value == null || value.isEmpty) {
return 'Required';
} else if (!(value.contains("@") &&
value.contains("."))) {
return 'Enter valid Email Address';
}
return null;
},
onChanged: (value) {
pAuthentication.setEmail(value.trim());
},
),
spacingWidget(20, SpaceDirection.vertical),
PrimaryTextField(
isPassword: true,
hintText: "Enter password",
validator: (value) {
if (value == null || value.isEmpty) {
return 'Required';
}
return null;
},
onChanged: (value) {
pAuthentication.setPassWord(value.trim());
},
),
spacingWidget(10, SpaceDirection.vertical),
GestureDetector(
onTap: () {
Navigator.push(
context,
MaterialPageRoute(
builder: (_) => const ForgetPasswordScreen(),
),
);
},
child: const Align(
alignment: Alignment.centerRight,
child: Text(
'Forgot Password?',
style: TextStyle(
fontSize: 15,
),
textAlign: TextAlign.end,
),
),
),
spacingWidget(20, SpaceDirection.vertical),
CustomElevatedButton(
text: "Login",
onPressed: () async {
if (loginFormKey.currentState!.validate()) {
bool success = await LoadingOverlay.of(context)
.during(pAuthentication.login());
if (success) {
if (mounted) {
Navigator.pushReplacement(
context,
MaterialPageRoute(
builder: (_) =>
pAuthentication.authModel.userType ==
UserType.user
? const Home()
: const AdminHome(),
),
);
}
} else {
if (mounted) {
Utils.showDialogBox(
context, ErrorModel.errorMessage);
}
}
}
},
),
spacingWidget(20, SpaceDirection.vertical),
RichText(
text: TextSpan(
text: "Don't have an account? ",
style: Theme.of(context).textTheme.bodyLarge,
children: [
TextSpan(
text: "Sign Up",
style: Theme.of(context)
.textTheme
.bodyLarge
?.copyWith(
color: Theme.of(context).primaryColor,
),
recognizer: TapGestureRecognizer()
..onTap = () {
Navigator.pushReplacement(
context,
MaterialPageRoute(
builder: (context) =>
const RegisterScreen(),
),
);
},
),
],
),
)
],
),
),
],
),
),
),
),
);
}
}