Commit c52cb39c authored by Warnasooriya M.D.S.

Fixed Evaluated Model

parent b093e4e1
@@ -9,78 +9,39 @@
"# 🟢 Getting the Data"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "KG86cI4E5fi0"
},
"source": [
"# 🟢 Visualize the Data"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "r6jZtph2HCDv"
},
"source": [
"# Model - 01"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "e_l--HlzDmE_"
},
"source": [
"# 🟢 ResNetModel"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "XfPJz4PK_czR"
},
"source": [
"# 🟢 Evaluating Models "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "kwgfR8nzq50J"
"id": "6mdmCmSGKt1m"
},
"outputs": [],
"source": [
"#Plot the validation and training curves separately\n",
"def plot_loss_curve(history):\n",
" '''\n",
" Return separate loss curves for training and validation metrics\n",
" '''\n",
" loss = history.history[\"loss\"]\n",
" val_loss = history.history[\"val_loss\"]\n",
"\n",
" accuracy = history.history[\"accuracy\"]\n",
" val_accuracy = history.history[\"val_accuracy\"]\n",
"\n",
" #get the number of epochs that we run for\n",
" epochs = range(len(history.history[\"loss\"]))\n",
"\n",
" #Plot the lost\n",
" plt.plot(epochs , loss , label=\"Training Loss\")\n",
" plt.plot(epochs , val_loss , label=\"Validation Loss\")\n",
" plt.title(\"Loss\")\n",
" plt.xlabel(\"Epochs\")\n",
" plt.legend()\n",
"# Imports\n",
"import numpy as np\n",
"import tensorflow as tf\n",
"import pandas as pd\n",
"import os\n",
"import cv2\n",
"import matplotlib.pyplot as plt\n",
"\n",
" #Plot the accuracy\n",
" plt.figure()\n",
" plt.plot(epochs , accuracy , label=\"Training accuracy\")\n",
" plt.plot(epochs , val_accuracy , label=\"Validation accuracy\")\n",
" plt.title(\"accuracy\")\n",
" plt.xlabel(\"Epochs\")\n",
" plt.legend()\n"
"from sklearn.model_selection import train_test_split\n",
"from tensorflow.keras.layers import Dropout\n",
"from tensorflow.keras.layers import Flatten, BatchNormalization\n",
"from tensorflow.keras.layers import Dense, MaxPooling2D, Conv2D\n",
"from tensorflow.keras.layers import Input, Activation, Add\n",
"from tensorflow.keras.models import Model\n",
"from tensorflow.keras.regularizers import l2\n",
"from tensorflow.keras.optimizers import Adam\n",
"from tensorflow.keras.callbacks import ModelCheckpoint\n",
"from keras.models import Sequential, load_model, Model\n",
"from keras.layers import Conv2D, MaxPool2D, Dense, Dropout, BatchNormalization, Flatten, Input\n",
"from sklearn.model_selection import train_test_split\n",
"from sklearn.metrics import plot_confusion_matrix\n",
"from sklearn.metrics import confusion_matrix\n",
"import seaborn as sns\n",
"import pandas as pd\n",
"from keras import backend as K"
]
},
{
@@ -88,65 +49,48 @@
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 573
"base_uri": "https://localhost:8080/"
},
"id": "8WMvE6vEq-n8",
"outputId": "56adfdbb-d619-4501-96fd-755f64e25a25"
"id": "9xQ11-U-xDBx",
"outputId": "bfea25e2-4c9c-4c79-d0ad-5dd88c5785d3"
},
"outputs": [],
"source": [
"plot_loss_curve(resnet_hist)"
"from google.colab import drive\n",
"drive.mount('/content/drive')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "6fAPtZLRqmaR"
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "AG5vw73LnjT0",
"outputId": "37d7e944-208f-483d-d02d-378ce40ca7d0"
},
"outputs": [],
"source": [
"#Import Dependencies\n",
"import tensorflow as tf\n",
"import tensorflow_hub as hub\n",
"from tensorflow.keras import layers\n",
"from tensorflow.keras.layers import Conv2D , MaxPool2D , Dense , Flatten"
"!pip install patool"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "mEfKr1tLqqiz"
},
"outputs": [],
"source": [
"def create_model(model_url , num_classes=3):\n",
"\n",
" feature_extractor_layer = hub.KerasLayer(model_url,\n",
" trainable = False, #freeze the already learned patterns \n",
" name = \"feature_extraction_layer\",\n",
" input_shape = (448, 448,3)) \n",
" #Create our own model\n",
" model = tf.keras.Sequential([\n",
" feature_extractor_layer,\n",
" \n",
" layers.Dense(num_classes , activation=\"softmax\" , name=\"output_layer\")\n",
" ])\n",
"\n",
" return model"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "CF7QWBeMxXo6"
"colab": {
"base_uri": "https://localhost:8080/",
"height": 125
},
"id": "BR4VY_ypng2U",
"outputId": "1757b16a-d956-4c8c-bdfe-f23158bbe166"
},
"outputs": [],
"source": [
"resnet_url = \"https://tfhub.dev/google/imagenet/resnet_v2_50/feature_vector/5\""
"import patoolib\n",
"data_path = '/content/drive/MyDrive/RP_SmartFarmer/Dinuka Sandaruwan - Pest Management/DataSet/Damage Data.rar'\n",
"patoolib.extract_archive(data_path, outdir=\"/content\")"
]
},
{
@@ -156,71 +100,14 @@
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "QSnaA4Qbqt2b",
"outputId": "cfdfc8ab-acc9-4811-f8c7-22b08d11c22b"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/15\n",
"69/69 [==============================] - 62s 827ms/step - loss: 0.7628 - accuracy: 0.6727 - val_loss: 0.8452 - val_accuracy: 0.6351\n",
"Epoch 2/15\n",
"69/69 [==============================] - 53s 772ms/step - loss: 0.5198 - accuracy: 0.7794 - val_loss: 0.8006 - val_accuracy: 0.6419\n",
"Epoch 3/15\n",
"69/69 [==============================] - 53s 767ms/step - loss: 0.4339 - accuracy: 0.8332 - val_loss: 0.6867 - val_accuracy: 0.7095\n",
"Epoch 4/15\n",
"69/69 [==============================] - 53s 769ms/step - loss: 0.3879 - accuracy: 0.8578 - val_loss: 0.7505 - val_accuracy: 0.6892\n",
"Epoch 5/15\n",
"69/69 [==============================] - 57s 824ms/step - loss: 0.3395 - accuracy: 0.8760 - val_loss: 0.6813 - val_accuracy: 0.7365\n",
"Epoch 6/15\n",
"69/69 [==============================] - 54s 784ms/step - loss: 0.3365 - accuracy: 0.8715 - val_loss: 0.7276 - val_accuracy: 0.7095\n",
"Epoch 7/15\n",
"69/69 [==============================] - 54s 780ms/step - loss: 0.3101 - accuracy: 0.8888 - val_loss: 0.6131 - val_accuracy: 0.7703\n",
"Epoch 8/15\n",
"69/69 [==============================] - 53s 769ms/step - loss: 0.2798 - accuracy: 0.9061 - val_loss: 0.7144 - val_accuracy: 0.7297\n",
"Epoch 9/15\n",
"69/69 [==============================] - 54s 774ms/step - loss: 0.2568 - accuracy: 0.9088 - val_loss: 0.6302 - val_accuracy: 0.7838\n",
"Epoch 10/15\n",
"69/69 [==============================] - 57s 822ms/step - loss: 0.2506 - accuracy: 0.9180 - val_loss: 0.6371 - val_accuracy: 0.7770\n",
"Epoch 11/15\n",
"69/69 [==============================] - 54s 778ms/step - loss: 0.2318 - accuracy: 0.9216 - val_loss: 0.7219 - val_accuracy: 0.7432\n",
"Epoch 12/15\n",
"69/69 [==============================] - 53s 771ms/step - loss: 0.2210 - accuracy: 0.9289 - val_loss: 0.6702 - val_accuracy: 0.7770\n",
"Epoch 13/15\n",
"69/69 [==============================] - 54s 774ms/step - loss: 0.2247 - accuracy: 0.9307 - val_loss: 0.7111 - val_accuracy: 0.7703\n",
"Epoch 14/15\n",
"69/69 [==============================] - 57s 824ms/step - loss: 0.2071 - accuracy: 0.9417 - val_loss: 0.6763 - val_accuracy: 0.7770\n",
"Epoch 15/15\n",
"69/69 [==============================] - 54s 780ms/step - loss: 0.1968 - accuracy: 0.9362 - val_loss: 0.6465 - val_accuracy: 0.7973\n"
]
}
],
"source": [
"#Create Resnet Model\n",
"resnet_model = create_model(resnet_url , \n",
" num_classes = 3)\n",
"#Compile our resnet model\n",
"resnet_model.compile(loss='categorical_crossentropy',\n",
" optimizer = tf.keras.optimizers.Adam(),\n",
" metrics=[\"accuracy\"])\n",
"#Fitting the model\n",
"resnet_hist = resnet_model.fit(train_data,\n",
" epochs=15,\n",
" steps_per_epoch=len(train_data),\n",
" validation_data = test_data,\n",
" validation_steps = len(test_data)\n",
" )"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "B-TvvNxe6A5l"
"id": "wNxAwhXguiPY",
"outputId": "d893c9bd-80af-47e0-f0a0-b9ea5444298d"
},
"outputs": [],
"source": [
"## 🟢 Pre-process Data "
"import os\n",
"for dirpath , dirnames, filenames in os.walk(\"Damage Data\"):\n",
" print(f\"There are {len(dirnames)} directories and {len(filenames)} images in '{dirpath}'.\")"
]
},
{
@@ -230,165 +117,25 @@
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "anWDmrMLHlHm",
"outputId": "fc45713a-ab85-4dc8-a9c3-2fd473e991b7"
"id": "95mky6a3vKfn",
"outputId": "b17d1bd5-32d0-4681-83de-203c418ea53f"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Found 1097 images belonging to 3 classes.\n",
"Found 148 images belonging to 3 classes.\n"
]
}
],
"outputs": [],
"source": [
"from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
"train_dir = \"Damage Data/Traning\"\n",
"test_dir = \"Damage Data/Testing\"\n",
"\n",
"#Normalize the data \n",
"train_datagen = ImageDataGenerator(rescale = 1/255.,\n",
" rotation_range = 0.2, #how much do you want to rotate an image\n",
" shear_range = 0.3,\n",
" zoom_range = 0.3, \n",
" width_shift_range=0.25,\n",
" height_shift_range=0.25,\n",
" horizontal_flip=True) \n",
"test_datagen = ImageDataGenerator(rescale = 1/255.)\n",
"#Loading the images from the directories\n",
"train_data = train_datagen.flow_from_directory(directory=train_dir,\n",
" target_size = (448, 448),\n",
" class_mode='categorical',\n",
" batch_size = 16,\n",
" shuffle=True)\n",
"\n",
"test_data = test_datagen.flow_from_directory(directory=test_dir,\n",
" target_size = (448, 448),\n",
" batch_size = 16,\n",
" class_mode = \"categorical\",\n",
" shuffle=True)"
"import pathlib\n",
"import numpy as np\n",
"data_dir = pathlib.Path(\"Damage Data/Traning\")\n",
"class_names = np.array(sorted([item.name for item in data_dir.glob(\"*\")]))\n",
"class_names"
]
},
{
"cell_type": "code",
"execution_count": null,
"cell_type": "markdown",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "V1riWehswsza",
"outputId": "662c175c-5f7a-45cc-fd7b-6569f5041a50"
"id": "KG86cI4E5fi0"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/10\n",
"69/69 [==============================] - ETA: 0s - loss: 1.8617 - accuracy: 0.4622\n",
"Epoch 1: loss improved from inf to 1.86165, saving model to /content/saved_models/pest_damages.h5\n",
"69/69 [==============================] - 69s 958ms/step - loss: 1.8617 - accuracy: 0.4622 - val_loss: 3.9329 - val_accuracy: 0.3851\n",
"Epoch 2/10\n",
"69/69 [==============================] - ETA: 0s - loss: 1.5739 - accuracy: 0.5059\n",
"Epoch 2: loss improved from 1.86165 to 1.57390, saving model to /content/saved_models/pest_damages.h5\n",
"69/69 [==============================] - 65s 943ms/step - loss: 1.5739 - accuracy: 0.5059 - val_loss: 2.1447 - val_accuracy: 0.4324\n",
"Epoch 3/10\n",
"69/69 [==============================] - ETA: 0s - loss: 1.3094 - accuracy: 0.5397\n",
"Epoch 3: loss improved from 1.57390 to 1.30940, saving model to /content/saved_models/pest_damages.h5\n",
"69/69 [==============================] - 63s 907ms/step - loss: 1.3094 - accuracy: 0.5397 - val_loss: 1.2881 - val_accuracy: 0.3514\n",
"Epoch 4/10\n",
"69/69 [==============================] - ETA: 0s - loss: 1.2277 - accuracy: 0.5624\n",
"Epoch 4: loss improved from 1.30940 to 1.22773, saving model to /content/saved_models/pest_damages.h5\n",
"69/69 [==============================] - 64s 920ms/step - loss: 1.2277 - accuracy: 0.5624 - val_loss: 1.0698 - val_accuracy: 0.4730\n",
"Epoch 5/10\n",
"69/69 [==============================] - ETA: 0s - loss: 1.1769 - accuracy: 0.5469\n",
"Epoch 5: loss improved from 1.22773 to 1.17689, saving model to /content/saved_models/pest_damages.h5\n",
"69/69 [==============================] - 65s 936ms/step - loss: 1.1769 - accuracy: 0.5469 - val_loss: 1.5349 - val_accuracy: 0.3446\n",
"Epoch 6/10\n",
"69/69 [==============================] - ETA: 0s - loss: 1.0909 - accuracy: 0.5606\n",
"Epoch 6: loss improved from 1.17689 to 1.09090, saving model to /content/saved_models/pest_damages.h5\n",
"69/69 [==============================] - 63s 919ms/step - loss: 1.0909 - accuracy: 0.5606 - val_loss: 1.1054 - val_accuracy: 0.6014\n",
"Epoch 7/10\n",
"69/69 [==============================] - ETA: 0s - loss: 1.0655 - accuracy: 0.5679\n",
"Epoch 7: loss improved from 1.09090 to 1.06552, saving model to /content/saved_models/pest_damages.h5\n",
"69/69 [==============================] - 66s 962ms/step - loss: 1.0655 - accuracy: 0.5679 - val_loss: 0.9427 - val_accuracy: 0.5743\n",
"Epoch 8/10\n",
"69/69 [==============================] - ETA: 0s - loss: 0.9755 - accuracy: 0.6053\n",
"Epoch 8: loss improved from 1.06552 to 0.97551, saving model to /content/saved_models/pest_damages.h5\n",
"69/69 [==============================] - 64s 924ms/step - loss: 0.9755 - accuracy: 0.6053 - val_loss: 1.6016 - val_accuracy: 0.3649\n",
"Epoch 9/10\n",
"69/69 [==============================] - ETA: 0s - loss: 0.9616 - accuracy: 0.6080\n",
"Epoch 9: loss improved from 0.97551 to 0.96164, saving model to /content/saved_models/pest_damages.h5\n",
"69/69 [==============================] - 64s 928ms/step - loss: 0.9616 - accuracy: 0.6080 - val_loss: 1.2331 - val_accuracy: 0.6216\n",
"Epoch 10/10\n",
"69/69 [==============================] - ETA: 0s - loss: 0.9172 - accuracy: 0.6427\n",
"Epoch 10: loss improved from 0.96164 to 0.91717, saving model to /content/saved_models/pest_damages.h5\n",
"69/69 [==============================] - 67s 964ms/step - loss: 0.9172 - accuracy: 0.6427 - val_loss: 0.9896 - val_accuracy: 0.5338\n"
]
}
],
"source": [
"height = 448\n",
"width = 448\n",
"depth = 3\n",
"n_classes = 3\n",
"\n",
"\n",
"#Create the model\n",
"model = Sequential()\n",
"inputShape = (height, width, depth)\n",
"chanDim = -1\n",
"if K.image_data_format() == \"channels_first\":\n",
" inputShape = (depth, height, width)\n",
" chanDim = 1\n",
"model.add(Conv2D(32, (3, 3), padding=\"same\",input_shape=inputShape))\n",
"model.add(Activation(\"relu\"))\n",
"model.add(BatchNormalization(axis=chanDim))\n",
"model.add(MaxPooling2D(pool_size=(3, 3)))\n",
"model.add(Dropout(0.25))\n",
"model.add(Conv2D(64, (3, 3), padding=\"same\"))\n",
"model.add(Activation(\"relu\"))\n",
"model.add(BatchNormalization(axis=chanDim))\n",
"model.add(Conv2D(64, (3, 3), padding=\"same\"))\n",
"model.add(Activation(\"relu\"))\n",
"model.add(BatchNormalization(axis=chanDim))\n",
"model.add(MaxPooling2D(pool_size=(2, 2)))\n",
"model.add(Dropout(0.25))\n",
"model.add(Conv2D(128, (3, 3), padding=\"same\"))\n",
"model.add(Activation(\"relu\"))\n",
"model.add(BatchNormalization(axis=chanDim))\n",
"model.add(Conv2D(128, (3, 3), padding=\"same\"))\n",
"model.add(Activation(\"relu\"))\n",
"model.add(BatchNormalization(axis=chanDim))\n",
"model.add(MaxPooling2D(pool_size=(2, 2)))\n",
"model.add(Dropout(0.25))\n",
"model.add(Flatten())\n",
"model.add(Dense(1024))\n",
"model.add(Activation(\"relu\"))\n",
"model.add(BatchNormalization())\n",
"model.add(Dropout(0.5))\n",
"model.add(Dense(n_classes))\n",
"model.add(Activation(\"softmax\"))\n",
"\n",
"#Compile the model\n",
"model.compile(loss = tf.keras.losses.CategoricalCrossentropy(),\n",
" optimizer = tf.keras.optimizers.Adam(),\n",
" metrics = ['accuracy'])\n",
"fle_s = '/content/saved_models/pest_damages.h5'\n",
"checkpointer = ModelCheckpoint(fle_s, monitor='loss', verbose=1, save_best_only=True,\n",
" save_weights_only=False, mode='auto', save_freq='epoch')\n",
"callback_list = [checkpointer]\n",
"\n",
"model_history = model.fit(train_data,\n",
" epochs=10,\n",
" batch_size=16,\n",
" steps_per_epoch=len(train_data),\n",
" validation_data = test_data,\n",
" validation_steps = len(test_data),\n",
" callbacks=[callback_list]\n",
" )\n"
"# 🟢 Visualize the Data"
]
},
{
@@ -460,69 +207,21 @@
]
},
{
"cell_type": "code",
"execution_count": 11,
"cell_type": "markdown",
"metadata": {
"id": "6mdmCmSGKt1m"
"id": "r6jZtph2HCDv"
},
"outputs": [],
"source": [
"# Imports\n",
"import numpy as np\n",
"import tensorflow as tf\n",
"import pandas as pd\n",
"import os\n",
"import cv2\n",
"import matplotlib.pyplot as plt\n",
"\n",
"from sklearn.model_selection import train_test_split\n",
"from tensorflow.keras.layers import Dropout\n",
"from tensorflow.keras.layers import Flatten, BatchNormalization\n",
"from tensorflow.keras.layers import Dense, MaxPooling2D, Conv2D\n",
"from tensorflow.keras.layers import Input, Activation, Add\n",
"from tensorflow.keras.models import Model\n",
"from tensorflow.keras.regularizers import l2\n",
"from tensorflow.keras.optimizers import Adam\n",
"from tensorflow.keras.callbacks import ModelCheckpoint\n",
"from keras.models import Sequential, load_model, Model\n",
"from keras.layers import Conv2D, MaxPool2D, Dense, Dropout, BatchNormalization, Flatten, Input\n",
"from sklearn.model_selection import train_test_split\n",
"from sklearn.metrics import plot_confusion_matrix\n",
"from sklearn.metrics import confusion_matrix\n",
"import seaborn as sns\n",
"import pandas as pd\n",
"from keras import backend as K"
"# Model - 01"
]
},
{
"cell_type": "code",
"execution_count": 13,
"cell_type": "markdown",
"metadata": {
"id": "7zzt0QSS5h7u"
"id": "B-TvvNxe6A5l"
},
"outputs": [],
"source": [
"import matplotlib.pyplot as plt\n",
"import matplotlib.image as mpimg\n",
"import random\n",
"import os\n",
"\n",
"def view_random_image(target_dir , target_class):\n",
" #Setup the target directory (we will view images from here)\n",
" target_folder = target_dir+\"/\"+target_class\n",
"\n",
" #Get a random image path\n",
" random_image = random.sample(os.listdir(target_folder) ,1)\n",
"\n",
" #Read in the image and plotted using the matplotlib\n",
" img = mpimg.imread(target_folder+\"/\"+random_image[0])\n",
" plt.imshow(img)\n",
" plt.title(target_class)\n",
" plt.axis(\"off\")\n",
"\n",
" print(f\"Image Shape:{img.shape}\") #Show the shape of the image\n",
"\n",
" return img"
"## 🟢 Pre-process Data "
]
},
{
@@ -530,27 +229,12 @@
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 191
"base_uri": "https://localhost:8080/"
},
"id": "mTkn0s0t7cPb",
"outputId": "9861f6d0-0265-46e4-8ef9-f7f4f37ec186"
"id": "anWDmrMLHlHm",
"outputId": "fc45713a-ab85-4dc8-a9c3-2fd473e991b7"
},
"outputs": [],
"source": [
"#Visualize Data\n",
"plt.figure()\n",
"plt.subplot(1 , 2 ,1)\n",
"img = view_random_image(target_dir=\"Damage Data/Traning\" , target_class=\"Thrips\")\n",
"plt.subplot(1 , 2, 2)\n",
"img = view_random_image(target_dir=\"Damage Data/Traning\" , target_class=\"Mealybug\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
"train_dir = \"Damage Data/Traning\"\n",
@@ -582,7 +266,13 @@
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "V1riWehswsza",
"outputId": "662c175c-5f7a-45cc-fd7b-6569f5041a50"
},
"outputs": [],
"source": [
"height = 448\n",
@@ -643,22 +333,24 @@
" validation_data = test_data,\n",
" validation_steps = len(test_data),\n",
" callbacks=[callback_list]\n",
" )"
" )\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"cell_type": "markdown",
"metadata": {
"id": "e_l--HlzDmE_"
},
"source": [
"🟢 ResNetModel"
"# 🟢 ResNetModel"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"metadata": {
"id": "6fAPtZLRqmaR"
},
"outputs": [],
"source": [
"#Import Dependencies\n",
@@ -671,7 +363,9 @@
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"metadata": {
"id": "mEfKr1tLqqiz"
},
"outputs": [],
"source": [
"def create_model(model_url , num_classes=3):\n",
@@ -693,7 +387,9 @@
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"metadata": {
"id": "CF7QWBeMxXo6"
},
"outputs": [],
"source": [
"resnet_url = \"https://tfhub.dev/google/imagenet/resnet_v2_50/feature_vector/5\""
@@ -702,7 +398,13 @@
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "QSnaA4Qbqt2b",
"outputId": "cfdfc8ab-acc9-4811-f8c7-22b08d11c22b"
},
"outputs": [],
"source": [
"#Create Resnet Model\n",
@@ -721,6 +423,15 @@
" )"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "XfPJz4PK_czR"
},
"source": [
"# 🟢 Evaluating Models "
]
},
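{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#A hedged sketch of one evaluation step, not the notebook's own code: inspect\n",
"#per-class errors with a confusion matrix. Assumes the fitted `model`,\n",
"#`test_data`, `class_names` and the setup-cell imports; also assumes a test\n",
"#generator built with shuffle=False, otherwise test_data.classes does not\n",
"#align with the prediction order.\n",
"test_data.reset()\n",
"y_prob = model.predict(test_data, steps=len(test_data))\n",
"y_pred = np.argmax(y_prob, axis=1)\n",
"y_true = test_data.classes  #ground-truth indices, in directory order\n",
"cm = confusion_matrix(y_true, y_pred)\n",
"sns.heatmap(cm, annot=True, fmt='d', xticklabels=class_names, yticklabels=class_names)\n",
"plt.xlabel('Predicted')\n",
"plt.ylabel('Actual')"
]
},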
{
"cell_type": "code",
"execution_count": null,
......