side view model added

parent 8ff9e9dc
@@ -26,7 +26,7 @@
"metadata": {},
"outputs": [],
"source": [
-"dataset_location = '/content/drive/MyDrive/RP_SmartFarmer/Sandhini Gamage - Weed identification /Dataset/FinalizedWeedDataSet.zip'"
+"dataset_location = '/content/drive/MyDrive/FinalizedWeedDataSet.zip'"
]
},
{
@@ -55,6 +55,216 @@
"for dirpath , dirnames , filenames in os.walk(FILE_DIR):\n",
"    print(f\"There are {len(dirnames)} directories and {len(filenames)} images in '{dirpath}'.\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"✅ 01- A Model for Side View"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_dir_side = \"/content/content/Weed Dataset/Side view/training\" #Training Directory path of the Side View Image Dataset\n",
"test_dir_side = \"/content/content/Weed Dataset/Side view/testing\"\n",
"\n",
"#lets get the class names\n",
"import pathlib\n",
"import numpy as np\n",
"\n",
"data_dir = pathlib.Path(train_dir_side)\n",
"class_names = np.array(sorted([item.name for item in data_dir.glob('*')]))\n",
"print(class_names)"
]
},
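{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Illustrative sketch (not in the original commit): count the images per class\n",
"#to check how balanced the side-view training set is. Assumes one sub-directory\n",
"#per class under train_dir_side, matching class_names above.\n",
"import os\n",
"\n",
"for class_name in class_names:\n",
"    n_images = len(os.listdir(os.path.join(train_dir_side, class_name)))\n",
"    print(f\"{class_name}: {n_images} images\")"
]
},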
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"🟨 Data Visualization"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import matplotlib.pyplot as plt\n",
"import matplotlib.image as mpimg\n",
"import random\n",
"import os\n",
"\n",
"def view_random_image(target_dir , target_class):\n",
" ''' \n",
" This Function will displays a random Image from the dataset\n",
" '''\n",
" target_folder = target_dir+\"/\"+target_class\n",
" random_image = random.sample(os.listdir(target_folder) ,1)\n",
"\n",
" img = mpimg.imread(target_folder+\"/\"+random_image[0])\n",
" plt.imshow(img)\n",
" plt.title(target_class)\n",
" plt.axis(\"off\")\n",
"\n",
" print(f\"Image Shape:{img.shape}\") \n",
"\n",
" return img"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import random\n",
"img = view_random_image(target_dir=train_dir_side , \n",
" target_class = random.choice(class_names))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"🟨 DATA Preprocessing"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
"\n",
"#Creating Image Data Generators for Training Data with augmentation\n",
"train_data_gen = ImageDataGenerator(rescale=1/255.,\n",
" #preprocessing_function=to_grayscale_then_rgb,\n",
" rotation_range = 0.2,\n",
" shear_range = 0.2,\n",
" zoom_range = 0.2,\n",
" width_shift_range=0.3,\n",
" height_shift_range = 0.3,\n",
" horizontal_flip= True)\n",
"\n",
"#Create ImageDatagenerator for testing data\n",
"test_data_gen = ImageDataGenerator(rescale=1/255.)\n",
"\n",
"#Import and Transform/pre process the data\n",
"train_data_multi = train_data_gen.flow_from_directory(train_dir_side,\n",
" target_size = (224,224),\n",
" batch_size = 32,\n",
" class_mode = 'categorical',\n",
" shuffle = True)\n",
"test_data_multi = test_data_gen.flow_from_directory(test_dir_side,\n",
" target_size = (224, 224),\n",
" batch_size = 32,\n",
" class_mode = 'categorical', #This will gives us one-hot encoded data\n",
" shuffle = True)"
]
},
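{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Illustrative sketch (not in the original commit): pull one augmented batch from\n",
"#the training generator and plot the first few images, to sanity-check the\n",
"#augmentation settings above. class_names is sorted alphabetically, so it lines\n",
"#up with the generator's one-hot class indices.\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"\n",
"aug_images , aug_labels = next(train_data_multi)\n",
"plt.figure(figsize=(10, 4))\n",
"for i in range(4):\n",
"    plt.subplot(1, 4, i + 1)\n",
"    plt.imshow(aug_images[i]) #already rescaled to [0, 1]\n",
"    plt.title(class_names[np.argmax(aug_labels[i])])\n",
"    plt.axis(\"off\")"
]
},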
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"🟨 EfficientNetB0 Model"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"efficientnet_url = \"https://tfhub.dev/tensorflow/efficientnet/b0/feature-vector/1\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Import Dependencies\n",
"import tensorflow as tf\n",
"import tensorflow_hub as hub\n",
"from tensorflow.keras import layers"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Lets make a create_model() function to create a model from a URL\n",
"def create_model(model_url , num_classes=7):\n",
"\n",
" feature_extractor_layer = hub.KerasLayer(model_url,\n",
" trainable = False, #freeze the already learned patterns \n",
" name = \"feature_extraction_layer\",\n",
" input_shape = (224, 224,3)) \n",
" #Create our own model\n",
" model = tf.keras.Sequential([\n",
" feature_extractor_layer,\n",
" layers.Dense(num_classes , activation=\"softmax\" , name=\"output_layer\")\n",
" ])\n",
"\n",
" return model"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Create Resnet Model\n",
"efficientnet_model_side = create_model(efficientnet_url , \n",
" num_classes = 3)"
]
},
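{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Quick look (added for illustration): the frozen TF Hub feature extractor\n",
"#followed by the 3-unit softmax output layer.\n",
"efficientnet_model_side.summary()"
]
},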
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Compile our resnet model\n",
"efficientnet_model_side.compile(loss='categorical_crossentropy',\n",
" optimizer = tf.keras.optimizers.Adam(),\n",
" metrics=[\"accuracy\"])\n",
"#Fitting the model\n",
"side_vw_model_hist = efficientnet_model_side.fit(train_data_multi,\n",
" epochs=8,\n",
" steps_per_epoch=len(train_data_multi),\n",
" validation_data = test_data_multi,\n",
" validation_steps = len(test_data_multi)\n",
" )"
]
},
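{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Illustrative sketch (not in the original commit): plot the loss and accuracy\n",
"#curves recorded in side_vw_model_hist to check for over- or under-fitting.\n",
"import pandas as pd\n",
"import matplotlib.pyplot as plt\n",
"\n",
"hist_df = pd.DataFrame(side_vw_model_hist.history)\n",
"hist_df[[\"loss\" , \"val_loss\"]].plot(title=\"Loss\")\n",
"hist_df[[\"accuracy\" , \"val_accuracy\"]].plot(title=\"Accuracy\")\n",
"plt.show()"
]
},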
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Saving the h5 model back to the google drive\n",
"efficientnet_model_side.save('/content/drive/MyDrive/RP_SmartFarmer/Sandhini Gamage - Weed identification /private/saved_models/Weed_side_view2.h5')"
]
},
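{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Sanity check (illustrative, not in the original commit): reload the saved .h5\n",
"#model. Because it contains a hub.KerasLayer, Keras needs that class passed in\n",
"#via custom_objects when deserializing.\n",
"loaded_model = tf.keras.models.load_model(\n",
"    '/content/drive/MyDrive/RP_SmartFarmer/Sandhini Gamage - Weed identification /private/saved_models/Weed_side_view2.h5',\n",
"    custom_objects={'KerasLayer': hub.KerasLayer})\n",
"loaded_model.evaluate(test_data_multi)"
]
}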
],
"metadata": {
...