22_23-J 65
Commit 3bcaac39, authored Jan 28, 2023 by Warnasooriya M.D.S.
Model 01 Developed
Parent: 940b3a2e

Showing 2 changed files with 250 additions and 0 deletions (+250 / -0)
Backend/.ipynb_checkpoints/SF__Dinuka_Pest_Damages_identification_V01-checkpoint.ipynb  +125 / -0
Backend/SF__Dinuka_Pest_Damages_identification_V01.ipynb  +125 / -0
Backend/.ipynb_checkpoints/SF__Dinuka_Pest_Damages_identification_V01-checkpoint.ipynb
@@ -125,6 +125,131 @@
"plt.subplot(1 , 2, 2)\n",
"plt.subplot(1 , 2, 2)\n",
"img = view_random_image(target_dir=\"Damage Data/Traning\" , target_class=\"Mealybug\")"
"img = view_random_image(target_dir=\"Damage Data/Traning\" , target_class=\"Mealybug\")"
]
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"Model - 01"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"🟢 Pre-process Data"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
"train_dir = \"Damage Data/Traning\"\n",
"test_dir = \"Damage Data/Testing\"\n",
"\n",
"#Normalize the data \n",
"train_datagen = ImageDataGenerator(rescale = 1/255.,\n",
" rotation_range = 0.2, #how much do you want to rotate an image\n",
" shear_range = 0.3,\n",
" zoom_range = 0.3, \n",
" width_shift_range=0.25,\n",
" height_shift_range=0.25,\n",
" horizontal_flip=True) \n",
"test_datagen = ImageDataGenerator(rescale = 1/255.)\n",
"#Loading the images from the directories\n",
"train_data = train_datagen.flow_from_directory(directory=train_dir,\n",
" target_size = (448, 448),\n",
" class_mode='categorical',\n",
" batch_size = 16,\n",
" shuffle=True)\n",
"\n",
"test_data = test_datagen.flow_from_directory(directory=test_dir,\n",
" target_size = (448, 448),\n",
" batch_size = 16,\n",
" class_mode = \"categorical\",\n",
" shuffle=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"height = 448\n",
"width = 448\n",
"depth = 3\n",
"n_classes = 3\n",
"\n",
"\n",
"#Create the model\n",
"model = Sequential()\n",
"inputShape = (height, width, depth)\n",
"chanDim = -1\n",
"if K.image_data_format() == \"channels_first\":\n",
" inputShape = (depth, height, width)\n",
" chanDim = 1\n",
"model.add(Conv2D(32, (3, 3), padding=\"same\",input_shape=inputShape))\n",
"model.add(Activation(\"relu\"))\n",
"model.add(BatchNormalization(axis=chanDim))\n",
"model.add(MaxPooling2D(pool_size=(3, 3)))\n",
"model.add(Dropout(0.25))\n",
"model.add(Conv2D(64, (3, 3), padding=\"same\"))\n",
"model.add(Activation(\"relu\"))\n",
"model.add(BatchNormalization(axis=chanDim))\n",
"model.add(Conv2D(64, (3, 3), padding=\"same\"))\n",
"model.add(Activation(\"relu\"))\n",
"model.add(BatchNormalization(axis=chanDim))\n",
"model.add(MaxPooling2D(pool_size=(2, 2)))\n",
"model.add(Dropout(0.25))\n",
"model.add(Conv2D(128, (3, 3), padding=\"same\"))\n",
"model.add(Activation(\"relu\"))\n",
"model.add(BatchNormalization(axis=chanDim))\n",
"model.add(Conv2D(128, (3, 3), padding=\"same\"))\n",
"model.add(Activation(\"relu\"))\n",
"model.add(BatchNormalization(axis=chanDim))\n",
"model.add(MaxPooling2D(pool_size=(2, 2)))\n",
"model.add(Dropout(0.25))\n",
"model.add(Flatten())\n",
"model.add(Dense(1024))\n",
"model.add(Activation(\"relu\"))\n",
"model.add(BatchNormalization())\n",
"model.add(Dropout(0.5))\n",
"model.add(Dense(n_classes))\n",
"model.add(Activation(\"softmax\"))\n",
"\n",
"#Compile the model\n",
"model.compile(loss = tf.keras.losses.CategoricalCrossentropy(),\n",
" optimizer = tf.keras.optimizers.Adam(),\n",
" metrics = ['accuracy'])\n",
"fle_s = '/content/saved_models/pest_damages.h5'\n",
"checkpointer = ModelCheckpoint(fle_s, monitor='loss', verbose=1, save_best_only=True,\n",
" save_weights_only=False, mode='auto', save_freq='epoch')\n",
"callback_list = [checkpointer]\n",
"\n",
"model_history = model.fit(train_data,\n",
" epochs=10,\n",
" batch_size=16,\n",
" steps_per_epoch=len(train_data),\n",
" validation_data = test_data,\n",
" validation_steps = len(test_data),\n",
" callbacks=[callback_list]\n",
" )"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
...
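The hunk begins at line 125 of the notebook, so the cells that import TensorFlow/Keras are not part of this diff (only ImageDataGenerator is imported inside the added pre-processing cell). A minimal sketch of the imports the added cells appear to rely on; the aliases and grouping are an assumption, not taken from the commit:

# Assumed imports for the added cells; not part of this commit's diff.
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras import backend as K
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Conv2D, Activation, BatchNormalization,
                                     MaxPooling2D, Dropout, Flatten, Dense)
from tensorflow.keras.callbacks import ModelCheckpoint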
Backend/SF__Dinuka_Pest_Damages_identification_V01.ipynb
@@ -125,6 +125,131 @@
The 125 added lines are identical to those shown above for the checkpoint copy of this notebook.
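The training cell checkpoints the best model to '/content/saved_models/pest_damages.h5'. As a usage illustration, a minimal sketch of loading that file and classifying a single image, assuming the same 448x448 input size, 1/255 rescaling, and one sub-folder per class under "Damage Data/Traning"; the test image path and the class-name lookup are hypothetical:

# Inference sketch; the image path and class-folder layout are assumptions.
import os
import numpy as np
import tensorflow as tf

model = tf.keras.models.load_model('/content/saved_models/pest_damages.h5')

# flow_from_directory assigns class indices in sorted sub-folder order,
# so re-derive the label names the same way (assumes only class folders exist).
class_names = sorted(os.listdir('Damage Data/Traning'))

img = tf.keras.preprocessing.image.load_img('sample_damage.jpg', target_size=(448, 448))
x = tf.keras.preprocessing.image.img_to_array(img) / 255.0  # same rescaling as training
x = np.expand_dims(x, axis=0)                               # add a batch dimension

probs = model.predict(x)[0]
print(class_names[int(np.argmax(probs))], float(probs.max()))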