Top view model part two modified

parent 78a3dad9
......@@ -524,6 +524,60 @@
"ensemble_model = Model(inputs = model_input , outputs = ensemble_output , name = 'ensemble')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Compile the ensemble model with the Adam optimizer, categorical\n",
"# cross-entropy loss, and accuracy as the reported metric.\n",
"# NOTE(review): from_logits=False assumes the ensemble's output layer\n",
"# already emits softmax probabilities — confirm against the model definition.\n",
"ensemble_model.compile(optimizer='adam' , loss = tf.losses.CategoricalCrossentropy(from_logits=False), metrics =['accuracy'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def plot_loss_curve(history):\n",
"    '''\n",
"    Plot the training/validation loss and accuracy curves recorded in a\n",
"    Keras History object, one figure per metric. Returns nothing; the\n",
"    figures are drawn via matplotlib's current-figure state machine.\n",
"\n",
"    Expects history.history to contain the keys 'loss', 'val_loss',\n",
"    'accuracy' and 'val_accuracy' (i.e. the model was compiled with\n",
"    metrics=['accuracy'] and fit with validation data).\n",
"    '''\n",
"    loss = history.history[\"loss\"]\n",
"    val_loss = history.history[\"val_loss\"]\n",
"\n",
"    accuracy = history.history[\"accuracy\"]\n",
"    val_accuracy = history.history[\"val_accuracy\"]\n",
"\n",
"    # x-axis values: one point per completed training epoch\n",
"    epochs = range(len(history.history[\"loss\"]))\n",
"\n",
"    # Plot the loss curves (drawn on the current matplotlib figure)\n",
"    plt.plot(epochs , loss , label=\"Training Loss\")\n",
"    plt.plot(epochs , val_loss , label=\"Validation Loss\")\n",
"    plt.title(\"Loss\")\n",
"    plt.xlabel(\"Epochs\")\n",
"    plt.legend()\n",
"\n",
"    # Plot the accuracy curves on a separate, new figure\n",
"    plt.figure()\n",
"    plt.plot(epochs , accuracy , label=\"Training accuracy\")\n",
"    plt.plot(epochs , val_accuracy , label=\"Validation accuracy\")\n",
"    plt.title(\"accuracy\")\n",
"    plt.xlabel(\"Epochs\")\n",
"    plt.legend()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Visualize the loss/accuracy curves from the ensemble model's training run\n",
"plot_loss_curve(ensemble_hist)"
]
},
{
"cell_type": "code",
"execution_count": null,
......
......@@ -524,6 +524,60 @@
"ensemble_model = Model(inputs = model_input , outputs = ensemble_output , name = 'ensemble')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Compile the ensemble model with the Adam optimizer, categorical\n",
"# cross-entropy loss, and accuracy as the reported metric.\n",
"# NOTE(review): from_logits=False assumes the ensemble's output layer\n",
"# already emits softmax probabilities — confirm against the model definition.\n",
"ensemble_model.compile(optimizer='adam' , loss = tf.losses.CategoricalCrossentropy(from_logits=False), metrics =['accuracy'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def plot_loss_curve(history):\n",
"    '''\n",
"    Plot the training/validation loss and accuracy curves recorded in a\n",
"    Keras History object, one figure per metric. Returns nothing; the\n",
"    figures are drawn via matplotlib's current-figure state machine.\n",
"\n",
"    Expects history.history to contain the keys 'loss', 'val_loss',\n",
"    'accuracy' and 'val_accuracy' (i.e. the model was compiled with\n",
"    metrics=['accuracy'] and fit with validation data).\n",
"    '''\n",
"    loss = history.history[\"loss\"]\n",
"    val_loss = history.history[\"val_loss\"]\n",
"\n",
"    accuracy = history.history[\"accuracy\"]\n",
"    val_accuracy = history.history[\"val_accuracy\"]\n",
"\n",
"    # x-axis values: one point per completed training epoch\n",
"    epochs = range(len(history.history[\"loss\"]))\n",
"\n",
"    # Plot the loss curves (drawn on the current matplotlib figure)\n",
"    plt.plot(epochs , loss , label=\"Training Loss\")\n",
"    plt.plot(epochs , val_loss , label=\"Validation Loss\")\n",
"    plt.title(\"Loss\")\n",
"    plt.xlabel(\"Epochs\")\n",
"    plt.legend()\n",
"\n",
"    # Plot the accuracy curves on a separate, new figure\n",
"    plt.figure()\n",
"    plt.plot(epochs , accuracy , label=\"Training accuracy\")\n",
"    plt.plot(epochs , val_accuracy , label=\"Validation accuracy\")\n",
"    plt.title(\"accuracy\")\n",
"    plt.xlabel(\"Epochs\")\n",
"    plt.legend()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Visualize the loss/accuracy curves from the ensemble model's training run\n",
"plot_loss_curve(ensemble_hist)"
]
},
{
"cell_type": "code",
"execution_count": null,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment