Commit 3ec0eded authored by Dinuka Nawanjana's avatar Dinuka Nawanjana

Initial commit

parent 7249487b
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
import os

from flask import Flask

# WSGI application object; routes.py attaches its view functions to this.
app = Flask(__name__)

# SECURITY: never ship a hard-coded secret key. Read it from the
# environment; the literal fallback only keeps local development working
# and must be overridden in any real deployment.
app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY', 'secret-key')

# Imported *after* `app` is created because routes.py registers its views
# on this module's `app` (the common small-Flask circular-import pattern).
from routes import *  # noqa: E402,F401,F403

if __name__ == '__main__':
    # Debug defaults to on (matching the original behaviour) but can be
    # disabled with FLASK_DEBUG=0 — the Werkzeug debugger must never be
    # exposed in production.
    app.run(debug=os.environ.get('FLASK_DEBUG', '1') == '1')
This diff is collapsed.
This diff is collapsed.
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "NvgOAuTNHnFv",
"outputId": "9af20872-9b69-4105-c9b0-a5189e93f11f"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Mounted at /content/drive\n"
]
}
],
"source": [
"from google.colab import drive\n",
"drive.mount('/content/drive')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "e5VoXOheOrbn"
},
"outputs": [],
"source": [
"import pandas as pd\n",
"import os\n",
"import shutil\n",
"import random\n",
"metadata = pd.read_csv('/content/drive/MyDrive/Dataset_skin/HAM10000_metadata.csv')\n",
"\n",
"# Define source and destination directories\n",
"src_dir = '/content/drive/MyDrive/Dataset_skin/HAM10000_images_part_1/'\n",
"dst_dir = '/content/newData'\n",
"\n",
"# Create destination directories for each class\n",
"for class_name in metadata['dx'].unique():\n",
" os.makedirs(os.path.join(dst_dir, class_name), exist_ok=True)\n",
"\n",
"# Copy images to their respective class directories\n",
"for index, row in metadata.iterrows():\n",
" img_name = row['image_id'] + '.jpg'\n",
" src_path = os.path.join(src_dir, img_name)\n",
" dst_path = os.path.join(dst_dir, row['dx'], img_name)\n",
" shutil.copy(src_path, dst_path)\n",
" \n",
"print('Dataset splitting complete.')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "wuoNOi_-B0mN"
},
"outputs": [],
"source": [
"!zip -r newData.zip newData/\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "ekUMBoOJB9FL"
},
"outputs": [],
"source": [
"!mv newData.zip /content/drive/MyDrive\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "MNEtNMmmQK6n"
},
"outputs": [],
"source": [
"\n",
"data_dir = '/content/newData'\n",
"train_dir = '/content/train_dir'\n",
"test_dir = '/content/test_dir'\n",
"\n",
"\n",
"# Define list of class names\n",
"class_names = [\"akiec\", \"bcc\", \"bkl\", \"df\", \"mel\", \"nv\", \"vasc\"]\n",
"\n",
"# Create train and test directories if they don't exist\n",
"if not os.path.exists(train_dir):\n",
" os.makedirs(train_dir)\n",
"if not os.path.exists(test_dir):\n",
" os.makedirs(test_dir)\n",
"\n",
"# Split data into train and test sets\n",
"for class_name in class_names:\n",
" class_dir = os.path.join(data_dir, class_name)\n",
" train_class_dir = os.path.join(train_dir, class_name)\n",
" test_class_dir = os.path.join(test_dir, class_name)\n",
" if not os.path.exists(train_class_dir):\n",
" os.makedirs(train_class_dir)\n",
" if not os.path.exists(test_class_dir):\n",
" os.makedirs(test_class_dir)\n",
" files = os.listdir(class_dir)\n",
" random.shuffle(files)\n",
" train_files = files[:int(len(files)*0.8)]\n",
" test_files = files[int(len(files)*0.8):]\n",
" for file in train_files:\n",
" src_path = os.path.join(class_dir, file)\n",
" dst_path = os.path.join(train_class_dir, file)\n",
" shutil.copy(src_path, dst_path)\n",
" for file in test_files:\n",
" src_path = os.path.join(class_dir, file)\n",
" dst_path = os.path.join(test_class_dir, file)\n",
" shutil.copy(src_path, dst_path)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "35cL5WbTA_Cr",
"outputId": "fff6d9c7-767d-40bb-da34-ee32b4ba51ec"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Found 8010 images belonging to 7 classes.\n",
"Found 2005 images belonging to 7 classes.\n"
]
}
],
"source": [
"import os\n",
"import numpy as np\n",
"import tensorflow as tf\n",
"from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
"from tensorflow.keras.models import Sequential\n",
"from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout\n",
"\n",
"# Define data paths and parameters\n",
"train_dir = '/content/train_dir/'\n",
"test_dir = '/content/test_dir/'\n",
"input_shape = (224, 224, 3)\n",
"batch_size = 32\n",
"epochs = 50\n",
"num_classes = 7\n",
"\n",
"# Create data generators with data augmentation\n",
"train_datagen = ImageDataGenerator(rescale=1./255,\n",
" rotation_range=40,\n",
" width_shift_range=0.2,\n",
" height_shift_range=0.2,\n",
" shear_range=0.2,\n",
" zoom_range=0.2,\n",
" horizontal_flip=True,\n",
" fill_mode='nearest')\n",
"test_datagen = ImageDataGenerator(rescale=1./255)\n",
"\n",
"train_generator = train_datagen.flow_from_directory(train_dir,\n",
" target_size=input_shape[:2],\n",
" batch_size=batch_size,\n",
" class_mode='categorical')\n",
"\n",
"test_generator = test_datagen.flow_from_directory(test_dir,\n",
" target_size=input_shape[:2],\n",
" batch_size=batch_size,\n",
" class_mode='categorical')\n",
"\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "UTqAmayWFPCF"
},
"outputs": [],
"source": [
"# Define the model architecture\n",
"model = Sequential()\n",
"model.add(Conv2D(32, (3, 3), activation='relu', input_shape=input_shape))\n",
"model.add(MaxPooling2D((2, 2)))\n",
"model.add(Conv2D(64, (3, 3), activation='relu'))\n",
"model.add(MaxPooling2D((2, 2)))\n",
"model.add(Conv2D(128, (3, 3), activation='relu'))\n",
"model.add(MaxPooling2D((2, 2)))\n",
"model.add(Conv2D(128, (3, 3), activation='relu'))\n",
"model.add(MaxPooling2D((2, 2)))\n",
"model.add(Flatten())\n",
"model.add(Dense(512, activation='relu'))\n",
"model.add(Dropout(0.5))\n",
"model.add(Dense(num_classes, activation='softmax'))\n",
"\n",
"# Compile the model\n",
"model.compile(loss='categorical_crossentropy',\n",
" optimizer='rmsprop',\n",
" metrics=['accuracy'])\n",
"\n",
"# Train the model\n",
"history = model.fit(train_generator,\n",
" steps_per_epoch=train_generator.samples//batch_size,\n",
" epochs=epochs,\n",
" validation_data=test_generator,\n",
" validation_steps=test_generator.samples//batch_size)\n",
"\n",
"# Save the model\n",
"model.save('skin_cancer_model.h5')\n"
]
}
],
"metadata": {
"colab": {
"provenance": []
},
"gpuClass": "standard",
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.8"
}
},
"nbformat": 4,
"nbformat_minor": 1
}
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "NvgOAuTNHnFv",
"outputId": "9af20872-9b69-4105-c9b0-a5189e93f11f"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Mounted at /content/drive\n"
]
}
],
"source": [
"from google.colab import drive\n",
"drive.mount('/content/drive')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "e5VoXOheOrbn"
},
"outputs": [],
"source": [
"import pandas as pd\n",
"import os\n",
"import shutil\n",
"import random\n",
"metadata = pd.read_csv('/content/drive/MyDrive/Dataset_skin/HAM10000_metadata.csv')\n",
"\n",
"# Define source and destination directories\n",
"src_dir = '/content/drive/MyDrive/Dataset_skin/HAM10000_images_part_1/'\n",
"dst_dir = '/content/newData'\n",
"\n",
"# Create destination directories for each class\n",
"for class_name in metadata['dx'].unique():\n",
" os.makedirs(os.path.join(dst_dir, class_name), exist_ok=True)\n",
"\n",
"# Copy images to their respective class directories\n",
"for index, row in metadata.iterrows():\n",
" img_name = row['image_id'] + '.jpg'\n",
" src_path = os.path.join(src_dir, img_name)\n",
" dst_path = os.path.join(dst_dir, row['dx'], img_name)\n",
" shutil.copy(src_path, dst_path)\n",
" \n",
"print('Dataset splitting complete.')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "wuoNOi_-B0mN"
},
"outputs": [],
"source": [
"!zip -r newData.zip newData/\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "ekUMBoOJB9FL"
},
"outputs": [],
"source": [
"!mv newData.zip /content/drive/MyDrive\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "MNEtNMmmQK6n"
},
"outputs": [],
"source": [
"\n",
"data_dir = '/content/newData'\n",
"train_dir = '/content/train_dir'\n",
"test_dir = '/content/test_dir'\n",
"\n",
"\n",
"# Define list of class names\n",
"class_names = [\"akiec\", \"bcc\", \"bkl\", \"df\", \"mel\", \"nv\", \"vasc\"]\n",
"\n",
"# Create train and test directories if they don't exist\n",
"if not os.path.exists(train_dir):\n",
" os.makedirs(train_dir)\n",
"if not os.path.exists(test_dir):\n",
" os.makedirs(test_dir)\n",
"\n",
"# Split data into train and test sets\n",
"for class_name in class_names:\n",
" class_dir = os.path.join(data_dir, class_name)\n",
" train_class_dir = os.path.join(train_dir, class_name)\n",
" test_class_dir = os.path.join(test_dir, class_name)\n",
" if not os.path.exists(train_class_dir):\n",
" os.makedirs(train_class_dir)\n",
" if not os.path.exists(test_class_dir):\n",
" os.makedirs(test_class_dir)\n",
" files = os.listdir(class_dir)\n",
" random.shuffle(files)\n",
" train_files = files[:int(len(files)*0.8)]\n",
" test_files = files[int(len(files)*0.8):]\n",
" for file in train_files:\n",
" src_path = os.path.join(class_dir, file)\n",
" dst_path = os.path.join(train_class_dir, file)\n",
" shutil.copy(src_path, dst_path)\n",
" for file in test_files:\n",
" src_path = os.path.join(class_dir, file)\n",
" dst_path = os.path.join(test_class_dir, file)\n",
" shutil.copy(src_path, dst_path)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "35cL5WbTA_Cr",
"outputId": "fff6d9c7-767d-40bb-da34-ee32b4ba51ec"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Found 8010 images belonging to 7 classes.\n",
"Found 2005 images belonging to 7 classes.\n"
]
}
],
"source": [
"import os\n",
"import numpy as np\n",
"import tensorflow as tf\n",
"from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
"from tensorflow.keras.models import Sequential\n",
"from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout\n",
"\n",
"# Define data paths and parameters\n",
"train_dir = '/content/train_dir/'\n",
"test_dir = '/content/test_dir/'\n",
"input_shape = (224, 224, 3)\n",
"batch_size = 32\n",
"epochs = 50\n",
"num_classes = 7\n",
"\n",
"# Create data generators with data augmentation\n",
"train_datagen = ImageDataGenerator(rescale=1./255,\n",
" rotation_range=40,\n",
" width_shift_range=0.2,\n",
" height_shift_range=0.2,\n",
" shear_range=0.2,\n",
" zoom_range=0.2,\n",
" horizontal_flip=True,\n",
" fill_mode='nearest')\n",
"test_datagen = ImageDataGenerator(rescale=1./255)\n",
"\n",
"train_generator = train_datagen.flow_from_directory(train_dir,\n",
" target_size=input_shape[:2],\n",
" batch_size=batch_size,\n",
" class_mode='categorical')\n",
"\n",
"test_generator = test_datagen.flow_from_directory(test_dir,\n",
" target_size=input_shape[:2],\n",
" batch_size=batch_size,\n",
" class_mode='categorical')\n",
"\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "UTqAmayWFPCF"
},
"outputs": [],
"source": [
"# Define the model architecture\n",
"model = Sequential()\n",
"model.add(Conv2D(32, (3, 3), activation='relu', input_shape=input_shape))\n",
"model.add(MaxPooling2D((2, 2)))\n",
"model.add(Conv2D(64, (3, 3), activation='relu'))\n",
"model.add(MaxPooling2D((2, 2)))\n",
"model.add(Conv2D(128, (3, 3), activation='relu'))\n",
"model.add(MaxPooling2D((2, 2)))\n",
"model.add(Conv2D(128, (3, 3), activation='relu'))\n",
"model.add(MaxPooling2D((2, 2)))\n",
"model.add(Flatten())\n",
"model.add(Dense(512, activation='relu'))\n",
"model.add(Dropout(0.5))\n",
"model.add(Dense(num_classes, activation='softmax'))\n",
"\n",
"# Compile the model\n",
"model.compile(loss='categorical_crossentropy',\n",
" optimizer='rmsprop',\n",
" metrics=['accuracy'])\n",
"\n",
"# Train the model\n",
"history = model.fit(train_generator,\n",
" steps_per_epoch=train_generator.samples//batch_size,\n",
" epochs=epochs,\n",
" validation_data=test_generator,\n",
" validation_steps=test_generator.samples//batch_size)\n",
"\n",
"# Save the model\n",
"model.save('skin_cancer_model.h5')\n"
]
}
],
"metadata": {
"colab": {
"provenance": []
},
"gpuClass": "standard",
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.8"
}
},
"nbformat": 4,
"nbformat_minor": 1
}
This diff is collapsed.
This diff is collapsed.
absl-py==1.4.0
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment