Commit bdb40bfe authored by Saluk Bawantha

Merge branch 'IT18124354_SALUKB.M.M' into 'master'

create cnn model with more diseases

See merge request !23
parents 5fe2d444 5073225c
import os

import predict_image_class
from flask import Flask, request, send_file
from flask_cors import CORS, cross_origin

app = Flask(__name__)
cors = CORS(app)

uploadPath = "uploads"


@app.route('/predict', methods=['POST'])
@cross_origin()
def uploadImage():
    print(os.getcwd())
    # The client sends the image in the multipart form field "file".
    f = request.files['file']
    splits = f.filename.split("/")
    print(splits[len(splits) - 1])
    # os.remove(os.path.join(uploadPath, "upload.jpg"))
    # Store every upload under a fixed name, then run the classifier on it.
    f.save(os.path.join(uploadPath, "upload.jpg"))
    return predict_image_class.predictImageClass("uploads/upload.jpg")


@app.route('/getImage', methods=['POST', 'GET'])
@cross_origin()
def getImage():
    # Serve back the most recently uploaded image.
    return send_file("uploads/upload.jpg", mimetype='image/gif')


if __name__ == '__main__':
    app.run(debug=True, port=5000, host="0.0.0.0")
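As a quick usage sketch (not part of the merge request), the /predict endpoint can be exercised from Python roughly as follows; the file name sample.jpg and the local host/port are assumptions, and the field name "file" matches request.files['file'] above:

import requests

# Hypothetical client call against a locally running server.
with open("sample.jpg", "rb") as fh:
    resp = requests.post("http://localhost:5000/predict",
                         files={"file": ("sample.jpg", fh, "image/jpeg")})
print(resp.json())  # e.g. {"pred": "...", "accuracy": "..."} as returned by predictImageClass

# The last uploaded image can then be fetched back via /getImage.
img = requests.get("http://localhost:5000/getImage")
print(len(img.content), "bytes")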
@@ -9,11 +9,11 @@ import matplotlib.pyplot as plt
 img_width, img_height = 350, 350
-train_data_dir = 'data/train'
-validation_data_dir = 'data/test'
-nb_train_samples = 168
-nb_validation_samples = 20
-epochs = 20
+train_data_dir = 'v2_data/train'
+validation_data_dir = 'v2_data/test'
+nb_train_samples = 150
+nb_validation_samples = 30
+epochs = 15
 batch_size = 20
 if K.image_data_format() == 'channels_first':
@@ -26,20 +26,20 @@ model.add(Conv2D(32, (2, 2), input_shape = input_shape))
 model.add(Activation('relu'))
 model.add(MaxPooling2D(pool_size =(2, 2)))
-model.add(Conv2D(32, (2, 2)))
+model.add(Conv2D(64, (2, 2)))
 model.add(Activation('relu'))
 model.add(MaxPooling2D(pool_size =(2, 2)))
-model.add(Conv2D(64, (2, 2)))
+model.add(Conv2D(32, (2, 2)))
 model.add(Activation('relu'))
 model.add(MaxPooling2D(pool_size =(2, 2)))
 model.add(Flatten())
-model.add(Dense(64))
+model.add(Dense(32))
 model.add(Activation('relu'))
 model.add(Dropout(0.5))
-model.add(Dense(2))
+model.add(Dense(3))
 model.add(Activation('softmax'))
 model.compile(loss ='sparse_categorical_crossentropy',
...
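The rest of the training script is collapsed in the diff. A hedged sketch of how these settings are typically wired into a Keras training run follows; the directory layout under v2_data/ (one subfolder per class) and the exact generator/fit calls are assumptions, reusing the variables defined above:

from keras.preprocessing.image import ImageDataGenerator

# Assumed data pipeline: integer labels ('sparse') to match sparse_categorical_crossentropy.
train_datagen = ImageDataGenerator(rescale=1. / 255)
test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(
    train_data_dir, target_size=(img_width, img_height),
    batch_size=batch_size, class_mode='sparse')
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir, target_size=(img_width, img_height),
    batch_size=batch_size, class_mode='sparse')

model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size)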
skin_disease/model loss.png: updated plot image (24.6 KB → 28 KB)
+import random
 from keras.layers import Activation
 import keras
 import numpy as np
@@ -14,10 +16,6 @@ def predictImageClass(image):
     K.clear_session()
     # tensorflow.reset_default_graph()
-    nb_train_samples = 112
-    nb_validation_samples = 20
-    epochs = 10
-    batch_size = 2
     img_width, img_height = 350, 350
     if K.image_data_format() == 'channels_first':
@@ -25,31 +23,32 @@ def predictImageClass(image):
     else:
         input_shape = (img_width, img_height, 3)
-    modelI = Sequential()
-    modelI.add(Conv2D(32, (2, 2), input_shape=input_shape))
-    modelI.add(Activation('relu'))
-    modelI.add(MaxPooling2D(pool_size=(2, 2)))
-    modelI.add(Conv2D(32, (2, 2)))
-    modelI.add(Activation('relu'))
-    modelI.add(MaxPooling2D(pool_size=(2, 2)))
-    modelI.add(Conv2D(64, (2, 2)))
-    modelI.add(Activation('relu'))
-    modelI.add(MaxPooling2D(pool_size=(2, 2)))
-    modelI.add(Flatten())
-    modelI.add(Dense(64))
-    modelI.add(Activation('relu'))
-    modelI.add(Dropout(0.5))
-    modelI.add(Dense(2))
-    modelI.add(Activation('softmax'))
-    modelI.compile(loss='sparse_categorical_crossentropy',
+    model = Sequential()
+    model.add(Conv2D(32, (2, 2), input_shape = input_shape))
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D(pool_size =(2, 2)))
+    model.add(Conv2D(64, (2, 2)))
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D(pool_size =(2, 2)))
+    model.add(Conv2D(32, (2, 2)))
+    model.add(Activation('relu'))
+    model.add(MaxPooling2D(pool_size =(2, 2)))
+    model.add(Flatten())
+    model.add(Dense(32))
+    model.add(Activation('relu'))
+    model.add(Dropout(0.5))
+    model.add(Dense(3))
+    model.add(Activation('softmax'))
+    model.compile(loss='sparse_categorical_crossentropy',
                   optimizer='adam',
                   metrics=['accuracy'])
-    modelI.load_weights('model_saved.h5')
+    model.load_weights('model_saved.h5')
     sess = keras.backend.get_session()
     # image = request.args.get('image', default = 1, type = str)
@@ -63,12 +62,13 @@ def predictImageClass(image):
     img = img.eval(session=sess)  # convert to numpy array
     img = np.expand_dims(img, 0)  # make 'batch' of 1
-    pred = modelI.predict(img)
+    pred = model.predict(img)
     # pred = labels["label_names"][np.argmax(pred)]
     print(pred)
-    y = ["Sarcoptic & demodectic mange", "Ring worms"]
+    y = ["Demodatic", "Dermatites", "ringworms"]
     print(np.argmax(pred))
     print(y[np.argmax(pred)])
+    i = random.randint(0, 1)
     return {
         "pred": y[np.argmax(pred)],
         "accuracy": str(pred[0][np.argmax(pred)])
...
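The image-loading code above this hunk is collapsed in the diff. A minimal sketch of producing the (1, 350, 350, 3) batch that model.predict expects, using keras.preprocessing here instead of the TensorFlow-session decoding used in the actual file, might look like:

from keras.preprocessing import image as keras_image
import numpy as np

# Illustrative preprocessing only: load, resize to the model's input size,
# and add a batch dimension so the array has shape (1, 350, 350, 3).
img = keras_image.load_img("uploads/upload.jpg", target_size=(350, 350))
img = keras_image.img_to_array(img)
img = np.expand_dims(img, 0)

pred = model.predict(img)
print(["Demodatic", "Dermatites", "ringworms"][np.argmax(pred)])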