Commit bae7e627 authored by Dinithi Anupama

Merge branch 'IT18116984_WeerasundaraD.A' into 'master'

ANN model changes

See merge request !24
parents d4a279a3 9dd41308
@@ -10,8 +10,9 @@ from sklearn import preprocessing
 def loadModel(model):
     model = tf.keras.models.Sequential()
     model.add(tf.keras.layers.Flatten())
-    model.add(tf.keras.layers.Dense(25, activation=tf.nn.relu))
-    model.add(tf.keras.layers.Dense(25, activation=tf.nn.relu))
+    model.add(tf.keras.layers.Dense(50, activation=tf.nn.relu))
+    model.add(tf.keras.layers.Dense(50, activation=tf.nn.relu))
+    model.add(tf.keras.layers.Dense(50, activation=tf.nn.relu))
     model.add(tf.keras.layers.Dense(7, activation=tf.nn.softmax))
     model.compile(optimizer='adam',
@@ -23,8 +24,9 @@ def loadModel(model):
 def trainModel(model, datasetFilePath):
     model = tf.keras.models.Sequential()
     model.add(tf.keras.layers.Flatten())
-    model.add(tf.keras.layers.Dense(25, activation=tf.nn.relu))
-    model.add(tf.keras.layers.Dense(25, activation=tf.nn.relu))
+    model.add(tf.keras.layers.Dense(50, activation=tf.nn.relu))
+    model.add(tf.keras.layers.Dense(50, activation=tf.nn.relu))
+    model.add(tf.keras.layers.Dense(50, activation=tf.nn.relu))
     model.add(tf.keras.layers.Dense(7, activation=tf.nn.softmax))
     model.compile(optimizer='adam',
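
Review note: both hunks above make the same change -- the two hidden Dense(25) layers become three Dense(50) layers. For reference, a minimal standalone sketch of the updated architecture; buildModel is an illustrative name, and the loss is assumed to be sparse_categorical_crossentropy (the hunks truncate right after optimizer='adam', but the labels are integer-encoded):

import tensorflow as tf

def buildModel(num_classes=7):
    # Same topology as the updated loadModel/trainModel bodies above
    model = tf.keras.models.Sequential()
    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dense(50, activation=tf.nn.relu))
    model.add(tf.keras.layers.Dense(50, activation=tf.nn.relu))
    model.add(tf.keras.layers.Dense(50, activation=tf.nn.relu))
    model.add(tf.keras.layers.Dense(num_classes, activation=tf.nn.softmax))
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',  # assumed; not shown in the diff
                  metrics=['accuracy'])
    return model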
@@ -39,6 +41,7 @@ def trainModel(model, datasetFilePath):
     leBreeds = preprocessing.LabelEncoder()
     leBreeds.fit(model_df['Breeds'])
+    print(leBreeds.classes_)
     model_df['Breeds'] = leBreeds.transform(model_df['Breeds'])
     dataDf = model_df.fillna(0)
     print(model_df.shape)
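
Review note: the added print(leBreeds.classes_) shows the alphabetical class order that LabelEncoder learns during fit. A small round-trip sketch (the sample labels are made up):

from sklearn import preprocessing

le = preprocessing.LabelEncoder()
labels = ['Parvovirus', 'Rabies', 'Tick Fever', 'Rabies']
le.fit(labels)
print(le.classes_)                   # ['Parvovirus' 'Rabies' 'Tick Fever'] -- sorted order
codes = le.transform(labels)         # array([0, 1, 2, 1]) -- integer codes
print(le.inverse_transform(codes))   # recovers the original strings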
@@ -49,7 +52,7 @@ def trainModel(model, datasetFilePath):
     x_train = tf.keras.utils.normalize(x_train, axis=1)
     x_test = tf.keras.utils.normalize(x_test, axis=1)
-    history = model.fit(x_train, y_train, epochs=15, validation_data=(x_test, y_test))
+    history = model.fit(x_train, y_train, epochs=75, validation_data=(x_test, y_test))
     print("History: ", history)
     val_loss, val_acc = model.evaluate(x_test, y_test)
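
Review note: tf.keras.utils.normalize(x, axis=1) rescales each sample row to unit L2 norm before training and evaluation. A minimal check, not from the repository:

import numpy as np
import tensorflow as tf

x = np.array([[3.0, 4.0], [1.0, 0.0]])
xn = tf.keras.utils.normalize(x, axis=1)   # default order=2, i.e. L2 norm per row
print(xn)                                  # [[0.6 0.8] [1.  0. ]]
print(np.linalg.norm(xn, axis=1))          # every row now has norm 1.0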
......
age,symptom1,symptom2,symptom3,symptom4,symptom5,symptom6,symptom7,symptom8,symptom9,symptom10,symptom11,symptom12,symptom13,symptom14,symptom15,symptom16,symptom17,disease
1,1,1,1,1,1,0,0,1,0,0,0,0,0,0,0,0,0,3
1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,3
1,1,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,1,3
1,1,0,1,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0
1,1,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0
1,0,1,1,0,0,0,0,1,0,0,0,0,1,0,0,0,1,0
1,0,1,1,1,1,1,0,0,0,0,0,1,0,0,0,0,0,0
1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0
4,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,0,0
4,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,0,0
4,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,0,0
4,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,0,0
4,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,1
4,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,1
4,1,1,1,1,1,0,1,1,1,0,1,0,0,1,1,1,0,1
4,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,1
6,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,1
6,1,1,1,1,1,1,1,0,0,1,1,1,1,1,0,1,1,1
6,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1
6,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1
6,1,1,1,1,1,1,1,1,1,0,1,1,0,1,1,1,1,2
6,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,2
6,1,1,1,1,1,0,1,1,1,1,1,1,0,0,1,0,1,2
6,0,1,1,1,1,0,0,0,1,0,0,1,1,0,1,0,0,2
6,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,1,2
6,1,1,1,1,1,1,1,1,0,1,0,1,0,1,0,0,1,2
6,1,1,1,1,1,1,0,1,1,1,1,1,0,0,1,1,1,3
1,0,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,0,3
1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,1,0,3
1,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,3
1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,3
1,1,1,0,1,1,0,0,0,0,1,0,0,0,0,0,0,0,0
1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0
1,1,1,1,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0
1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0
1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0
1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
1,1,1,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0
1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0
4,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0
4,0,1,1,1,1,1,1,1,1,0,1,1,1,0,1,1,1,0
4,1,1,0,1,1,1,0,1,1,1,1,1,1,1,1,1,1,0
4,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,0
4,1,1,1,1,1,0,0,1,0,1,1,1,1,1,1,1,0,2
4,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1,2
4,0,0,1,1,1,1,0,1,1,1,1,1,1,0,1,1,1,2
4,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,2
4,1,1,0,1,1,1,1,1,1,1,1,0,1,1,1,1,1,2
4,1,1,1,1,1,1,1,0,1,1,1,1,1,1,0,1,1,2
4,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,2
\ No newline at end of file
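
Review note: this data file has 18 input columns (age plus 17 symptom flags) and an integer disease label in 0..3. A hedged sketch of how it could be split for training (the column split is inferred from the header; the file name matches the read_csv call in the next hunk):

import pandas as pd
from sklearn.model_selection import train_test_split

df = pd.read_csv("data.csv")
X = df.drop(columns=["disease"]).values   # age + symptom1..symptom17 -> 18 features
y = df["disease"].values                  # integer class labels
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
print(x_train.shape, x_test.shape)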
@@ -5,6 +5,7 @@ from sklearn.model_selection import train_test_split
 import matplotlib.pyplot as plt
 from sklearn import preprocessing
+# load the dataset using pandas library
 dataDf = pd.read_csv("data.csv")
 leDiseases = preprocessing.LabelEncoder()
......
@@ -15,8 +15,9 @@ def predict():
     model = tf.keras.models.Sequential()
     model = tf.keras.models.Sequential()
     model.add(tf.keras.layers.Flatten())
-    model.add(tf.keras.layers.Dense(25, activation=tf.nn.relu))
-    model.add(tf.keras.layers.Dense(25, activation=tf.nn.relu))
+    model.add(tf.keras.layers.Dense(50, activation=tf.nn.relu))
+    model.add(tf.keras.layers.Dense(50, activation=tf.nn.relu))
+    model.add(tf.keras.layers.Dense(50, activation=tf.nn.relu))
     model.add(tf.keras.layers.Dense(7, activation=tf.nn.softmax))
     model.compile(optimizer='adam',
@@ -29,4 +30,4 @@ def predict():
     return {"pred": str(numpy.argmax(prediction))}
 if __name__ == '__main__':
-    app.run(debug=True, port=5001)
+    app.run(debug=True, port=5001, host="0.0.0.0")
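
Review note: host="0.0.0.0" binds the Flask server to all network interfaces instead of localhost only, so other machines can reach the API on port 5001. A hypothetical client call (the /predict route and payload shape are assumptions; only the {"pred": ...} response format appears in the diff):

import requests

resp = requests.post("http://<server-ip>:5001/predict",
                     json={"symptoms": [1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0]})
print(resp.json())   # e.g. {"pred": "3"}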
@@ -17,10 +17,17 @@ model.compile(optimizer='adam',
               metrics=['accuracy'])
 model = load_model('model.h5')
-pred = model.predict(numpy.array([[2,0,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0]]))
 def predict(data):
-    return model.predict(numpy.array(data))
+    print(numpy.array(data))
+    pred = model.predict(numpy.array([[2,0,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0]]))
+    print(pred)
+    return pred
+# Test prediction
+print(predict([[1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0]]))
+print(numpy.argmax(predict([[1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0]])))
+print(disease_labels[numpy.argmax(predict([[1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0]]))])
 # input = [2,0,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0]
 # print(len(input))
 # print(predict([[2,0,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0]]))
+# print(numpy.argmax(predict([[1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0]])))
+# print(disease_labels[numpy.argmax(predict([[1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0]]))])
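
Review note: the test calls chain predict -> numpy.argmax -> disease_labels lookup to turn a softmax vector into a disease name. A minimal sketch of that mapping, assuming disease_labels holds the encoder classes in the order printed in the log below:

import numpy

disease_labels = ['Canine Distemper', 'Canine Scabies', 'Otitis Media',
                  'Parvovirus ', 'Rabies', 'Tick Fever',
                  'Urinary tract infections (UTIs)']

probs = numpy.array([[0.01, 0.02, 0.05, 0.80, 0.04, 0.05, 0.03]])  # mock softmax output
idx = numpy.argmax(probs)    # index of the most probable class -> 3
print(disease_labels[idx])   # 'Parvovirus '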
C:\Users\ADMIN\AppData\Local\Programs\Python\Python37\python.exe "C:/Users/ADMIN/Documents/ZacSeed/Dog research/Dog_Research_Python/disease_prediction/chatbot_symptom_disease_model.py"
2021-11-20 15:25:56.385304: W tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'cudart64_100.dll'; dlerror: cudart64_100.dll not found
2021-11-20 15:25:56.385424: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.
Using TensorFlow backend.
['Canine Distemper' 'Canine Scabies' 'Otitis Media' 'Parvovirus ' 'Rabies'
'Tick Fever' 'Urinary tract infections (UTIs)']
(45, 63)
[3 3 3 4 4 4 1 1 2 2 2 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 6 6 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0]
WARNING:tensorflow:From C:\Users\ADMIN\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\ops\resource_variable_ops.py:1630: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.
Instructions for updating:
If using Keras pass *_constraint arguments to layers.
Train on 36 samples, validate on 9 samples
2021-11-20 15:25:58.585461: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library nvcuda.dll
2021-11-20 15:25:58.602934: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1618] Found device 0 with properties:
name: NVIDIA GeForce GTX 1650 major: 7 minor: 5 memoryClockRate(GHz): 1.62
pciBusID: 0000:01:00.0
2021-11-20 15:25:58.603825: W tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'cudart64_100.dll'; dlerror: cudart64_100.dll not found
2021-11-20 15:25:58.604391: W tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'cublas64_100.dll'; dlerror: cublas64_100.dll not found
2021-11-20 15:25:58.604951: W tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'cufft64_100.dll'; dlerror: cufft64_100.dll not found
2021-11-20 15:25:58.605515: W tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'curand64_100.dll'; dlerror: curand64_100.dll not found
2021-11-20 15:25:58.606076: W tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'cusolver64_100.dll'; dlerror: cusolver64_100.dll not found
2021-11-20 15:25:58.606642: W tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'cusparse64_100.dll'; dlerror: cusparse64_100.dll not found
2021-11-20 15:25:58.607203: W tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'cudnn64_7.dll'; dlerror: cudnn64_7.dll not found
2021-11-20 15:25:58.607306: W tensorflow/core/common_runtime/gpu/gpu_device.cc:1641] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.
Skipping registering GPU devices...
2021-11-20 15:25:58.607769: I tensorflow/core/platform/cpu_feature_guard.cc:142] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
2021-11-20 15:25:58.610284: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1159] Device interconnect StreamExecutor with strength 1 edge matrix:
2021-11-20 15:25:58.610376: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1165]
Epoch 1/75
32/36 [=========================>....] - ETA: 0s - loss: 1.9422 - acc: 0.0000e+00
36/36 [==============================] - 0s 2ms/sample - loss: 1.9400 - acc: 0.0000e+00 - val_loss: 1.9128 - val_acc: 0.3333
Epoch 2/75
32/36 [=========================>....] - ETA: 0s - loss: 1.9176 - acc: 0.0938
36/36 [==============================] - 0s 83us/sample - loss: 1.9131 - acc: 0.1667 - val_loss: 1.8905 - val_acc: 0.4444
Epoch 3/75
32/36 [=========================>....] - ETA: 0s - loss: 1.8897 - acc: 0.4062
36/36 [==============================] - 0s 83us/sample - loss: 1.8888 - acc: 0.4167 - val_loss: 1.8687 - val_acc: 0.4444
Epoch 4/75
32/36 [=========================>....] - ETA: 0s - loss: 1.8624 - acc: 0.4375
36/36 [==============================] - 0s 55us/sample - loss: 1.8645 - acc: 0.4167 - val_loss: 1.8469 - val_acc: 0.3333
Epoch 5/75
32/36 [=========================>....] - ETA: 0s - loss: 1.8403 - acc: 0.4375
36/36 [==============================] - 0s 55us/sample - loss: 1.8397 - acc: 0.4167 - val_loss: 1.8247 - val_acc: 0.4444
Epoch 6/75
32/36 [=========================>....] - ETA: 0s - loss: 1.8238 - acc: 0.4688
36/36 [==============================] - 0s 55us/sample - loss: 1.8146 - acc: 0.4722 - val_loss: 1.8022 - val_acc: 0.6667
Epoch 7/75
32/36 [=========================>....] - ETA: 0s - loss: 1.7819 - acc: 0.5938
36/36 [==============================] - 0s 83us/sample - loss: 1.7904 - acc: 0.5833 - val_loss: 1.7804 - val_acc: 0.6667
Epoch 8/75
32/36 [=========================>....] - ETA: 0s - loss: 1.7760 - acc: 0.6875
36/36 [==============================] - 0s 55us/sample - loss: 1.7635 - acc: 0.7222 - val_loss: 1.7585 - val_acc: 0.6667
Epoch 9/75
32/36 [=========================>....] - ETA: 0s - loss: 1.7416 - acc: 0.7188
36/36 [==============================] - 0s 83us/sample - loss: 1.7380 - acc: 0.7222 - val_loss: 1.7347 - val_acc: 0.6667
Epoch 10/75
32/36 [=========================>....] - ETA: 0s - loss: 1.6981 - acc: 0.7500
36/36 [==============================] - 0s 83us/sample - loss: 1.7106 - acc: 0.7222 - val_loss: 1.7103 - val_acc: 0.6667
Epoch 11/75
32/36 [=========================>....] - ETA: 0s - loss: 1.6833 - acc: 0.7188
36/36 [==============================] - 0s 83us/sample - loss: 1.6816 - acc: 0.7222 - val_loss: 1.6854 - val_acc: 0.6667
Epoch 12/75
32/36 [=========================>....] - ETA: 0s - loss: 1.6347 - acc: 0.7500
36/36 [==============================] - 0s 55us/sample - loss: 1.6533 - acc: 0.7222 - val_loss: 1.6599 - val_acc: 0.6667
Epoch 13/75
32/36 [=========================>....] - ETA: 0s - loss: 1.6275 - acc: 0.7188
36/36 [==============================] - 0s 55us/sample - loss: 1.6221 - acc: 0.7222 - val_loss: 1.6335 - val_acc: 0.6667
Epoch 14/75
32/36 [=========================>....] - ETA: 0s - loss: 1.5651 - acc: 0.7500
36/36 [==============================] - 0s 55us/sample - loss: 1.5922 - acc: 0.7222 - val_loss: 1.6060 - val_acc: 0.6667
Epoch 15/75
32/36 [=========================>....] - ETA: 0s - loss: 1.5590 - acc: 0.7188
36/36 [==============================] - 0s 71us/sample - loss: 1.5586 - acc: 0.7222 - val_loss: 1.5779 - val_acc: 0.6667
Epoch 16/75
32/36 [=========================>....] - ETA: 0s - loss: 1.5540 - acc: 0.6875
36/36 [==============================] - 0s 55us/sample - loss: 1.5243 - acc: 0.7222 - val_loss: 1.5484 - val_acc: 0.6667
Epoch 17/75
32/36 [=========================>....] - ETA: 0s - loss: 1.5239 - acc: 0.6875
36/36 [==============================] - 0s 55us/sample - loss: 1.4885 - acc: 0.7222 - val_loss: 1.5178 - val_acc: 0.6667
Epoch 18/75
32/36 [=========================>....] - ETA: 0s - loss: 1.4842 - acc: 0.6875
36/36 [==============================] - 0s 55us/sample - loss: 1.4504 - acc: 0.7222 - val_loss: 1.4870 - val_acc: 0.6667
Epoch 19/75
32/36 [=========================>....] - ETA: 0s - loss: 1.3984 - acc: 0.7500
36/36 [==============================] - 0s 55us/sample - loss: 1.4131 - acc: 0.7222 - val_loss: 1.4566 - val_acc: 0.6667
Epoch 20/75
32/36 [=========================>....] - ETA: 0s - loss: 1.3784 - acc: 0.7188
36/36 [==============================] - 0s 55us/sample - loss: 1.3728 - acc: 0.7222 - val_loss: 1.4268 - val_acc: 0.6667
Epoch 21/75
32/36 [=========================>....] - ETA: 0s - loss: 1.3809 - acc: 0.6875
36/36 [==============================] - 0s 83us/sample - loss: 1.3323 - acc: 0.7222 - val_loss: 1.3968 - val_acc: 0.6667
Epoch 22/75
32/36 [=========================>....] - ETA: 0s - loss: 1.2694 - acc: 0.7500
36/36 [==============================] - 0s 83us/sample - loss: 1.2964 - acc: 0.7222 - val_loss: 1.3683 - val_acc: 0.6667
Epoch 23/75
32/36 [=========================>....] - ETA: 0s - loss: 1.2496 - acc: 0.7188
36/36 [==============================] - 0s 83us/sample - loss: 1.2586 - acc: 0.7222 - val_loss: 1.3421 - val_acc: 0.6667
Epoch 24/75
32/36 [=========================>....] - ETA: 0s - loss: 1.2910 - acc: 0.6875
36/36 [==============================] - 0s 83us/sample - loss: 1.2221 - acc: 0.7222 - val_loss: 1.3195 - val_acc: 0.6667
Epoch 25/75
32/36 [=========================>....] - ETA: 0s - loss: 1.2553 - acc: 0.6875
36/36 [==============================] - 0s 55us/sample - loss: 1.1896 - acc: 0.7222 - val_loss: 1.3019 - val_acc: 0.6667
Epoch 26/75
32/36 [=========================>....] - ETA: 0s - loss: 1.1210 - acc: 0.7500
36/36 [==============================] - 0s 55us/sample - loss: 1.1639 - acc: 0.7222 - val_loss: 1.2850 - val_acc: 0.6667
Epoch 27/75
32/36 [=========================>....] - ETA: 0s - loss: 1.1981 - acc: 0.6875
36/36 [==============================] - 0s 55us/sample - loss: 1.1352 - acc: 0.7222 - val_loss: 1.2641 - val_acc: 0.6667
Epoch 28/75
32/36 [=========================>....] - ETA: 0s - loss: 1.0485 - acc: 0.7500
36/36 [==============================] - 0s 55us/sample - loss: 1.1075 - acc: 0.7222 - val_loss: 1.2369 - val_acc: 0.6667
Epoch 29/75
32/36 [=========================>....] - ETA: 0s - loss: 1.1003 - acc: 0.7188
36/36 [==============================] - 0s 83us/sample - loss: 1.0764 - acc: 0.7222 - val_loss: 1.2057 - val_acc: 0.6667
Epoch 30/75
32/36 [=========================>....] - ETA: 0s - loss: 0.9946 - acc: 0.7500
36/36 [==============================] - 0s 55us/sample - loss: 1.0449 - acc: 0.7222 - val_loss: 1.1749 - val_acc: 0.6667
Epoch 31/75
32/36 [=========================>....] - ETA: 0s - loss: 0.9979 - acc: 0.7188
36/36 [==============================] - 0s 55us/sample - loss: 1.0155 - acc: 0.7222 - val_loss: 1.1434 - val_acc: 0.6667
Epoch 32/75
32/36 [=========================>....] - ETA: 0s - loss: 0.8889 - acc: 0.7812
36/36 [==============================] - 0s 83us/sample - loss: 0.9864 - acc: 0.7222 - val_loss: 1.1123 - val_acc: 0.6667
Epoch 33/75
32/36 [=========================>....] - ETA: 0s - loss: 1.0270 - acc: 0.6875
36/36 [==============================] - 0s 83us/sample - loss: 0.9572 - acc: 0.7222 - val_loss: 1.0830 - val_acc: 0.6667
Epoch 34/75
32/36 [=========================>....] - ETA: 0s - loss: 1.0114 - acc: 0.6875
36/36 [==============================] - 0s 55us/sample - loss: 0.9287 - acc: 0.7222 - val_loss: 1.0569 - val_acc: 0.6667
Epoch 35/75
32/36 [=========================>....] - ETA: 0s - loss: 0.9143 - acc: 0.7188
36/36 [==============================] - 0s 55us/sample - loss: 0.8998 - acc: 0.7222 - val_loss: 1.0338 - val_acc: 0.6667
Epoch 36/75
32/36 [=========================>....] - ETA: 0s - loss: 0.9487 - acc: 0.6875
36/36 [==============================] - 0s 55us/sample - loss: 0.8722 - acc: 0.7222 - val_loss: 1.0141 - val_acc: 0.6667
Epoch 37/75
32/36 [=========================>....] - ETA: 0s - loss: 0.8086 - acc: 0.7500
36/36 [==============================] - 0s 83us/sample - loss: 0.8473 - acc: 0.7222 - val_loss: 0.9957 - val_acc: 0.6667
Epoch 38/75
32/36 [=========================>....] - ETA: 0s - loss: 0.6952 - acc: 0.7812
36/36 [==============================] - 0s 83us/sample - loss: 0.8228 - acc: 0.7222 - val_loss: 0.9737 - val_acc: 0.6667
Epoch 39/75
32/36 [=========================>....] - ETA: 0s - loss: 0.8634 - acc: 0.6875
36/36 [==============================] - 0s 55us/sample - loss: 0.7991 - acc: 0.7222 - val_loss: 0.9491 - val_acc: 0.6667
Epoch 40/75
32/36 [=========================>....] - ETA: 0s - loss: 0.7329 - acc: 0.7500
36/36 [==============================] - 0s 55us/sample - loss: 0.7751 - acc: 0.7222 - val_loss: 0.9241 - val_acc: 0.6667
Epoch 41/75
32/36 [=========================>....] - ETA: 0s - loss: 0.7457 - acc: 0.7188
36/36 [==============================] - 0s 55us/sample - loss: 0.7519 - acc: 0.7222 - val_loss: 0.8994 - val_acc: 0.6667
Epoch 42/75
32/36 [=========================>....] - ETA: 0s - loss: 0.7433 - acc: 0.7188
36/36 [==============================] - 0s 83us/sample - loss: 0.7307 - acc: 0.7222 - val_loss: 0.8741 - val_acc: 0.6667
Epoch 43/75
32/36 [=========================>....] - ETA: 0s - loss: 0.7237 - acc: 0.7188
36/36 [==============================] - 0s 55us/sample - loss: 0.7093 - acc: 0.7222 - val_loss: 0.8497 - val_acc: 0.6667
Epoch 44/75
32/36 [=========================>....] - ETA: 0s - loss: 0.6471 - acc: 0.7500
36/36 [==============================] - 0s 55us/sample - loss: 0.6889 - acc: 0.7222 - val_loss: 0.8265 - val_acc: 0.6667
Epoch 45/75
32/36 [=========================>....] - ETA: 0s - loss: 0.6889 - acc: 0.7188
36/36 [==============================] - 0s 55us/sample - loss: 0.6705 - acc: 0.7222 - val_loss: 0.8048 - val_acc: 0.6667
Epoch 46/75
32/36 [=========================>....] - ETA: 0s - loss: 0.5071 - acc: 0.8125
36/36 [==============================] - 0s 55us/sample - loss: 0.6525 - acc: 0.7222 - val_loss: 0.7843 - val_acc: 0.6667
Epoch 47/75
32/36 [=========================>....] - ETA: 0s - loss: 0.7023 - acc: 0.6875
36/36 [==============================] - 0s 55us/sample - loss: 0.6375 - acc: 0.7222 - val_loss: 0.7650 - val_acc: 0.6667
Epoch 48/75
32/36 [=========================>....] - ETA: 0s - loss: 0.6090 - acc: 0.7188
36/36 [==============================] - 0s 55us/sample - loss: 0.6213 - acc: 0.7222 - val_loss: 0.7468 - val_acc: 0.6667
Epoch 49/75
32/36 [=========================>....] - ETA: 0s - loss: 0.5659 - acc: 0.7500
36/36 [==============================] - 0s 55us/sample - loss: 0.6058 - acc: 0.7222 - val_loss: 0.7303 - val_acc: 0.6667
Epoch 50/75
32/36 [=========================>....] - ETA: 0s - loss: 0.5820 - acc: 0.7500
36/36 [==============================] - 0s 83us/sample - loss: 0.5912 - acc: 0.7500 - val_loss: 0.7144 - val_acc: 0.6667
Epoch 51/75
32/36 [=========================>....] - ETA: 0s - loss: 0.5685 - acc: 0.7812
36/36 [==============================] - 0s 105us/sample - loss: 0.5771 - acc: 0.7778 - val_loss: 0.6978 - val_acc: 0.6667
Epoch 52/75
32/36 [=========================>....] - ETA: 0s - loss: 0.6221 - acc: 0.7500
36/36 [==============================] - 0s 83us/sample - loss: 0.5644 - acc: 0.7778 - val_loss: 0.6805 - val_acc: 0.6667
Epoch 53/75
32/36 [=========================>....] - ETA: 0s - loss: 0.5060 - acc: 0.8125
36/36 [==============================] - 0s 83us/sample - loss: 0.5513 - acc: 0.7778 - val_loss: 0.6628 - val_acc: 0.6667
Epoch 54/75
32/36 [=========================>....] - ETA: 0s - loss: 0.4971 - acc: 0.8125
36/36 [==============================] - 0s 55us/sample - loss: 0.5380 - acc: 0.8056 - val_loss: 0.6440 - val_acc: 0.7778
Epoch 55/75
32/36 [=========================>....] - ETA: 0s - loss: 0.4760 - acc: 0.8438
36/36 [==============================] - 0s 55us/sample - loss: 0.5249 - acc: 0.8333 - val_loss: 0.6259 - val_acc: 0.7778
Epoch 56/75
32/36 [=========================>....] - ETA: 0s - loss: 0.4773 - acc: 0.8438
36/36 [==============================] - 0s 55us/sample - loss: 0.5132 - acc: 0.8333 - val_loss: 0.6086 - val_acc: 0.7778
Epoch 57/75
32/36 [=========================>....] - ETA: 0s - loss: 0.5157 - acc: 0.8125
36/36 [==============================] - 0s 55us/sample - loss: 0.5023 - acc: 0.8333 - val_loss: 0.5929 - val_acc: 0.7778
Epoch 58/75
32/36 [=========================>....] - ETA: 0s - loss: 0.4879 - acc: 0.8438
36/36 [==============================] - 0s 55us/sample - loss: 0.4935 - acc: 0.8333 - val_loss: 0.5775 - val_acc: 0.7778
Epoch 59/75
32/36 [=========================>....] - ETA: 0s - loss: 0.4839 - acc: 0.8438
36/36 [==============================] - 0s 83us/sample - loss: 0.4840 - acc: 0.8333 - val_loss: 0.5631 - val_acc: 0.7778
Epoch 60/75
32/36 [=========================>....] - ETA: 0s - loss: 0.5272 - acc: 0.8125
36/36 [==============================] - 0s 55us/sample - loss: 0.4744 - acc: 0.8333 - val_loss: 0.5500 - val_acc: 0.7778
Epoch 61/75
32/36 [=========================>....] - ETA: 0s - loss: 0.4182 - acc: 0.8750
36/36 [==============================] - 0s 55us/sample - loss: 0.4650 - acc: 0.8333 - val_loss: 0.5357 - val_acc: 0.7778
Epoch 62/75
32/36 [=========================>....] - ETA: 0s - loss: 0.4211 - acc: 0.8438
36/36 [==============================] - 0s 55us/sample - loss: 0.4550 - acc: 0.8333 - val_loss: 0.5201 - val_acc: 0.7778
Epoch 63/75
32/36 [=========================>....] - ETA: 0s - loss: 0.4703 - acc: 0.8125
36/36 [==============================] - 0s 83us/sample - loss: 0.4456 - acc: 0.8333 - val_loss: 0.5052 - val_acc: 0.7778
Epoch 64/75
32/36 [=========================>....] - ETA: 0s - loss: 0.4567 - acc: 0.8125
36/36 [==============================] - 0s 55us/sample - loss: 0.4359 - acc: 0.8333 - val_loss: 0.4904 - val_acc: 0.7778
Epoch 65/75
32/36 [=========================>....] - ETA: 0s - loss: 0.3825 - acc: 0.8750
36/36 [==============================] - 0s 55us/sample - loss: 0.4274 - acc: 0.8333 - val_loss: 0.4753 - val_acc: 0.7778
Epoch 66/75
32/36 [=========================>....] - ETA: 0s - loss: 0.4653 - acc: 0.8125
36/36 [==============================] - 0s 83us/sample - loss: 0.4185 - acc: 0.8333 - val_loss: 0.4611 - val_acc: 0.7778
Epoch 67/75
32/36 [=========================>....] - ETA: 0s - loss: 0.4287 - acc: 0.8125
36/36 [==============================] - 0s 83us/sample - loss: 0.4088 - acc: 0.8333 - val_loss: 0.4481 - val_acc: 0.7778
Epoch 68/75
32/36 [=========================>....] - ETA: 0s - loss: 0.4087 - acc: 0.8438
36/36 [==============================] - 0s 83us/sample - loss: 0.4005 - acc: 0.8333 - val_loss: 0.4348 - val_acc: 0.7778
Epoch 69/75
32/36 [=========================>....] - ETA: 0s - loss: 0.4184 - acc: 0.8125
36/36 [==============================] - 0s 55us/sample - loss: 0.3922 - acc: 0.8333 - val_loss: 0.4213 - val_acc: 0.7778
Epoch 70/75
32/36 [=========================>....] - ETA: 0s - loss: 0.3783 - acc: 0.8438
36/36 [==============================] - 0s 55us/sample - loss: 0.3842 - acc: 0.8333 - val_loss: 0.4088 - val_acc: 0.8889
Epoch 71/75
32/36 [=========================>....] - ETA: 0s - loss: 0.3852 - acc: 0.9062
36/36 [==============================] - 0s 83us/sample - loss: 0.3754 - acc: 0.9167 - val_loss: 0.3968 - val_acc: 1.0000
Epoch 72/75
32/36 [=========================>....] - ETA: 0s - loss: 0.3411 - acc: 0.9688
36/36 [==============================] - 0s 83us/sample - loss: 0.3670 - acc: 0.9444 - val_loss: 0.3850 - val_acc: 1.0000
Epoch 73/75
32/36 [=========================>....] - ETA: 0s - loss: 0.4010 - acc: 0.9375
36/36 [==============================] - 0s 83us/sample - loss: 0.3588 - acc: 0.9444 - val_loss: 0.3747 - val_acc: 1.0000
Epoch 74/75
32/36 [=========================>....] - ETA: 0s - loss: 0.3129 - acc: 0.9375
36/36 [==============================] - 0s 83us/sample - loss: 0.3501 - acc: 0.9444 - val_loss: 0.3627 - val_acc: 1.0000
Epoch 75/75
32/36 [=========================>....] - ETA: 0s - loss: 0.2794 - acc: 0.9688
36/36 [==============================] - 0s 83us/sample - loss: 0.3415 - acc: 0.9444 - val_loss: 0.3494 - val_acc: 1.0000
History: <tensorflow.python.keras.callbacks.History object at 0x000002D5EEB33A58>
9/9 [==============================] - 0s 111us/sample - loss: 0.3494 - acc: 1.0000
0.3493695855140686
1.0
{'loss': [1.9400481118096247, 1.9131031301286485, 1.8887701034545898, 1.8645429876115587, 1.8397144211663141, 1.8146146138509114, 1.790362040201823, 1.7635038163926866, 1.738019519382053, 1.7106041378445096, 1.6815780798594158, 1.6533486578199599, 1.6220542324913874, 1.5921735631095038, 1.5585787031385634, 1.5243132379319932, 1.4885009394751654, 1.4503818882836237, 1.4130876064300537, 1.3727755811479356, 1.33229351705975, 1.2964043484793768, 1.2586202489005194, 1.222148597240448, 1.1895508302582636, 1.163921382692125, 1.135191798210144, 1.1074706978268094, 1.0763954520225525, 1.0448715951707628, 1.0154923730426364, 0.986448155509101, 0.9571943779786428, 0.928672366672092, 0.8998068372408549, 0.8722059064441257, 0.8472640779283311, 0.8227598534690009, 0.7991117570135329, 0.775075011783176, 0.7518985867500305, 0.7306937575340271, 0.7093305852678087, 0.6888882186677721, 0.6705233918295966, 0.6524957815806071, 0.6375229706366857, 0.621346480316586, 0.605832384692298, 0.5912250876426697, 0.5771131051911248, 0.5643890690472391, 0.5513267318407694, 0.5379634963141547, 0.5248583224084642, 0.5132274958822463, 0.5022740827666389, 0.49347909953859115, 0.48397721846898395, 0.4744487019876639, 0.46504460440741646, 0.45501554012298584, 0.445580811964141, 0.43587634298536515, 0.4274296164512634, 0.41850636940863395, 0.40884512000613743, 0.4005137516392602, 0.3922265138891008, 0.384219765663147, 0.37538956602414447, 0.3669780691464742, 0.3588370742897193, 0.3501388033231099, 0.34150120947096085], 'acc': [0.0, 0.16666667, 0.41666666, 0.41666666, 0.41666666, 0.4722222, 0.5833333, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.7222222, 0.75, 0.7777778, 0.7777778, 0.7777778, 0.8055556, 0.8333333, 0.8333333, 0.8333333, 0.8333333, 0.8333333, 0.8333333, 0.8333333, 0.8333333, 0.8333333, 0.8333333, 0.8333333, 0.8333333, 0.8333333, 0.8333333, 0.8333333, 0.8333333, 0.9166667, 0.9444444, 0.9444444, 0.9444444, 0.9444444], 'val_loss': [1.91283118724823, 1.8905224800109863, 1.868672251701355, 1.8469104766845703, 1.8246854543685913, 1.8022104501724243, 1.7804056406021118, 1.7584726810455322, 1.7346959114074707, 1.7102504968643188, 1.68538236618042, 1.6598676443099976, 1.6335015296936035, 1.605992078781128, 1.5779340267181396, 1.5484000444412231, 1.51779043674469, 1.4870189428329468, 1.4566420316696167, 1.4267817735671997, 1.39675772190094, 1.368319034576416, 1.3421012163162231, 1.319460391998291, 1.301853895187378, 1.2850168943405151, 1.2641124725341797, 1.2368507385253906, 1.205660104751587, 1.1749310493469238, 1.1434487104415894, 1.1123303174972534, 1.083031177520752, 1.0569008588790894, 1.0338445901870728, 1.0140511989593506, 0.9956666231155396, 0.9736596345901489, 0.9490630626678467, 0.9240715503692627, 0.8993575572967529, 0.8741347193717957, 0.849692165851593, 0.8264799118041992, 0.8048388957977295, 0.7843072414398193, 0.7650270462036133, 0.7468454241752625, 0.7303212285041809, 0.7144486308097839, 0.6977736949920654, 0.6805406212806702, 0.6628434658050537, 0.6440338492393494, 0.6258814334869385, 0.6086478233337402, 0.5928943753242493, 0.5775359272956848, 0.563126802444458, 0.5500351786613464, 0.5357029438018799, 
0.5200693607330322, 0.5052092671394348, 0.49035966396331787, 0.47525227069854736, 0.46112674474716187, 0.4481382966041565, 0.43482768535614014, 0.4212915003299713, 0.4088452160358429, 0.3967774212360382, 0.3850354552268982, 0.37469351291656494, 0.3627440631389618, 0.3493695855140686], 'val_acc': [0.33333334, 0.44444445, 0.44444445, 0.33333334, 0.44444445, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.6666667, 0.7777778, 0.7777778, 0.7777778, 0.7777778, 0.7777778, 0.7777778, 0.7777778, 0.7777778, 0.7777778, 0.7777778, 0.7777778, 0.7777778, 0.7777778, 0.7777778, 0.7777778, 0.7777778, 0.8888889, 1.0, 1.0, 1.0, 1.0, 1.0]}
[0 4 5 5 0 2 0 5 3]
[0 4 5 5 0 2 0 5 3]
===================
Confusion matrix
===================
[[3 0 0 0 0]
[0 1 0 0 0]
[0 0 1 0 0]
[0 0 0 1 0]
[0 0 0 0 3]]
Process finished with exit code 0
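
Review note: the confusion matrix at the end of the log is not produced by any code in this diff. A hedged sketch of how it could be computed from the validation predictions (sklearn.metrics.confusion_matrix is an assumption about the tooling, but its output matches the square layout above):

import numpy
from sklearn.metrics import confusion_matrix

y_true = numpy.array([0, 4, 5, 5, 0, 2, 0, 5, 3])   # first array printed above
y_pred = numpy.array([0, 4, 5, 5, 0, 2, 0, 5, 3])   # argmax of model.predict(x_test)
print(confusion_matrix(y_true, y_pred))
# rows/columns follow the sorted labels present in the data: [0, 2, 3, 4, 5],
# which is why a 7-class model yields a 5x5 matrix here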