Commit 62c18b00 authored by M.A.S. Mudunkotuwa's avatar M.A.S. Mudunkotuwa

Update image_classifier.py, Static output.txt, image_classifier.ipynb,...

Update image_classifier.py, Static output.txt, image_classifier.ipynb, PredictionTest.py, counter.py, image_augmentor.py files
parent 10ce9ac5
# PredictionTest.py -- rebuild the trained network, load the saved weights and classify one image
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
from keras.models import load_model
import keras, tensorflow
import numpy as np

nb_train_samples = 112
nb_validation_samples = 20
epochs = 10
batch_size = 2
img_width, img_height = 350, 350

if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)

# Same architecture as image_classifier.py: three Conv/ReLU/MaxPool blocks,
# then a dense head with a 4-way softmax.
model = Sequential()
model.add(Conv2D(32, (2, 2), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(32, (2, 2)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(64, (2, 2)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(4))
model.add(Activation('softmax'))

model.compile(loss='sparse_categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.load_weights('model_saved.h5')

# Read and preprocess a single test image (TensorFlow 1.x session-style API).
sess = keras.backend.get_session()
img = tensorflow.read_file('v_data/train/Adaraya/adaraya_2.jpg')
img = tensorflow.image.decode_jpeg(img, channels=3)
img.set_shape([None, None, 3])
img = tensorflow.image.resize_images(img, (350, 350))
img = img.eval(session=sess)  # convert to a numpy array
img = img / 255.0             # match the 1./255 rescaling applied by the training generators
img = np.expand_dims(img, 0)  # make a 'batch' of 1

pred = model.predict(img)
# pred = labels["label_names"][np.argmax(pred)]
print(pred)
print(len(pred))
print(np.argmax(pred))
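The commented-out label lookup above needs an index-to-name mapping. A minimal sketch of one way to recover it, assuming the class subfolders under v_data/train are the ones the model was trained on (flow_from_directory assigns label indices to subfolder names in sorted order); class_names and predicted_label are illustrative names, not part of the committed code:

# Hypothetical label lookup, shown here only as an illustration.
import os

class_names = sorted(
    d for d in os.listdir('v_data/train')
    if os.path.isdir(os.path.join('v_data/train', d)))  # e.g. ['Adaraya', ...]
predicted_label = class_names[int(np.argmax(pred))]
print(predicted_label)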
Static output.txt: this diff is collapsed (contents not shown).
from multiprocessing.dummy import Lock


class Counter:
    def __init__(self):
        self.lock = Lock()
        self._processed = 0
        self._error = 0
        self._skipped_no_match = 0
        self._skipped_augmented = 0

    def processed(self):
        with self.lock:
            self._processed += 1

    def error(self):
        with self.lock:
            self._error += 1

    def skipped_no_match(self):
        with self.lock:
            self._skipped_no_match += 1

    def skipped_augmented(self):
        with self.lock:
            self._skipped_augmented += 1

    def get(self):
        with self.lock:
            return {'processed': self._processed,
                    'error': self._error,
                    'skipped_no_match': self._skipped_no_match,
                    'skipped_augmented': self._skipped_augmented}
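For context, a short usage sketch for the Counter class in counter.py, mirroring how image_augmentor.py drives it from a multiprocessing.dummy pool; fake_work and the numbers below are illustrative only, not part of the commit:

from multiprocessing.dummy import Pool
from counter import Counter

counter = Counter()

def fake_work(i):
    # Pretend every third item has a non-matching extension.
    if i % 3 == 0:
        counter.skipped_no_match()
    else:
        counter.processed()

pool = Pool(4)
pool.map(fake_work, range(30))
pool.close()
pool.join()
print(counter.get())  # {'processed': 20, 'error': 0, 'skipped_no_match': 10, 'skipped_augmented': 0}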
# image_augmentor.py -- apply the requested augmentation operations to every image under a directory
import sys, os, re, traceback
from os.path import isfile
from multiprocessing.dummy import Pool

from counter import Counter
from ops.rotate import Rotate
from ops.fliph import FlipH
from ops.flipv import FlipV
from ops.zoom import Zoom
from ops.blur import Blur
from ops.noise import Noise
from ops.translate import Translate

from skimage.io import imread, imsave

EXTENSIONS = ['png', 'jpg', 'jpeg', 'bmp']
WORKER_COUNT = max(os.cpu_count() - 1, 1)
OPERATIONS = [Rotate, FlipH, FlipV, Translate, Noise, Zoom, Blur]

'''
Augmented files will have names matching the regex below, e.g.
original__rot90__crop1__flipv.jpg
'''
AUGMENTED_FILE_REGEX = re.compile('^.*(__.+)+\\.[^\\.]+$')
EXTENSION_REGEX = re.compile('|'.join(['.*\\.' + n + '$' for n in EXTENSIONS]), re.IGNORECASE)

thread_pool = None
counter = None


def build_augmented_file_name(original_name, ops):
    root, ext = os.path.splitext(original_name)
    result = root
    for op in ops:
        result += '__' + op.code
    return result + ext


def work(d, f, op_lists):
    try:
        in_path = os.path.join(d, f)
        for op_list in op_lists:
            out_file_name = build_augmented_file_name(f, op_list)
            if isfile(os.path.join(d, out_file_name)):
                continue

            img = imread(in_path)
            for op in op_list:
                img = op.process(img)
            imsave(os.path.join(d, out_file_name), img)

        counter.processed()
    except:
        traceback.print_exc(file=sys.stdout)


def process(dir, file, op_lists):
    thread_pool.apply_async(work, (dir, file, op_lists))


if __name__ == '__main__':
    if len(sys.argv) < 3:
        print('Usage: {} <image directory> <operation> (<operation> ...)'.format(sys.argv[0]))
        sys.exit(1)

    image_dir = sys.argv[1]
    if not os.path.isdir(image_dir):
        print('Invalid image directory: {}'.format(image_dir))
        sys.exit(2)

    op_codes = sys.argv[2:]
    op_lists = []
    for op_code_list in op_codes:
        op_list = []
        for op_code in op_code_list.split(','):
            op = None
            for op in OPERATIONS:
                op = op.match_code(op_code)
                if op:
                    op_list.append(op)
                    break

            if not op:
                print('Unknown operation {}'.format(op_code))
                sys.exit(3)
        op_lists.append(op_list)

    counter = Counter()
    thread_pool = Pool(WORKER_COUNT)
    print('Thread pool initialised with {} worker{}'.format(WORKER_COUNT, '' if WORKER_COUNT == 1 else 's'))

    matches = []
    for dir_info in os.walk(image_dir):
        dir_name, _, file_names = dir_info
        print('Processing {}...'.format(dir_name))

        for file_name in file_names:
            if EXTENSION_REGEX.match(file_name):
                if AUGMENTED_FILE_REGEX.match(file_name):
                    counter.skipped_augmented()
                else:
                    process(dir_name, file_name, op_lists)
            else:
                counter.skipped_no_match()

    print("Waiting for workers to complete...")
    thread_pool.close()
    thread_pool.join()
    print(counter.get())
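The augmentor is driven from the command line: each argument after the image directory is one operation list, and comma-joined codes within an argument are applied in sequence to the same image. An illustrative invocation via subprocess follows; the exact op codes are inferred from the ops imports and the file-name example above (fliph, flipv, rot90) and should be treated as assumptions:

# Illustrative only -- assumes image_augmentor.py and its ops package are present.
import subprocess

subprocess.run(
    ['python', 'image_augmentor.py', 'v_data/train',
     'fliph',          # one op list: horizontal flip
     'flipv',          # one op list: vertical flip
     'fliph,rot90'],   # one op list: flip, then rotate 90 degrees
    check=True)
# Produces files such as adaraya_2__fliph__rot90.jpg next to the originals.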
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"collapsed": true
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Using TensorFlow backend.\n"
]
}
],
"source": [
"# Importing all necessary libraries \n",
"from keras.preprocessing.image import ImageDataGenerator \n",
"from keras.models import Sequential \n",
"from keras.layers import Conv2D, MaxPooling2D \n",
"from keras.layers import Activation, Dropout, Flatten, Dense \n",
"from keras import backend as K \n",
" \n",
"img_width, img_height = 224, 224"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"# hyper params\n",
"train_data_dir = 'v_data/train'\n",
"validation_data_dir = 'v_data/test'\n",
"nb_train_samples =400 \n",
"nb_validation_samples = 100\n",
"epochs = 10\n",
"batch_size = 16"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"if K.image_data_format() == 'channels_first': \n",
" input_shape = (3, img_width, img_height) \n",
"else: \n",
" input_shape = (img_width, img_height, 3) "
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"model = Sequential() \n",
"model.add(Conv2D(32, (2, 2), input_shape=input_shape)) \n",
"model.add(Activation('relu')) \n",
"model.add(MaxPooling2D(pool_size=(2, 2))) \n",
" \n",
"model.add(Conv2D(32, (2, 2))) \n",
"model.add(Activation('relu')) \n",
"model.add(MaxPooling2D(pool_size=(2, 2))) \n",
" \n",
"model.add(Conv2D(64, (2, 2))) \n",
"model.add(Activation('relu')) \n",
"model.add(MaxPooling2D(pool_size=(2, 2))) \n",
" \n",
"model.add(Flatten()) \n",
"model.add(Dense(64)) \n",
"model.add(Activation('relu')) \n",
"model.add(Dropout(0.5)) \n",
"model.add(Dense(1)) \n",
"model.add(Activation('sigmoid')) "
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"model.compile(loss='binary_crossentropy', \n",
" optimizer='rmsprop', \n",
" metrics=['accuracy']) "
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"train_datagen = ImageDataGenerator( \n",
" rescale=1. / 255, \n",
" shear_range=0.2, \n",
" zoom_range=0.2, \n",
" horizontal_flip=True) \n",
" \n",
"test_datagen = ImageDataGenerator(rescale=1. / 255) \n",
" \n",
"train_generator = train_datagen.flow_from_directory( \n",
" train_data_dir, \n",
" target_size=(img_width, img_height), \n",
" batch_size=batch_size, \n",
" class_mode='binary') \n",
" \n",
"validation_generator = test_datagen.flow_from_directory( \n",
" validation_data_dir, \n",
" target_size=(img_width, img_height), \n",
" batch_size=batch_size, \n",
" class_mode='binary') \n",
" \n",
"model.fit_generator( \n",
" train_generator, \n",
" steps_per_epoch=nb_train_samples // batch_size, \n",
" epochs=epochs, \n",
" validation_data=validation_generator, \n",
" validation_steps=nb_validation_samples // batch_size) "
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"model.save_weights('model_saved.h5')"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 2",
"language": "python",
"name": "python2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
# image_classifier.py -- build the CNN, train it on the directory generators and save the weights
import keras, tensorflow
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K

img_width, img_height = 350, 350
train_data_dir = 'v_data/train'
validation_data_dir = 'v_data/test'
nb_train_samples = 448
nb_validation_samples = 20
epochs = 20
batch_size = 20

if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)

model = Sequential()
model.add(Conv2D(32, (2, 2), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(32, (2, 2)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(64, (2, 2)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(4))
model.add(Activation('softmax'))

model.compile(loss='sparse_categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# Data generators: rescale to [0, 1] and augment the training images.
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='sparse')

validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='sparse')

model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size)

model.save_weights('model_saved.h5')
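As a follow-up to training, the saved weights can be sanity-checked against the validation split. A minimal sketch using the same old-Keras generator API as the script above (assumes the script has just been run, so model, validation_generator and model_saved.h5 are all in scope):

# Illustrative check, not part of the commit.
model.load_weights('model_saved.h5')
val_loss, val_acc = model.evaluate_generator(
    validation_generator,
    steps=nb_validation_samples // batch_size)
print('validation loss: {:.4f}, accuracy: {:.4f}'.format(val_loss, val_acc))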