Commit 7a68846a authored by Nilan Meegoda's avatar Nilan Meegoda

Merge branch 'IT19170480' into 'master'

It19170480

See merge request !8
parents 901e4ac6 faed5136
import cv2
import numpy as np
from keras.models import load_model
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import matplotlib.pyplot as plt
# model = load_model('model_handDrawnrnSinhalaLettersRecognition-24-05-22.h5')
model = load_model('model_handDrawnrnShapeRecognition-25-05-22.h5')
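# Optional sanity check (not in the original script): print the input and output
# shapes of the loaded model; the (28, 28, 1) input assumed later in this file
# can be confirmed this way.
print(model.input_shape, model.output_shape)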
# image_path
img_path = "activity3.png"
# read image
img_raw = cv2.imread(img_path)
# let the user select a region of interest (ROI) interactively
roi = cv2.selectROI(img_raw)
# print the (x, y, w, h) rectangle of the selected ROI
print(roi)
# Crop selected roi from raw image
roi_cropped = img_raw[int(roi[1]):int(roi[1] + roi[3]), int(roi[0]):int(roi[0] + roi[2])]
# show cropped image
# cv2.imshow("ROI", roi_cropped)
cv2.imwrite("crop.jpeg", roi_cropped)
# hold window
# cv2.waitKey(0)
# read the cropped image back from disk
img = cv2.imread('crop.jpeg')
h, w, channels = img.shape
half = w//2
# left half: [:, :half] takes all the rows and the columns up to index half
left_part = img[:, :half]
# right half: [:, half:] takes all the rows and the columns from index half to the end
right_part = img[:, half:]
# cv2.imshow is used for displaying the image
# cv2.imshow('Left part', left_part)
# cv2.imshow('Right part', right_part)
# horizontal division: split into top and bottom halves
half2 = h // 2
top = img[:half2, :]
bottom = img[half2:, :]
# cv2.imshow('Top', top)
# cv2.imshow('Bottom', bottom)
# save the images to disk with cv2.imwrite()
# cv2.imwrite('top.jpg', top)
# cv2.imwrite('bottom.jpg', bottom)
cv2.imwrite('right.jpg', right_part)
cv2.imwrite('left.jpg', left_part)
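# A minimal sketch (not part of the original script): the same left/right and
# top/bottom slicing wrapped in a helper. The name split_halves and the return
# order are assumptions for illustration; nothing below depends on it.
def split_halves(image):
    h, w = image.shape[:2]
    left, right = image[:, :w // 2], image[:, w // 2:]
    top, bottom = image[:h // 2, :], image[h // 2:, :]
    return left, right, top, bottom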
def predict_digit():
    imgArray = ["right.jpg", "left.jpg"]
    word_dict = {0: 'circle', 1: 'square', 2: 'star', 3: 'triangle'}
    for image in imgArray:
        # read each half as grayscale so it matches the single-channel model input
        img = cv2.imread(image, cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (400, 440))
        # blur before thresholding to suppress noise
        img = cv2.GaussianBlur(img, (7, 7), 0)
        _, img_thresh = cv2.threshold(img, 100, 255, cv2.THRESH_BINARY_INV)
        img_final = cv2.resize(img_thresh, (28, 28))
        plt.imshow(img_final, cmap='gray')
        # plt.show()
        # scale to [0, 1] and add the batch and channel dimensions
        img_final = np.reshape(img_final / 255.0, (-1, 28, 28, 1))
        img_pred = word_dict[np.argmax(model.predict(img_final))]
        print(img_pred)
        # return img_pred
predict_digit()
cv2.waitKey(0)
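# Hedged sketch (not in the original file): a variant of predict_digit() that
# collects the predicted labels and returns them, as the commented-out
# "return img_pred" above suggests was intended. The name predict_shapes and the
# default path tuple are assumptions for illustration.
def predict_shapes(paths=("right.jpg", "left.jpg")):
    word_dict = {0: 'circle', 1: 'square', 2: 'star', 3: 'triangle'}
    labels = []
    for path in paths:
        img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        img = cv2.GaussianBlur(cv2.resize(img, (400, 440)), (7, 7), 0)
        _, thresh = cv2.threshold(img, 100, 255, cv2.THRESH_BINARY_INV)
        sample = np.reshape(cv2.resize(thresh, (28, 28)) / 255.0, (-1, 28, 28, 1))
        labels.append(word_dict[np.argmax(model.predict(sample))])
    return labels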
from os import listdir
from PIL import Image as PImage
import cv2
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing.image import img_to_array
from keras.preprocessing.image import load_img
import argparse
import glob
import skimage.io
import matplotlib.pyplot as plt
import skimage.filters
import random
from skimage.util import random_noise
imageList = []
ap = argparse.ArgumentParser()
# ap.add_argument("-i", "--imageFolder", required=True, help="path to the input image")
ap.add_argument("-o", "--output", required=True,
                help="path to output directory to store augmentation examples")
ap.add_argument("-p", "--prefix", type=str, default="image",
                help="output filename prefix")
args = vars(ap.parse_args())
# /home/cyrrup/Desktop/outnew/*.jpg
print("[INFO] loading example image...")
def add_noise(img):
    '''Preprocessing hook: the active code applies a Gaussian blur; the
    commented-out lines are an earlier additive-noise variant.'''
    # VARIABILITY = 10
    # deviation = VARIABILITY*random.random()
    # noise = np.random.normal(0, deviation, img.shape)
    # img += noise
    # np.clip(img, 0., 255.)
    sigma = 2.5
    blurred = skimage.filters.gaussian(
        img, sigma=(sigma, sigma), truncate=2.5, multichannel=True)
    # img = blurred
    return blurred
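# Hedged sketch (not active in the original file): the additive-noise variant
# described by the commented-out lines above, kept as a separate function so it
# could be passed as preprocessing_function instead. The name add_gaussian_noise
# is an assumption; VARIABILITY = 10 comes from those comments.
def add_gaussian_noise(img):
    VARIABILITY = 10
    deviation = VARIABILITY * random.random()
    noise = np.random.normal(0, deviation, img.shape)
    return np.clip(img + noise, 0., 255.)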
# for filename in glob.glob('E:/data separated/y/*.png'):
for filename in glob.glob('G:/data augmengt/Data-aumentation-of-images-folder-using-keras-and-PIL-master/h/*.jpg'):
    # print(filename)
    # print("[INFO] loading example image...")
    image = load_img(filename)
    image = img_to_array(image)
    image = np.expand_dims(image, axis=0)
    # construct the image generator for data augmentation, then
    # initialize the total number of images generated so far
    # aug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1,
    #                          height_shift_range=0.1, shear_range=0.2, zoom_range=0.2,
    #                          horizontal_flip=True, fill_mode="nearest")
    # aug = ImageDataGenerator(rotation_range=30, fill_mode='nearest')
    aug = ImageDataGenerator(preprocessing_function=add_noise)
    total = 0
    # construct the actual Python generator
    print("[INFO] generating images...")
    imageGen = aug.flow(image, batch_size=1, save_to_dir=args["output"],
                        save_prefix=args["prefix"], save_format="jpg")
    # loop over examples from the image data augmentation generator
    for image in imageGen:
        # increment the counter
        total += 1
        # break once 5 augmented examples have been generated for this image
        if total == 5:
            break
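# Usage sketch (assumed invocation; the script filename augment.py and the output
# folder name are placeholders, only the -o and -p flags are defined above):
#   python augment.py -o augmented_output -p shape
# Each source image then yields 5 augmented copies in the output directory.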