Commit 2aa2c26c authored by Samesh Buddhika Alahakoon

Merge branch 'dinushaCombined' into 'master'

Dinusha combined

See merge request !43
parents 463c836a 6598ae90
<component name="InspectionProjectProfileManager">
<profile version="1.0">
<option name="myName" value="Project Default" />
<inspection_tool class="PyInterpreterInspection" enabled="false" level="WARNING" enabled_by_default="false" />
</profile>
</component>
\ No newline at end of file
import cv2
import os
import mrcnn.model as modellib
from mrcnn.config import Config
import numpy as np
def Blur():
input = 'static/theimage.jpg'
image = cv2.imread(input)
intent = 15
dimensions = image.shape
# blurredImage = gaussianBlur(image, intent)
blurredImage = cv2.GaussianBlur(image, (intent, intent), 0)
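# cv2.GaussianBlur needs an odd kernel size; with sigmaX=0 OpenCV derives the
# sigma automatically from the 15x15 kernel.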
def convolution(inputimage, kernel):
kernelHeight = kernel.shape[0]
kernelWidth = kernel.shape[1]
if(len(inputimage.shape) == 3):
imagePadding = np.pad(inputimage, pad_width=((kernelHeight // 2, kernelHeight // 2),
(kernelWidth // 2, kernelWidth // 2),
(0, 0)),
mode='constant', constant_values=0).astype(np.float32)
elif(len(inputimage.shape) == 2):
imagePadding = np.pad(inputimage, pad_width=((kernelHeight // 2, kernelHeight // 2),
(kernelWidth // 2, kernelWidth // 2)),
mode='constant', constant_values=0).astype(np.float32)
height = kernelHeight // 2
width = kernelWidth // 2
imageConvolution = np.zeros(imagePadding.shape)
for i in range(height, imagePadding.shape[0]-height):
for j in range(width, imagePadding.shape[1]-width):
x = imagePadding[i-height:i-height+kernelHeight, j-width:j-width+kernelWidth]
x = x.flatten()*kernel.flatten()
imageConvolution[i][j] = x.sum()
heightmax = -height
widthmax = -width
if(height == 0):
return imageConvolution[height:, width:widthmax]
if(width == 0):
return imageConvolution[height:heightmax, width:]
return imageConvolution[height:heightmax, width:widthmax]
def gaussianBlur(image, sigma):
image = np.asarray(image)
filter = 2 * int(4 * sigma + 0.5) + 1
gaussianFilter = np.zeros((filter, filter), np.float32)
f1 = filter//2
f2 = filter//2
for x in range(-f1, f1+1):
for y in range(-f2, f2+1):
x1 = 2 * np.pi * (sigma ** 2)
x2 = np.exp(-(x ** 2 + y ** 2)/(2 * sigma ** 2))
gaussianFilter[x + f1, y + f2] = (1/x1) * x2
filterImage = np.zeros_like(image, dtype=np.float32)
for c in range(3):
filterImage[:, :, c] = convolution(image[:, :, c], gaussianFilter)
return (filterImage.astype(np.uint8))
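# Worked example for the hand-rolled filter above: sigma = 2 gives
# filter = 2 * int(4 * 2 + 0.5) + 1 = 17, i.e. a 17x17 kernel; the commented-out
# gaussianBlur(image, intent) call with intent = 15 would build a 121x121 kernel,
# which is very slow with these pure-Python loops, hence the cv2.GaussianBlur call above.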
mainFilePath = os.path.join("D:/research -app/2021-129/Backend/")
modelFilePath = os.path.join(mainFilePath, "logs/")
trainedModelPath = 'D:/research -app/2021-129/Backend/mask_rcnn_object_0010.h5'
class InferenceConfig(Config):
NAME = "object"
NUM_CLASSES = 1 + 3
DETECTION_MIN_CONFIDENCE = 0.5
GPU_COUNT = 1
IMAGES_PER_GPU = 1
config = InferenceConfig()
model = modellib.MaskRCNN(mode="inference", model_dir=modelFilePath, config=config)
model.load_weights(trainedModelPath, by_name=True)
classes = ['BG', 'portrait', 'portrait_body', 'selfie_top']
def applyMask(image, mask):
blurredImage[:, :, 0] = np.where(
mask == 0,
blurredImage[:, :, 0],
image[:, :, 0]
)
blurredImage[:, :, 1] = np.where(
mask == 0,
blurredImage[:, :, 1],
image[:, :, 1]
)
blurredImage[:, :, 2] = np.where(
mask == 0,
blurredImage[:, :, 2],
image[:, :, 2]
)
return blurredImage
def applyBlur(image, boxes, masks, class_ids, classes, scores, viewMask=True):
instances = boxes.shape[0]
if not instances:
print("No instances available!!!")
maskedImage = image.astype(np.uint32).copy()
for i in range(instances):
mask = masks[:, :, i]
if viewMask:
maskedImage = applyMask(maskedImage, mask)
return maskedImage
detectionResults = model.detect([image], verbose=0)
r = detectionResults[0]
output = applyBlur(image, r['rois'], r['masks'], r['class_ids'], classes, r['scores'])
# output = cv2.resize(np.float32(output), (466, 700))
cv2.imwrite('static/blur.jpg', output)
return "blurdone"
import cv2
import numpy as np
import os
import sys
from mrcnn import utils
from mrcnn import visualize
from mrcnn.visualize import display_images
from mrcnn.visualize import display_instances
import mrcnn.model as modellib
from mrcnn.model import log
from mrcnn.config import Config
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import PIL
from PIL import Image
def cutImage():
# Input the original image name
# original_image = 'samples/ex7.jpg'
original_image = 'static/theimage.jpg'
# Use OpenCV to read the original image
image = cv2.imread(original_image)
height, width, channels = image.shape
print("Read original image successfully! The original image shape is:")
print(image.shape)
dimensions = (width, height)
# blur = cv2.GaussianBlur(image ,(intent,intent),0)
bg = cv2.imread('static/bg.jpg')
bg = cv2.resize(bg, dimensions, interpolation = cv2.INTER_AREA)
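# The replacement background is resized to the original photo's (width, height)
# so the per-pixel np.where() compositing in apply_mask() below lines up exactly.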
# blur1 = cv2.GaussianBlur(image ,(intent,intent),0)
# cv2.imwrite('original_image.jpg', bg)
# Load the pre-trained model data
ROOT_DIR = os.path.join("D:/research -app/2021-129/Backend/")
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
customModel = 'D:/research -app/2021-129/Backend/mask_rcnn_object_0010.h5'
class InferenceConfig(Config):
NAME = "object"
GPU_COUNT = 1
IMAGES_PER_GPU = 1
NUM_CLASSES = 1 + 3 #Background and the total labels of the dataset
DETECTION_MIN_CONFIDENCE = 0.9
config = InferenceConfig()
# COCO dataset object names
model = modellib.MaskRCNN(
mode="inference", model_dir=MODEL_DIR, config=config
)
model.load_weights(customModel, by_name=True)
class_names = ['BG', 'portrait', 'portrait_body', 'selfie_top']
def apply_mask(image, mask):
bg[:, :, 0] = np.where(
mask == 0,
bg[:, :, 0],
image[:, :, 0]
)
bg[:, :, 1] = np.where(
mask == 0,
bg[:, :, 1],
image[:, :, 1]
)
bg[:, :, 2] = np.where(
mask == 0,
bg[:, :, 2],
image[:, :, 2]
)
return bg
# Composite the replacement background over everything outside the detected masks.
def display_instances(image, boxes, masks, ids, names, scores, show_mask=True):
instances = boxes.shape[0]
if not instances:
print('There are no instances available to display!!!')
maskedImage = image.astype(np.uint32).copy()
for i in range(instances):
mask = masks[:, :, i]
if show_mask:
maskedImage = apply_mask(maskedImage, mask)
return maskedImage
"""max_area = 0
# n_instances saves the amount of all objects
n_instances = boxes.shape[0]
if not n_instances:
print('NO INSTANCES TO DISPLAY')
else:
assert boxes.shape[0] == masks.shape[-1] == ids.shape[0]
for i in range(n_instances):
if not np.any(boxes[i]):
continue
# compute the square of each object
y1, x1, y2, x2 = boxes[i]
square = (y2 - y1) * (x2 - x1)
# use label to select person object from all the 80 classes in COCO dataset
label = names[ids[i]]
if label == 'selfie_top' or label == 'portrait_body' or label == 'portrait':
# save the largest object in the image as main character
# other people will be regarded as background
if square > max_area:
max_area = square
mask = masks[:, :, i]
else:
continue
else:
continue
# apply mask for the image
save1 = apply_mask(image, mask)
# save2 = apply_mask2(image, mask)
return save1 # ,save2"""
results = model.detect([image], verbose=0)
r = results[0]
# Apply the background-replacement effect
frame1 = display_instances(
image, r['rois'], r['masks'], r['class_ids'], class_names, r['scores']
)
# Save the composited image
cv2.imwrite("static/cut.jpg", frame1)
return "done"
import cv2
import numpy as np
import os
import sys
import mrcnn.model as modellib
from mrcnn.model import log
from mrcnn.config import Config
import PIL
from PIL import Image
from matplotlib.pyplot import imread
import scipy as sc
from scipy import ndimage
from skimage import filters
import matplotlib.pyplot as plt
import math
from skimage.color import rgb2gray
rootLocation = os.path.join("D:/research -app/2021-129/Backend/")
modelLocation = os.path.join(rootLocation, "logs/")
customModel = 'D:/research -app/2021-129/Backend/mask_rcnn_object_0010.h5'
class InferenceConfig(Config):
NAME = "object"
GPU_COUNT = 1
IMAGES_PER_GPU = 1
NUM_CLASSES = 1 + 3 #Background and the total labels of the dataset
DETECTION_MIN_CONFIDENCE = 0.9
config = InferenceConfig()
model = modellib.MaskRCNN(mode = "inference", model_dir = modelLocation, config = config)
model.load_weights(customModel, by_name=True)
class_names = ['BG', 'portrait', 'portrait_body', 'selfie_top']
def medBlur():
def apply_mask(image, mask):
blurredImage[:, :, 0] = np.where(
mask == 0,
blurredImage[:, :, 0],
image[:, :, 0]
)
blurredImage[:, :, 1] = np.where(
mask == 0,
blurredImage[:, :, 1],
image[:, :, 1]
)
blurredImage[:, :, 2] = np.where(
mask == 0,
blurredImage[:, :, 2],
image[:, :, 2]
)
return blurredImage
def applyBlur(image, boxes, masks, show_mask=True):
instances = boxes.shape[0]
if not instances:
print('There are no instances available to display!!!')
maskedImage = image.astype(np.uint32).copy()
for i in range(instances):
mask = masks[:, :, i]
if show_mask:
maskedImage = apply_mask(maskedImage, mask)
return maskedImage
original_image = 'static/theimage.jpg'
image = cv2.imread(original_image)
intent = 15
blurredImage = cv2.medianBlur(image, intent)
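# cv2.medianBlur needs an odd aperture size (> 1); the whole frame is median-blurred
# here and the detected subject is composited back over it in apply_mask().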
results = model.detect([image], verbose=0)
r = results[0]
output = applyBlur(image, r['rois'], r['masks'])
#output = cv2.resize(output, (466, 700))
cv2.imwrite('static/medblur.jpg', output)
return "blurdone"
\ No newline at end of file
import cv2
import numpy as np
import os
import sys
from mrcnn import utils
from mrcnn import visualize
from mrcnn.visualize import display_images
from mrcnn.visualize import display_instances
import mrcnn.model as modellib
from mrcnn.model import log
from mrcnn.config import Config
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
def Splash():
original_image = 'static/theimage.jpg'
image = cv2.imread(original_image)
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
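# Colour-splash: pixels outside the detected masks are replaced with this
# grayscale copy, while pixels inside the masks keep their original colour.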
ROOT_DIR = os.path.join("D:/research -app/2021-129/Backend/")
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
WEIGHTS_PATH = 'D:/research -app/2021-129/Backend/mask_rcnn_object_0010.h5'
class InferenceConfig(Config):
NAME = "object"
GPU_COUNT = 1
IMAGES_PER_GPU = 1
NUM_CLASSES = 1 + 3 #Background and the total labels of the dataset
DETECTION_MIN_CONFIDENCE = 0.9
config = InferenceConfig()
weights_path = WEIGHTS_PATH
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
model.load_weights(weights_path, by_name=True)
class_names = ['BG', 'portrait', 'portrait_body', 'selfie_top']
def apply_mask(image, mask):
image[:, :, 0] = np.where(
mask == 0,
gray_image[:, :],
image[:, :, 0]
)
image[:, :, 1] = np.where(
mask == 0,
gray_image[:, :],
image[:, :, 1]
)
image[:, :, 2] = np.where(
mask == 0,
gray_image[:, :],
image[:, :, 2]
)
return image
def display_instances(image, boxes, masks, ids, names, scores, show_mask=True):
instances = boxes.shape[0]
if not instances:
print('There are no instances available to display!!!')
maskedImage = image.astype(np.uint32).copy()
for i in range(instances):
mask = masks[:, :, i]
if show_mask:
maskedImage = apply_mask(maskedImage, mask)
return maskedImage
"""max_area = 0
# n_instances saves the amount of all objects
n_instances = boxes.shape[0]
if not n_instances:
print('NO INSTANCES TO DISPLAY')
else:
assert boxes.shape[0] == masks.shape[-1] == ids.shape[0]
for i in range(n_instances):
if not np.any(boxes[i]):
continue
# compute the square of each object
y1, x1, y2, x2 = boxes[i]
square = (y2 - y1) * (x2 - x1)
# use label to select person object from all the 80 classes in COCO dataset
label = names[ids[i]]
if label == 'selfie_top' or label == 'portrait_body' or label == 'portrait':
# save the largest object in the image as main character
# other people will be regarded as background
if square > max_area:
max_area = square
mask = masks[:, :, i]
else:
continue
else:
continue
image = apply_mask(image, mask)
return image"""
results = model.detect([image], verbose=0)
r = results[0]
frame = display_instances(
image, r['rois'], r['masks'], r['class_ids'], class_names, r['scores']
)
frame = cv2.resize(np.float32(frame), (466, 700))
cv2.imwrite('static/splash.jpg', frame)
return "splashdone"
import numpy as np
import cv2
def vignette():
input_image = 'static/theimage.jpg'
input_image = cv2.imread(input_image)
rows, cols = input_image.shape[:2]
X_resultant_kernel = cv2.getGaussianKernel(cols,200)
Y_resultant_kernel = cv2.getGaussianKernel(rows,200)
resultant_kernel = Y_resultant_kernel * X_resultant_kernel.T
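# The outer product of the two 1-D Gaussian kernels gives a 2-D weight map that
# peaks at the image centre; normalised to 0-255 below, it darkens each channel
# progressively towards the edges.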
mask = 255 * resultant_kernel / np.linalg.norm(resultant_kernel)
output = np.copy(input_image)
for i in range(3):
output[:, :, i] = output[:,:,i] * mask
cv2.imwrite('static/vignette.jpg', output)
return "vignettedone"
\ No newline at end of file
from flask import Flask, request
from flask_cors import CORS, cross_origin
from Cut import cutImage
from Splash import Splash
from Blur import Blur
from maskImage import mask
from bokeh import Bokeh
from MedBlur import medBlur
from Vignette import vignette
import os
import tensorflow as tf
from flask_ngrok import run_with_ngrok
import base64
app = Flask(__name__)
run_with_ngrok(app)
cors = CORS(app)
app.config["CACHE_TYPE"] = "null"
app.config['CORS_HEADERS'] = '*'
app.config["IMAGE_UPLOADS"] = "./"
graph = tf.get_default_graph()
def convert_and_save(b64_string):
with open("static/theimage.jpg", "wb") as fh:
fh.write(base64.decodebytes(b64_string.encode()))
@app.route("/")
def hello():
return "Checking Connectivity..."
@app.route('/upload', methods = ['POST'])
@cross_origin()
def upload_file():
if request.method == 'POST':
image = request.form.get('image')
convert_and_save(image)
return "done"
# @app.route('/upload', methods = ['POST'])
# @cross_origin()
# def upload_file():
# if request.method == 'POST':
# if request.files:
# image = request.files["image"]
# image.save(os.path.join(app.config["IMAGE_UPLOADS"], "static/theimage.jpg"))
# global graph
# with graph.as_default():
# mask()
# return "done"
@app.route('/uploadbg', methods = ['POST'])
@cross_origin()
def upload_bg():
if request.method == 'POST':
if request.files:
image = request.files["image"]
image.save(os.path.join(app.config["IMAGE_UPLOADS"], "static/bg.jpg"))
return "done"
@app.route('/cut', methods = ['GET'])
@cross_origin()
def cutGet():
global graph
with graph.as_default():
cutImage()
return "done"
@app.route('/splash', methods = ['GET'])
@cross_origin()
def splashGet():
global graph
with graph.as_default():
Splash()
return "done"
@app.route('/blur', methods = ['GET'])
@cross_origin()
def blurGet():
global graph
with graph.as_default():
Blur()
return "done"
@app.route('/bokeh', methods = ['GET'])
@cross_origin()
def bokehGet():
global graph
with graph.as_default():
Bokeh()
return "done"
@app.route('/vignette', methods = ['GET'])
@cross_origin()
def vignetteGet():
global graph
with graph.as_default():
vignette()
return "done"
@app.route('/medblur', methods = ['GET'])
@cross_origin()
def medblurGet():
global graph
with graph.as_default():
medBlur()
return "done"
if __name__ == '__main__':
app.run()
\ No newline at end of file
import cv2
import os
import mrcnn.model as modellib
from mrcnn.config import Config
import matplotlib.pyplot as plt
import numpy as np
def Bokeh():
input = 'static/theimage.jpg'
image = cv2.imread(input)
plt.rcParams["figure.figsize"]= (10,10)
np.set_printoptions(precision=3)
Array = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], dtype='float')
binaryMask = Array
gaussianKernel = cv2.getGaussianKernel(11, 5.)
convoKernel = gaussianKernel*gaussianKernel.transpose()*binaryMask
kernel = convoKernel / np.sum(convoKernel)
print(kernel)
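# The 11x11 binary mask approximates a (triangular) aperture shape; multiplying it
# with the separable Gaussian kernel and normalising to sum 1 produces the bokeh
# kernel, so blown-out highlights spread into aperture-shaped patches rather than
# a plain Gaussian smear.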
def bokeh(image):
r,g,b = cv2.split(image)
r = r / 255.
r = np.where(r > 0.9, r * 2, r)
filteredR = cv2.filter2D(r, -1, kernel)
filteredR = np.where(filteredR > 1., 1., filteredR)
g = g / 255.
g = np.where(g > 0.9, g * 2, g)
filteredG = cv2.filter2D(g, -1, kernel)
filteredG = np.where(filteredG > 1., 1., filteredG)
b = b / 255.
b = np.where(b > 0.9, b * 2, b)
filteredB = cv2.filter2D(b, -1, kernel)
filteredB = np.where(filteredB > 1., 1., filteredB)
result = cv2.merge((filteredR, filteredG, filteredB))
return result
imageRGB = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
imageRGB = imageRGB / 255.
mainFilePath = os.path.join("D:/research -app/2021-129/Backend/")
modelFilePath = os.path.join(mainFilePath, "logs/")
trainedModelPath = 'D:/research -app/2021-129/Backend/mask_rcnn_object_0010.h5'
class InferenceConfig(Config):
NAME = "object"
NUM_CLASSES = 1 + 3
DETECTION_MIN_CONFIDENCE = 0.5
GPU_COUNT = 1
IMAGES_PER_GPU = 1
config = InferenceConfig()
model = modellib.MaskRCNN(mode="inference", model_dir=modelFilePath, config=config)
model.load_weights(trainedModelPath, by_name=True)
classes = ['BG', 'portrait', 'portrait_body', 'selfie_top']
def applyBokeh(imageInput, boxes, masks, class_ids, classes, scores, show_mask=True):
instances = boxes.shape[0]
# Fall back to the unprocessed (still 0-1 RGB) frame if nothing was detected,
# so the return below never references an undefined variable.
maskedImage = np.asarray(imageInput * 255., dtype='uint8')
if not instances:
print("No instances available!!!")
for i in range(instances):
mask = masks[:, :, i]
invertImage = np.abs(1. - mask)
r,g,b = cv2.split(imageInput)
maskedR = r * mask
maskedG = g * mask
maskedB = b * mask
mergedImage = cv2.merge((maskedR, maskedG, maskedB))
invertR = r * invertImage
invertG = g * invertImage
invertB = b * invertImage
backgroundMerged = cv2.merge((invertR, invertG, invertB))
mergedImage = np.asarray(mergedImage * 255., dtype='uint8')
backgroundMerged = np.asarray(backgroundMerged * 255., dtype='uint8')
backgroundBokeh = bokeh(backgroundMerged)
backgroundBokeh = np.asarray(backgroundBokeh * 255., dtype='uint8')
maskedImage = cv2.add(mergedImage, backgroundBokeh)
maskedImage = cv2.cvtColor(maskedImage, cv2.COLOR_BGR2RGB)
return maskedImage
detectionResults = model.detect([image], verbose=0)
r = detectionResults[0]
output = applyBokeh(imageRGB, r['rois'], r['masks'], r['class_ids'], classes, r['scores'])
cv2.imwrite('static/bokeh.jpg', output)
return "bokehdone"
\ No newline at end of file
import warnings
warnings.filterwarnings('ignore')
import os
import sys
import json
import datetime
import numpy as np
import skimage.draw
import cv2
import random
import math
import re
import time
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.image as mpimg
from mrcnn import utils
from mrcnn import visualize
from mrcnn.visualize import display_images
from mrcnn.visualize import display_instances
import mrcnn.model as modellib
from mrcnn.model import log
from mrcnn.config import Config
def mask():
path_to_new_image = 'static/theimage.jpg'
#image1 = mpimg.imread(path_to_new_image)
image1 = cv2.imread(path_to_new_image)
ROOT_DIR = os.path.join("D:/research -app/2021-129/Backend/")
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
WEIGHTS_PATH = 'D:/research -app/2021-129/Backend/mask_rcnn_object_0010.h5'
class InferenceConfig(Config):
NAME = "object"
GPU_COUNT = 1
IMAGES_PER_GPU = 1
NUM_CLASSES = 1 + 3 #Background and the total labels of the dataset
DETECTION_MIN_CONFIDENCE = 0.5
config = InferenceConfig()
#config.display()
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
weights_path = WEIGHTS_PATH
model.load_weights(weights_path, by_name=True)
class_names = ['BG', 'portrait', 'portrait_body', 'selfie_top']
results1 = model.detect([image1], verbose=1)
r1 = results1[0]
visualize.display_instances(image1, r1['rois'], r1['masks'], r1['class_ids'],
class_names, r1['scores'], title="Predictions1")
return True
import warnings
warnings.filterwarnings('ignore')
import os
import sys
import json
import datetime
import numpy as np
import skimage.draw
import cv2
import random
import math
import re
import time
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.image as mpimg
import colorsys
from skimage.measure import find_contours
from matplotlib.patches import Polygon
from mrcnn import utils
from mrcnn import visualize
from mrcnn.visualize import display_images
from mrcnn.visualize import display_instances
import mrcnn.model as modellib
from mrcnn.model import log
from mrcnn.config import Config
#import custom
# Root directory of the project
ROOT_DIR = ""
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
WEIGHTS_PATH = "mask_rcnn_object_0010.h5"
class CustomConfig(Config):
"""Configuration for training on the custom dataset.
Derives from the base Config class and overrides some values.
"""
# Give the configuration a recognizable name
NAME = "object"
IMAGES_PER_GPU = 1
NUM_CLASSES = 1 + 3 # Background + labels
# Number of training steps per epoch
STEPS_PER_EPOCH = 10
# Skip detections with < 90% confidence
DETECTION_MIN_CONFIDENCE = 0.9
config = CustomConfig()
class InferenceConfig(config.__class__):
# Run detection on one image at a time
GPU_COUNT = 1
IMAGES_PER_GPU = 1
DETECTION_MIN_CONFIDENCE = 0.7
config = InferenceConfig()
config.display()
#LOAD MODEL. Create model in inference mode
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
# Load COCO weights Or, load the last model you trained
weights_path = WEIGHTS_PATH
# Load weights
# print("Loading weights ", weights_path)
model.load_weights(weights_path, by_name=True)
class_names = ['BG', 'portrait','portrait_body', 'selfie_top']
path = sys.argv[1]
#path_to_new_image = 'D:/research/maskRcnn/images/portrait demo 1.jpg'
path_to_new_image = path.split("/")[-1]
image1 = mpimg.imread(path_to_new_image)
plt.imshow(image1)
def random_colors(N, bright=True):
"""
Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
"""
brightness = 1.0 if bright else 0.7
hsv = [(i / N, 1, brightness) for i in range(N)]
colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
random.shuffle(colors)
return colors
def apply_mask(image, mask, color, alpha=0.5):
"""Apply the given mask to the image.
"""
for c in range(3):
image[:, :, c] = np.where(mask == 1,
image[:, :, c] *
(1 - alpha) + alpha * color[c] * 255,
image[:, :, c])
return image
def display_instances(image, boxes, masks, class_ids, class_names,
scores=None, title="",
figsize=(16, 16), ax=None,
show_mask=True, show_bbox=True,
colors=None, captions=None):
"""
boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.
masks: [height, width, num_instances]
class_ids: [num_instances]
class_names: list of class names of the dataset
scores: (optional) confidence scores for each box
title: (optional) Figure title
show_mask, show_bbox: To show masks and bounding boxes or not
figsize: (optional) the size of the image
colors: (optional) An array or colors to use with each object
captions: (optional) A list of strings to use as captions for each object
"""
# Number of instances
N = boxes.shape[0]
if not N:
print("\n*** No instances to display *** \n")
else:
assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]
# If no axis is passed, create one and automatically call show()
auto_show = False
if not ax:
_, ax = plt.subplots(1, figsize=figsize)
auto_show = True
# Generate random colors
colors = colors or random_colors(N)
# Show area outside image boundaries.
height, width = image.shape[:2]
ax.set_ylim(height + 10, -10)
ax.set_xlim(-10, width + 10)
ax.axis('off')
ax.set_title(title)
masked_image = image.astype(np.uint32).copy()
for i in range(N):
color = colors[i]
# Bounding box
if not np.any(boxes[i]):
# Skip this instance. Has no bbox. Likely lost in image cropping.
continue
y1, x1, y2, x2 = boxes[i]
if show_bbox:
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
alpha=0.7, linestyle="dashed",
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Label
if not captions:
class_id = class_ids[i]
score = scores[i] if scores is not None else None
label = class_names[class_id]
caption = "{} {:.3f}".format(label, score) if score else label
else:
caption = captions[i]
ax.text(x1, y1 + 8, caption,
color='w', size=11, backgroundcolor="none")
# Mask
mask = masks[:, :, i]
if show_mask:
masked_image = apply_mask(masked_image, mask, color)
# Mask Polygon
# Pad to ensure proper polygons for masks that touch image edges.
padded_mask = np.zeros(
(mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
padded_mask[1:-1, 1:-1] = mask
contours = find_contours(padded_mask, 0.5)
for verts in contours:
# Subtract the padding and flip (y, x) to (x, y)
verts = np.fliplr(verts) - 1
p = Polygon(verts, facecolor="none", edgecolor=color)
ax.add_patch(p)
ax.imshow(masked_image.astype(np.uint8))
if auto_show:
plt.show()
# print(len([image1]))
results1 = model.detect([image1], verbose=1)
r1 = results1[0]
display_instances(image1, r1['rois'], r1['masks'], r1['class_ids'],
class_names, r1['scores'], title="Predictions1")
\ No newline at end of file
"""
Mask R-CNN
Base Configurations class.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import numpy as np
# Base Configuration Class
# Don't use this class directly. Instead, sub-class it and override
# the configurations you need to change.
class Config(object):
"""Base configuration class. For custom configurations, create a
sub-class that inherits from this one and override properties
that need to be changed.
"""
# Name the configurations. For example, 'COCO', 'Experiment 3', ...etc.
# Useful if your code needs to do things differently depending on which
# experiment is running.
NAME = None # Override in sub-classes
# NUMBER OF GPUs to use. When using only a CPU, this needs to be set to 1.
GPU_COUNT = 1
# Number of images to train with on each GPU. A 12GB GPU can typically
# handle 2 images of 1024x1024px.
# Adjust based on your GPU memory and image sizes. Use the highest
# number that your GPU can handle for best performance.
IMAGES_PER_GPU = 2
# Number of training steps per epoch
# This doesn't need to match the size of the training set. Tensorboard
# updates are saved at the end of each epoch, so setting this to a
# smaller number means getting more frequent TensorBoard updates.
# Validation stats are also calculated at each epoch end and they
# might take a while, so don't set this too small to avoid spending
# a lot of time on validation stats.
STEPS_PER_EPOCH = 1000
# Number of validation steps to run at the end of every training epoch.
# A bigger number improves accuracy of validation stats, but slows
# down the training.
VALIDATION_STEPS = 50
# Backbone network architecture
# Supported values are: resnet50, resnet101.
# You can also provide a callable that should have the signature
# of model.resnet_graph. If you do so, you need to supply a callable
# to COMPUTE_BACKBONE_SHAPE as well
BACKBONE = "resnet101"
# Only useful if you supply a callable to BACKBONE. Should compute
# the shape of each layer of the FPN Pyramid.
# See model.compute_backbone_shapes
COMPUTE_BACKBONE_SHAPE = None
# The strides of each layer of the FPN Pyramid. These values
# are based on a Resnet101 backbone.
BACKBONE_STRIDES = [4, 8, 16, 32, 64]
# Size of the fully-connected layers in the classification graph
FPN_CLASSIF_FC_LAYERS_SIZE = 1024
# Size of the top-down layers used to build the feature pyramid
TOP_DOWN_PYRAMID_SIZE = 256
# Number of classification classes (including background)
NUM_CLASSES = 1 # Override in sub-classes
# Length of square anchor side in pixels
RPN_ANCHOR_SCALES = (32, 64, 128, 256, 512)
# Ratios of anchors at each cell (width/height)
# A value of 1 represents a square anchor, 0.5 a tall anchor, and 2 a wide anchor
RPN_ANCHOR_RATIOS = [0.5, 1, 2]
# Anchor stride
# If 1 then anchors are created for each cell in the backbone feature map.
# If 2, then anchors are created for every other cell, and so on.
RPN_ANCHOR_STRIDE = 1
# Non-max suppression threshold to filter RPN proposals.
# You can increase this during training to generate more proposals.
RPN_NMS_THRESHOLD = 0.7
# How many anchors per image to use for RPN training
RPN_TRAIN_ANCHORS_PER_IMAGE = 256
# ROIs kept after tf.nn.top_k and before non-maximum suppression
PRE_NMS_LIMIT = 6000
# ROIs kept after non-maximum suppression (training and inference)
POST_NMS_ROIS_TRAINING = 2000
POST_NMS_ROIS_INFERENCE = 1000
# If enabled, resizes instance masks to a smaller size to reduce
# memory load. Recommended when using high-resolution images.
USE_MINI_MASK = True
MINI_MASK_SHAPE = (56, 56) # (height, width) of the mini-mask
# Input image resizing
# Generally, use the "square" resizing mode for training and predicting
# and it should work well in most cases. In this mode, images are scaled
# up such that the small side is = IMAGE_MIN_DIM, but ensuring that the
# scaling doesn't make the long side > IMAGE_MAX_DIM. Then the image is
# padded with zeros to make it a square so multiple images can be put
# in one batch.
# Available resizing modes:
# none: No resizing or padding. Return the image unchanged.
# square: Resize and pad with zeros to get a square image
# of size [max_dim, max_dim].
# pad64: Pads width and height with zeros to make them multiples of 64.
# If IMAGE_MIN_DIM or IMAGE_MIN_SCALE are not None, then it scales
# up before padding. IMAGE_MAX_DIM is ignored in this mode.
# The multiple of 64 is needed to ensure smooth scaling of feature
# maps up and down the 6 levels of the FPN pyramid (2**6=64).
# crop: Picks random crops from the image. First, scales the image based
# on IMAGE_MIN_DIM and IMAGE_MIN_SCALE, then picks a random crop of
# size IMAGE_MIN_DIM x IMAGE_MIN_DIM. Can be used in training only.
# IMAGE_MAX_DIM is not used in this mode.
IMAGE_RESIZE_MODE = "square"
IMAGE_MIN_DIM = 800
IMAGE_MAX_DIM = 1024
# Minimum scaling ratio. Checked after IMAGE_MIN_DIM and can force further
# up scaling. For example, if set to 2 then images are scaled up to double
# the width and height, or more, even if IMAGE_MIN_DIM doesn't require it.
# However, in 'square' mode, it can be overruled by IMAGE_MAX_DIM.
IMAGE_MIN_SCALE = 0
# Number of color channels per image. RGB = 3, grayscale = 1, RGB-D = 4
# Changing this requires other changes in the code. See the WIKI for more
# details: https://github.com/matterport/Mask_RCNN/wiki
IMAGE_CHANNEL_COUNT = 3
# Image mean (RGB)
MEAN_PIXEL = np.array([123.7, 116.8, 103.9])
# Number of ROIs per image to feed to classifier/mask heads
# The Mask RCNN paper uses 512 but often the RPN doesn't generate
# enough positive proposals to fill this and keep a positive:negative
# ratio of 1:3. You can increase the number of proposals by adjusting
# the RPN NMS threshold.
TRAIN_ROIS_PER_IMAGE = 200
# Percent of positive ROIs used to train classifier/mask heads
ROI_POSITIVE_RATIO = 0.33
# Pooled ROIs
POOL_SIZE = 7
MASK_POOL_SIZE = 14
# Shape of output mask
# To change this you also need to change the neural network mask branch
MASK_SHAPE = [28, 28]
# Maximum number of ground truth instances to use in one image
MAX_GT_INSTANCES = 100
# Bounding box refinement standard deviation for RPN and final detections.
RPN_BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])
BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])
# Max number of final detections
DETECTION_MAX_INSTANCES = 100
# Minimum probability value to accept a detected instance
# ROIs below this threshold are skipped
DETECTION_MIN_CONFIDENCE = 0.7
# Non-maximum suppression threshold for detection
DETECTION_NMS_THRESHOLD = 0.3
# Learning rate and momentum
# The Mask RCNN paper uses lr=0.02, but on TensorFlow it causes
# weights to explode. Likely due to differences in optimizer
# implementation.
LEARNING_RATE = 0.001
LEARNING_MOMENTUM = 0.9
# Weight decay regularization
WEIGHT_DECAY = 0.0001
# Loss weights for more precise optimization.
# Can be used for R-CNN training setup.
LOSS_WEIGHTS = {
"rpn_class_loss": 1.,
"rpn_bbox_loss": 1.,
"mrcnn_class_loss": 1.,
"mrcnn_bbox_loss": 1.,
"mrcnn_mask_loss": 1.
}
# Use RPN ROIs or externally generated ROIs for training
# Keep this True for most situations. Set to False if you want to train
# the head branches on ROI generated by code rather than the ROIs from
# the RPN. For example, to debug the classifier head without having to
# train the RPN.
USE_RPN_ROIS = True
# Train or freeze batch normalization layers
# None: Train BN layers. This is the normal mode
# False: Freeze BN layers. Good when using a small batch size
# True: (don't use). Set layer in training mode even when predicting
TRAIN_BN = False # Defaulting to False since batch size is often small
# Gradient norm clipping
GRADIENT_CLIP_NORM = 5.0
def __init__(self):
"""Set values of computed attributes."""
# Effective batch size
self.BATCH_SIZE = self.IMAGES_PER_GPU * self.GPU_COUNT
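# e.g. with IMAGES_PER_GPU = 2 and GPU_COUNT = 1 the effective batch size is 2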
# Input image size
if self.IMAGE_RESIZE_MODE == "crop":
self.IMAGE_SHAPE = np.array([self.IMAGE_MIN_DIM, self.IMAGE_MIN_DIM,
self.IMAGE_CHANNEL_COUNT])
else:
self.IMAGE_SHAPE = np.array([self.IMAGE_MAX_DIM, self.IMAGE_MAX_DIM,
self.IMAGE_CHANNEL_COUNT])
# Image meta data length
# See compose_image_meta() for details
self.IMAGE_META_SIZE = 1 + 3 + 3 + 4 + 1 + self.NUM_CLASSES
def display(self):
"""Display Configuration values."""
print("\nConfigurations:")
for a in dir(self):
if not a.startswith("__") and not callable(getattr(self, a)):
print("{:30} {}".format(a, getattr(self, a)))
print("\n")
This source diff could not be displayed because it is too large.
"""
Mask R-CNN
Multi-GPU Support for Keras.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
Ideas and a small code snippets from these sources:
https://github.com/fchollet/keras/issues/2436
https://medium.com/@kuza55/transparent-multi-gpu-training-on-tensorflow-with-keras-8b0016fd9012
https://github.com/avolkov1/keras_experiments/blob/master/keras_exp/multigpu/
https://github.com/fchollet/keras/blob/master/keras/utils/training_utils.py
"""
import tensorflow as tf
import keras.backend as K
import keras.layers as KL
import keras.models as KM
class ParallelModel(KM.Model):
"""Subclasses the standard Keras Model and adds multi-GPU support.
It works by creating a copy of the model on each GPU. Then it slices
the inputs and sends a slice to each copy of the model, and then
merges the outputs together and applies the loss on the combined
outputs.
"""
def __init__(self, keras_model, gpu_count):
"""Class constructor.
keras_model: The Keras model to parallelize
gpu_count: Number of GPUs. Must be > 1
"""
self.inner_model = keras_model
self.gpu_count = gpu_count
merged_outputs = self.make_parallel()
super(ParallelModel, self).__init__(inputs=self.inner_model.inputs,
outputs=merged_outputs)
def __getattribute__(self, attrname):
"""Redirect loading and saving methods to the inner model. That's where
the weights are stored."""
if 'load' in attrname or 'save' in attrname:
return getattr(self.inner_model, attrname)
return super(ParallelModel, self).__getattribute__(attrname)
def summary(self, *args, **kwargs):
"""Override summary() to display summaries of both, the wrapper
and inner models."""
super(ParallelModel, self).summary(*args, **kwargs)
self.inner_model.summary(*args, **kwargs)
def make_parallel(self):
"""Creates a new wrapper model that consists of multiple replicas of
the original model placed on different GPUs.
"""
# Slice inputs. Slice inputs on the CPU to avoid sending a copy
# of the full inputs to all GPUs. Saves on bandwidth and memory.
input_slices = {name: tf.split(x, self.gpu_count)
for name, x in zip(self.inner_model.input_names,
self.inner_model.inputs)}
output_names = self.inner_model.output_names
outputs_all = []
for i in range(len(self.inner_model.outputs)):
outputs_all.append([])
# Run the model call() on each GPU to place the ops there
for i in range(self.gpu_count):
with tf.device('/gpu:%d' % i):
with tf.name_scope('tower_%d' % i):
# Run a slice of inputs through this replica
zipped_inputs = zip(self.inner_model.input_names,
self.inner_model.inputs)
inputs = [
KL.Lambda(lambda s: input_slices[name][i],
output_shape=lambda s: (None,) + s[1:])(tensor)
for name, tensor in zipped_inputs]
# Create the model replica and get the outputs
outputs = self.inner_model(inputs)
if not isinstance(outputs, list):
outputs = [outputs]
# Save the outputs for merging back together later
for l, o in enumerate(outputs):
outputs_all[l].append(o)
# Merge outputs on CPU
with tf.device('/cpu:0'):
merged = []
for outputs, name in zip(outputs_all, output_names):
# Concatenate or average outputs?
# Outputs usually have a batch dimension and we concatenate
# across it. If they don't, then the output is likely a loss
# or a metric value that gets averaged across the batch.
# Keras expects losses and metrics to be scalars.
if K.int_shape(outputs[0]) == ():
# Average
m = KL.Lambda(lambda o: tf.add_n(o) / len(outputs), name=name)(outputs)
else:
# Concatenate
m = KL.Concatenate(axis=0, name=name)(outputs)
merged.append(m)
return merged
if __name__ == "__main__":
# Testing code below. It creates a simple model to train on MNIST and
# tries to run it on 2 GPUs. It saves the graph so it can be viewed
# in TensorBoard. Run it as:
#
# python3 parallel_model.py
import os
import numpy as np
import keras.optimizers
from keras.datasets import mnist
from keras.preprocessing.image import ImageDataGenerator
GPU_COUNT = 2
# Root directory of the project
ROOT_DIR = os.path.abspath("../")
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
def build_model(x_train, num_classes):
# Reset default graph. Keras leaves old ops in the graph,
# which are ignored for execution but clutter graph
# visualization in TensorBoard.
tf.reset_default_graph()
inputs = KL.Input(shape=x_train.shape[1:], name="input_image")
x = KL.Conv2D(32, (3, 3), activation='relu', padding="same",
name="conv1")(inputs)
x = KL.Conv2D(64, (3, 3), activation='relu', padding="same",
name="conv2")(x)
x = KL.MaxPooling2D(pool_size=(2, 2), name="pool1")(x)
x = KL.Flatten(name="flat1")(x)
x = KL.Dense(128, activation='relu', name="dense1")(x)
x = KL.Dense(num_classes, activation='softmax', name="dense2")(x)
return KM.Model(inputs, x, "digit_classifier_model")
# Load MNIST Data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = np.expand_dims(x_train, -1).astype('float32') / 255
x_test = np.expand_dims(x_test, -1).astype('float32') / 255
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
# Build data generator and model
datagen = ImageDataGenerator()
model = build_model(x_train, 10)
# Add multi-GPU support.
model = ParallelModel(model, GPU_COUNT)
optimizer = keras.optimizers.SGD(lr=0.01, momentum=0.9, clipnorm=5.0)
model.compile(loss='sparse_categorical_crossentropy',
optimizer=optimizer, metrics=['accuracy'])
model.summary()
# Train
model.fit_generator(
datagen.flow(x_train, y_train, batch_size=64),
steps_per_epoch=50, epochs=10, verbose=1,
validation_data=(x_test, y_test),
callbacks=[keras.callbacks.TensorBoard(log_dir=MODEL_DIR,
write_graph=True)]
)
"""
Mask R-CNN
Common utility functions and classes.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import sys
import os
import logging
import math
import random
import numpy as np
import tensorflow as tf
import scipy
import skimage.color
import skimage.io
import skimage.transform
import urllib.request
import shutil
import warnings
from distutils.version import LooseVersion
# URL from which to download the latest COCO trained weights
COCO_MODEL_URL = "https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5"
############################################################
# Bounding Boxes
############################################################
def extract_bboxes(mask):
"""Compute bounding boxes from masks.
mask: [height, width, num_instances]. Mask pixels are either 1 or 0.
Returns: bbox array [num_instances, (y1, x1, y2, x2)].
"""
boxes = np.zeros([mask.shape[-1], 4], dtype=np.int32)
for i in range(mask.shape[-1]):
m = mask[:, :, i]
# Bounding box.
horizontal_indicies = np.where(np.any(m, axis=0))[0]
vertical_indicies = np.where(np.any(m, axis=1))[0]
if horizontal_indicies.shape[0]:
x1, x2 = horizontal_indicies[[0, -1]]
y1, y2 = vertical_indicies[[0, -1]]
# x2 and y2 should not be part of the box. Increment by 1.
x2 += 1
y2 += 1
else:
# No mask for this instance. Might happen due to
# resizing or cropping. Set bbox to zeros
x1, x2, y1, y2 = 0, 0, 0, 0
boxes[i] = np.array([y1, x1, y2, x2])
return boxes.astype(np.int32)
def compute_iou(box, boxes, box_area, boxes_area):
"""Calculates IoU of the given box with the array of the given boxes.
box: 1D vector [y1, x1, y2, x2]
boxes: [boxes_count, (y1, x1, y2, x2)]
box_area: float. the area of 'box'
boxes_area: array of length boxes_count.
Note: the areas are passed in rather than calculated here for
efficiency. Calculate once in the caller to avoid duplicate work.
"""
# Calculate intersection areas
y1 = np.maximum(box[0], boxes[:, 0])
y2 = np.minimum(box[2], boxes[:, 2])
x1 = np.maximum(box[1], boxes[:, 1])
x2 = np.minimum(box[3], boxes[:, 3])
intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)
union = box_area + boxes_area[:] - intersection[:]
iou = intersection / union
return iou
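# Worked example: box = [0, 0, 2, 2] (area 4) against boxes = [[1, 1, 3, 3]]
# (area 4): intersection = 1 * 1 = 1, union = 4 + 4 - 1 = 7, so IoU = 1/7 ≈ 0.14.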
def compute_overlaps(boxes1, boxes2):
"""Computes IoU overlaps between two sets of boxes.
boxes1, boxes2: [N, (y1, x1, y2, x2)].
For better performance, pass the largest set first and the smaller second.
"""
# Areas of anchors and GT boxes
area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
# Compute overlaps to generate matrix [boxes1 count, boxes2 count]
# Each cell contains the IoU value.
overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))
for i in range(overlaps.shape[1]):
box2 = boxes2[i]
overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)
return overlaps
def compute_overlaps_masks(masks1, masks2):
"""Computes IoU overlaps between two sets of masks.
masks1, masks2: [Height, Width, instances]
"""
# If either set of masks is empty return empty result
if masks1.shape[-1] == 0 or masks2.shape[-1] == 0:
return np.zeros((masks1.shape[-1], masks2.shape[-1]))
# flatten masks and compute their areas
masks1 = np.reshape(masks1 > .5, (-1, masks1.shape[-1])).astype(np.float32)
masks2 = np.reshape(masks2 > .5, (-1, masks2.shape[-1])).astype(np.float32)
area1 = np.sum(masks1, axis=0)
area2 = np.sum(masks2, axis=0)
# intersections and union
intersections = np.dot(masks1.T, masks2)
union = area1[:, None] + area2[None, :] - intersections
overlaps = intersections / union
return overlaps
def non_max_suppression(boxes, scores, threshold):
"""Performs non-maximum suppression and returns indices of kept boxes.
boxes: [N, (y1, x1, y2, x2)]. Notice that (y2, x2) lays outside the box.
scores: 1-D array of box scores.
threshold: Float. IoU threshold to use for filtering.
"""
assert boxes.shape[0] > 0
if boxes.dtype.kind != "f":
boxes = boxes.astype(np.float32)
# Compute box areas
y1 = boxes[:, 0]
x1 = boxes[:, 1]
y2 = boxes[:, 2]
x2 = boxes[:, 3]
area = (y2 - y1) * (x2 - x1)
# Get indices of boxes sorted by scores (highest first)
ixs = scores.argsort()[::-1]
pick = []
while len(ixs) > 0:
# Pick top box and add its index to the list
i = ixs[0]
pick.append(i)
# Compute IoU of the picked box with the rest
iou = compute_iou(boxes[i], boxes[ixs[1:]], area[i], area[ixs[1:]])
# Identify boxes with IoU over the threshold. This
# returns indices into ixs[1:], so add 1 to get
# indices into ixs.
remove_ixs = np.where(iou > threshold)[0] + 1
# Remove indices of the picked and overlapped boxes.
ixs = np.delete(ixs, remove_ixs)
ixs = np.delete(ixs, 0)
return np.array(pick, dtype=np.int32)
def apply_box_deltas(boxes, deltas):
"""Applies the given deltas to the given boxes.
boxes: [N, (y1, x1, y2, x2)]. Note that (y2, x2) is outside the box.
deltas: [N, (dy, dx, log(dh), log(dw))]
"""
boxes = boxes.astype(np.float32)
# Convert to y, x, h, w
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
center_y = boxes[:, 0] + 0.5 * height
center_x = boxes[:, 1] + 0.5 * width
# Apply deltas
center_y += deltas[:, 0] * height
center_x += deltas[:, 1] * width
height *= np.exp(deltas[:, 2])
width *= np.exp(deltas[:, 3])
# Convert back to y1, x1, y2, x2
y1 = center_y - 0.5 * height
x1 = center_x - 0.5 * width
y2 = y1 + height
x2 = x1 + width
return np.stack([y1, x1, y2, x2], axis=1)
def box_refinement_graph(box, gt_box):
"""Compute refinement needed to transform box to gt_box.
box and gt_box are [N, (y1, x1, y2, x2)]
"""
box = tf.cast(box, tf.float32)
gt_box = tf.cast(gt_box, tf.float32)
height = box[:, 2] - box[:, 0]
width = box[:, 3] - box[:, 1]
center_y = box[:, 0] + 0.5 * height
center_x = box[:, 1] + 0.5 * width
gt_height = gt_box[:, 2] - gt_box[:, 0]
gt_width = gt_box[:, 3] - gt_box[:, 1]
gt_center_y = gt_box[:, 0] + 0.5 * gt_height
gt_center_x = gt_box[:, 1] + 0.5 * gt_width
dy = (gt_center_y - center_y) / height
dx = (gt_center_x - center_x) / width
dh = tf.log(gt_height / height)
dw = tf.log(gt_width / width)
result = tf.stack([dy, dx, dh, dw], axis=1)
return result
def box_refinement(box, gt_box):
"""Compute refinement needed to transform box to gt_box.
box and gt_box are [N, (y1, x1, y2, x2)]. (y2, x2) is
assumed to be outside the box.
"""
box = box.astype(np.float32)
gt_box = gt_box.astype(np.float32)
height = box[:, 2] - box[:, 0]
width = box[:, 3] - box[:, 1]
center_y = box[:, 0] + 0.5 * height
center_x = box[:, 1] + 0.5 * width
gt_height = gt_box[:, 2] - gt_box[:, 0]
gt_width = gt_box[:, 3] - gt_box[:, 1]
gt_center_y = gt_box[:, 0] + 0.5 * gt_height
gt_center_x = gt_box[:, 1] + 0.5 * gt_width
dy = (gt_center_y - center_y) / height
dx = (gt_center_x - center_x) / width
dh = np.log(gt_height / height)
dw = np.log(gt_width / width)
return np.stack([dy, dx, dh, dw], axis=1)
############################################################
# Dataset
############################################################
class Dataset(object):
"""The base class for dataset classes.
To use it, create a new class that adds functions specific to the dataset
you want to use. For example:
class CatsAndDogsDataset(Dataset):
def load_cats_and_dogs(self):
...
def load_mask(self, image_id):
...
def image_reference(self, image_id):
...
See COCODataset and ShapesDataset as examples.
"""
def __init__(self, class_map=None):
self._image_ids = []
self.image_info = []
# Background is always the first class
self.class_info = [{"source": "", "id": 0, "name": "BG"}]
self.source_class_ids = {}
def add_class(self, source, class_id, class_name):
assert "." not in source, "Source name cannot contain a dot"
# Does the class exist already?
for info in self.class_info:
if info['source'] == source and info["id"] == class_id:
# source.class_id combination already available, skip
return
# Add the class
self.class_info.append({
"source": source,
"id": class_id,
"name": class_name,
})
def add_image(self, source, image_id, path, **kwargs):
image_info = {
"id": image_id,
"source": source,
"path": path,
}
image_info.update(kwargs)
self.image_info.append(image_info)
def image_reference(self, image_id):
"""Return a link to the image in its source Website or details about
the image that help looking it up or debugging it.
Override for your dataset, but pass to this function
if you encounter images not in your dataset.
"""
return ""
def prepare(self, class_map=None):
"""Prepares the Dataset class for use.
TODO: class map is not supported yet. When done, it should handle mapping
classes from different datasets to the same class ID.
"""
def clean_name(name):
"""Returns a shorter version of object names for cleaner display."""
return ",".join(name.split(",")[:1])
# Build (or rebuild) everything else from the info dicts.
self.num_classes = len(self.class_info)
self.class_ids = np.arange(self.num_classes)
self.class_names = [clean_name(c["name"]) for c in self.class_info]
self.num_images = len(self.image_info)
self._image_ids = np.arange(self.num_images)
# Mapping from source class and image IDs to internal IDs
self.class_from_source_map = {"{}.{}".format(info['source'], info['id']): id
for info, id in zip(self.class_info, self.class_ids)}
self.image_from_source_map = {"{}.{}".format(info['source'], info['id']): id
for info, id in zip(self.image_info, self.image_ids)}
# Map sources to class_ids they support
self.sources = list(set([i['source'] for i in self.class_info]))
self.source_class_ids = {}
# Loop over datasets
for source in self.sources:
self.source_class_ids[source] = []
# Find classes that belong to this dataset
for i, info in enumerate(self.class_info):
# Include BG class in all datasets
if i == 0 or source == info['source']:
self.source_class_ids[source].append(i)
def map_source_class_id(self, source_class_id):
"""Takes a source class ID and returns the int class ID assigned to it.
For example:
dataset.map_source_class_id("coco.12") -> 23
"""
return self.class_from_source_map[source_class_id]
def get_source_class_id(self, class_id, source):
"""Map an internal class ID to the corresponding class ID in the source dataset."""
info = self.class_info[class_id]
assert info['source'] == source
return info['id']
@property
def image_ids(self):
return self._image_ids
def source_image_link(self, image_id):
"""Returns the path or URL to the image.
Override this to return a URL to the image if it's available online for easy
debugging.
"""
return self.image_info[image_id]["path"]
def load_image(self, image_id):
"""Load the specified image and return a [H,W,3] Numpy array.
"""
# Load image
image = skimage.io.imread(self.image_info[image_id]['path'])
# If grayscale. Convert to RGB for consistency.
if image.ndim != 3:
image = skimage.color.gray2rgb(image)
# If has an alpha channel, remove it for consistency
if image.shape[-1] == 4:
image = image[..., :3]
return image
def load_mask(self, image_id):
"""Load instance masks for the given image.
Different datasets use different ways to store masks. Override this
method to load instance masks and return them in the form of an
array of binary masks of shape [height, width, instances].
Returns:
masks: A bool array of shape [height, width, instance count] with
a binary mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
# Override this function to load a mask from your dataset.
# Otherwise, it returns an empty mask.
logging.warning("You are using the default load_mask(), maybe you need to define your own one.")
mask = np.empty([0, 0, 0])
class_ids = np.empty([0], np.int32)
return mask, class_ids
def resize_image(image, min_dim=None, max_dim=None, min_scale=None, mode="square"):
"""Resizes an image keeping the aspect ratio unchanged.
min_dim: if provided, resizes the image such that it's smaller
dimension == min_dim
max_dim: if provided, ensures that the image longest side doesn't
exceed this value.
min_scale: if provided, ensure that the image is scaled up by at least
this percent even if min_dim doesn't require it.
mode: Resizing mode.
none: No resizing. Return the image unchanged.
square: Resize and pad with zeros to get a square image
of size [max_dim, max_dim].
pad64: Pads width and height with zeros to make them multiples of 64.
If min_dim or min_scale are provided, it scales the image up
before padding. max_dim is ignored in this mode.
The multiple of 64 is needed to ensure smooth scaling of feature
maps up and down the 6 levels of the FPN pyramid (2**6=64).
crop: Picks random crops from the image. First, scales the image based
on min_dim and min_scale, then picks a random crop of
size min_dim x min_dim. Can be used in training only.
max_dim is not used in this mode.
Returns:
image: the resized image
window: (y1, x1, y2, x2). If max_dim is provided, padding might
be inserted in the returned image. If so, this window is the
coordinates of the image part of the full image (excluding
the padding). The x2, y2 pixels are not included.
scale: The scale factor used to resize the image
padding: Padding added to the image [(top, bottom), (left, right), (0, 0)]
"""
# Keep track of image dtype and return results in the same dtype
image_dtype = image.dtype
# Default window (y1, x1, y2, x2) and default scale == 1.
h, w = image.shape[:2]
window = (0, 0, h, w)
scale = 1
padding = [(0, 0), (0, 0), (0, 0)]
crop = None
if mode == "none":
return image, window, scale, padding, crop
# Scale?
if min_dim:
# Scale up but not down
scale = max(1, min_dim / min(h, w))
if min_scale and scale < min_scale:
scale = min_scale
# Does it exceed max dim?
if max_dim and mode == "square":
image_max = max(h, w)
if round(image_max * scale) > max_dim:
scale = max_dim / image_max
# Resize image using bilinear interpolation
if scale != 1:
image = resize(image, (round(h * scale), round(w * scale)),
preserve_range=True)
# Need padding or cropping?
if mode == "square":
# Get new height and width
h, w = image.shape[:2]
top_pad = (max_dim - h) // 2
bottom_pad = max_dim - h - top_pad
left_pad = (max_dim - w) // 2
right_pad = max_dim - w - left_pad
padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]
image = np.pad(image, padding, mode='constant', constant_values=0)
window = (top_pad, left_pad, h + top_pad, w + left_pad)
elif mode == "pad64":
h, w = image.shape[:2]
# Both sides must be divisible by 64
assert min_dim % 64 == 0, "Minimum dimension must be a multiple of 64"
# Height
if h % 64 > 0:
max_h = h - (h % 64) + 64
top_pad = (max_h - h) // 2
bottom_pad = max_h - h - top_pad
else:
top_pad = bottom_pad = 0
# Width
if w % 64 > 0:
max_w = w - (w % 64) + 64
left_pad = (max_w - w) // 2
right_pad = max_w - w - left_pad
else:
left_pad = right_pad = 0
padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]
image = np.pad(image, padding, mode='constant', constant_values=0)
window = (top_pad, left_pad, h + top_pad, w + left_pad)
elif mode == "crop":
# Pick a random crop
h, w = image.shape[:2]
y = random.randint(0, (h - min_dim))
x = random.randint(0, (w - min_dim))
crop = (y, x, min_dim, min_dim)
image = image[y:y + min_dim, x:x + min_dim]
window = (0, 0, min_dim, min_dim)
else:
raise Exception("Mode {} not supported".format(mode))
return image.astype(image_dtype), window, scale, padding, crop
def resize_mask(mask, scale, padding, crop=None):
"""Resizes a mask using the given scale and padding.
Typically, you get the scale and padding from resize_image() to
ensure both, the image and the mask, are resized consistently.
scale: mask scaling factor
padding: Padding to add to the mask in the form
[(top, bottom), (left, right), (0, 0)]
"""
# Suppress warning from scipy 0.13.0, the output shape of zoom() is
# calculated with round() instead of int()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mask = scipy.ndimage.zoom(mask, zoom=[scale, scale, 1], order=0)
if crop is not None:
y, x, h, w = crop
mask = mask[y:y + h, x:x + w]
else:
mask = np.pad(mask, padding, mode='constant', constant_values=0)
return mask
def minimize_mask(bbox, mask, mini_shape):
"""Resize masks to a smaller version to reduce memory load.
Mini-masks can be resized back to image scale using expand_masks()
See inspect_data.ipynb notebook for more details.
"""
mini_mask = np.zeros(mini_shape + (mask.shape[-1],), dtype=bool)
for i in range(mask.shape[-1]):
# Pick slice and cast to bool in case load_mask() returned wrong dtype
m = mask[:, :, i].astype(bool)
y1, x1, y2, x2 = bbox[i][:4]
m = m[y1:y2, x1:x2]
if m.size == 0:
raise Exception("Invalid bounding box with area of zero")
# Resize with bilinear interpolation
m = resize(m, mini_shape)
mini_mask[:, :, i] = np.around(m).astype(np.bool)
return mini_mask
def expand_mask(bbox, mini_mask, image_shape):
"""Resizes mini masks back to image size. Reverses the change
of minimize_mask().
See inspect_data.ipynb notebook for more details.
"""
mask = np.zeros(image_shape[:2] + (mini_mask.shape[-1],), dtype=bool)
for i in range(mask.shape[-1]):
m = mini_mask[:, :, i]
y1, x1, y2, x2 = bbox[i][:4]
h = y2 - y1
w = x2 - x1
# Resize with bilinear interpolation
m = resize(m, (h, w))
mask[y1:y2, x1:x2, i] = np.around(m).astype(np.bool)
return mask
# TODO: Build and use this function to reduce code duplication
def mold_mask(mask, config):
pass
def unmold_mask(mask, bbox, image_shape):
"""Converts a mask generated by the neural network to a format similar
to its original shape.
mask: [height, width] of type float. A small, typically 28x28 mask.
bbox: [y1, x1, y2, x2]. The box to fit the mask in.
Returns a binary mask with the same size as the original image.
"""
threshold = 0.5
y1, x1, y2, x2 = bbox
mask = resize(mask, (y2 - y1, x2 - x1))
mask = np.where(mask >= threshold, 1, 0).astype(bool)
# Put the mask in the right location.
full_mask = np.zeros(image_shape[:2], dtype=bool)
full_mask[y1:y2, x1:x2] = mask
return full_mask
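# --- Illustrative sketch, not part of the original module ---
# unmold_mask() thresholds a small soft mask and pastes it into image coordinates.
def _example_unmold_mask():
    soft = np.random.rand(28, 28).astype(np.float32)   # network-style 28x28 output
    box = np.array([10, 20, 90, 140])                  # y1, x1, y2, x2
    full = unmold_mask(soft, box, image_shape=(256, 256, 3))
    print(full.shape, full.dtype)                      # (256, 256), bool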
############################################################
# Anchors
############################################################
def generate_anchors(scales, ratios, shape, feature_stride, anchor_stride):
"""
scales: 1D array of anchor sizes in pixels. Example: [32, 64, 128]
ratios: 1D array of anchor ratios of width/height. Example: [0.5, 1, 2]
shape: [height, width] spatial shape of the feature map over which
to generate anchors.
feature_stride: Stride of the feature map relative to the image in pixels.
anchor_stride: Stride of anchors on the feature map. For example, if the
value is 2 then generate anchors for every other feature map pixel.
"""
# Get all combinations of scales and ratios
scales, ratios = np.meshgrid(np.array(scales), np.array(ratios))
scales = scales.flatten()
ratios = ratios.flatten()
# Enumerate heights and widths from scales and ratios
heights = scales / np.sqrt(ratios)
widths = scales * np.sqrt(ratios)
# Enumerate shifts in feature space
shifts_y = np.arange(0, shape[0], anchor_stride) * feature_stride
shifts_x = np.arange(0, shape[1], anchor_stride) * feature_stride
shifts_x, shifts_y = np.meshgrid(shifts_x, shifts_y)
# Enumerate combinations of shifts, widths, and heights
box_widths, box_centers_x = np.meshgrid(widths, shifts_x)
box_heights, box_centers_y = np.meshgrid(heights, shifts_y)
# Reshape to get a list of (y, x) and a list of (h, w)
box_centers = np.stack(
[box_centers_y, box_centers_x], axis=2).reshape([-1, 2])
box_sizes = np.stack([box_heights, box_widths], axis=2).reshape([-1, 2])
# Convert to corner coordinates (y1, x1, y2, x2)
boxes = np.concatenate([box_centers - 0.5 * box_sizes,
box_centers + 0.5 * box_sizes], axis=1)
return boxes
def generate_pyramid_anchors(scales, ratios, feature_shapes, feature_strides,
anchor_stride):
"""Generate anchors at different levels of a feature pyramid. Each scale
is associated with a level of the pyramid, but each ratio is used in
all levels of the pyramid.
Returns:
anchors: [N, (y1, x1, y2, x2)]. All generated anchors in one array. Sorted
with the same order of the given scales. So, anchors of scale[0] come
first, then anchors of scale[1], and so on.
"""
# Anchors
# [anchor_count, (y1, x1, y2, x2)]
anchors = []
for i in range(len(scales)):
anchors.append(generate_anchors(scales[i], ratios, feature_shapes[i],
feature_strides[i], anchor_stride))
return np.concatenate(anchors, axis=0)
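# --- Illustrative sketch, not part of the original module ---
# Generates pyramid anchors for a hypothetical 1024x1024 input. The scales,
# strides and feature shapes below are assumptions, not this project's config.
def _example_pyramid_anchors():
    scales = (32, 64, 128, 256, 512)
    ratios = [0.5, 1, 2]
    feature_strides = [4, 8, 16, 32, 64]
    feature_shapes = np.array([[1024 // s, 1024 // s] for s in feature_strides])
    anchors = generate_pyramid_anchors(scales, ratios, feature_shapes,
                                       feature_strides, anchor_stride=1)
    print(anchors.shape)  # (N, 4) corner boxes (y1, x1, y2, x2) in pixels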
############################################################
# Miscellaneous
############################################################
def trim_zeros(x):
"""It's common to have tensors larger than the available data and
pad with zeros. This function removes rows that are all zeros.
x: [rows, columns].
"""
assert len(x.shape) == 2
return x[~np.all(x == 0, axis=1)]
def compute_matches(gt_boxes, gt_class_ids, gt_masks,
pred_boxes, pred_class_ids, pred_scores, pred_masks,
iou_threshold=0.5, score_threshold=0.0):
"""Finds matches between prediction and ground truth instances.
Returns:
gt_match: 1-D array. For each GT box it has the index of the matched
predicted box.
pred_match: 1-D array. For each predicted box, it has the index of
the matched ground truth box.
overlaps: [pred_boxes, gt_boxes] IoU overlaps.
"""
# Trim zero padding
# TODO: cleaner to do zero unpadding upstream
gt_boxes = trim_zeros(gt_boxes)
gt_masks = gt_masks[..., :gt_boxes.shape[0]]
pred_boxes = trim_zeros(pred_boxes)
pred_scores = pred_scores[:pred_boxes.shape[0]]
# Sort predictions by score from high to low
indices = np.argsort(pred_scores)[::-1]
pred_boxes = pred_boxes[indices]
pred_class_ids = pred_class_ids[indices]
pred_scores = pred_scores[indices]
pred_masks = pred_masks[..., indices]
# Compute IoU overlaps [pred_masks, gt_masks]
overlaps = compute_overlaps_masks(pred_masks, gt_masks)
# Loop through predictions and find matching ground truth boxes
match_count = 0
pred_match = -1 * np.ones([pred_boxes.shape[0]])
gt_match = -1 * np.ones([gt_boxes.shape[0]])
for i in range(len(pred_boxes)):
# Find best matching ground truth box
# 1. Sort matches by score
sorted_ixs = np.argsort(overlaps[i])[::-1]
# 2. Remove low scores
low_score_idx = np.where(overlaps[i, sorted_ixs] < score_threshold)[0]
if low_score_idx.size > 0:
sorted_ixs = sorted_ixs[:low_score_idx[0]]
# 3. Find the match
for j in sorted_ixs:
# If ground truth box is already matched, go to next one
if gt_match[j] > -1:
continue
# If we reach IoU smaller than the threshold, end the loop
iou = overlaps[i, j]
if iou < iou_threshold:
break
# Do we have a match?
if pred_class_ids[i] == gt_class_ids[j]:
match_count += 1
gt_match[j] = i
pred_match[i] = j
break
return gt_match, pred_match, overlaps
def compute_ap(gt_boxes, gt_class_ids, gt_masks,
pred_boxes, pred_class_ids, pred_scores, pred_masks,
iou_threshold=0.5):
"""Compute Average Precision at a set IoU threshold (default 0.5).
Returns:
mAP: Mean Average Precision
precisions: List of precisions at different class score thresholds.
recalls: List of recall values at different class score thresholds.
overlaps: [pred_boxes, gt_boxes] IoU overlaps.
"""
# Get matches and overlaps
gt_match, pred_match, overlaps = compute_matches(
gt_boxes, gt_class_ids, gt_masks,
pred_boxes, pred_class_ids, pred_scores, pred_masks,
iou_threshold)
# Compute precision and recall at each prediction box step
precisions = np.cumsum(pred_match > -1) / (np.arange(len(pred_match)) + 1)
recalls = np.cumsum(pred_match > -1).astype(np.float32) / len(gt_match)
# Pad with start and end values to simplify the math
precisions = np.concatenate([[0], precisions, [0]])
recalls = np.concatenate([[0], recalls, [1]])
# Ensure precision values decrease but don't increase. This way, the
# precision value at each recall threshold is the maximum it can be
# for all following recall thresholds, as specified by the VOC paper.
for i in range(len(precisions) - 2, -1, -1):
precisions[i] = np.maximum(precisions[i], precisions[i + 1])
# Compute mean AP over recall range
indices = np.where(recalls[:-1] != recalls[1:])[0] + 1
mAP = np.sum((recalls[indices] - recalls[indices - 1]) *
precisions[indices])
return mAP, precisions, recalls, overlaps
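# --- Illustrative sketch, not part of the original module ---
# Scores one synthetic prediction against one ground-truth instance with
# compute_ap(); relies on compute_overlaps_masks() defined earlier in this module.
def _example_compute_ap():
    gt_boxes = np.array([[10, 10, 40, 40]])
    gt_ids = np.array([1])
    gt_masks = np.zeros((64, 64, 1), dtype=bool)
    gt_masks[10:40, 10:40, 0] = True
    pred_boxes = np.array([[12, 11, 41, 42]])
    pred_ids = np.array([1])
    pred_scores = np.array([0.9])
    pred_masks = np.zeros((64, 64, 1), dtype=np.float32)
    pred_masks[12:41, 11:42, 0] = 1.0
    mAP, precisions, recalls, overlaps = compute_ap(
        gt_boxes, gt_ids, gt_masks,
        pred_boxes, pred_ids, pred_scores, pred_masks)
    print("AP@0.5 = {:.2f}".format(mAP))  # 1.00 for this single correct match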
def compute_ap_range(gt_box, gt_class_id, gt_mask,
pred_box, pred_class_id, pred_score, pred_mask,
iou_thresholds=None, verbose=1):
"""Compute AP over a range or IoU thresholds. Default range is 0.5-0.95."""
# Default is 0.5 to 0.95 with increments of 0.05
iou_thresholds = iou_thresholds or np.arange(0.5, 1.0, 0.05)
# Compute AP over range of IoU thresholds
AP = []
for iou_threshold in iou_thresholds:
ap, precisions, recalls, overlaps =\
compute_ap(gt_box, gt_class_id, gt_mask,
pred_box, pred_class_id, pred_score, pred_mask,
iou_threshold=iou_threshold)
if verbose:
print("AP @{:.2f}:\t {:.3f}".format(iou_threshold, ap))
AP.append(ap)
AP = np.array(AP).mean()
if verbose:
print("AP @{:.2f}-{:.2f}:\t {:.3f}".format(
iou_thresholds[0], iou_thresholds[-1], AP))
return AP
def compute_recall(pred_boxes, gt_boxes, iou):
"""Compute the recall at the given IoU threshold. It's an indication
of how many GT boxes were found by the given prediction boxes.
pred_boxes: [N, (y1, x1, y2, x2)] in image coordinates
gt_boxes: [N, (y1, x1, y2, x2)] in image coordinates
"""
# Measure overlaps
overlaps = compute_overlaps(pred_boxes, gt_boxes)
iou_max = np.max(overlaps, axis=1)
iou_argmax = np.argmax(overlaps, axis=1)
positive_ids = np.where(iou_max >= iou)[0]
matched_gt_boxes = iou_argmax[positive_ids]
recall = len(set(matched_gt_boxes)) / gt_boxes.shape[0]
return recall, positive_ids
# ## Batch Slicing
# Some custom layers support a batch size of 1 only, and require a lot of work
# to support batches greater than 1. This function slices an input tensor
# across the batch dimension and feeds batches of size 1. Effectively,
# an easy way to support batches > 1 quickly with little code modification.
# In the long run, it's more efficient to modify the code to support large
# batches and get rid of this function. Consider this a temporary solution.
def batch_slice(inputs, graph_fn, batch_size, names=None):
"""Splits inputs into slices and feeds each slice to a copy of the given
computation graph and then combines the results. It allows you to run a
graph on a batch of inputs even if the graph is written to support one
instance only.
inputs: list of tensors. All must have the same first dimension length
graph_fn: A function that returns a TF tensor that's part of a graph.
batch_size: number of slices to divide the data into.
names: If provided, assigns names to the resulting tensors.
"""
if not isinstance(inputs, list):
inputs = [inputs]
outputs = []
for i in range(batch_size):
inputs_slice = [x[i] for x in inputs]
output_slice = graph_fn(*inputs_slice)
if not isinstance(output_slice, (tuple, list)):
output_slice = [output_slice]
outputs.append(output_slice)
# Change outputs from a list of slices where each is
# a list of outputs to a list of outputs and each has
# a list of slices
outputs = list(zip(*outputs))
if names is None:
names = [None] * len(outputs)
result = [tf.stack(o, axis=0, name=n)
for o, n in zip(outputs, names)]
if len(result) == 1:
result = result[0]
return result
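# --- Illustrative sketch, not part of the original module ---
# batch_slice() applies a per-sample function to each element of a batch and
# restacks the results; assumes TensorFlow is imported as `tf`, as elsewhere here.
def _example_batch_slice():
    boxes = tf.constant([[[0., 0., 1., 1.], [0., 0., 2., 2.]],
                         [[1., 1., 3., 3.], [0., 0., 4., 4.]]])  # [batch=2, 2, 4]
    areas = batch_slice(
        boxes,
        lambda b: (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1]),  # per-sample box areas
        batch_size=2, names=["areas"])
    print(areas)  # tensor of shape [2, 2]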
def download_trained_weights(coco_model_path, verbose=1):
"""Download COCO trained weights from Releases.
coco_model_path: local path of COCO trained weights
"""
if verbose > 0:
print("Downloading pretrained model to " + coco_model_path + " ...")
with urllib.request.urlopen(COCO_MODEL_URL) as resp, open(coco_model_path, 'wb') as out:
shutil.copyfileobj(resp, out)
if verbose > 0:
print("... done downloading pretrained model!")
def norm_boxes(boxes, shape):
"""Converts boxes from pixel coordinates to normalized coordinates.
boxes: [N, (y1, x1, y2, x2)] in pixel coordinates
shape: [..., (height, width)] in pixels
Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
coordinates it's inside the box.
Returns:
[N, (y1, x1, y2, x2)] in normalized coordinates
"""
h, w = shape
scale = np.array([h - 1, w - 1, h - 1, w - 1])
shift = np.array([0, 0, 1, 1])
return np.divide((boxes - shift), scale).astype(np.float32)
def denorm_boxes(boxes, shape):
"""Converts boxes from normalized coordinates to pixel coordinates.
boxes: [N, (y1, x1, y2, x2)] in normalized coordinates
shape: [..., (height, width)] in pixels
Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
coordinates it's inside the box.
Returns:
[N, (y1, x1, y2, x2)] in pixel coordinates
"""
h, w = shape
scale = np.array([h - 1, w - 1, h - 1, w - 1])
shift = np.array([0, 0, 1, 1])
return np.around(np.multiply(boxes, scale) + shift).astype(np.int32)
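# --- Illustrative sketch, not part of the original module ---
# Round-trips a pixel-space box through norm_boxes()/denorm_boxes().
def _example_box_normalization():
    boxes = np.array([[10, 20, 110, 220]], dtype=np.float32)
    normalized = norm_boxes(boxes, shape=(512, 512))
    restored = denorm_boxes(normalized, shape=(512, 512))
    print(normalized, restored)   # restored equals the original pixel box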
def resize(image, output_shape, order=1, mode='constant', cval=0, clip=True,
preserve_range=False, anti_aliasing=False, anti_aliasing_sigma=None):
"""A wrapper for Scikit-Image resize().
Scikit-Image generates warnings on every call to resize() if it doesn't
receive the right parameters. The right parameters depend on the version
of skimage. This solves the problem by using different parameters per
version. And it provides a central place to control resizing defaults.
"""
if LooseVersion(skimage.__version__) >= LooseVersion("0.14"):
# New in 0.14: anti_aliasing. Default it to False for backward
# compatibility with skimage 0.13.
return skimage.transform.resize(
image, output_shape,
order=order, mode=mode, cval=cval, clip=clip,
preserve_range=preserve_range, anti_aliasing=anti_aliasing,
anti_aliasing_sigma=anti_aliasing_sigma)
else:
return skimage.transform.resize(
image, output_shape,
order=order, mode=mode, cval=cval, clip=clip,
preserve_range=preserve_range)
"""
Mask R-CNN
Display and Visualization Functions.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import os
import sys
import random
import itertools
import colorsys
import cv2
import numpy as np
from skimage.measure import find_contours
import matplotlib.pyplot as plt
from matplotlib import patches, lines
from matplotlib.patches import Polygon
import IPython.display
# Root directory of the project
ROOT_DIR = os.path.abspath("../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn import utils
############################################################
# Visualization
############################################################
def display_images(images, titles=None, cols=4, cmap=None, norm=None,
interpolation=None):
"""Display the given set of images, optionally with titles.
images: list or array of image tensors in HWC format.
titles: optional. A list of titles to display with each image.
cols: number of images per row
cmap: Optional. Color map to use. For example, "Blues".
norm: Optional. A Normalize instance to map values to colors.
interpolation: Optional. Image interpolation to use for display.
"""
titles = titles if titles is not None else [""] * len(images)
rows = len(images) // cols + 1
plt.figure(figsize=(14, 14 * rows // cols))
i = 1
for image, title in zip(images, titles):
plt.subplot(rows, cols, i)
plt.title(title, fontsize=9)
plt.axis('off')
plt.imshow(image.astype(np.uint8), cmap=cmap,
norm=norm, interpolation=interpolation)
i += 1
# plt.show()
def random_colors(N, bright=True):
"""
Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
"""
brightness = 1.0 if bright else 0.7
hsv = [(i / N, 1, brightness) for i in range(N)]
colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
random.shuffle(colors)
return colors
def apply_mask(image, mask, color, alpha=0.5):
"""Apply the given mask to the image.
"""
for c in range(3):
image[:, :, c] = np.where(mask == 1,
image[:, :, c] *
(1 - alpha) + alpha * color[c] * 255,
image[:, :, c])
return image
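# --- Illustrative sketch, not part of the original module ---
# apply_mask() alpha-blends a color over the masked pixels; values are made up.
def _example_apply_mask():
    img = np.full((64, 64, 3), 200, dtype=np.float32)
    mask = np.zeros((64, 64), dtype=np.uint8)
    mask[16:48, 16:48] = 1
    tinted = apply_mask(img.copy(), mask, color=(1.0, 0.0, 0.0), alpha=0.5)
    print(tinted[32, 32], tinted[0, 0])   # blended red inside, untouched gray outside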
def display_instances(image, boxes, masks, class_ids, class_names,
scores=None, title="",
figsize=(16, 16), ax=None,
show_mask=True, show_bbox=True,
colors=None, captions=None):
"""
boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.
masks: [height, width, num_instances]
class_ids: [num_instances]
class_names: list of class names of the dataset
scores: (optional) confidence scores for each box
title: (optional) Figure title
show_mask, show_bbox: To show masks and bounding boxes or not
figsize: (optional) the size of the image
colors: (optional) An array or colors to use with each object
captions: (optional) A list of strings to use as captions for each object
"""
# Number of instances
N = boxes.shape[0]
if not N:
print("\n*** No instances to display *** \n")
else:
assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]
# If no axis is passed, create one and automatically call show()
auto_show = False
if not ax:
_, ax = plt.subplots(1, figsize=figsize)
auto_show = True
# Generate random colors
colors = colors or random_colors(N)
# Show area outside image boundaries.
height, width = image.shape[:2]
ax.set_ylim(height + 10, -10)
ax.set_xlim(-10, width + 10)
ax.axis('off')
ax.set_title(title)
masked_image = image.astype(np.uint32).copy()
for i in range(N):
color = colors[i]
# Bounding box
if not np.any(boxes[i]):
# Skip this instance. Has no bbox. Likely lost in image cropping.
continue
y1, x1, y2, x2 = boxes[i]
if show_bbox:
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
alpha=0.7, linestyle="dashed",
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Label
if not captions:
class_id = class_ids[i]
score = scores[i] if scores is not None else None
label = class_names[class_id]
caption = "{} {:.3f}".format(label, score) if score else label
else:
caption = captions[i]
ax.text(x1, y1 + 8, caption,
color='w', size=11, backgroundcolor="none")
# Mask
mask = masks[:, :, i]
if show_mask:
masked_image = apply_mask(masked_image, mask, color)
# Mask Polygon
# Pad to ensure proper polygons for masks that touch image edges.
padded_mask = np.zeros(
(mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
padded_mask[1:-1, 1:-1] = mask
contours = find_contours(padded_mask, 0.5)
for verts in contours:
# Subtract the padding and flip (y, x) to (x, y)
verts = np.fliplr(verts) - 1
p = Polygon(verts, facecolor="none", edgecolor=color)
ax.add_patch(p)
# ax.imshow(masked_image.astype(np.uint8))
cv2.imwrite('static/masked.jpg', masked_image.astype(np.uint8))
# if auto_show:
# plt.show()
def display_differences(image,
gt_box, gt_class_id, gt_mask,
pred_box, pred_class_id, pred_score, pred_mask,
class_names, title="", ax=None,
show_mask=True, show_box=True,
iou_threshold=0.5, score_threshold=0.5):
"""Display ground truth and prediction instances on the same image."""
# Match predictions to ground truth
gt_match, pred_match, overlaps = utils.compute_matches(
gt_box, gt_class_id, gt_mask,
pred_box, pred_class_id, pred_score, pred_mask,
iou_threshold=iou_threshold, score_threshold=score_threshold)
# Ground truth = green. Predictions = red
colors = [(0, 1, 0, .8)] * len(gt_match)\
+ [(1, 0, 0, 1)] * len(pred_match)
# Concatenate GT and predictions
class_ids = np.concatenate([gt_class_id, pred_class_id])
scores = np.concatenate([np.zeros([len(gt_match)]), pred_score])
boxes = np.concatenate([gt_box, pred_box])
masks = np.concatenate([gt_mask, pred_mask], axis=-1)
# Captions per instance show score/IoU
captions = ["" for m in gt_match] + ["{:.2f} / {:.2f}".format(
pred_score[i],
(overlaps[i, int(pred_match[i])]
if pred_match[i] > -1 else overlaps[i].max()))
for i in range(len(pred_match))]
# Set title if not provided
title = title or "Ground Truth and Detections\n GT=green, pred=red, captions: score/IoU"
# Display
display_instances(
image,
boxes, masks, class_ids,
class_names, scores, ax=ax,
show_bbox=show_box, show_mask=show_mask,
colors=colors, captions=captions,
title=title)
def draw_rois(image, rois, refined_rois, mask, class_ids, class_names, limit=10):
"""
anchors: [n, (y1, x1, y2, x2)] list of anchors in image coordinates.
proposals: [n, 4] the same anchors but refined to fit objects better.
"""
masked_image = image.copy()
# Pick random anchors in case there are too many.
ids = np.arange(rois.shape[0], dtype=np.int32)
ids = np.random.choice(
ids, limit, replace=False) if ids.shape[0] > limit else ids
fig, ax = plt.subplots(1, figsize=(12, 12))
if rois.shape[0] > limit:
plt.title("Showing {} random ROIs out of {}".format(
len(ids), rois.shape[0]))
else:
plt.title("{} ROIs".format(len(ids)))
# Show area outside image boundaries.
ax.set_ylim(image.shape[0] + 20, -20)
ax.set_xlim(-50, image.shape[1] + 20)
ax.axis('off')
for i, id in enumerate(ids):
color = np.random.rand(3)
class_id = class_ids[id]
# ROI
y1, x1, y2, x2 = rois[id]
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
edgecolor=color if class_id else "gray",
facecolor='none', linestyle="dashed")
ax.add_patch(p)
# Refined ROI
if class_id:
ry1, rx1, ry2, rx2 = refined_rois[id]
p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2,
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Connect the top-left corners of the anchor and proposal for easy visualization
ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))
# Label
label = class_names[class_id]
ax.text(rx1, ry1 + 8, "{}".format(label),
color='w', size=11, backgroundcolor="none")
# Mask
m = utils.unmold_mask(mask[id], rois[id]
[:4].astype(np.int32), image.shape)
masked_image = apply_mask(masked_image, m, color)
# ax.imshow(masked_image)
# Print stats
print("Positive ROIs: ", class_ids[class_ids > 0].shape[0])
print("Negative ROIs: ", class_ids[class_ids == 0].shape[0])
print("Positive Ratio: {:.2f}".format(
class_ids[class_ids > 0].shape[0] / class_ids.shape[0]))
# TODO: Replace with matplotlib equivalent?
def draw_box(image, box, color):
"""Draw 3-pixel width bounding boxes on the given image array.
color: list of 3 int values for RGB.
"""
y1, x1, y2, x2 = box
image[y1:y1 + 2, x1:x2] = color
image[y2:y2 + 2, x1:x2] = color
image[y1:y2, x1:x1 + 2] = color
image[y1:y2, x2:x2 + 2] = color
return image
def display_top_masks(image, mask, class_ids, class_names, limit=4):
"""Display the given image and the top few class masks."""
to_display = []
titles = []
to_display.append(image)
titles.append("H x W={}x{}".format(image.shape[0], image.shape[1]))
# Pick top prominent classes in this image
unique_class_ids = np.unique(class_ids)
mask_area = [np.sum(mask[:, :, np.where(class_ids == i)[0]])
for i in unique_class_ids]
top_ids = [v[0] for v in sorted(zip(unique_class_ids, mask_area),
key=lambda r: r[1], reverse=True) if v[1] > 0]
# Generate images and titles
for i in range(limit):
class_id = top_ids[i] if i < len(top_ids) else -1
# Pull masks of instances belonging to the same class.
m = mask[:, :, np.where(class_ids == class_id)[0]]
m = np.sum(m * np.arange(1, m.shape[-1] + 1), -1)
to_display.append(m)
titles.append(class_names[class_id] if class_id != -1 else "-")
display_images(to_display, titles=titles, cols=limit + 1, cmap="Blues_r")
def plot_precision_recall(AP, precisions, recalls):
"""Draw the precision-recall curve.
AP: Average precision at IoU >= 0.5
precisions: list of precision values
recalls: list of recall values
"""
# Plot the Precision-Recall curve
_, ax = plt.subplots(1)
ax.set_title("Precision-Recall Curve. AP@50 = {:.3f}".format(AP))
ax.set_ylim(0, 1.1)
ax.set_xlim(0, 1.1)
_ = ax.plot(recalls, precisions)
def plot_overlaps(gt_class_ids, pred_class_ids, pred_scores,
overlaps, class_names, threshold=0.5):
"""Draw a grid showing how ground truth objects are classified.
gt_class_ids: [N] int. Ground truth class IDs
pred_class_ids: [N] int. Predicted class IDs
pred_scores: [N] float. The probability scores of predicted classes
overlaps: [pred_boxes, gt_boxes] IoU overlaps of predictions and GT boxes.
class_names: list of all class names in the dataset
threshold: Float. The prediction probability required to predict a class
"""
gt_class_ids = gt_class_ids[gt_class_ids != 0]
pred_class_ids = pred_class_ids[pred_class_ids != 0]
plt.figure(figsize=(12, 10))
plt.imshow(overlaps, interpolation='nearest', cmap=plt.cm.Blues)
plt.yticks(np.arange(len(pred_class_ids)),
["{} ({:.2f})".format(class_names[int(id)], pred_scores[i])
for i, id in enumerate(pred_class_ids)])
plt.xticks(np.arange(len(gt_class_ids)),
[class_names[int(id)] for id in gt_class_ids], rotation=90)
thresh = overlaps.max() / 2.
for i, j in itertools.product(range(overlaps.shape[0]),
range(overlaps.shape[1])):
text = ""
if overlaps[i, j] > threshold:
text = "match" if gt_class_ids[j] == pred_class_ids[i] else "wrong"
color = ("white" if overlaps[i, j] > thresh
else "black" if overlaps[i, j] > 0
else "grey")
plt.text(j, i, "{:.3f}\n{}".format(overlaps[i, j], text),
horizontalalignment="center", verticalalignment="center",
fontsize=9, color=color)
plt.tight_layout()
plt.xlabel("Ground Truth")
plt.ylabel("Predictions")
def draw_boxes(image, boxes=None, refined_boxes=None,
masks=None, captions=None, visibilities=None,
title="", ax=None):
"""Draw bounding boxes and segmentation masks with different
customizations.
boxes: [N, (y1, x1, y2, x2, class_id)] in image coordinates.
refined_boxes: Like boxes, but draw with solid lines to show
that they're the result of refining 'boxes'.
masks: [N, height, width]
captions: List of N titles to display on each box
visibilities: (optional) List of values of 0, 1, or 2. Determine how
prominent each bounding box should be.
title: An optional title to show over the image
ax: (optional) Matplotlib axis to draw on.
"""
# Number of boxes
assert boxes is not None or refined_boxes is not None
N = boxes.shape[0] if boxes is not None else refined_boxes.shape[0]
# Matplotlib Axis
if not ax:
_, ax = plt.subplots(1, figsize=(12, 12))
# Generate random colors
colors = random_colors(N)
# Show area outside image boundaries.
margin = image.shape[0] // 10
ax.set_ylim(image.shape[0] + margin, -margin)
ax.set_xlim(-margin, image.shape[1] + margin)
ax.axis('off')
ax.set_title(title)
masked_image = image.astype(np.uint32).copy()
for i in range(N):
# Box visibility
visibility = visibilities[i] if visibilities is not None else 1
if visibility == 0:
color = "gray"
style = "dotted"
alpha = 0.5
elif visibility == 1:
color = colors[i]
style = "dotted"
alpha = 1
elif visibility == 2:
color = colors[i]
style = "solid"
alpha = 1
# Boxes
if boxes is not None:
if not np.any(boxes[i]):
# Skip this instance. Has no bbox. Likely lost in cropping.
continue
y1, x1, y2, x2 = boxes[i]
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
alpha=alpha, linestyle=style,
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Refined boxes
if refined_boxes is not None and visibility > 0:
ry1, rx1, ry2, rx2 = refined_boxes[i].astype(np.int32)
p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2,
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Connect the top-left corners of the anchor and proposal
if boxes is not None:
ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))
# Captions
if captions is not None:
caption = captions[i]
# If there are refined boxes, display captions on them
if refined_boxes is not None:
y1, x1, y2, x2 = ry1, rx1, ry2, rx2
ax.text(x1, y1, caption, size=11, verticalalignment='top',
color='w', backgroundcolor="none",
bbox={'facecolor': color, 'alpha': 0.5,
'pad': 2, 'edgecolor': 'none'})
# Masks
if masks is not None:
mask = masks[:, :, i]
masked_image = apply_mask(masked_image, mask, color)
# Mask Polygon
# Pad to ensure proper polygons for masks that touch image edges.
padded_mask = np.zeros(
(mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
padded_mask[1:-1, 1:-1] = mask
contours = find_contours(padded_mask, 0.5)
for verts in contours:
# Subtract the padding and flip (y, x) to (x, y)
verts = np.fliplr(verts) - 1
p = Polygon(verts, facecolor="none", edgecolor=color)
ax.add_patch(p)
# ax.imshow(masked_image.astype(np.uint8))
def display_table(table):
"""Display values in a table format.
table: an iterable of rows, and each row is an iterable of values.
"""
html = ""
for row in table:
row_html = ""
for col in row:
row_html += "<td>{:40}</td>".format(str(col))
html += "<tr>" + row_html + "</tr>"
html = "<table>" + html + "</table>"
IPython.display.display(IPython.display.HTML(html))
def display_weight_stats(model):
"""Scans all the weights in the model and returns a list of tuples
that contain stats about each weight.
"""
layers = model.get_trainable_layers()
table = [["WEIGHT NAME", "SHAPE", "MIN", "MAX", "STD"]]
for l in layers:
weight_values = l.get_weights() # list of Numpy arrays
weight_tensors = l.weights # list of TF tensors
for i, w in enumerate(weight_values):
weight_name = weight_tensors[i].name
# Detect problematic layers. Exclude biases of conv layers.
alert = ""
if w.min() == w.max() and not (l.__class__.__name__ == "Conv2D" and i == 1):
alert += "<span style='color:red'>*** dead?</span>"
if np.abs(w.min()) > 1000 or np.abs(w.max()) > 1000:
alert += "<span style='color:red'>*** Overflow?</span>"
# Add row
table.append([
weight_name + alert,
str(w.shape),
"{:+9.4f}".format(w.min()),
"{:+10.4f}".format(w.max()),
"{:+9.4f}".format(w.std()),
])
display_table(table)
import cv2
import numpy as np
import os
import sys
from mrcnn import utils
from mrcnn import visualize
from mrcnn.visualize import display_images
from mrcnn.visualize import display_instances
import mrcnn.model as modellib
from mrcnn.model import log
from mrcnn.config import Config
from mrcnn import model as modellib, utils
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import PIL
from PIL import Image
MODEL_DIR = os.path.join("logs")
COCO_MODEL_PATH = 'D:/chrome/Y4S2/research/New folder (3)/Dinusha-IT18118346/Backend/mask_rcnn_object_0010.h5'
class CustomConfig(Config):
"""Configuration for training on the custom dataset.
Derives from the base Config class and overrides some values.
"""
# Give the configuration a recognizable name
NAME = "object"
IMAGES_PER_GPU = 1
NUM_CLASSES = 1 + 3 # Background + labels
# Number of training steps per epoch
STEPS_PER_EPOCH = 10
# Skip detections with < 90% confidence
DETECTION_MIN_CONFIDENCE = 0.9
config = CustomConfig()
# Override the base config for inference
class InferenceConfig(config.__class__):
GPU_COUNT = 1
# Number of images to train with on each GPU. A 12GB GPU can typically
# handle 2 images of 1024x1024px.
# Adjust based on your GPU memory and image sizes. Use the highest
# number that your GPU can handle for best performance.
IMAGES_PER_GPU = 1
config = InferenceConfig()
# Custom dataset class names
model = modellib.MaskRCNN(
mode="inference", model_dir=MODEL_DIR, config=config
)
model.load_weights(COCO_MODEL_PATH, by_name=True)
class_names = ['BG', 'portrait', 'portrait_body', 'selfie_top']
"""def apply_mask2(image, mask):
blur1[:, :, 0] = np.where(
mask == 0,
image[:, :, 0],
blur1[:, :,0]
)
blur1[:, :, 1] = np.where(
mask == 0,
image[:, :, 1],
blur1[:, :,1]
)
blur1[:, :, 2] = np.where(
mask == 0,
image[:, :, 2],
blur1[:, :,2]
)
return blur1 """
def Blur():
# Path of the uploaded input image
original_image = 'static/theimage.jpg'
# Read the original image with OpenCV
image = cv2.imread(original_image)
intent = 15  # Gaussian kernel size (must be odd)
dimensions = image.shape
blur = cv2.GaussianBlur(image, (intent, intent), 0)
def apply_mask(image, mask):
blur[:, :, 0] = np.where(
mask == 0,
blur[:, :, 0],
image[:, :, 0]
)
blur[:, :, 1] = np.where(
mask == 0,
blur[:, :, 1],
image[:, :, 1]
)
blur[:, :, 2] = np.where(
mask == 0,
blur[:, :, 2],
image[:, :, 2]
)
return blur
# This nested function picks the main subject from the detection results and keeps it sharp while the background stays blurred.
def display_instances(image, boxes, masks, ids, names, scores):
# max_area tracks the largest detected object, which is treated as the main subject
max_area = 0
# n_instances is the number of detected instances
n_instances = boxes.shape[0]
if not n_instances:
print('NO INSTANCES TO DISPLAY')
else:
assert boxes.shape[0] == masks.shape[-1] == ids.shape[0]
for i in range(n_instances):
if not np.any(boxes[i]):
continue
# compute the area of each bounding box
y1, x1, y2, x2 = boxes[i]
square = (y2 - y1) * (x2 - x1)
# keep only the portrait/selfie classes of the custom dataset
label = names[ids[i]]
if label == 'selfie_top' or label == 'portrait_body' or label == 'portrait':
# keep the largest matching object as the main subject;
# everything else is treated as background
if square > max_area:
max_area = square
mask = masks[:, :, i]
else:
continue
else:
continue
# apply mask for the image
save1 = apply_mask(image, mask)
# save2 = apply_mask2(image, mask)
return save1 # ,save2
## run the Mask R-CNN model
results = model.detect([image], verbose=0)
r = results[0]
## apply the blur effect
frame1 = display_instances(
image, r['rois'], r['masks'], r['class_ids'], class_names, r['scores']
)
frame1 = cv2.resize(frame1, (466, 700))
cv2.imwrite('static/blur.jpg', frame1)
return "blurdone"
@@ -103,4 +103,12 @@ dependencies {
testImplementation 'junit:junit:4.+'
androidTestImplementation 'androidx.test.ext:junit:1.1.2'
androidTestImplementation 'androidx.test.espresso:espresso-core:3.3.0'
// retrofit
implementation 'com.squareup.retrofit2:retrofit:2.1.0'
implementation 'com.squareup.retrofit2:converter-gson:2.1.0'
implementation("com.squareup.okhttp3:okhttp:4.9.2")
implementation 'com.squareup.picasso:picasso:2.71828'
}
\ No newline at end of file
@@ -61,6 +61,17 @@
android:label="Save"
android:screenOrientation="portrait" />
<activity android:name=".ImageProcessing.Backgroundupload"/>
<activity android:name=".ImageProcessing.Splashedit" />
<activity android:name=".ImageProcessing.Cut" />
<activity android:name=".ImageProcessing.Blur" />
<activity android:name=".ImageProcessing.Selectedit" />
<activity android:name=".ImageProcessing.MedBlur"/>
<activity android:name=".ImageProcessing.BackgroundCustomization"/>
<activity android:name=".ImageProcessing.Bokeh"/>
<activity android:name=".ImageProcessing.Vignette"/>
<meta-data
android:name="com.google.mlkit.vision.DEPENDENCIES"
android:value="face" />
@@ -74,4 +85,4 @@
</receiver>
</application>
</manifest>
\ No newline at end of file
</manifest>
package com.app.smartphotoeditor.ImageProcessing;
import okhttp3.MultipartBody;
import okhttp3.ResponseBody;
import retrofit2.Call;
import retrofit2.http.GET;
import retrofit2.http.Multipart;
import retrofit2.http.POST;
import retrofit2.http.Part;
public interface Api {
String baseurl="http://10.0.2.15:5000";
@Multipart
@POST("upload/")
Call<ResponseBody> uploadImage( @Part MultipartBody.Part file);
@Multipart
@POST("uploadbg/")
Call<ResponseBody> uploadBgImage(@Part MultipartBody.Part file);
@GET("/blur")
Call<ResponseBody> blur();
@GET("/splash")
Call<ResponseBody> splash();
@GET("/cut")
Call<ResponseBody> cut();
@GET("/bokeh")
Call<ResponseBody> bokeh();
@GET("/vignette")
Call<ResponseBody> vignette();
@GET("/medblur")
Call<ResponseBody> medblur();
}
package com.app.smartphotoeditor.ImageProcessing;
import androidx.appcompat.app.AppCompatActivity;
import androidx.core.app.ActivityCompat;
import android.Manifest;
import android.app.Activity;
import android.app.ProgressDialog;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.database.Cursor;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.net.Uri;
import android.os.Bundle;
import android.provider.MediaStore;
import android.util.Base64;
import android.util.Log;
import android.view.View;
import android.webkit.MimeTypeMap;
import android.widget.Button;
import android.widget.ImageView;
import android.widget.Toast;
import com.app.smartphotoeditor.R;
import com.app.smartphotoeditor.config.ImageList;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.util.concurrent.TimeUnit;
import okhttp3.FormBody;
import okhttp3.MediaType;
import okhttp3.MultipartBody;
import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.RequestBody;
public class BackgroundCustomization extends AppCompatActivity
{
private Button btnedit,btnselectimage;
//private static final String UPLOAD_IMAGE_URL = "http://10.0.2.2:5000/upload";
private static final String UPLOAD_IMAGE_URL = "http://2989-112-134-170-172.ngrok.io/upload";
ImageView image;
String f_path,filePath,fileExtn;
String f_extension;
ProgressDialog progress;
Bitmap inputimage;
private static final int REQUEST_EXTERNAL_STORAGE = 1;
private static String[] PERMISSIONS_STORAGE = {
Manifest.permission.READ_EXTERNAL_STORAGE,
Manifest.permission.WRITE_EXTERNAL_STORAGE
};
@Override
protected void onCreate(Bundle savedInstanceState)
{
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_background_customization);
btnedit = (Button) findViewById(R.id.btnedit);
image = findViewById(R.id.img);
//displayFileChoose();
btnedit.setOnClickListener(new View.OnClickListener()
{
@Override
public void onClick(View v) {
// if(filePath==null)
// {
//Toast.makeText(BackgroundCustomization.this,"Select An Image", Toast.LENGTH_LONG).show();
//}else{
//****
progress = new ProgressDialog(BackgroundCustomization.this);
progress.setTitle("Uploading");
progress.setMessage("Please Wait...");
progress.show();
progress.setCancelable(false);
progress.setCanceledOnTouchOutside(false);
f_path = filePath;
f_extension = fileExtn;
try
{
//if(fileExtn.equals("img") || fileExtn.equals("jpg") || fileExtn.equals("jpeg") || fileExtn.equals("gif") || fileExtn.equals("png")) {
//Toast.makeText(BackgroundCustomization.this, filePath, Toast.LENGTH_SHORT).show();
Thread t = new Thread(new Runnable()
{
@Override
public void run()
{
//To upload the image to server
String input = convert(ImageList.getInstance().getCurrentBitmap());
System.out.println(input);
//File file = new File(f_path);
//String content_type = MimeTypeMap.getSingleton().getMimeTypeFromExtension(f_extension);
OkHttpClient okHttpClient = new OkHttpClient().newBuilder()
.connectTimeout(320, TimeUnit.SECONDS)
.readTimeout(320, TimeUnit.SECONDS)
.writeTimeout(320, TimeUnit.SECONDS)
.build();
//RequestBody file_body = RequestBody.create(MediaType.parse(content_type), file);
RequestBody request_body = new FormBody.Builder()
.add("image", input)
//.addFormDataPart("image", f_path.substring(f_path.lastIndexOf("/") +1), file_body)
.build();
Request request = new Request.Builder()
.url(UPLOAD_IMAGE_URL)
.post(request_body)
.build();
try
{
//Request executed
okHttpClient.newCall(request).execute();
progress.dismiss();
openEditActivity();
}catch (Exception e)
{
e.printStackTrace();
progress.dismiss();
}
}
});
t.start();
//}else
//{
//}
}catch (Exception e)
{
e.printStackTrace();
}//******
//}
}
});
btnselectimage=(Button) findViewById(R.id.selectimage);
btnselectimage.setOnClickListener(new View.OnClickListener()
{
@Override
public void onClick(View v)
{
verifyStoragePermissions(BackgroundCustomization.this);
displayFileChoose();
}
});
}
public void displayFileChoose()
{
Intent pickPhoto = new Intent(Intent.ACTION_PICK, MediaStore.Images.Media.EXTERNAL_CONTENT_URI);
pickPhoto.setType("image/*");
startActivityForResult(pickPhoto,1);
}
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent ImageReturnedIntent)
{
super.onActivityResult(requestCode, resultCode, ImageReturnedIntent);
if(requestCode==1){
Uri selectedImage = ImageReturnedIntent.getData();
filePath = getPath(selectedImage);
fileExtn = filePath.substring(filePath.lastIndexOf(".")+1);
image.setImageURI(selectedImage);
}
}
public String getPath(Uri uri)
{
String[] projection = {MediaStore.MediaColumns.DATA};
//store query result in cursor variable
Cursor cursor = getContentResolver().query(uri,projection,null,null,null);
int column_index = cursor.getColumnIndexOrThrow(MediaStore.MediaColumns.DATA);
cursor.moveToFirst();
String imagePath = cursor.getString(column_index);
// Log the resolved path for debugging
Log.d("ImagePath", imagePath);
return imagePath;
}
public static void verifyStoragePermissions(Activity activity)
{
int permission = ActivityCompat.checkSelfPermission(activity, Manifest.permission.WRITE_EXTERNAL_STORAGE);
if (permission != PackageManager.PERMISSION_GRANTED)
{
ActivityCompat.requestPermissions(
activity,
PERMISSIONS_STORAGE,
REQUEST_EXTERNAL_STORAGE
);
}
}
public void openEditActivity()
{
Intent intent = new Intent(this, Selectedit.class);
startActivity(intent);
}
public static Bitmap convert(String base64Str) throws IllegalArgumentException
{
byte[] decodedBytes = Base64.decode( base64Str.substring(base64Str.indexOf(",") + 1), Base64.DEFAULT );
return BitmapFactory.decodeByteArray(decodedBytes, 0, decodedBytes.length);
}
public static String convert(Bitmap bitmap)
{
ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
bitmap.compress(Bitmap.CompressFormat.PNG, 100, outputStream);
return Base64.encodeToString(outputStream.toByteArray(), Base64.DEFAULT);
}
}
\ No newline at end of file
package com.app.smartphotoeditor.ImageProcessing;
import androidx.appcompat.app.AppCompatActivity;
import androidx.core.app.ActivityCompat;
import android.Manifest;
import android.app.Activity;
import android.app.ProgressDialog;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.database.Cursor;
import android.net.Uri;
import android.os.Bundle;
import android.provider.MediaStore;
import android.view.View;
import android.webkit.MimeTypeMap;
import android.widget.Button;
import android.widget.ImageView;
import android.widget.Toast;
import com.app.smartphotoeditor.R;
import java.io.File;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import okhttp3.MediaType;
import okhttp3.MultipartBody;
import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.RequestBody;
import okhttp3.ResponseBody;
import retrofit2.Call;
import retrofit2.Callback;
public class Backgroundupload extends AppCompatActivity {
private Button btncutprocess, btnselectimage;
ImageView image;
private static final String UPLOAD_IMAGE_URL="http://727a-112-134-169-152.ngrok.io/uploadbg";
String f_path, filePath, fileExtn;
String f_extension;
ProgressDialog progress;
private static final int REQUEST_EXTERNAL_STORAGE = 1;
private static String[] PERMISSIONS_STORAGE = {
Manifest.permission.READ_EXTERNAL_STORAGE,
Manifest.permission.WRITE_EXTERNAL_STORAGE
};
@Override
protected void onCreate(Bundle savedInstanceState)
{
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_backgroundupload);
btncutprocess =(Button) findViewById(R.id.btncutprocess);
image = findViewById(R.id.imgbg);
btnselectimage =(Button) findViewById(R.id.selectbgimage);
btnselectimage.setOnClickListener(new View.OnClickListener()
{
@Override
public void onClick(View v)
{
verifyStoragePermissions(Backgroundupload.this);
displayFileChoose();
}
});
btncutprocess.setOnClickListener(new View.OnClickListener()
{
@Override
public void onClick(View v)
{
if(filePath == null)
{
Toast.makeText(Backgroundupload.this, "Select an image!", Toast.LENGTH_LONG).show();
}else
{
progress = new ProgressDialog(Backgroundupload.this);
progress.setTitle("Uploading");
progress.setMessage("Please wait...");
progress.show();
progress.setCancelable(false);
progress.setCanceledOnTouchOutside(false);
f_path = filePath;
f_extension = fileExtn;
try
{
if (fileExtn.equals("img") || fileExtn.equals("jpg") || fileExtn.equals("jpeg") || fileExtn.equals("gif") || fileExtn.equals("png"))
{
Toast.makeText(Backgroundupload.this, filePath, Toast.LENGTH_LONG).show();
Thread t = new Thread(new Runnable() {
@Override
public void run() {
//uploading the file to server (same as before)
File file = new File(f_path);
String content_type = MimeTypeMap.getSingleton().getMimeTypeFromExtension(f_extension);
OkHttpClient okHttpClient = new OkHttpClient().newBuilder()
.connectTimeout(120, TimeUnit.SECONDS)
.readTimeout(120, TimeUnit.SECONDS)
.writeTimeout(120, TimeUnit.SECONDS)
.build();
RequestBody file_body = RequestBody.create(MediaType.parse(content_type), file);
RequestBody request_Body = new MultipartBody.Builder()
.setType(MultipartBody.FORM)
.addFormDataPart("type", content_type)
.addFormDataPart("image", f_path.substring(f_path.lastIndexOf("/") +1), file_body)
.build();
Request request = new Request.Builder()
.url(UPLOAD_IMAGE_URL)
.post(request_Body)
.build();
try
{
okHttpClient.newCall(request).execute();
Call<ResponseBody> call = RetrofitClient
.getInstance()
.getApi()
.cut();
call.enqueue(new Callback<ResponseBody>() {
@Override
public void onResponse(Call<ResponseBody> call, retrofit2.Response<ResponseBody> response)
{
try {
//String s = response.body().string();
//Toast.makeText(Backgroundupload.this, s, Toast.LENGTH_LONG).show();
progress.dismiss();
openCutActivity();
} catch (Exception e) {
e.printStackTrace();
}
}
@Override
public void onFailure(Call<ResponseBody> call, Throwable t)
{
progress.dismiss();
Toast.makeText(Backgroundupload.this, t.getMessage(), Toast.LENGTH_LONG).show();
}
});
}catch (Exception e)
{
e.printStackTrace();
Toast.makeText(Backgroundupload.this, "Error!!!", Toast.LENGTH_LONG).show();
progress.dismiss();
}
}
});
t.start();
}else
{
}
}catch (Exception e)
{
e.printStackTrace();
}
}
}
});
}
public void displayFileChoose()
{
Intent pickPhoto = new Intent(Intent.ACTION_PICK, MediaStore.Images.Media.EXTERNAL_CONTENT_URI);
pickPhoto.setType("image/*");
startActivityForResult(pickPhoto,1);
}
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent ImageReturnedIntent)
{
super.onActivityResult(requestCode, resultCode, ImageReturnedIntent);
if(requestCode == 1)
{
Uri selectedImage = ImageReturnedIntent.getData();
filePath = getPath(selectedImage);
fileExtn = filePath.substring(filePath.lastIndexOf(".") +1);
image.setImageURI(selectedImage);
}
}
public String getPath(Uri uri)
{
String[] projection = {MediaStore.MediaColumns.DATA};
Cursor cursor = getContentResolver().query(uri, projection, null, null, null);
int column_index = cursor.getColumnIndexOrThrow(MediaStore.MediaColumns.DATA);
cursor.moveToFirst();
return cursor.getString(column_index);
}
public static void verifyStoragePermissions(Activity activity)
{
int permission = ActivityCompat.checkSelfPermission(activity, Manifest.permission.WRITE_EXTERNAL_STORAGE);
if(permission != PackageManager.PERMISSION_GRANTED)
{
ActivityCompat.requestPermissions(
activity,
PERMISSIONS_STORAGE,
REQUEST_EXTERNAL_STORAGE
);
}
}
public void openCutActivity()
{
Intent intent = new Intent(this, Cut.class);
startActivity(intent);
}
}
\ No newline at end of file
package com.app.smartphotoeditor.ImageProcessing;
import androidx.appcompat.app.AppCompatActivity;
import android.content.Intent;
import android.os.Bundle;
import android.view.View;
import android.widget.Button;
import android.widget.ImageView;
import com.app.smartphotoeditor.R;
import com.app.smartphotoeditor.activities.EditorActivity;
import com.squareup.picasso.MemoryPolicy;
import com.squareup.picasso.NetworkPolicy;
import com.squareup.picasso.Picasso;
public class Blur extends AppCompatActivity {
Button btnBack;
ImageView imgView;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_blur);
btnBack = findViewById(R.id.back);
imgView = findViewById(R.id.imgView);
Picasso
.get()
.load("https://" + Constants.IMGLINK + ".ngrok.io/static/blur.jpg")
.placeholder(R.drawable.progress_bar_material) // placeholder shown while loading (replaces the previous custom animation)
.networkPolicy(NetworkPolicy.NO_CACHE)
.memoryPolicy(MemoryPolicy.NO_CACHE)
.resize(300, 300)
.into(imgView);
btnBack.setOnClickListener(new View.OnClickListener()
{
@Override
public void onClick(View v) {
openEditorAcitivity();
}
});
}
public void openEditorAcitivity() {
Intent intent = new Intent(this, EditorActivity.class);
startActivity(intent);
}
}
\ No newline at end of file
package com.app.smartphotoeditor.ImageProcessing;
import android.content.Intent;
import android.os.Bundle;
import android.view.View;
import android.widget.Button;
import android.widget.ImageView;
import androidx.appcompat.app.AppCompatActivity;
import com.app.smartphotoeditor.R;
import com.app.smartphotoeditor.activities.EditorActivity;
import com.squareup.picasso.MemoryPolicy;
import com.squareup.picasso.NetworkPolicy;
import com.squareup.picasso.Picasso;
public class Bokeh extends AppCompatActivity {
Button btnBack;
ImageView imgView;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_bokeh);
btnBack=(Button) findViewById(R.id.back);
imgView=(ImageView) findViewById(R.id.imgView);
Picasso
.get()
.load("https://"+Constants.IMGLINK+".ngrok.io/static/bokeh.jpg")
.placeholder(R.drawable.progress_bar_material)
.networkPolicy(NetworkPolicy.NO_CACHE)
.memoryPolicy(MemoryPolicy.NO_CACHE)
.resize(300,300)
.into(imgView);
btnBack.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
openEditorAcitivity();
}
});
}
public void openEditorAcitivity() {
Intent intent = new Intent(this, EditorActivity.class);
startActivity(intent);
}
}
\ No newline at end of file
package com.app.smartphotoeditor.ImageProcessing;
public class Constants {
// Replace this with the current server/ngrok subdomain when the backend is running
public static final String IMGLINK = "727a-112-134-169-152";
}
package com.app.smartphotoeditor.ImageProcessing;
import android.content.Intent;
import android.os.Bundle;
import android.view.View;
import android.widget.Button;
import android.widget.ImageView;
import androidx.appcompat.app.AppCompatActivity;
import com.app.smartphotoeditor.R;
import com.app.smartphotoeditor.activities.EditorActivity;
import com.squareup.picasso.MemoryPolicy;
import com.squareup.picasso.NetworkPolicy;
import com.squareup.picasso.Picasso;
public class Cut extends AppCompatActivity {
Button btnBack;
ImageView imageView;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_cut);
btnBack =(Button) findViewById(R.id.back);
imageView =(ImageView) findViewById(R.id.imgView);
Picasso
.get()
.load("https://" + Constants.IMGLINK + ".ngrok.io/static/cut.jpg")
.placeholder(R.drawable.progress_bar_material)
.networkPolicy(NetworkPolicy.NO_CACHE)
.memoryPolicy(MemoryPolicy.NO_CACHE)
.resize(300, 300)
.into(imageView);
btnBack.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
openEditorAcitivity();
}
});
}
public void openEditorAcitivity() {
Intent intent = new Intent(this, EditorActivity.class);
startActivity(intent);
}
}
\ No newline at end of file
package com.app.smartphotoeditor.ImageProcessing;
import android.content.Intent;
import android.os.Bundle;
import android.view.View;
import android.widget.Button;
import android.widget.ImageView;
import androidx.appcompat.app.AppCompatActivity;
import com.app.smartphotoeditor.R;
import com.app.smartphotoeditor.activities.EditorActivity;
import com.squareup.picasso.MemoryPolicy;
import com.squareup.picasso.NetworkPolicy;
import com.squareup.picasso.Picasso;
public class MedBlur extends AppCompatActivity {
Button btnBack;
ImageView imgView;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_med_blur);
btnBack=(Button) findViewById(R.id.back);
imgView=(ImageView) findViewById(R.id.imgView);
Picasso
.get()
.load("https://"+Constants.IMGLINK+".ngrok.io/static/medblur.jpg")
.placeholder(R.drawable.progress_bar_material)
.networkPolicy(NetworkPolicy.NO_CACHE)
.memoryPolicy(MemoryPolicy.NO_CACHE)
.resize(300,300)
.into(imgView);
btnBack.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
openEditorAcitivity();
}
});
}
public void openEditorAcitivity() {
Intent intent = new Intent(this, EditorActivity.class);
startActivity(intent);
}
}
\ No newline at end of file
package com.app.smartphotoeditor.ImageProcessing;
import java.util.concurrent.TimeUnit;
import okhttp3.OkHttpClient;
import retrofit2.Retrofit;
import retrofit2.converter.gson.GsonConverterFactory;
public class RetrofitClient {
// private static final String baseurl="http://10.0.2.15:5000";
private static final String baseurl="http://727a-112-134-169-152.ngrok.io";
private static RetrofitClient mInstance;
private Retrofit retrofit;
private RetrofitClient()
{
OkHttpClient okHttpClient = new OkHttpClient().newBuilder()
.connectTimeout(320, TimeUnit.SECONDS)
.readTimeout(320, TimeUnit.SECONDS)
.writeTimeout(320, TimeUnit.SECONDS)
.build();
retrofit=new Retrofit.Builder()
.client(okHttpClient)
.baseUrl(baseurl)
.addConverterFactory(GsonConverterFactory.create())
.build();
}
public static synchronized RetrofitClient getInstance()
{
if(mInstance==null)
{
mInstance=new RetrofitClient();
}
return mInstance;
}
public Api getApi()
{
return retrofit.create(Api.class);
}
}
package com.app.smartphotoeditor.ImageProcessing;
import androidx.appcompat.app.AppCompatActivity;
import android.app.ProgressDialog;
import android.content.Intent;
import android.os.Bundle;
import android.view.View;
import android.widget.Button;
import android.widget.ImageView;
import android.widget.Toast;
import com.app.smartphotoeditor.R;
import com.squareup.picasso.MemoryPolicy;
import com.squareup.picasso.NetworkPolicy;
import com.squareup.picasso.Picasso;
import java.io.IOException;
import okhttp3.ResponseBody;
import retrofit2.Call;
import retrofit2.Callback;
import retrofit2.Response;
public class Selectedit extends AppCompatActivity {
private Button btnBlur, btnSplash, btnCut, btnNewImage, btnBokeh, btnVignette, btnMedBlur;
private ImageView imgView;
ProgressDialog progress;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_selectedit);
btnBlur = (Button) findViewById(R.id.blur);
btnSplash = (Button) findViewById(R.id.splash);
btnCut = (Button) findViewById(R.id.cut);
btnNewImage = (Button) findViewById(R.id.newimage);
imgView = (ImageView) findViewById(R.id.imgview);
btnBokeh = (Button) findViewById(R.id.bokeh);
btnMedBlur = (Button) findViewById(R.id.medblur);
btnVignette = (Button) findViewById(R.id.vignette);
Picasso
.get()
.load("https:///" + Constants.IMGLINK + ".ngrok.io/static/theimage.jpg")
.placeholder(R.drawable.progress_bar_material)
.resize(300, 300)
.networkPolicy(NetworkPolicy.NO_CACHE)
.memoryPolicy(MemoryPolicy.NO_CACHE)
.into(imgView);
btnNewImage.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
openNewImage();
}
});
btnBlur.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
progress = new ProgressDialog(Selectedit.this);
progress.setTitle("Loading");
progress.setMessage("Please wait...");
progress.show();
progress.setCancelable(false);
progress.setCanceledOnTouchOutside(false);
Thread t = new Thread(new Runnable() {
@Override
public void run() {
Call<ResponseBody> call = RetrofitClient
.getInstance()
.getApi()
.blur();
call.enqueue(new Callback<ResponseBody>() {
@Override
public void onResponse(Call<ResponseBody> call, Response<ResponseBody> response) {
try {
String s = response.body().string();
Toast.makeText(Selectedit.this, s, Toast.LENGTH_LONG).show();
progress.dismiss();
openBlurActivity();
} catch (IOException e) {
e.printStackTrace();
}
}
@Override
public void onFailure(Call<ResponseBody> call, Throwable t) {
progress.dismiss();
Toast.makeText(Selectedit.this, t.getMessage(), Toast.LENGTH_LONG).show();
}
});
}
});
t.start();
}
});
btnBokeh.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
progress = new ProgressDialog(Selectedit.this);
progress.setTitle("Loading");
progress.setMessage("Please Wait...");
progress.show();
progress.setCancelable(false);
progress.setCanceledOnTouchOutside(false);
Thread t = new Thread(new Runnable() {
@Override
public void run() {
Call<ResponseBody> call = RetrofitClient
.getInstance()
.getApi()
.bokeh();
call.enqueue(new Callback<ResponseBody>() {
@Override
public void onResponse(Call<ResponseBody> call, Response<ResponseBody> response) {
try {
//String s = response.body().string();
//Toast.makeText(Selectedit.this, s, Toast.LENGTH_LONG).show();
progress.dismiss();
openBokehActivity();
} catch (Exception e) {
e.printStackTrace();
}
}
@Override
public void onFailure(Call<ResponseBody> call, Throwable t) {
progress.dismiss();
Toast.makeText(Selectedit.this, t.getMessage(), Toast.LENGTH_LONG).show();
}
});
}
});
t.start();
}
});
btnSplash.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
progress = new ProgressDialog(Selectedit.this);
progress.setTitle("Loading");
progress.setMessage("Please wait...");
progress.show();
progress.setCancelable(false);
progress.setCanceledOnTouchOutside(false);
Thread t = new Thread(new Runnable() {
@Override
public void run() {
Call<ResponseBody> call = RetrofitClient
.getInstance()
.getApi()
.splash();
call.enqueue(new Callback<ResponseBody>() {
@Override
public void onResponse(Call<ResponseBody> call, Response<ResponseBody> response) {
try {
String s = response.body().string();
Toast.makeText(Selectedit.this, s, Toast.LENGTH_LONG).show();
progress.dismiss();
openSplashActivity();
} catch (IOException e) {
e.printStackTrace();
}
}
@Override
public void onFailure(Call<ResponseBody> call, Throwable t) {
progress.dismiss();
Toast.makeText(Selectedit.this, t.getMessage(), Toast.LENGTH_LONG).show();
}
});
}
});
t.start();
}
});
btnVignette.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
progress = new ProgressDialog(Selectedit.this);
progress.setTitle("Loading");
progress.setMessage("Please Wait...");
progress.show();
progress.setCancelable(false);
progress.setCanceledOnTouchOutside(false);
Thread t = new Thread(new Runnable() {
@Override
public void run() {
Call<ResponseBody> call = RetrofitClient
.getInstance()
.getApi()
.vignette();
call.enqueue(new Callback<ResponseBody>() {
@Override
public void onResponse(Call<ResponseBody> call, Response<ResponseBody> response) {
try {
String s = response.body().string();
Toast.makeText(Selectedit.this, s, Toast.LENGTH_LONG).show();
progress.dismiss();
openVignetteActivity();
} catch (IOException e) {
e.printStackTrace();
}
}
@Override
public void onFailure(Call<ResponseBody> call, Throwable t) {
progress.dismiss();
Toast.makeText(Selectedit.this, t.getMessage(), Toast.LENGTH_LONG).show();
}
});
}
});
t.start();
}
});
btnCut.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
openCutActivity();
}
});
btnMedBlur.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
progress = new ProgressDialog(Selectedit.this);
progress.setTitle("Loading");
progress.setMessage("Please Wait...");
progress.show();
progress.setCancelable(false);
progress.setCanceledOnTouchOutside(false);
Thread t = new Thread(new Runnable() {
@Override
public void run() {
Call<ResponseBody> call = RetrofitClient
.getInstance()
.getApi()
.medblur();
call.enqueue(new Callback<ResponseBody>() {
@Override
public void onResponse(Call<ResponseBody> call, Response<ResponseBody> response) {
try {
String s = response.body().string();
Toast.makeText(Selectedit.this, s, Toast.LENGTH_LONG).show();
progress.dismiss();
openMedBlurActivity();
} catch (IOException e) {
e.printStackTrace();
}
}
@Override
public void onFailure(Call<ResponseBody> call, Throwable t) {
progress.dismiss();
Toast.makeText(Selectedit.this, t.getMessage(), Toast.LENGTH_LONG).show();
}
});
}
});
t.start();
}
});
}
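// Navigation helpers: each launches the screen for the corresponding tool.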
public void openNewImage()
{
Intent intent = new Intent(this, BackgroundCustomization.class);
startActivity(intent);
}
public void openBlurActivity()
{
Intent intent = new Intent(this, Blur.class);
startActivity(intent);
}
public void openSplashActivity()
{
Intent intent = new Intent(this, Splashedit.class);
startActivity(intent);
}
public void openCutActivity()
{
Intent intent = new Intent(this, Backgroundupload.class);
startActivity(intent);
}
public void openBokehActivity() {
Intent intent = new Intent(this, Bokeh.class);
startActivity(intent);
}
public void openVignetteActivity() {
Intent intent = new Intent(this, Vignette.class);
startActivity(intent);
}
public void openMedBlurActivity() {
Intent intent = new Intent(this, MedBlur.class);
startActivity(intent);
}
}
\ No newline at end of file
package com.app.smartphotoeditor.ImageProcessing;
import androidx.appcompat.app.AppCompatActivity;
import android.content.Intent;
import android.os.Bundle;
import android.view.View;
import android.widget.Button;
import android.widget.ImageView;
import com.app.smartphotoeditor.R;
import com.app.smartphotoeditor.activities.EditorActivity;
import com.squareup.picasso.MemoryPolicy;
import com.squareup.picasso.NetworkPolicy;
import com.squareup.picasso.Picasso;
public class Splashedit extends AppCompatActivity {
Button btnBack;
ImageView imgView;
@Override
protected void onCreate(Bundle savedInstanceState)
{
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_splashedit);
btnBack = findViewById(R.id.back);
imgView = findViewById(R.id.imgView);
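// Fetch the colour-splash result rendered by the backend (served over ngrok).
// Caching is disabled so the latest processed image is always shown.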
Picasso
.get()
.load("https://" + Constants.IMGLINK +".ngrok.io/static/splash.jpg")
.placeholder(R.drawable.progress_bar_material)
.networkPolicy(NetworkPolicy.NO_CACHE)
.memoryPolicy(MemoryPolicy.NO_CACHE)
.resize(300, 300)
.into(imgView);
btnBack.setOnClickListener(new View.OnClickListener()
{
@Override
public void onClick(View v)
{
openEditorActivity();
}
});
}
public void openEditorActivity() {
Intent intent = new Intent(this, EditorActivity.class);
startActivity(intent);
}
}
\ No newline at end of file
package com.app.smartphotoeditor.ImageProcessing;
import android.content.Intent;
import android.os.Bundle;
import android.view.View;
import android.widget.Button;
import android.widget.ImageView;
import androidx.appcompat.app.AppCompatActivity;
import com.app.smartphotoeditor.R;
import com.app.smartphotoeditor.activities.EditorActivity;
import com.squareup.picasso.MemoryPolicy;
import com.squareup.picasso.NetworkPolicy;
import com.squareup.picasso.Picasso;
public class Vignette extends AppCompatActivity {
Button btnBack;
ImageView imgView;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_vignette);
btnBack = findViewById(R.id.back);
imgView = findViewById(R.id.imgView);
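// Fetch the vignette result from the backend with caching disabled, same pattern as Splashedit.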
Picasso
.get()
.load("https://"+Constants.IMGLINK+".ngrok.io/static/vignette.jpg")
.placeholder(R.drawable.progress_bar_material)
.networkPolicy(NetworkPolicy.NO_CACHE)
.memoryPolicy(MemoryPolicy.NO_CACHE)
.resize(300,300)
.into(imgView);
btnBack.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
openEditorActivity();
}
});
}
public void openEditorActivity() {
Intent intent = new Intent(this, EditorActivity.class);
startActivity(intent);
}
}
\ No newline at end of file
package com.app.smartphotoeditor.activities;
import android.app.ProgressDialog;
import android.content.DialogInterface;
import android.content.Intent;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.graphics.Color;
import android.os.Bundle;
import android.util.Base64;
import android.util.Log;
import android.view.View;
import android.widget.ImageView;
import android.widget.Toast;
......@@ -14,6 +19,17 @@ import androidx.constraintlayout.widget.ConstraintLayout;
import androidx.recyclerview.widget.LinearLayoutManager;
import androidx.recyclerview.widget.RecyclerView;
import com.app.smartphotoeditor.ImageProcessing.Backgroundupload;
import com.app.smartphotoeditor.ImageProcessing.RetrofitClient;
import com.app.smartphotoeditor.ImageProcessing.Selectedit;
import com.app.smartphotoeditor.ImageProcessing.Vignette;
import com.app.smartphotoeditor.config.ImageList;
import com.app.smartphotoeditor.ImageProcessing.BackgroundCustomization;
import com.app.smartphotoeditor.ImageProcessing.Blur;
import com.app.smartphotoeditor.ImageProcessing.Bokeh;
import com.app.smartphotoeditor.ImageProcessing.MedBlur;
import com.app.smartphotoeditor.ImageProcessing.Splashedit;
import com.app.smartphotoeditor.R;
import com.app.smartphotoeditor.adapters.ToolsAdapter;
import com.app.smartphotoeditor.config.ImageList;
......@@ -30,8 +46,19 @@ import com.bumptech.glide.Glide;
import org.opencv.core.Mat;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.concurrent.TimeUnit;
import okhttp3.FormBody;
import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.RequestBody;
import okhttp3.ResponseBody;
import retrofit2.Call;
import retrofit2.Callback;
import retrofit2.Response;
public class EditorActivity extends AppCompatActivity
......@@ -49,6 +76,10 @@ public class EditorActivity extends AppCompatActivity
private ArrayList<View> viewsInDisplay = new ArrayList<>();
private static final String UPLOAD_IMAGE_URL = "http://727a-112-134-169-152.ngrok.io/upload";
ProgressDialog progress;
@Override
protected void onActivityResult(int requestCode, int resultCode, @Nullable Intent data)
......@@ -82,7 +113,57 @@ public class EditorActivity extends AppCompatActivity
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_editor);
progress = new ProgressDialog(EditorActivity.this);
progress.setTitle("Uploading");
progress.setMessage("Please Wait...");
progress.show();
progress.setCancelable(false);
progress.setCanceledOnTouchOutside(false);
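// Upload the currently edited bitmap to the backend as a Base64-encoded "image" form field.
// Generous 320-second timeouts allow for slow server-side processing.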
try
{
Thread t = new Thread(new Runnable()
{
@Override
public void run()
{
String input = convert(ImageList.getInstance().getCurrentBitmap());
System.out.println(input);
OkHttpClient okHttpClient = new OkHttpClient().newBuilder()
.connectTimeout(320, TimeUnit.SECONDS)
.readTimeout(320, TimeUnit.SECONDS)
.writeTimeout(320, TimeUnit.SECONDS)
.build();
RequestBody request_body = new FormBody.Builder()
.add("image", input)
.build();
Request request = new Request.Builder()
.url(UPLOAD_IMAGE_URL)
.post(request_body)
.build();
try
{
okHttpClient.newCall(request).execute();
progress.dismiss();
}catch (Exception e)
{
e.printStackTrace();
progress.dismiss();
}
}
});
t.start();
}catch (Exception e)
{
e.printStackTrace();
}
tools_rv = findViewById(R.id.tools_rv);
......@@ -104,7 +185,224 @@ public class EditorActivity extends AppCompatActivity
intent = new Intent(getApplicationContext(), RestorationActivity.class);
else if(clickedPos == 5)
intent = new Intent(getApplicationContext(), LowLightEnhanceActivity.class);
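// Positions 6-10 are the image-processing tools (med blur, background cut, blur, bokeh, splash).
// Apart from the cut tool, each shows a progress dialog, calls its backend endpoint through
// RetrofitClient, and opens the matching preview activity.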
else if(clickedPos == 6) {
progress = new ProgressDialog(EditorActivity.this);
progress.setTitle("Loading");
progress.setMessage("Please Wait...");
progress.show();
progress.setCancelable(false);
progress.setCanceledOnTouchOutside(false);
Thread t = new Thread(new Runnable() {
@Override
public void run() {
Call<ResponseBody> call = RetrofitClient
.getInstance()
.getApi()
.medblur();
call.enqueue(new Callback<ResponseBody>() {
@Override
public void onResponse(Call<ResponseBody> call, Response<ResponseBody> response) {
try {
String s = response.body().string();
Toast.makeText(EditorActivity.this, s, Toast.LENGTH_LONG).show();
progress.dismiss();
openMedBlurActivity();
} catch (IOException e) {
e.printStackTrace();
}
}
@Override
public void onFailure(Call<ResponseBody> call, Throwable t) {
progress.dismiss();
Toast.makeText(EditorActivity.this, t.getMessage(), Toast.LENGTH_LONG).show();
}
});
}
});
t.start();
// intent = new Intent(getApplicationContext(), MedBlur.class);
}
else if(clickedPos == 7) {
openCutActivity();
//intent = new Intent(getApplicationContext(), Backgroundupload.class);
}
else if(clickedPos == 8){
progress = new ProgressDialog(EditorActivity.this);
progress.setTitle("Loading");
progress.setMessage("Please wait...");
progress.show();
progress.setCancelable(false);
progress.setCanceledOnTouchOutside(false);
Thread t = new Thread(new Runnable() {
@Override
public void run() {
Call<ResponseBody> call = RetrofitClient
.getInstance()
.getApi()
.blur();
call.enqueue(new Callback<ResponseBody>() {
@Override
public void onResponse(Call<ResponseBody> call, Response<ResponseBody> response) {
try {
String s = response.body().string();
Toast.makeText(EditorActivity.this, s, Toast.LENGTH_LONG).show();
progress.dismiss();
openBlurActivity();
} catch (IOException e) {
e.printStackTrace();
}
}
@Override
public void onFailure(Call<ResponseBody> call, Throwable t) {
progress.dismiss();
Toast.makeText(EditorActivity.this, t.getMessage(), Toast.LENGTH_LONG).show();
}
});
}
});
t.start();
//intent = new Intent(getApplicationContext(), Blur.class);
}
else if(clickedPos == 9) {
progress = new ProgressDialog(EditorActivity.this);
progress.setTitle("Loading");
progress.setMessage("Please Wait...");
progress.show();
progress.setCancelable(false);
progress.setCanceledOnTouchOutside(false);
Thread t = new Thread(new Runnable() {
@Override
public void run() {
Call<ResponseBody> call = RetrofitClient
.getInstance()
.getApi()
.bokeh();
call.enqueue(new Callback<ResponseBody>() {
@Override
public void onResponse(Call<ResponseBody> call, Response<ResponseBody> response) {
try {
String s = response.body().string();
Toast.makeText(EditorActivity.this, s, Toast.LENGTH_LONG).show();
progress.dismiss();
openBokehActivity();
} catch (Exception e) {
e.printStackTrace();
}
}
@Override
public void onFailure(Call<ResponseBody> call, Throwable t) {
progress.dismiss();
Toast.makeText(EditorActivity.this, t.getMessage(), Toast.LENGTH_LONG).show();
}
});
}
});
t.start();
//intent = new Intent(getApplicationContext(), Bokeh.class);
}
else if(clickedPos == 10) {
// progress = new ProgressDialog(EditorActivity.this);
// progress.setTitle("Loading");
// progress.setMessage("Please Wait...");
// progress.show();
// progress.setCancelable(false);
// progress.setCanceledOnTouchOutside(false);
//
// Thread t = new Thread(new Runnable() {
// @Override
// public void run() {
//
// Call<ResponseBody> call = RetrofitClient
// .getInstance()
// .getApi()
// .vignette();
//
// call.enqueue(new Callback<ResponseBody>() {
// @Override
// public void onResponse(Call<ResponseBody> call, Response<ResponseBody> response) {
// try {
// String s = response.body().string();
// Toast.makeText(EditorActivity.this, s, Toast.LENGTH_LONG).show();
// progress.dismiss();
// openVignetteActivity();
// } catch (IOException e) {
// e.printStackTrace();
// }
// }
//
// @Override
// public void onFailure(Call<ResponseBody> call, Throwable t) {
// progress.dismiss();
// Toast.makeText(EditorActivity.this, t.getMessage(), Toast.LENGTH_LONG).show();
// }
// });
// }
// });
// t.start();
progress = new ProgressDialog(EditorActivity.this);
progress.setTitle("Loading");
progress.setMessage("Please wait...");
progress.show();
progress.setCancelable(false);
progress.setCanceledOnTouchOutside(false);
Thread t = new Thread(new Runnable() {
@Override
public void run() {
Call<ResponseBody> call = RetrofitClient
.getInstance()
.getApi()
.splash();
call.enqueue(new Callback<ResponseBody>() {
@Override
public void onResponse(Call<ResponseBody> call, Response<ResponseBody> response) {
try {
String s = response.body().string();
Toast.makeText(EditorActivity.this, s, Toast.LENGTH_LONG).show();
progress.dismiss();
openSplashActivity();
} catch (IOException e) {
e.printStackTrace();
}
}
@Override
public void onFailure(Call<ResponseBody> call, Throwable t) {
progress.dismiss();
Toast.makeText(EditorActivity.this, t.getMessage(), Toast.LENGTH_LONG).show();
}
});
}
});
t.start();
//intent = new Intent(getApplicationContext(), Splashedit.class);
}
if(intent != null)
{
......@@ -384,4 +682,50 @@ public class EditorActivity extends AppCompatActivity
});
}
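// Helpers for converting between Bitmap and Base64 strings; convert(Bitmap) is used when
// uploading the edited image to the backend.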
public static Bitmap convert(String base64Str) throws IllegalArgumentException
{
byte[] decodedBytes = Base64.decode( base64Str.substring(base64Str.indexOf(",") + 1), Base64.DEFAULT );
return BitmapFactory.decodeByteArray(decodedBytes, 0, decodedBytes.length);
}
public static String convert(Bitmap bitmap)
{
ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
bitmap.compress(Bitmap.CompressFormat.PNG, 100, outputStream);
return Base64.encodeToString(outputStream.toByteArray(), Base64.DEFAULT);
}
public void openBlurActivity()
{
Intent intent = new Intent(this, Blur.class);
startActivity(intent);
}
public void openSplashActivity()
{
Intent intent = new Intent(this, Splashedit.class);
startActivity(intent);
}
public void openCutActivity()
{
Intent intent = new Intent(this, Backgroundupload.class);
startActivity(intent);
}
public void openBokehActivity() {
Intent intent = new Intent(this, Bokeh.class);
startActivity(intent);
}
public void openVignetteActivity() {
Intent intent = new Intent(this, Vignette.class);
startActivity(intent);
}
public void openMedBlurActivity()
{
Intent intent = new Intent(this, MedBlur.class);
startActivity(intent);
}
}
\ No newline at end of file
......@@ -279,4 +279,4 @@ public class EnvironmentChecker extends AppCompatActivity implements OnEyeStatus
{
tts.speak(text,TextToSpeech.QUEUE_FLUSH,null,null);
}
}
\ No newline at end of file
}
......@@ -688,7 +688,7 @@ public class Methods
Canvas canvas = new Canvas(bmOut);
// setup default color
canvas.drawColor(0, PorterDuff.Mode.CLEAR);
// create a blur paint for capturing alpha
// create a Blur paint for capturing alpha
Paint ptBlur = new Paint();
ptBlur.setMaskFilter(new BlurMaskFilter(15, BlurMaskFilter.Blur.NORMAL));
int[] offsetXY = new int[2];
......
<?xml version="1.0" encoding="utf-8"?>
<androidx.constraintlayout.widget.ConstraintLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:app="http://schemas.android.com/apk/res-auto"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent"
tools:context=".ImageProcessing.BackgroundCustomization">
<ScrollView
android:layout_width="match_parent"
android:layout_height="match_parent"
>
<LinearLayout
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:orientation="vertical" >
<Button
android:id="@+id/selectimage"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:layout_marginTop="100dp"
android:layout_marginLeft="60dp"
android:layout_marginRight="60dp"
android:text="Select From Gallery" />
<ImageView
android:id="@+id/img"
android:layout_width="match_parent"
android:layout_height="275dp"
android:layout_margin="50dp"
tools:srcCompat="@tools:sample/avatars" />
<Button
android:id="@+id/btnedit"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:layout_marginLeft="60dp"
android:layout_marginRight="60dp"
android:text="Edit" />
</LinearLayout>
</ScrollView>
</androidx.constraintlayout.widget.ConstraintLayout>
\ No newline at end of file
<?xml version="1.0" encoding="utf-8"?>
<androidx.constraintlayout.widget.ConstraintLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:app="http://schemas.android.com/apk/res-auto"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent"
tools:context=".ImageProcessing.Backgroundupload">
<ScrollView
android:layout_width="match_parent"
android:layout_height="match_parent"
>
<LinearLayout
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:orientation="vertical" >
<Button
android:id="@+id/selectbgimage"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:layout_marginTop="50dp"
android:layout_marginLeft="60dp"
android:layout_marginRight="60dp"
android:text="Select Background" />
<ImageView
android:id="@+id/imgbg"
android:layout_width="match_parent"
android:layout_height="275dp"
android:layout_margin="50dp"
tools:srcCompat="@tools:sample/avatars" />
<Button
android:id="@+id/btncutprocess"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:layout_marginLeft="60dp"
android:layout_marginRight="60dp"
android:text="Edit" />
</LinearLayout>
</ScrollView>
</androidx.constraintlayout.widget.ConstraintLayout>
\ No newline at end of file
<?xml version="1.0" encoding="utf-8"?>
<androidx.constraintlayout.widget.ConstraintLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:app="http://schemas.android.com/apk/res-auto"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent"
tools:context=".ImageProcessing.Blur">
<ScrollView
android:layout_width="match_parent"
android:layout_height="match_parent">
<LinearLayout
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:orientation="vertical" >
<Button
android:id="@+id/back"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:text="Back" />
<ImageView
android:id="@+id/imgView"
android:layout_width="match_parent"
android:layout_height="383dp"
tools:srcCompat="@tools:sample/avatars" />
</LinearLayout>
</ScrollView>
</androidx.constraintlayout.widget.ConstraintLayout>
\ No newline at end of file
<?xml version="1.0" encoding="utf-8"?>
<androidx.constraintlayout.widget.ConstraintLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:app="http://schemas.android.com/apk/res-auto"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent"
tools:context=".ImageProcessing.Bokeh">
<ScrollView
android:layout_width="match_parent"
android:layout_height="match_parent">
<LinearLayout
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:orientation="vertical" >
<Button
android:id="@+id/back"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:text="Back" />
<ImageView
android:id="@+id/imgView"
android:layout_width="match_parent"
android:layout_height="383dp"
tools:srcCompat="@tools:sample/avatars" />
</LinearLayout>
</ScrollView>
</androidx.constraintlayout.widget.ConstraintLayout>
\ No newline at end of file
<?xml version="1.0" encoding="utf-8"?>
<androidx.constraintlayout.widget.ConstraintLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:app="http://schemas.android.com/apk/res-auto"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent"
tools:context=".ImageProcessing.Cut">
<ScrollView
android:layout_width="match_parent"
android:layout_height="match_parent">
<LinearLayout
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:orientation="vertical" >
<Button
android:id="@+id/back"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:text="Back" />
<ImageView
android:id="@+id/imgView"
android:layout_width="match_parent"
android:layout_height="383dp"
tools:srcCompat="@tools:sample/avatars" />
</LinearLayout>
</ScrollView>
</androidx.constraintlayout.widget.ConstraintLayout>
\ No newline at end of file
<?xml version="1.0" encoding="utf-8"?>
<androidx.constraintlayout.widget.ConstraintLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:app="http://schemas.android.com/apk/res-auto"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent"
tools:context=".ImageProcessing.MedBlur">
<ScrollView
android:layout_width="match_parent"
android:layout_height="match_parent">
<LinearLayout
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:orientation="vertical" >
<Button
android:id="@+id/back"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:text="Back" />
<ImageView
android:id="@+id/imgView"
android:layout_width="match_parent"
android:layout_height="383dp"
tools:srcCompat="@tools:sample/avatars" />
</LinearLayout>
</ScrollView>
</androidx.constraintlayout.widget.ConstraintLayout>
\ No newline at end of file
<?xml version="1.0" encoding="utf-8"?>
<androidx.constraintlayout.widget.ConstraintLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:app="http://schemas.android.com/apk/res-auto"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent"
tools:context=".ImageProcessing.Selectedit">
<ScrollView
android:layout_width="match_parent"
android:layout_height="match_parent"
>
<LinearLayout
android:layout_width="match_parent"
android:layout_height="match_parent"
android:orientation="vertical">
<Button
android:id="@+id/newimage"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:layout_marginLeft="20dp"
android:layout_marginRight="20dp"
android:text="New Image" />
<LinearLayout
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:orientation="horizontal"
android:layout_marginRight="10dp"
android:layout_marginLeft="10dp">
<TableLayout
android:layout_width="match_parent"
android:layout_height="wrap_content">
<TableRow
android:id="@+id/tableRow1"
android:layout_width="wrap_content"
android:layout_height="0dp"
android:layout_weight="1" >
<Button
android:id="@+id/blur"
android:layout_width="0dp"
android:layout_height="wrap_content"
android:layout_weight="1"
android:layout_marginLeft="20dp"
android:layout_marginRight="20dp"
android:layout_gravity="center"
android:text="Blur"/>
<Button
android:id="@+id/cut"
android:layout_width="0dp"
android:layout_height="wrap_content"
android:layout_weight="1"
android:layout_marginLeft="20dp"
android:layout_marginRight="20dp"
android:layout_gravity="center"
android:text="Cut"/>
</TableRow>
<TableRow
android:id="@+id/tableRow2"
android:layout_width="wrap_content"
android:layout_height="0dp"
android:layout_weight="1" >
<Button
android:id="@+id/splash"
android:layout_width="0dp"
android:layout_height="wrap_content"
android:layout_gravity="center"
android:layout_weight="1"
android:layout_marginLeft="20dp"
android:layout_marginRight="20dp"
android:text="Splash" />
<Button
android:id="@+id/bokeh"
android:layout_width="0dp"
android:layout_height="wrap_content"
android:layout_marginLeft="20dp"
android:layout_marginRight="20dp"
android:layout_weight="1"
android:layout_gravity="center"
android:text="Bokeh" />
</TableRow>
<TableRow
android:id="@+id/tableRow3"
android:layout_width="wrap_content"
android:layout_height="0dp"
android:layout_weight="1" >
<Button
android:id="@+id/medblur"
android:layout_width="0dp"
android:layout_height="wrap_content"
android:layout_weight="1"
android:layout_marginLeft="20dp"
android:layout_marginRight="20dp"
android:layout_gravity="center"
android:text="Medium Blur"
/>
<Button
android:id="@+id/vignette"
android:layout_width="0dp"
android:layout_height="wrap_content"
android:layout_weight="1"
android:layout_marginLeft="20dp"
android:layout_marginRight="20dp"
android:layout_gravity="center"
android:text="vignette"/>
</TableRow>
</TableLayout>
</LinearLayout>
<ImageView
android:id="@+id/imgview"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:layout_marginTop="20dp"
android:adjustViewBounds="true"
app:srcCompat="@drawable/ic_launcher_background" />
</LinearLayout>
</ScrollView>
</androidx.constraintlayout.widget.ConstraintLayout>
\ No newline at end of file
<?xml version="1.0" encoding="utf-8"?>
<androidx.constraintlayout.widget.ConstraintLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:app="http://schemas.android.com/apk/res-auto"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent"
tools:context=".ImageProcessing.Splashedit">
<ScrollView
android:layout_width="match_parent"
android:layout_height="match_parent">
<LinearLayout
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:orientation="vertical" >
<Button
android:id="@+id/back"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:text="Back" />
<ImageView
android:id="@+id/imgView"
android:layout_width="match_parent"
android:layout_height="383dp"
tools:srcCompat="@tools:sample/avatars" />
</LinearLayout>
</ScrollView>
</androidx.constraintlayout.widget.ConstraintLayout>
\ No newline at end of file
<?xml version="1.0" encoding="utf-8"?>
<androidx.constraintlayout.widget.ConstraintLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:app="http://schemas.android.com/apk/res-auto"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent"
tools:context=".ImageProcessing.Vignette">
<ScrollView
android:layout_width="match_parent"
android:layout_height="match_parent">
<LinearLayout
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:orientation="vertical" >
<Button
android:id="@+id/back"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:text="Back" />
<ImageView
android:id="@+id/imgView"
android:layout_width="match_parent"
android:layout_height="383dp"
tools:srcCompat="@tools:sample/avatars" />
</LinearLayout>
</ScrollView>
</androidx.constraintlayout.widget.ConstraintLayout>
\ No newline at end of file
#Mon Apr 12 21:09:05 IST 2021
#Mon Sep 06 00:25:46 IST 2021
distributionBase=GRADLE_USER_HOME
distributionUrl=https\://services.gradle.org/distributions/gradle-6.7.1-bin.zip
distributionPath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-6.5-all.zip
zipStoreBase=GRADLE_USER_HOME
......@@ -8,8 +8,8 @@ due to bugs, placing projects in directories containing spaces in the
path, or characters like ", ' and &, have had issues. We're working to
eliminate these bugs, but to save yourself headaches you may want to
move your project to a location where this is not a problem.
D:\Other Account Android Projecst\2021-129
- - -
D:\research -app\2021-129
-
Ignored Files:
--------------
......