Commit 2aa2c26c authored by Samesh Buddhika Alahakoon's avatar Samesh Buddhika Alahakoon

Merge branch 'dinushaCombined' into 'master'

Dinusha combined

See merge request !43
parents 463c836a 6598ae90
<component name="InspectionProjectProfileManager">
<profile version="1.0">
<option name="myName" value="Project Default" />
<inspection_tool class="PyInterpreterInspection" enabled="false" level="WARNING" enabled_by_default="false" />
</profile>
</component>
\ No newline at end of file
import cv2
import os
import mrcnn.model as modellib
from mrcnn.config import Config
import numpy as np
def Blur():
inputPath = 'static/theimage.jpg'  # avoid shadowing the built-in input()
image = cv2.imread(inputPath)
intent = 15
dimensions = image.shape
# blurredImage = gaussianBlur(image, intent)
blurredImage = cv2.GaussianBlur(image, (intent, intent), 0)
def convolution(inputimage, kernel):
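# Zero-pads the input so the output keeps the original size, then slides the
# kernel over every pixel (a naive O(H*W*kH*kW) spatial convolution).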
kernelHeight = kernel.shape[0]
kernelWidth = kernel.shape[1]
if(len(inputimage.shape) == 3):
imagePadding = np.pad(inputimage, pad_width=((kernelHeight // 2, kernelHeight // 2),
(kernelWidth // 2, kernelWidth // 2),
(0, 0)),
mode='constant', constant_values=0).astype(np.float32)
elif(len(inputimage.shape) == 2):
imagePadding = np.pad(inputimage, pad_width=((kernelHeight // 2, kernelHeight // 2),
(kernelWidth // 2, kernelWidth // 2)),
mode='constant', constant_values=0).astype(np.float32)
height = kernelHeight // 2
width = kernelWidth // 2
imageConvolution = np.zeros(imagePadding.shape)
for i in range(height, imagePadding.shape[0]-height):
for j in range(width, imagePadding.shape[1]-width):
x = imagePadding[i-height:i-height+kernelHeight, j-width:j-width+kernelWidth]
x = x.flatten()*kernel.flatten()
imageConvolution[i][j] = x.sum()
heightmax = -height
widthmax = -width
if(height == 0):
return imageConvolution[height:, width:widthmax]
if(width == 0):
return imageConvolution[height:heightmax, width:]
return imageConvolution[height:heightmax, width:widthmax]
def gaussianBlur(image, sigma):
image = np.asarray(image)
filter = 2 * int(4 * sigma + 0.5) + 1
gaussianFilter = np.zeros((filter, filter), np.float32)
f1 = filter//2
f2 = filter//2
for x in range(-f1, f1+1):
for y in range(-f2, f2+1):
x1 = 2 * np.pi * (sigma ** 2)
x2 = np.exp(-(x ** 2 + y ** 2)/(2 * sigma ** 2))
gaussianFilter[x + f1, y + f2] = (1/x1) * x2
filterImage = np.zeros_like(image, dtype=np.float32)
for c in range(3):
filterImage[:, :, c] = convolution(image[:, :, c], gaussianFilter)
return (filterImage.astype(np.uint8))
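# Side note (not part of the original MR): the kernel built above samples the
# 2-D Gaussian G(x, y) = exp(-(x^2 + y^2) / (2*sigma^2)) / (2*pi*sigma^2) on a
# grid of size 2*int(4*sigma + 0.5) + 1. A quick, hedged sanity check against
# OpenCV (the sigma value below is illustrative only):
#   img = cv2.imread('static/theimage.jpg')
#   sigma = 2
#   ksize = 2 * int(4 * sigma + 0.5) + 1
#   diff = np.abs(gaussianBlur(img, sigma).astype(np.float32)
#                 - cv2.GaussianBlur(img, (ksize, ksize), sigma).astype(np.float32))
#   print(diff.mean())  # expected to be small; discretisation and border handling keep it from being exactly 0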
mainFilePath = os.path.join("D:/research -app/2021-129/Backend/")
modelFilePath = os.path.join(mainFilePath, "logs/")
trainedModelPath = 'D:/research -app/2021-129/Backend/mask_rcnn_object_0010.h5'
class InferenceConfig(Config):
NAME = "object"
NUM_CLASSES = 1 + 3
DETECTION_MIN_CONFIDENCE = 0.5
GPU_COUNT = 1
IMAGES_PER_GPU = 1
config = InferenceConfig()
model = modellib.MaskRCNN(mode="inference", model_dir=modelFilePath, config=config)
model.load_weights(trainedModelPath, by_name=True)
classes = ['BG', 'portrait', 'portrait_body', 'selfie_top']
def applyMask(image, mask):
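# Composites per channel: where the mask is 0 (background) keep the blurred
# pixel, elsewhere keep the original subject pixel.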
blurredImage[:, :, 0] = np.where(
mask == 0,
blurredImage[:, :, 0],
image[:, :, 0]
)
blurredImage[:, :, 1] = np.where(
mask == 0,
blurredImage[:, :, 1],
image[:, :, 1]
)
blurredImage[:, :, 2] = np.where(
mask == 0,
blurredImage[:, :, 2],
image[:, :, 2]
)
return blurredImage
def applyBlur(image, boxes, masks, class_ids, classes, scores, viewMask=True):
instances = boxes.shape[0]
if not instances:
print("No instances available!!!")
maskedImage = image.astype(np.uint32).copy()
for i in range(instances):
mask = masks[:, :, i]
if viewMask:
maskedImage = applyMask(maskedImage, mask)
return maskedImage
detectionResults = model.detect([image], verbose=0)
r = detectionResults[0]
output = applyBlur(image, r['rois'], r['masks'], r['class_ids'], classes, r['scores'])
# output = cv2.resize(np.float32(output), (466, 700))
cv2.imwrite('static/blur.jpg', output)
return "blurdone"
import cv2
import numpy as np
import os
import sys
from mrcnn import utils
from mrcnn import visualize
from mrcnn.visualize import display_images
from mrcnn.visualize import display_instances
import mrcnn.model as modellib
from mrcnn.model import log
from mrcnn.config import Config
from mrcnn import model as modellib, utils
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import PIL
from PIL import Image
def cutImage():
# Input the original image name
# original_image = 'samples/ex7.jpg'
original_image = 'static/theimage.jpg'
# Use OpenCV to read the original image
image = cv2.imread(original_image)
height, width, channels = image.shape
print("Read original image successfully! The original image shape is:")
print(image.shape)
dimensions = (width, height)
# blur = cv2.GaussianBlur(image ,(intent,intent),0)
bg = cv2.imread('static/bg.jpg')
bg = cv2.resize(bg, dimensions, interpolation = cv2.INTER_AREA)
# blur1 = cv2.GaussianBlur(image ,(intent,intent),0)
# cv2.imwrite('original_image.jpg', bg)
# Load the pre-trained model data
ROOT_DIR = os.path.join("D:/research -app/2021-129/Backend/")
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
customModel = 'D:/research -app/2021-129/Backend/mask_rcnn_object_0010.h5'
class InferenceConfig(Config):
NAME = "object"
GPU_COUNT = 1
IMAGES_PER_GPU = 1
NUM_CLASSES = 1 + 3 #Background and the total labels of the dataset
DETECTION_MIN_CONFIDENCE = 0.9
config = InferenceConfig()
# COCO dataset object names
model = modellib.MaskRCNN(
mode="inference", model_dir=MODEL_DIR, config=config
)
model.load_weights(customModel, by_name=True)
class_names = ['BG', 'portrait', 'portrait_body', 'selfie_top']
def apply_mask(image, mask):
bg[:, :, 0] = np.where(
mask == 0,
bg[:, :, 0],
image[:, :, 0]
)
bg[:, :, 1] = np.where(
mask == 0,
bg[:, :, 1],
image[:, :, 1]
)
bg[:, :, 2] = np.where(
mask == 0,
bg[:, :, 2],
image[:, :, 2]
)
return bg
# This function is used to show the object detection result in original image.
def display_instances(image, boxes, masks, ids, names, scores, show_mask=True):
# max_area will save the largest object for all the detection results
instances = boxes.shape[0]
if not instances:
print('There are no instances available to display!!!')
maskedImage = image.astype(np.uint32).copy()
for i in range(instances):
mask = masks[:, :, i]
if show_mask:
maskedImage = apply_mask(maskedImage, mask)
return maskedImage
"""max_area = 0
# n_instances saves the amount of all objects
n_instances = boxes.shape[0]
if not n_instances:
print('NO INSTANCES TO DISPLAY')
else:
assert boxes.shape[0] == masks.shape[-1] == ids.shape[0]
for i in range(n_instances):
if not np.any(boxes[i]):
continue
# compute the square of each object
y1, x1, y2, x2 = boxes[i]
square = (y2 - y1) * (x2 - x1)
# use label to select person object from all the 80 classes in COCO dataset
label = names[ids[i]]
if label == 'selfie_top' or label == 'portrait_body' or label == 'portrait':
# save the largest object in the image as main character
# other people will be regarded as background
if square > max_area:
max_area = square
mask = masks[:, :, i]
else:
continue
else:
continue
# apply mask for the image
save1 = apply_mask(image, mask)
# save2 = apply_mask2(image, mask)
return save1 # ,save2"""
results = model.detect([image], verbose=0)
r = results[0]
##applying our effect
frame1 = display_instances(
image, r['rois'], r['masks'], r['class_ids'], class_names, r['scores']
)
## save image
cv2.imwrite("static/cut.jpg", frame1)
return "done"
import cv2
import numpy as np
import os
import sys
import mrcnn.model as modellib
from mrcnn.model import log
from mrcnn.config import Config
import PIL
from PIL import Image
from matplotlib.pyplot import imread
import scipy as sc
from scipy import ndimage
from skimage import filters
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import math
from skimage.color import rgb2gray
rootLocation = os.path.join("D:/research -app/2021-129/Backend/")
modelLocation = os.path.join(rootLocation, "logs/")
customModel = 'D:/research -app/2021-129/Backend/mask_rcnn_object_0010.h5'
class InferenceConfig(Config):
NAME = "object"
GPU_COUNT = 1
IMAGES_PER_GPU = 1
NUM_CLASSES = 1 + 3 #Background and the total labels of the dataset
DETECTION_MIN_CONFIDENCE = 0.9
config = InferenceConfig()
model = modellib.MaskRCNN(mode = "inference", model_dir = modelLocation, config = config)
model.load_weights(customModel, by_name=True)
class_names = ['BG', 'portrait', 'portrait_body', 'selfie_top']
def medBlur():
def apply_mask(image, mask):
blurredImage[:, :, 0] = np.where(
mask == 0,
blurredImage[:, :, 0],
image[:, :, 0]
)
blurredImage[:, :, 1] = np.where(
mask == 0,
blurredImage[:, :, 1],
image[:, :, 1]
)
blurredImage[:, :, 2] = np.where(
mask == 0,
blurredImage[:, :, 2],
image[:, :, 2]
)
return blurredImage
def applyBlur(image, boxes, masks, show_mask=True):
instances = boxes.shape[0]
if not instances:
print('There are no instances available to display!!!')
maskedImage = image.astype(np.uint32).copy()
for i in range(instances):
mask = masks[:, :, i]
if show_mask:
maskedImage = apply_mask(maskedImage, mask)
return maskedImage
original_image = 'static/theimage.jpg'
image = cv2.imread(original_image)
intent= 15
blurredImage = cv2.medianBlur(image, intent)
results = model.detect([image], verbose=0)
r = results[0]
output = applyBlur(image, r['rois'], r['masks'])
#output = cv2.resize(output, (466, 700))
cv2.imwrite('static/medblur.jpg', output)
return "blurdone"
\ No newline at end of file
import cv2
import numpy as np
import os
import sys
from mrcnn import utils
from mrcnn import visualize
from mrcnn.visualize import display_images
from mrcnn.visualize import display_instances
import mrcnn.model as modellib
from mrcnn.model import log
from mrcnn.config import Config
from mrcnn import model as modellib, utils
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
def Splash():
original_image = 'static/theimage.jpg'
image = cv2.imread(original_image)
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
ROOT_DIR = os.path.join("D:/research -app/2021-129/Backend/")
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
WEIGHTS_PATH = 'D:/research -app/2021-129/Backend/mask_rcnn_object_0010.h5'
class InferenceConfig(Config):
NAME = "object"
GPU_COUNT = 1
IMAGES_PER_GPU = 1
NUM_CLASSES = 1 + 3 #Background and the total labels of the dataset
DETECTION_MIN_CONFIDENCE = 0.9
config = InferenceConfig()
weights_path = WEIGHTS_PATH
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
model.load_weights(weights_path, by_name=True)
class_names = ['BG', 'portrait', 'portrait_body', 'selfie_top']
def apply_mask(image, mask):
image[:, :, 0] = np.where(
mask == 0,
gray_image[:, :],
image[:, :, 0]
)
image[:, :, 1] = np.where(
mask == 0,
gray_image[:, :],
image[:, :, 1]
)
image[:, :, 2] = np.where(
mask == 0,
gray_image[:, :],
image[:, :, 2]
)
return image
def display_instances(image, boxes, masks, ids, names, scores, show_mask=True):
instances = boxes.shape[0]
if not instances:
print('There are no instances available to display!!!')
maskedImage = image.astype(np.uint32).copy()
for i in range(instances):
mask = masks[:, :, i]
if show_mask:
maskedImage = apply_mask(maskedImage, mask)
return maskedImage
"""max_area = 0
# n_instances saves the amount of all objects
n_instances = boxes.shape[0]
if not n_instances:
print('NO INSTANCES TO DISPLAY')
else:
assert boxes.shape[0] == masks.shape[-1] == ids.shape[0]
for i in range(n_instances):
if not np.any(boxes[i]):
continue
# compute the square of each object
y1, x1, y2, x2 = boxes[i]
square = (y2 - y1) * (x2 - x1)
# use label to select person object from all the 80 classes in COCO dataset
label = names[ids[i]]
if label == 'selfie_top' or label == 'portrait_body' or label == 'portrait':
# save the largest object in the image as main character
# other people will be regarded as background
if square > max_area:
max_area = square
mask = masks[:, :, i]
else:
continue
else:
continue
image = apply_mask(image, mask)
return image"""
results = model.detect([image], verbose=0)
r = results[0]
frame = display_instances(
image, r['rois'], r['masks'], r['class_ids'], class_names, r['scores']
)
frame = cv2.resize(np.float32(frame), (466, 700))
cv2.imwrite('static/splash.jpg', frame)
return "splashdone"
import numpy as np
import cv2
def vignette():
input_image = 'static/theimage.jpg'
input_image = cv2.imread(input_image)
rows, cols = input_image.shape[:2]
X_resultant_kernel = cv2.getGaussianKernel(cols,200)
Y_resultant_kernel = cv2.getGaussianKernel(rows,200)
resultant_kernel = Y_resultant_kernel * X_resultant_kernel.T
mask = 255 * resultant_kernel / np.linalg.norm(resultant_kernel)
output = np.copy(input_image)
for i in range(3):
output[:, :, i] = output[:,:,i] * mask
cv2.imwrite('static/vignette.jpg', output)
return "vignettedone"
\ No newline at end of file
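# Hedged standalone sketch (not part of the merge request) of the vignette mask
# built in vignette() above: the outer product of two 1-D Gaussian kernels gives
# a 2-D weight map that is brightest at the centre and falls off toward the
# corners, which is what darkens the image edges. Sizes here are toy values.
import cv2
import numpy as np
rows, cols = 4, 6
kx = cv2.getGaussianKernel(cols, 200)               # shape (cols, 1)
ky = cv2.getGaussianKernel(rows, 200)               # shape (rows, 1)
weights = ky * kx.T                                 # shape (rows, cols) 2-D falloff
weights = 255 * weights / np.linalg.norm(weights)   # same scaling as vignette()
print(np.round(weights, 1))                         # centre values are the largest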
from flask import Flask, request
from flask_cors import CORS, cross_origin
from Cut import cutImage
from Splash import Splash
from Blur import Blur
from maskImage import mask
from bokeh import Bokeh
from MedBlur import medBlur
from Vignette import vignette
import os
import tensorflow as tf
from flask_ngrok import run_with_ngrok
import base64
app = Flask(__name__)
run_with_ngrok(app)
cors = CORS(app)
app.config["CACHE_TYPE"] = "null"
app.config['CORS_HEADERS'] = '*'
app.config["IMAGE_UPLOADS"] = "./"
graph = tf.get_default_graph()
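# TF 1.x keeps a single default graph; it is captured here once and re-entered
# inside every route below so that detection calls made from Flask's worker
# threads run against the same graph the Mask R-CNN models were built in.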
def convert_and_save(b64_string):
with open("static/theimage.jpg", "wb") as fh:
fh.write(base64.decodebytes(b64_string.encode()))
@app.route("/")
def hello():
return "Checking Connectivity..."
@app.route('/upload', methods = ['POST'])
@cross_origin()
def upload_file():
if request.method == 'POST':
image = request.form.get('image')
convert_and_save(image)
return "done"
# @app.route('/upload', methods = ['POST'])
# @cross_origin()
# def upload_file():
# if request.method == 'POST':
# if request.files:
# image = request.files["image"]
# image.save(os.path.join(app.config["IMAGE_UPLOADS"], "static/theimage.jpg"))
# global graph
# with graph.as_default():
# mask()
# return "done"
@app.route('/uploadbg', methods = ['POST'])
@cross_origin()
def upload_bg():
if request.method == 'POST':
if request.files:
image = request.files["image"]
image.save(os.path.join(app.config["IMAGE_UPLOADS"], "static/bg.jpg"))
return "done"
@app.route('/cut', methods = ['GET'])
@cross_origin()
def cutGet():
global graph
with graph.as_default():
cutImage()
return "done"
@app.route('/splash', methods = ['GET'])
@cross_origin()
def splashGet():
global graph
with graph.as_default():
Splash()
return "done"
@app.route('/blur', methods = ['GET'])
@cross_origin()
def blurGet():
global graph
with graph.as_default():
Blur()
return "done"
@app.route('/bokeh', methods = ['GET'])
@cross_origin()
def bokehGet():
global graph
with graph.as_default():
Bokeh()
return "done"
@app.route('/vignette', methods = ['GET'])
@cross_origin()
def vignetteGet():
global graph
with graph.as_default():
vignette()
return "done"
@app.route('/medblur', methods = ['GET'])
@cross_origin()
def medblurGet():
global graph
with graph.as_default():
medBlur()
return "done"
if __name__ == '__main__':
app.run()
\ No newline at end of file
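# Hedged client sketch (not part of the merge request): one way to exercise the
# /upload and /blur routes defined above from a desktop machine. It assumes the
# third-party requests package is installed; BASE_URL is a placeholder for the
# ngrok address printed when app.py starts, and 'local_photo.jpg' is a
# hypothetical local test image.
import base64
import requests

BASE_URL = "http://localhost:5000"
with open("local_photo.jpg", "rb") as f:
    encoded = base64.b64encode(f.read()).decode()
requests.post(BASE_URL + "/upload", data={"image": encoded})    # server saves static/theimage.jpg
requests.get(BASE_URL + "/blur")                                # server writes static/blur.jpg
print(requests.get(BASE_URL + "/static/blur.jpg").status_code)  # 200 once the blurred result exists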
import cv2
import os
import mrcnn.model as modellib
from mrcnn.config import Config
import matplotlib.pyplot as plt
import numpy as np
def Bokeh():
inputPath = 'static/theimage.jpg'  # avoid shadowing the built-in input()
image = cv2.imread(inputPath)
plt.rcParams["figure.figsize"]= (10,10)
np.set_printoptions(precision=3)
Array = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], dtype='float')
binaryMask = Array
gaussianKernel = cv2.getGaussianKernel(11, 5.)
convoKernel = gaussianKernel*gaussianKernel.transpose()*binaryMask
kernel = convoKernel / np.sum(convoKernel)
print(kernel)
def bokeh(image):
r,g,b = cv2.split(image)
r = r / 255.
r = np.where(r > 0.9, r * 2, r)
filteredR = cv2.filter2D(r, -1, kernel)
filteredR = np.where(filteredR > 1., 1., filteredR)
g = g / 255.
g = np.where(g > 0.9, g * 2, g)
filteredG = cv2.filter2D(g, -1, kernel)
filteredG = np.where(filteredG > 1., 1., filteredG)
b = b / 255.
b = np.where(b > 0.9, b * 2, b)
filteredB = cv2.filter2D(b, -1, kernel)
filteredB = np.where(filteredB > 1., 1., filteredB)
result = cv2.merge((filteredR, filteredG, filteredB))
return result
imageRGB = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
imageRGB = imageRGB / 255.
mainFilePath = os.path.join("D:/research -app/2021-129/Backend/")
modelFilePath = os.path.join(mainFilePath, "logs/")
trainedModelPath = 'D:/research -app/2021-129/Backend/mask_rcnn_object_0010.h5'
class InferenceConfig(Config):
NAME = "object"
NUM_CLASSES = 1 + 3
DETECTION_MIN_CONFIDENCE = 0.5
GPU_COUNT = 1
IMAGES_PER_GPU = 1
config = InferenceConfig()
model = modellib.MaskRCNN(mode="inference", model_dir=modelFilePath, config=config)
model.load_weights(trainedModelPath, by_name=True)
classes = ['BG', 'portrait', 'portrait_body', 'selfie_top']
def applyBokeh(imageInput, boxes, masks, class_ids, classes, scores, show_mask=True):
instances = boxes.shape[0]
if not instances:
print("No instances available!!!")
for i in range(instances):
mask = masks[:, :, i]
invertImage = np.abs(1. - mask)
r,g,b = cv2.split(imageInput)
maskedR = r * mask
maskedG = g * mask
maskedB = b * mask
mergedImage = cv2.merge((maskedR, maskedG, maskedB))
invertR = r * invertImage
invertG = g * invertImage
invertB = b * invertImage
backgroundMerged = cv2.merge((invertR, invertG, invertB))
mergedImage = np.asarray(mergedImage * 255., dtype='uint8')
backgroundMerged = np.asarray(backgroundMerged * 255., dtype='uint8')
backgroundBokeh = bokeh(backgroundMerged)
backgroundBokeh = np.asarray(backgroundBokeh * 255., dtype='uint8')
maskedImage = cv2.add(mergedImage, backgroundBokeh)
maskedImage = cv2.cvtColor(maskedImage, cv2.COLOR_BGR2RGB)
return maskedImage
detectionResults = model.detect([image], verbose=0)
r = detectionResults[0]
output = applyBokeh(imageRGB, r['rois'], r['masks'], r['class_ids'], classes, r['scores'])
cv2.imwrite('static/bokeh.jpg', output)
return "bokehdone"
\ No newline at end of file
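# Hedged side note (not from the merge request) on the bokeh kernel built in
# Bokeh() above: the 11x11 binary mask shapes the blur into an aperture, the
# Gaussian outer product weights it, and dividing by the sum keeps the overall
# brightness unchanged. A tiny standalone check, with an all-ones mask standing
# in for the hand-drawn one:
import cv2
import numpy as np
g = cv2.getGaussianKernel(11, 5.)
aperture = np.ones((11, 11), dtype='float')
k = g * g.T * aperture
k = k / np.sum(k)
print(np.isclose(k.sum(), 1.0))   # True: an energy-preserving blur kernel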
import warnings
warnings.filterwarnings('ignore')
import os
import sys
import json
import datetime
import numpy as np
import skimage.draw
import cv2
import random
import math
import re
import time
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.image as mpimg
from mrcnn import utils
from mrcnn import visualize
from mrcnn.visualize import display_images
from mrcnn.visualize import display_instances
import mrcnn.model as modellib
from mrcnn.model import log
from mrcnn.config import Config
from mrcnn import model as modellib, utils
def mask():
path_to_new_image = 'static/theimage.jpg'
#image1 = mpimg.imread(path_to_new_image)
image1 = cv2.imread(path_to_new_image)
ROOT_DIR = os.path.join("D:/research -app/2021-129/Backend/")
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
WEIGHTS_PATH = 'D:/research -app/2021-129/Backend/mask_rcnn_object_0010.h5'
class InferenceConfig(Config):
NAME = "object"
GPU_COUNT = 1
IMAGES_PER_GPU = 1
NUM_CLASSES = 1 + 3 #Background and the total labels of the dataset
DETECTION_MIN_CONFIDENCE = 0.5
config = InferenceConfig()
#config.display()
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
weights_path = WEIGHTS_PATH
model.load_weights(weights_path, by_name=True)
class_names = ['BG', 'portrait', 'portrait_body', 'selfie_top']
results1 = model.detect([image1], verbose=1)
r1 = results1[0]
visualize.display_instances(image1, r1['rois'], r1['masks'], r1['class_ids'],
class_names, r1['scores'], title="Predictions1")
return True
import warnings
warnings.filterwarnings('ignore')
import os
import sys
import json
import datetime
import numpy as np
import skimage.draw
import cv2
import random
import math
import re
import time
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.image as mpimg
import colorsys
from skimage.measure import find_contours
from matplotlib.patches import Polygon
from mrcnn import utils
from mrcnn import visualize
from mrcnn.visualize import display_images
from mrcnn.visualize import display_instances
import mrcnn.model as modellib
from mrcnn.model import log
from mrcnn.config import Config
from mrcnn import model as modellib, utils
#import custom
# Root directory of the project
ROOT_DIR = ""
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
WEIGHTS_PATH = "mask_rcnn_object_0010.h5"
class CustomConfig(Config):
"""Configuration for training on the custom dataset.
Derives from the base Config class and overrides some values.
"""
# Give the configuration a recognizable name
NAME = "object"
IMAGES_PER_GPU = 1
NUM_CLASSES = 1 + 3 # Background + labels
# Number of training steps per epoch
STEPS_PER_EPOCH = 10
# Skip detections with < 90% confidence
DETECTION_MIN_CONFIDENCE = 0.9
config = CustomConfig()
class InferenceConfig(config.__class__):
# Run detection on one image at a time
GPU_COUNT = 1
IMAGES_PER_GPU = 1
DETECTION_MIN_CONFIDENCE = 0.7
config = InferenceConfig()
config.display()
#LOAD MODEL. Create model in inference mode
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
# Load COCO weights Or, load the last model you trained
weights_path = WEIGHTS_PATH
# Load weights
# print("Loading weights ", weights_path)
model.load_weights(weights_path, by_name=True)
class_names = ['BG', 'portrait','portrait_body', 'selfie_top']
path = sys.argv[1]  # image path passed on the command line
#path_to_new_image = 'D:/research/maskRcnn/images/portrait demo 1.jpg'
path_to_new_image = path.split("/")[-1]
image1 = mpimg.imread(path_to_new_image)
plt.imshow(image1)
def random_colors(N, bright=True):
"""
Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
"""
brightness = 1.0 if bright else 0.7
hsv = [(i / N, 1, brightness) for i in range(N)]
colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
random.shuffle(colors)
return colors
def apply_mask(image, mask, color, alpha=0.5):
"""Apply the given mask to the image.
"""
for c in range(3):
image[:, :, c] = np.where(mask == 1,
image[:, :, c] *
(1 - alpha) + alpha * color[c] * 255,
image[:, :, c])
return image
def display_instances(image, boxes, masks, class_ids, class_names,
scores=None, title="",
figsize=(16, 16), ax=None,
show_mask=True, show_bbox=True,
colors=None, captions=None):
"""
boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.
masks: [height, width, num_instances]
class_ids: [num_instances]
class_names: list of class names of the dataset
scores: (optional) confidence scores for each box
title: (optional) Figure title
show_mask, show_bbox: To show masks and bounding boxes or not
figsize: (optional) the size of the image
colors: (optional) An array or colors to use with each object
captions: (optional) A list of strings to use as captions for each object
"""
# Number of instances
N = boxes.shape[0]
if not N:
print("\n*** No instances to display *** \n")
else:
assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]
# If no axis is passed, create one and automatically call show()
auto_show = False
if not ax:
_, ax = plt.subplots(1, figsize=figsize)
auto_show = True
# Generate random colors
colors = colors or random_colors(N)
# Show area outside image boundaries.
height, width = image.shape[:2]
ax.set_ylim(height + 10, -10)
ax.set_xlim(-10, width + 10)
ax.axis('off')
ax.set_title(title)
masked_image = image.astype(np.uint32).copy()
for i in range(N):
color = colors[i]
# Bounding box
if not np.any(boxes[i]):
# Skip this instance. Has no bbox. Likely lost in image cropping.
continue
y1, x1, y2, x2 = boxes[i]
if show_bbox:
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
alpha=0.7, linestyle="dashed",
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Label
if not captions:
class_id = class_ids[i]
score = scores[i] if scores is not None else None
label = class_names[class_id]
caption = "{} {:.3f}".format(label, score) if score else label
else:
caption = captions[i]
ax.text(x1, y1 + 8, caption,
color='w', size=11, backgroundcolor="none")
# Mask
mask = masks[:, :, i]
if show_mask:
masked_image = apply_mask(masked_image, mask, color)
# Mask Polygon
# Pad to ensure proper polygons for masks that touch image edges.
padded_mask = np.zeros(
(mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
padded_mask[1:-1, 1:-1] = mask
contours = find_contours(padded_mask, 0.5)
for verts in contours:
# Subtract the padding and flip (y, x) to (x, y)
verts = np.fliplr(verts) - 1
p = Polygon(verts, facecolor="none", edgecolor=color)
ax.add_patch(p)
ax.imshow(masked_image.astype(np.uint8))
if auto_show:
plt.show()
# print(len([image1]))
results1 = model.detect([image1], verbose=1)
r1 = results1[0]
display_instances(image1, r1['rois'], r1['masks'], r1['class_ids'],
class_names, r1['scores'], title="Predictions1")
\ No newline at end of file
"""
Mask R-CNN
Base Configurations class.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import numpy as np
# Base Configuration Class
# Don't use this class directly. Instead, sub-class it and override
# the configurations you need to change.
class Config(object):
"""Base configuration class. For custom configurations, create a
sub-class that inherits from this one and override properties
that need to be changed.
"""
# Name the configurations. For example, 'COCO', 'Experiment 3', ...etc.
# Useful if your code needs to do things differently depending on which
# experiment is running.
NAME = None # Override in sub-classes
# NUMBER OF GPUs to use. When using only a CPU, this needs to be set to 1.
GPU_COUNT = 1
# Number of images to train with on each GPU. A 12GB GPU can typically
# handle 2 images of 1024x1024px.
# Adjust based on your GPU memory and image sizes. Use the highest
# number that your GPU can handle for best performance.
IMAGES_PER_GPU = 2
# Number of training steps per epoch
# This doesn't need to match the size of the training set. Tensorboard
# updates are saved at the end of each epoch, so setting this to a
# smaller number means getting more frequent TensorBoard updates.
# Validation stats are also calculated at each epoch end and they
# might take a while, so don't set this too small to avoid spending
# a lot of time on validation stats.
STEPS_PER_EPOCH = 1000
# Number of validation steps to run at the end of every training epoch.
# A bigger number improves accuracy of validation stats, but slows
# down the training.
VALIDATION_STEPS = 50
# Backbone network architecture
# Supported values are: resnet50, resnet101.
# You can also provide a callable that should have the signature
# of model.resnet_graph. If you do so, you need to supply a callable
# to COMPUTE_BACKBONE_SHAPE as well
BACKBONE = "resnet101"
# Only useful if you supply a callable to BACKBONE. Should compute
# the shape of each layer of the FPN Pyramid.
# See model.compute_backbone_shapes
COMPUTE_BACKBONE_SHAPE = None
# The strides of each layer of the FPN Pyramid. These values
# are based on a Resnet101 backbone.
BACKBONE_STRIDES = [4, 8, 16, 32, 64]
# Size of the fully-connected layers in the classification graph
FPN_CLASSIF_FC_LAYERS_SIZE = 1024
# Size of the top-down layers used to build the feature pyramid
TOP_DOWN_PYRAMID_SIZE = 256
# Number of classification classes (including background)
NUM_CLASSES = 1 # Override in sub-classes
# Length of square anchor side in pixels
RPN_ANCHOR_SCALES = (32, 64, 128, 256, 512)
# Ratios of anchors at each cell (width/height)
# A value of 1 represents a square anchor, and 0.5 is a wide anchor
RPN_ANCHOR_RATIOS = [0.5, 1, 2]
# Anchor stride
# If 1 then anchors are created for each cell in the backbone feature map.
# If 2, then anchors are created for every other cell, and so on.
RPN_ANCHOR_STRIDE = 1
# Non-max suppression threshold to filter RPN proposals.
# You can increase this during training to generate more proposals.
RPN_NMS_THRESHOLD = 0.7
# How many anchors per image to use for RPN training
RPN_TRAIN_ANCHORS_PER_IMAGE = 256
# ROIs kept after tf.nn.top_k and before non-maximum suppression
PRE_NMS_LIMIT = 6000
# ROIs kept after non-maximum suppression (training and inference)
POST_NMS_ROIS_TRAINING = 2000
POST_NMS_ROIS_INFERENCE = 1000
# If enabled, resizes instance masks to a smaller size to reduce
# memory load. Recommended when using high-resolution images.
USE_MINI_MASK = True
MINI_MASK_SHAPE = (56, 56) # (height, width) of the mini-mask
# Input image resizing
# Generally, use the "square" resizing mode for training and predicting
# and it should work well in most cases. In this mode, images are scaled
# up such that the small side is = IMAGE_MIN_DIM, but ensuring that the
# scaling doesn't make the long side > IMAGE_MAX_DIM. Then the image is
# padded with zeros to make it a square so multiple images can be put
# in one batch.
# Available resizing modes:
# none: No resizing or padding. Return the image unchanged.
# square: Resize and pad with zeros to get a square image
# of size [max_dim, max_dim].
# pad64: Pads width and height with zeros to make them multiples of 64.
# If IMAGE_MIN_DIM or IMAGE_MIN_SCALE are not None, then it scales
# up before padding. IMAGE_MAX_DIM is ignored in this mode.
# The multiple of 64 is needed to ensure smooth scaling of feature
# maps up and down the 6 levels of the FPN pyramid (2**6=64).
# crop: Picks random crops from the image. First, scales the image based
# on IMAGE_MIN_DIM and IMAGE_MIN_SCALE, then picks a random crop of
# size IMAGE_MIN_DIM x IMAGE_MIN_DIM. Can be used in training only.
# IMAGE_MAX_DIM is not used in this mode.
IMAGE_RESIZE_MODE = "square"
IMAGE_MIN_DIM = 800
IMAGE_MAX_DIM = 1024
# Minimum scaling ratio. Checked after MIN_IMAGE_DIM and can force further
# up scaling. For example, if set to 2 then images are scaled up to double
# the width and height, or more, even if MIN_IMAGE_DIM doesn't require it.
# However, in 'square' mode, it can be overruled by IMAGE_MAX_DIM.
IMAGE_MIN_SCALE = 0
# Number of color channels per image. RGB = 3, grayscale = 1, RGB-D = 4
# Changing this requires other changes in the code. See the WIKI for more
# details: https://github.com/matterport/Mask_RCNN/wiki
IMAGE_CHANNEL_COUNT = 3
# Image mean (RGB)
MEAN_PIXEL = np.array([123.7, 116.8, 103.9])
# Number of ROIs per image to feed to classifier/mask heads
# The Mask RCNN paper uses 512 but often the RPN doesn't generate
# enough positive proposals to fill this and keep a positive:negative
# ratio of 1:3. You can increase the number of proposals by adjusting
# the RPN NMS threshold.
TRAIN_ROIS_PER_IMAGE = 200
# Percent of positive ROIs used to train classifier/mask heads
ROI_POSITIVE_RATIO = 0.33
# Pooled ROIs
POOL_SIZE = 7
MASK_POOL_SIZE = 14
# Shape of output mask
# To change this you also need to change the neural network mask branch
MASK_SHAPE = [28, 28]
# Maximum number of ground truth instances to use in one image
MAX_GT_INSTANCES = 100
# Bounding box refinement standard deviation for RPN and final detections.
RPN_BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])
BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])
# Max number of final detections
DETECTION_MAX_INSTANCES = 100
# Minimum probability value to accept a detected instance
# ROIs below this threshold are skipped
DETECTION_MIN_CONFIDENCE = 0.7
# Non-maximum suppression threshold for detection
DETECTION_NMS_THRESHOLD = 0.3
# Learning rate and momentum
# The Mask RCNN paper uses lr=0.02, but on TensorFlow it causes
# weights to explode. Likely due to differences in optimizer
# implementation.
LEARNING_RATE = 0.001
LEARNING_MOMENTUM = 0.9
# Weight decay regularization
WEIGHT_DECAY = 0.0001
# Loss weights for more precise optimization.
# Can be used for R-CNN training setup.
LOSS_WEIGHTS = {
"rpn_class_loss": 1.,
"rpn_bbox_loss": 1.,
"mrcnn_class_loss": 1.,
"mrcnn_bbox_loss": 1.,
"mrcnn_mask_loss": 1.
}
# Use RPN ROIs or externally generated ROIs for training
# Keep this True for most situations. Set to False if you want to train
# the head branches on ROI generated by code rather than the ROIs from
# the RPN. For example, to debug the classifier head without having to
# train the RPN.
USE_RPN_ROIS = True
# Train or freeze batch normalization layers
# None: Train BN layers. This is the normal mode
# False: Freeze BN layers. Good when using a small batch size
# True: (don't use). Set layer in training mode even when predicting
TRAIN_BN = False # Defaulting to False since batch size is often small
# Gradient norm clipping
GRADIENT_CLIP_NORM = 5.0
def __init__(self):
"""Set values of computed attributes."""
# Effective batch size
self.BATCH_SIZE = self.IMAGES_PER_GPU * self.GPU_COUNT
# Input image size
if self.IMAGE_RESIZE_MODE == "crop":
self.IMAGE_SHAPE = np.array([self.IMAGE_MIN_DIM, self.IMAGE_MIN_DIM,
self.IMAGE_CHANNEL_COUNT])
else:
self.IMAGE_SHAPE = np.array([self.IMAGE_MAX_DIM, self.IMAGE_MAX_DIM,
self.IMAGE_CHANNEL_COUNT])
# Image meta data length
# See compose_image_meta() for details
self.IMAGE_META_SIZE = 1 + 3 + 3 + 4 + 1 + self.NUM_CLASSES
def display(self):
"""Display Configuration values."""
print("\nConfigurations:")
for a in dir(self):
if not a.startswith("__") and not callable(getattr(self, a)):
print("{:30} {}".format(a, getattr(self, a)))
print("\n")
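# Hedged usage sketch (not part of the original file): how the inference scripts
# above consume this base class. The subclass name and values are illustrative.
class DemoConfig(Config):
    NAME = "demo"
    GPU_COUNT = 1
    IMAGES_PER_GPU = 2
    NUM_CLASSES = 1 + 3   # background + three labels, as in the MR's InferenceConfig

cfg = DemoConfig()
print(cfg.BATCH_SIZE)     # 2, computed in __init__ as IMAGES_PER_GPU * GPU_COUNT
print(cfg.IMAGE_SHAPE)    # [1024 1024 3] because IMAGE_RESIZE_MODE == "square"
cfg.display()             # dumps every setting, one per line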
"""
Mask R-CNN
Multi-GPU Support for Keras.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
Ideas and a small code snippets from these sources:
https://github.com/fchollet/keras/issues/2436
https://medium.com/@kuza55/transparent-multi-gpu-training-on-tensorflow-with-keras-8b0016fd9012
https://github.com/avolkov1/keras_experiments/blob/master/keras_exp/multigpu/
https://github.com/fchollet/keras/blob/master/keras/utils/training_utils.py
"""
import tensorflow as tf
import keras.backend as K
import keras.layers as KL
import keras.models as KM
class ParallelModel(KM.Model):
"""Subclasses the standard Keras Model and adds multi-GPU support.
It works by creating a copy of the model on each GPU. Then it slices
the inputs and sends a slice to each copy of the model, and then
merges the outputs together and applies the loss on the combined
outputs.
"""
def __init__(self, keras_model, gpu_count):
"""Class constructor.
keras_model: The Keras model to parallelize
gpu_count: Number of GPUs. Must be > 1
"""
self.inner_model = keras_model
self.gpu_count = gpu_count
merged_outputs = self.make_parallel()
super(ParallelModel, self).__init__(inputs=self.inner_model.inputs,
outputs=merged_outputs)
def __getattribute__(self, attrname):
"""Redirect loading and saving methods to the inner model. That's where
the weights are stored."""
if 'load' in attrname or 'save' in attrname:
return getattr(self.inner_model, attrname)
return super(ParallelModel, self).__getattribute__(attrname)
def summary(self, *args, **kwargs):
"""Override summary() to display summaries of both, the wrapper
and inner models."""
super(ParallelModel, self).summary(*args, **kwargs)
self.inner_model.summary(*args, **kwargs)
def make_parallel(self):
"""Creates a new wrapper model that consists of multiple replicas of
the original model placed on different GPUs.
"""
# Slice inputs. Slice inputs on the CPU to avoid sending a copy
# of the full inputs to all GPUs. Saves on bandwidth and memory.
input_slices = {name: tf.split(x, self.gpu_count)
for name, x in zip(self.inner_model.input_names,
self.inner_model.inputs)}
output_names = self.inner_model.output_names
outputs_all = []
for i in range(len(self.inner_model.outputs)):
outputs_all.append([])
# Run the model call() on each GPU to place the ops there
for i in range(self.gpu_count):
with tf.device('/gpu:%d' % i):
with tf.name_scope('tower_%d' % i):
# Run a slice of inputs through this replica
zipped_inputs = zip(self.inner_model.input_names,
self.inner_model.inputs)
inputs = [
KL.Lambda(lambda s: input_slices[name][i],
output_shape=lambda s: (None,) + s[1:])(tensor)
for name, tensor in zipped_inputs]
# Create the model replica and get the outputs
outputs = self.inner_model(inputs)
if not isinstance(outputs, list):
outputs = [outputs]
# Save the outputs for merging back together later
for l, o in enumerate(outputs):
outputs_all[l].append(o)
# Merge outputs on CPU
with tf.device('/cpu:0'):
merged = []
for outputs, name in zip(outputs_all, output_names):
# Concatenate or average outputs?
# Outputs usually have a batch dimension and we concatenate
# across it. If they don't, then the output is likely a loss
# or a metric value that gets averaged across the batch.
# Keras expects losses and metrics to be scalars.
if K.int_shape(outputs[0]) == ():
# Average
m = KL.Lambda(lambda o: tf.add_n(o) / len(outputs), name=name)(outputs)
else:
# Concatenate
m = KL.Concatenate(axis=0, name=name)(outputs)
merged.append(m)
return merged
if __name__ == "__main__":
# Testing code below. It creates a simple model to train on MNIST and
# tries to run it on 2 GPUs. It saves the graph so it can be viewed
# in TensorBoard. Run it as:
#
# python3 parallel_model.py
import os
import numpy as np
import keras.optimizers
from keras.datasets import mnist
from keras.preprocessing.image import ImageDataGenerator
GPU_COUNT = 2
# Root directory of the project
ROOT_DIR = os.path.abspath("../")
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
def build_model(x_train, num_classes):
# Reset default graph. Keras leaves old ops in the graph,
# which are ignored for execution but clutter graph
# visualization in TensorBoard.
tf.reset_default_graph()
inputs = KL.Input(shape=x_train.shape[1:], name="input_image")
x = KL.Conv2D(32, (3, 3), activation='relu', padding="same",
name="conv1")(inputs)
x = KL.Conv2D(64, (3, 3), activation='relu', padding="same",
name="conv2")(x)
x = KL.MaxPooling2D(pool_size=(2, 2), name="pool1")(x)
x = KL.Flatten(name="flat1")(x)
x = KL.Dense(128, activation='relu', name="dense1")(x)
x = KL.Dense(num_classes, activation='softmax', name="dense2")(x)
return KM.Model(inputs, x, "digit_classifier_model")
# Load MNIST Data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = np.expand_dims(x_train, -1).astype('float32') / 255
x_test = np.expand_dims(x_test, -1).astype('float32') / 255
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
# Build data generator and model
datagen = ImageDataGenerator()
model = build_model(x_train, 10)
# Add multi-GPU support.
model = ParallelModel(model, GPU_COUNT)
optimizer = keras.optimizers.SGD(lr=0.01, momentum=0.9, clipnorm=5.0)
model.compile(loss='sparse_categorical_crossentropy',
optimizer=optimizer, metrics=['accuracy'])
model.summary()
# Train
model.fit_generator(
datagen.flow(x_train, y_train, batch_size=64),
steps_per_epoch=50, epochs=10, verbose=1,
validation_data=(x_test, y_test),
callbacks=[keras.callbacks.TensorBoard(log_dir=MODEL_DIR,
write_graph=True)]
)
import cv2
import numpy as np
import os
import sys
from mrcnn import utils
from mrcnn import visualize
from mrcnn.visualize import display_images
from mrcnn.visualize import display_instances
import mrcnn.model as modellib
from mrcnn.model import log
from mrcnn.config import Config
from mrcnn import model as modellib, utils
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import PIL
from PIL import Image
MODEL_DIR = os.path.join("logs")
COCO_MODEL_PATH = 'D:/chrome/Y4S2/research/New folder (3)/Dinusha-IT18118346/Backend/mask_rcnn_object_0010.h5'
class CustomConfig(Config):
"""Configuration for training on the custom dataset.
Derives from the base Config class and overrides some values.
"""
# Give the configuration a recognizable name
NAME = "object"
IMAGES_PER_GPU = 1
NUM_CLASSES = 1 + 3 # Background + labels
# Number of training steps per epoch
STEPS_PER_EPOCH = 10
# Skip detections with < 90% confidence
DETECTION_MIN_CONFIDENCE = 0.9
config = CustomConfig()
# Change the config information
class InferenceConfig(config.__class__):
GPU_COUNT = 1
# Number of images to train with on each GPU. A 12GB GPU can typically
# handle 2 images of 1024x1024px.
# Adjust based on your GPU memory and image sizes. Use the highest
# number that your GPU can handle for best performance.
IMAGES_PER_GPU = 1
config = InferenceConfig()
# COCO dataset object names
model = modellib.MaskRCNN(
mode="inference", model_dir=MODEL_DIR, config=config
)
model.load_weights(COCO_MODEL_PATH, by_name=True)
class_names = ['BG', 'portrait', 'portrait_body', 'selfie_top']
"""def apply_mask2(image, mask):
blur1[:, :, 0] = np.where(
mask == 0,
image[:, :, 0],
blur1[:, :,0]
)
blur1[:, :, 1] = np.where(
mask == 0,
image[:, :, 1],
blur1[:, :,1]
)
blur1[:, :, 2] = np.where(
mask == 0,
image[:, :, 2],
blur1[:, :,2]
)
return blur1 """
def Blur():
# Input the original image name
original_image = 'static/theimage.jpg'
# Use OpenCV to read the original image
image = cv2.imread(original_image)
intent= 15
dimensions = image.shape
blur = cv2.GaussianBlur(image ,(intent,intent),0)
def apply_mask(image, mask):
blur[:, :, 0] = np.where(
mask == 0,
blur[:, :, 0],
image[:, :, 0]
)
blur[:, :, 1] = np.where(
mask == 0,
blur[:, :, 1],
image[:, :, 1]
)
blur[:, :, 2] = np.where(
mask == 0,
blur[:, :, 2],
image[:, :, 2]
)
return blur
# This function is used to show the object detection result in original image.
def display_instances(image, boxes, masks, ids, names, scores):
# max_area will save the largest object for all the detection results
max_area = 0
# n_instances saves the amount of all objects
n_instances = boxes.shape[0]
if not n_instances:
print('NO INSTANCES TO DISPLAY')
else:
assert boxes.shape[0] == masks.shape[-1] == ids.shape[0]
for i in range(n_instances):
if not np.any(boxes[i]):
continue
# compute the square of each object
y1, x1, y2, x2 = boxes[i]
square = (y2 - y1) * (x2 - x1)
# use label to select person object from all the 80 classes in COCO dataset
label = names[ids[i]]
if label == 'selfie_top' or label == 'portrait_body' or label == 'portrait':
# save the largest object in the image as main character
# other people will be regarded as background
if square > max_area:
max_area = square
mask = masks[:, :, i]
else:
continue
else:
continue
# apply mask for the image
save1 = apply_mask(image, mask)
# save2 = apply_mask2(image, mask)
return save1 # ,save2
## run the Mask R-CNN model
results = model.detect([image], verbose=0)
r = results[0]
##applying our effect
frame1 = display_instances(
image, r['rois'], r['masks'], r['class_ids'], class_names, r['scores']
)
frame1 = cv2.resize(frame1, (466, 700))
cv2.imwrite('static/blur.jpg', frame1)
return "blurdone"
@@ -103,4 +103,12 @@ dependencies {
testImplementation 'junit:junit:4.+'
androidTestImplementation 'androidx.test.ext:junit:1.1.2'
androidTestImplementation 'androidx.test.espresso:espresso-core:3.3.0'
// retrofit
implementation 'com.squareup.retrofit2:retrofit:2.1.0'
implementation 'com.squareup.retrofit2:converter-gson:2.1.0'
implementation("com.squareup.okhttp3:okhttp:4.9.2")
implementation 'com.squareup.picasso:picasso:2.71828'
}
\ No newline at end of file
@@ -61,6 +61,17 @@
android:label="Save"
android:screenOrientation="portrait" />
<activity android:name=".ImageProcessing.Backgroundupload"/>
<activity android:name=".ImageProcessing.Splashedit" />
<activity android:name=".ImageProcessing.Cut" />
<activity android:name=".ImageProcessing.Blur" />
<activity android:name=".ImageProcessing.Selectedit" />
<activity android:name=".ImageProcessing.MedBlur"/>
<activity android:name=".ImageProcessing.BackgroundCustomization"/>
<activity android:name=".ImageProcessing.Bokeh"/>
<activity android:name=".ImageProcessing.Vignette"/>
<meta-data
android:name="com.google.mlkit.vision.DEPENDENCIES"
android:value="face" />
@@ -74,4 +85,4 @@
</receiver>
</application>
</manifest>
\ No newline at end of file
</manifest>
package com.app.smartphotoeditor.ImageProcessing;
import okhttp3.MultipartBody;
import okhttp3.ResponseBody;
import retrofit2.Call;
import retrofit2.http.GET;
import retrofit2.http.Multipart;
import retrofit2.http.POST;
import retrofit2.http.Part;
public interface Api {
String baseurl="http://10.0.2.15:5000";
@Multipart
@POST("upload/")
Call<ResponseBody> uploadImage( @Part MultipartBody.Part file);
@Multipart
@POST("uploadbg/")
Call<ResponseBody> uploadBgImage(@Part MultipartBody.Part file);
@GET("/blur")
Call<ResponseBody> blur();
@GET("/splash")
Call<ResponseBody> splash();
@GET("/cut")
Call<ResponseBody> cut();
@GET("/bokeh")
Call<ResponseBody> bokeh();
@GET("/vignette")
Call<ResponseBody> vignette();
@GET("/medblur")
Call<ResponseBody> medblur();
}
package com.app.smartphotoeditor.ImageProcessing;
import androidx.appcompat.app.AppCompatActivity;
import androidx.core.app.ActivityCompat;
import android.Manifest;
import android.app.Activity;
import android.app.ProgressDialog;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.database.Cursor;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.net.Uri;
import android.os.Bundle;
import android.provider.MediaStore;
import android.util.Base64;
import android.util.Log;
import android.view.View;
import android.webkit.MimeTypeMap;
import android.widget.Button;
import android.widget.ImageView;
import android.widget.Toast;
import com.app.smartphotoeditor.R;
import com.app.smartphotoeditor.config.ImageList;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.util.concurrent.TimeUnit;
import okhttp3.FormBody;
import okhttp3.MediaType;
import okhttp3.MultipartBody;
import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.RequestBody;
public class BackgroundCustomization extends AppCompatActivity
{
private Button btnedit,btnselectimage;
//private static final String UPLOAD_IMAGE_URL = "http://10.0.2.2:5000/upload";
private static final String UPLOAD_IMAGE_URL = "http://2989-112-134-170-172.ngrok.io/upload";
ImageView image;
String f_path,filePath,fileExtn;
String f_extension;
ProgressDialog progress;
Bitmap inputimage;
private static final int REQUEST_EXTERNAL_STORAGE = 1;
private static String[] PERMISSIONS_STORAGE = {
Manifest.permission.READ_EXTERNAL_STORAGE,
Manifest.permission.WRITE_EXTERNAL_STORAGE
};
@Override
protected void onCreate(Bundle savedInstanceState)
{
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_background_customization);
btnedit = (Button) findViewById(R.id.btnedit);
image = findViewById(R.id.img);
//displayFileChoose();
btnedit.setOnClickListener(new View.OnClickListener()
{
@Override
public void onClick(View v) {
// if(filePath==null)
// {
//Toast.makeText(BackgroundCustomization.this,"Select An Image", Toast.LENGTH_LONG).show();
//}else{
//****
progress = new ProgressDialog(BackgroundCustomization.this);
progress.setTitle("Uploading");
progress.setMessage("Please Wait...");
progress.show();
progress.setCancelable(false);
progress.setCanceledOnTouchOutside(false);
f_path = filePath;
f_extension = fileExtn;
try
{
//if(fileExtn.equals("img") || fileExtn.equals("jpg") || fileExtn.equals("jpeg") || fileExtn.equals("gif") || fileExtn.equals("png")) {
//Toast.makeText(BackgroundCustomization.this, filePath, Toast.LENGTH_SHORT).show();
Thread t = new Thread(new Runnable()
{
@Override
public void run()
{
//To upload the image to server
String input = convert(ImageList.getInstance().getCurrentBitmap());
System.out.println(input);
//File file = new File(f_path);
//String content_type = MimeTypeMap.getSingleton().getMimeTypeFromExtension(f_extension);
OkHttpClient okHttpClient = new OkHttpClient().newBuilder()
.connectTimeout(320, TimeUnit.SECONDS)
.readTimeout(320, TimeUnit.SECONDS)
.writeTimeout(320, TimeUnit.SECONDS)
.build();
//RequestBody file_body = RequestBody.create(MediaType.parse(content_type), file);
RequestBody request_body = new FormBody.Builder()
.add("image", input)
//.addFormDataPart("image", f_path.substring(f_path.lastIndexOf("/") +1), file_body)
.build();
Request request = new Request.Builder()
.url(UPLOAD_IMAGE_URL)
.post(request_body)
.build();
try
{
//Request executed
okHttpClient.newCall(request).execute();
progress.dismiss();
openEditActivity();
}catch (Exception e)
{
e.printStackTrace();
progress.dismiss();
}
}
});
t.start();
//}else
//{
//}
}catch (Exception e)
{
e.printStackTrace();
}//******
//}
}
});
btnselectimage=(Button) findViewById(R.id.selectimage);
btnselectimage.setOnClickListener(new View.OnClickListener()
{
@Override
public void onClick(View v)
{
verifyStoragePermissions(BackgroundCustomization.this);
displayFileChoose();
}
});
}
public void displayFileChoose()
{
Intent pickPhoto = new Intent(Intent.ACTION_PICK, MediaStore.Images.Media.EXTERNAL_CONTENT_URI);
pickPhoto.setType("image/*");
startActivityForResult(pickPhoto,1);
}
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent ImageReturnedIntent)
{
super.onActivityResult(requestCode, resultCode, ImageReturnedIntent);
if(requestCode==1){
Uri selectedImage = ImageReturnedIntent.getData();
filePath = getPath(selectedImage);
fileExtn = filePath.substring(filePath.lastIndexOf(".")+1);
image.setImageURI(selectedImage);
}
}
public String getPath(Uri uri)
{
String[] projection = {MediaStore.MediaColumns.DATA};
//store query result in cursor variable
Cursor cursor = getContentResolver().query(uri,projection,null,null,null);
int column_index = cursor.getColumnIndexOrThrow(MediaStore.MediaColumns.DATA);
cursor.moveToFirst();
String imagePath = cursor.getString(column_index);
//Test Output
Log.d("Image Path : " , imagePath);
//return string
return cursor.getString(column_index);
}
public static void verifyStoragePermissions(Activity activity)
{
int permission = ActivityCompat.checkSelfPermission(activity, Manifest.permission.WRITE_EXTERNAL_STORAGE);
if (permission != PackageManager.PERMISSION_GRANTED)
{
ActivityCompat.requestPermissions(
activity,
PERMISSIONS_STORAGE,
REQUEST_EXTERNAL_STORAGE
);
}
}
public void openEditActivity()
{
Intent intent = new Intent(this, Selectedit.class);
startActivity(intent);
}
public static Bitmap convert(String base64Str) throws IllegalArgumentException
{
byte[] decodedBytes = Base64.decode( base64Str.substring(base64Str.indexOf(",") + 1), Base64.DEFAULT );
return BitmapFactory.decodeByteArray(decodedBytes, 0, decodedBytes.length);
}
public static String convert(Bitmap bitmap)
{
ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
bitmap.compress(Bitmap.CompressFormat.PNG, 100, outputStream);
return Base64.encodeToString(outputStream.toByteArray(), Base64.DEFAULT);
}
}
\ No newline at end of file
package com.app.smartphotoeditor.ImageProcessing;
import androidx.appcompat.app.AppCompatActivity;
import androidx.core.app.ActivityCompat;
import android.Manifest;
import android.app.Activity;
import android.app.ProgressDialog;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.database.Cursor;
import android.net.Uri;
import android.os.Bundle;
import android.provider.MediaStore;
import android.view.View;
import android.webkit.MimeTypeMap;
import android.widget.Button;
import android.widget.ImageView;
import android.widget.Toast;
import com.app.smartphotoeditor.R;
import java.io.File;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import okhttp3.MediaType;
import okhttp3.MultipartBody;
import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.RequestBody;
import okhttp3.ResponseBody;
import retrofit2.Call;
import retrofit2.Callback;
public class Backgroundupload extends AppCompatActivity {
private Button btncutprocess, btnselectimage;
ImageView image;
private static final String UPLOAD_IMAGE_URL="http://727a-112-134-169-152.ngrok.io/uploadbg";
String f_path, filePath, fileExtn;
String f_extension;
ProgressDialog progress;
private static final int REQUEST_EXTERNAL_STORAGE = 1;
private static String[] PERMISSIONS_STORAGE = {
Manifest.permission.READ_EXTERNAL_STORAGE,
Manifest.permission.WRITE_EXTERNAL_STORAGE
};
@Override
protected void onCreate(Bundle savedInstanceState)
{
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_backgroundupload);
btncutprocess =(Button) findViewById(R.id.btncutprocess);
image = findViewById(R.id.imgbg);
btnselectimage =(Button) findViewById(R.id.selectbgimage);
btnselectimage.setOnClickListener(new View.OnClickListener()
{
@Override
public void onClick(View v)
{
verifyStoragePermissions(Backgroundupload.this);
displayFileChoose();
}
});
btncutprocess.setOnClickListener(new View.OnClickListener()
{
@Override
public void onClick(View v)
{
if(filePath == null)
{
Toast.makeText(Backgroundupload.this, "Select an image!", Toast.LENGTH_LONG).show();
}else
{
progress = new ProgressDialog(Backgroundupload.this);
progress.setTitle("Uploading");
progress.setMessage("Please wait...");
progress.show();
progress.setCancelable(false);
progress.setCanceledOnTouchOutside(false);
f_path = filePath;
f_extension = fileExtn;
try
{
if (fileExtn.equals("img") || fileExtn.equals("jpg") || fileExtn.equals("jpeg") || fileExtn.equals("gif") || fileExtn.equals("png"))
{
Toast.makeText(Backgroundupload.this, filePath, Toast.LENGTH_LONG).show();
Thread t = new Thread(new Runnable() {
@Override
public void run() {
//uploading the file to server (same as before)
File file = new File(f_path);
String content_type = MimeTypeMap.getSingleton().getMimeTypeFromExtension(f_extension);
OkHttpClient okHttpClient = new OkHttpClient().newBuilder()
.connectTimeout(120, TimeUnit.SECONDS)
.readTimeout(120, TimeUnit.SECONDS)
.writeTimeout(120, TimeUnit.SECONDS)
.build();
RequestBody file_body = RequestBody.create(MediaType.parse(content_type), file);
RequestBody request_Body = new MultipartBody.Builder()
.setType(MultipartBody.FORM)
.addFormDataPart("type", content_type)
.addFormDataPart("image", f_path.substring(f_path.lastIndexOf("/") +1), file_body)
.build();
Request request = new Request.Builder()
.url(UPLOAD_IMAGE_URL)
.post(request_Body)
.build();
try
{
okHttpClient.newCall(request).execute();
Call<ResponseBody> call = RetrofitClient
.getInstance()
.getApi()
.cut();
call.enqueue(new Callback<ResponseBody>() {
@Override
public void onResponse(Call<ResponseBody> call, retrofit2.Response<ResponseBody> response)
{
try {
//String s = response.body().string();
//Toast.makeText(Backgroundupload.this, s, Toast.LENGTH_LONG).show();
progress.dismiss();
openCutActivity();
} catch (Exception e) {
e.printStackTrace();
}
}
@Override
public void onFailure(Call<ResponseBody> call, Throwable t)
{
progress.dismiss();
Toast.makeText(Backgroundupload.this, t.getMessage(), Toast.LENGTH_LONG).show();
}
});
}catch (Exception e)
{
e.printStackTrace();
Toast.makeText(Backgroundupload.this, "Error!!!", Toast.LENGTH_LONG).show();
progress.dismiss();
}
}
});
t.start();
}else
{
}
}catch (Exception e)
{
e.printStackTrace();
}
}
}
});
}
public void displayFileChoose()
{
Intent pickPhoto = new Intent(Intent.ACTION_PICK, MediaStore.Images.Media.EXTERNAL_CONTENT_URI);
pickPhoto.setType("image/*");
startActivityForResult(pickPhoto,1);
}
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent ImageReturnedIntent)
{
super.onActivityResult(requestCode, resultCode, ImageReturnedIntent);
if(requestCode == 1)
{
Uri selectedImage = ImageReturnedIntent.getData();
filePath = getPath(selectedImage);
fileExtn = filePath.substring(filePath.lastIndexOf(".") +1);
image.setImageURI(selectedImage);
}
}
public String getPath(Uri uri)
{
String[] projection = {MediaStore.MediaColumns.DATA};
Cursor cursor = getContentResolver().query(uri, projection, null, null, null);
int column_index = cursor.getColumnIndexOrThrow(MediaStore.MediaColumns.DATA);
cursor.moveToFirst();
return cursor.getString(column_index);
}
public static void verifyStoragePermissions(Activity activity)
{
int permission = ActivityCompat.checkSelfPermission(activity, Manifest.permission.WRITE_EXTERNAL_STORAGE);
if(permission != PackageManager.PERMISSION_GRANTED)
{
ActivityCompat.requestPermissions(
activity,
PERMISSIONS_STORAGE,
REQUEST_EXTERNAL_STORAGE
);
}
}
public void openCutActivity()
{
Intent intent = new Intent(this, Cut.class);
startActivity(intent);
}
}
\ No newline at end of file
package com.app.smartphotoeditor.ImageProcessing;
import androidx.appcompat.app.AppCompatActivity;
import android.content.Intent;
import android.os.Bundle;
import android.view.View;
import android.widget.Button;
import android.widget.ImageView;
import com.app.smartphotoeditor.R;
import com.app.smartphotoeditor.activities.EditorActivity;
import com.squareup.picasso.MemoryPolicy;
import com.squareup.picasso.NetworkPolicy;
import com.squareup.picasso.Picasso;
public class Blur extends AppCompatActivity {
Button btnBack;
ImageView imgView;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_blur);
btnBack = findViewById(R.id.back);
imgView = findViewById(R.id.imgView);
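// Fetch the processed image straight from the server; caching is disabled so a stale result is never shown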
Picasso
.get()
.load("https://" + Constants.IMGLINK + ".ngrok.io/static/blur.jpg")
.placeholder(R.drawable.progress_bar_material) //replaced my animation with this
.networkPolicy(NetworkPolicy.NO_CACHE)
.memoryPolicy(MemoryPolicy.NO_CACHE)
.resize(300, 300)
.into(imgView);
btnBack.setOnClickListener(new View.OnClickListener()
{
@Override
public void onClick(View v) {
openEditorActivity();
}
});
}
public void openEditorActivity() {
Intent intent = new Intent(this, EditorActivity.class);
startActivity(intent);
}
}
\ No newline at end of file
package com.app.smartphotoeditor.ImageProcessing;
import android.content.Intent;
import android.os.Bundle;
import android.view.View;
import android.widget.Button;
import android.widget.ImageView;
import androidx.appcompat.app.AppCompatActivity;
import com.app.smartphotoeditor.R;
import com.app.smartphotoeditor.activities.EditorActivity;
import com.squareup.picasso.MemoryPolicy;
import com.squareup.picasso.NetworkPolicy;
import com.squareup.picasso.Picasso;
public class Bokeh extends AppCompatActivity {
Button btnBack;
ImageView imgView;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_bokeh);
btnBack=(Button) findViewById(R.id.back);
imgView=(ImageView) findViewById(R.id.imgView);
Picasso
.get()
.load("https://"+Constants.IMGLINK+".ngrok.io/static/bokeh.jpg")
.placeholder(R.drawable.progress_bar_material)
.networkPolicy(NetworkPolicy.NO_CACHE)
.memoryPolicy(MemoryPolicy.NO_CACHE)
.resize(300,300)
.into(imgView);
btnBack.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
openEditorActivity();
}
});
}
public void openEditorActivity() {
Intent intent = new Intent(this, EditorActivity.class);
startActivity(intent);
}
}
\ No newline at end of file
package com.app.smartphotoeditor.ImageProcessing;
public class Constants {
// ngrok tunnel subdomain for the backend server; update this whenever the tunnel is restarted
// (RetrofitClient.baseurl hardcodes the same subdomain and must be kept in sync)
public static final String IMGLINK = "727a-112-134-169-152";
}
package com.app.smartphotoeditor.ImageProcessing;
import android.content.Intent;
import android.os.Bundle;
import android.view.View;
import android.widget.Button;
import android.widget.ImageView;
import androidx.appcompat.app.AppCompatActivity;
import com.app.smartphotoeditor.R;
import com.app.smartphotoeditor.activities.EditorActivity;
import com.squareup.picasso.MemoryPolicy;
import com.squareup.picasso.NetworkPolicy;
import com.squareup.picasso.Picasso;
public class Cut extends AppCompatActivity {
Button btnBack;
ImageView imageView;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_cut);
btnBack =(Button) findViewById(R.id.back);
imageView =(ImageView) findViewById(R.id.imgView);
Picasso
.get()
.load("https://" + Constants.IMGLINK + ".ngrok.io/static/cut.jpg")
.placeholder(R.drawable.progress_bar_material)
.networkPolicy(NetworkPolicy.NO_CACHE)
.memoryPolicy(MemoryPolicy.NO_CACHE)
.resize(300, 300)
.into(imageView);
btnBack.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
openEditorActivity();
}
});
}
public void openEditorActivity() {
Intent intent = new Intent(this, EditorActivity.class);
startActivity(intent);
}
}
\ No newline at end of file
package com.app.smartphotoeditor.ImageProcessing;
import android.content.Intent;
import android.os.Bundle;
import android.view.View;
import android.widget.Button;
import android.widget.ImageView;
import androidx.appcompat.app.AppCompatActivity;
import com.app.smartphotoeditor.R;
import com.app.smartphotoeditor.activities.EditorActivity;
import com.squareup.picasso.MemoryPolicy;
import com.squareup.picasso.NetworkPolicy;
import com.squareup.picasso.Picasso;
public class MedBlur extends AppCompatActivity {
Button btnBack;
ImageView imgView;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_med_blur);
btnBack=(Button) findViewById(R.id.back);
imgView=(ImageView) findViewById(R.id.imgView);
Picasso
.get()
.load("https://"+Constants.IMGLINK+".ngrok.io/static/medblur.jpg")
.placeholder(R.drawable.progress_bar_material)
.networkPolicy(NetworkPolicy.NO_CACHE)
.memoryPolicy(MemoryPolicy.NO_CACHE)
.resize(300,300)
.into(imgView);
btnBack.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
openEditorActivity();
}
});
}
public void openEditorActivity() {
Intent intent = new Intent(this, EditorActivity.class);
startActivity(intent);
}
}
\ No newline at end of file
package com.app.smartphotoeditor.ImageProcessing;
import java.util.concurrent.TimeUnit;
import okhttp3.OkHttpClient;
import retrofit2.Retrofit;
import retrofit2.converter.gson.GsonConverterFactory;
public class RetrofitClient {
// private static final String baseurl="http://10.0.2.15:5000";
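// Must match the ngrok subdomain hardcoded in Constants.IMGLINK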
private static final String baseurl="http://727a-112-134-169-152.ngrok.io";
private static RetrofitClient mInstance;
private Retrofit retrofit;
private RetrofitClient()
{
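// Server-side processing can take a while, so every call made through this client uses generous timeouts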
OkHttpClient okHttpClient = new OkHttpClient().newBuilder()
.connectTimeout(320, TimeUnit.SECONDS)
.readTimeout(320, TimeUnit.SECONDS)
.writeTimeout(320, TimeUnit.SECONDS)
.build();
retrofit=new Retrofit.Builder()
.client(okHttpClient)
.baseUrl(baseurl)
.addConverterFactory(GsonConverterFactory.create())
.build();
}
public static synchronized RetrofitClient getInstance()
{
if(mInstance==null)
{
mInstance=new RetrofitClient();
}
return mInstance;
}
public Api getApi()
{
return retrofit.create(Api.class);
}
}
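// Note: the Api interface returned by getApi() is not part of this diff; from its use in
// Backgroundupload (getApi().cut() returning Call<ResponseBody>) it presumably looks roughly
// like the commented sketch below. The endpoint path and annotation are assumptions, not the real file.
//
// public interface Api {
//     @retrofit2.http.GET("cut")                 // hypothetical relative path
//     retrofit2.Call<okhttp3.ResponseBody> cut();
// }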
package com.app.smartphotoeditor.ImageProcessing;
import androidx.appcompat.app.AppCompatActivity;
import android.content.Intent;
import android.os.Bundle;
import android.view.View;
import android.widget.Button;
import android.widget.ImageView;
import com.app.smartphotoeditor.R;
import com.app.smartphotoeditor.activities.EditorActivity;
import com.squareup.picasso.MemoryPolicy;
import com.squareup.picasso.NetworkPolicy;
import com.squareup.picasso.Picasso;
public class Splashedit extends AppCompatActivity {
Button btnBack;
ImageView imgView;
@Override
protected void onCreate(Bundle savedInstanceState)
{
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_splashedit);
btnBack = findViewById(R.id.back);
imgView = findViewById(R.id.imgView);
Picasso
.get()
.load("https://" + Constants.IMGLINK +".ngrok.io/static/splash.jpg")
.placeholder(R.drawable.progress_bar_material)
.networkPolicy(NetworkPolicy.NO_CACHE)
.memoryPolicy(MemoryPolicy.NO_CACHE)
.resize(300, 300)
.into(imgView);
btnBack.setOnClickListener(new View.OnClickListener()
{
@Override
public void onClick(View v)
{
openEditorActivity();
}
});
}
public void openEditorActivity() {
Intent intent = new Intent(this, EditorActivity.class);
startActivity(intent);
}
}
\ No newline at end of file
package com.app.smartphotoeditor.ImageProcessing;
import android.content.Intent;
import android.os.Bundle;
import android.view.View;
import android.widget.Button;
import android.widget.ImageView;
import androidx.appcompat.app.AppCompatActivity;
import com.app.smartphotoeditor.R;
import com.app.smartphotoeditor.activities.EditorActivity;
import com.squareup.picasso.MemoryPolicy;
import com.squareup.picasso.NetworkPolicy;
import com.squareup.picasso.Picasso;
public class Vignette extends AppCompatActivity {
Button btnBack;
ImageView imgView;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_vignette);
btnBack=(Button) findViewById(R.id.back);
imgView=(ImageView) findViewById(R.id.imgView);
Picasso
.get()
.load("https://"+Constants.IMGLINK+".ngrok.io/static/vignette.jpg")
.placeholder(R.drawable.progress_bar_material)
.networkPolicy(NetworkPolicy.NO_CACHE)
.memoryPolicy(MemoryPolicy.NO_CACHE)
.resize(300,300)
.into(imgView);
btnBack.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
openEditorActivity();
}
});
}
public void openEditorActivity() {
Intent intent = new Intent(this, EditorActivity.class);
startActivity(intent);
}
}
\ No newline at end of file
@@ -279,4 +279,4 @@ public class EnvironmentChecker extends AppCompatActivity implements OnEyeStatus
{
tts.speak(text,TextToSpeech.QUEUE_FLUSH,null,null);
}
}
\ No newline at end of file
}
@@ -688,7 +688,7 @@ public class Methods
Canvas canvas = new Canvas(bmOut);
// setup default color
canvas.drawColor(0, PorterDuff.Mode.CLEAR);
// create a blur paint for capturing alpha
// create a Blur paint for capturing alpha
Paint ptBlur = new Paint();
ptBlur.setMaskFilter(new BlurMaskFilter(15, BlurMaskFilter.Blur.NORMAL));
int[] offsetXY = new int[2];
<?xml version="1.0" encoding="utf-8"?>
<androidx.constraintlayout.widget.ConstraintLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:app="http://schemas.android.com/apk/res-auto"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent"
tools:context=".ImageProcessing.BackgroundCustomization">
<ScrollView
android:layout_width="match_parent"
android:layout_height="match_parent"
>
<LinearLayout
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:orientation="vertical" >
<Button
android:id="@+id/selectimage"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:layout_marginTop="100dp"
android:layout_marginLeft="60dp"
android:layout_marginRight="60dp"
android:text="Select From Gallery" />
<ImageView
android:id="@+id/img"
android:layout_width="match_parent"
android:layout_height="275dp"
android:layout_margin="50dp"
tools:srcCompat="@tools:sample/avatars" />
<Button
android:id="@+id/btnedit"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:layout_marginLeft="60dp"
android:layout_marginRight="60dp"
android:text="Edit" />
</LinearLayout>
</ScrollView>
</androidx.constraintlayout.widget.ConstraintLayout>
\ No newline at end of file
<?xml version="1.0" encoding="utf-8"?>
<androidx.constraintlayout.widget.ConstraintLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:app="http://schemas.android.com/apk/res-auto"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent"
tools:context=".ImageProcessing.Backgroundupload">
<ScrollView
android:layout_width="match_parent"
android:layout_height="match_parent"
>
<LinearLayout
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:orientation="vertical" >
<Button
android:id="@+id/selectbgimage"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:layout_marginTop="50dp"
android:layout_marginLeft="60dp"
android:layout_marginRight="60dp"
android:text="Select Background" />
<ImageView
android:id="@+id/imgbg"
android:layout_width="match_parent"
android:layout_height="275dp"
android:layout_margin="50dp"
tools:srcCompat="@tools:sample/avatars" />
<Button
android:id="@+id/btncutprocess"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:layout_marginLeft="60dp"
android:layout_marginRight="60dp"
android:text="Edit" />
</LinearLayout>
</ScrollView>
</androidx.constraintlayout.widget.ConstraintLayout>
\ No newline at end of file
<?xml version="1.0" encoding="utf-8"?>
<androidx.constraintlayout.widget.ConstraintLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:app="http://schemas.android.com/apk/res-auto"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent"
tools:context=".ImageProcessing.Blur">
<ScrollView
android:layout_width="match_parent"
android:layout_height="match_parent">
<LinearLayout
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:orientation="vertical" >
<Button
android:id="@+id/back"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:text="Back" />
<ImageView
android:id="@+id/imgView"
android:layout_width="match_parent"
android:layout_height="383dp"
tools:srcCompat="@tools:sample/avatars" />
</LinearLayout>
</ScrollView>
</androidx.constraintlayout.widget.ConstraintLayout>
\ No newline at end of file
<?xml version="1.0" encoding="utf-8"?>
<androidx.constraintlayout.widget.ConstraintLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:app="http://schemas.android.com/apk/res-auto"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent"
tools:context=".ImageProcessing.Bokeh">
<ScrollView
android:layout_width="match_parent"
android:layout_height="match_parent">
<LinearLayout
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:orientation="vertical" >
<Button
android:id="@+id/back"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:text="Back" />
<ImageView
android:id="@+id/imgView"
android:layout_width="match_parent"
android:layout_height="383dp"
tools:srcCompat="@tools:sample/avatars" />
</LinearLayout>
</ScrollView>
</androidx.constraintlayout.widget.ConstraintLayout>
\ No newline at end of file
<?xml version="1.0" encoding="utf-8"?>
<androidx.constraintlayout.widget.ConstraintLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:app="http://schemas.android.com/apk/res-auto"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent"
tools:context=".ImageProcessing.Cut">
<ScrollView
android:layout_width="match_parent"
android:layout_height="match_parent">
<LinearLayout
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:orientation="vertical" >
<Button
android:id="@+id/back"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:text="Back" />
<ImageView
android:id="@+id/imgView"
android:layout_width="match_parent"
android:layout_height="383dp"
tools:srcCompat="@tools:sample/avatars" />
</LinearLayout>
</ScrollView>
</androidx.constraintlayout.widget.ConstraintLayout>
\ No newline at end of file
<?xml version="1.0" encoding="utf-8"?>
<androidx.constraintlayout.widget.ConstraintLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:app="http://schemas.android.com/apk/res-auto"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent"
tools:context=".ImageProcessing.MedBlur">
<ScrollView
android:layout_width="match_parent"
android:layout_height="match_parent">
<LinearLayout
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:orientation="vertical" >
<Button
android:id="@+id/back"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:text="Back" />
<ImageView
android:id="@+id/imgView"
android:layout_width="match_parent"
android:layout_height="383dp"
tools:srcCompat="@tools:sample/avatars" />
</LinearLayout>
</ScrollView>
</androidx.constraintlayout.widget.ConstraintLayout>
\ No newline at end of file
<?xml version="1.0" encoding="utf-8"?>
<androidx.constraintlayout.widget.ConstraintLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:app="http://schemas.android.com/apk/res-auto"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent"
tools:context=".ImageProcessing.Splashedit">
<ScrollView
android:layout_width="match_parent"
android:layout_height="match_parent">
<LinearLayout
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:orientation="vertical" >
<Button
android:id="@+id/back"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:text="Back" />
<ImageView
android:id="@+id/imgView"
android:layout_width="match_parent"
android:layout_height="383dp"
tools:srcCompat="@tools:sample/avatars" />
</LinearLayout>
</ScrollView>
</androidx.constraintlayout.widget.ConstraintLayout>
\ No newline at end of file
#Mon Sep 06 00:25:46 IST 2021
distributionBase=GRADLE_USER_HOME
distributionUrl=https\://services.gradle.org/distributions/gradle-6.7.1-bin.zip
distributionPath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists