Commit 4e368534 authored by dilshan-98

edited

parent dcb6d652
import cv2
import numpy as np
import os
import sys
from mrcnn import utils
from mrcnn import visualize
from mrcnn.visualize import display_images
from mrcnn.visualize import display_instances
import mrcnn.model as modellib
from mrcnn.model import log
from mrcnn.config import Config
from mrcnn import model as modellib, utils
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import PIL
from PIL import Image
def Blur():
    # Input the original image name
    input = 'static/theimage.jpg'
    image = cv2.imread(input)
    dimensions = image.shape
    intent = 15
    # blurredImage = gaussianBlur(image, intent)
    blurredImage = cv2.GaussianBlur(image, (intent, intent), 0)

    # From-scratch 2-D convolution used by the handwritten Gaussian blur below.
    def convolution(inputimage, kernel):
        kernelHeight = kernel.shape[0]
        kernelWidth = kernel.shape[1]
        # Zero-pad the input so the output keeps the original size.
        if len(inputimage.shape) == 3:
            imagePadding = np.pad(inputimage,
                                  pad_width=((kernelHeight // 2, kernelHeight // 2),
                                             (kernelWidth // 2, kernelWidth // 2),
                                             (0, 0)),
                                  mode='constant', constant_values=0).astype(np.float32)
        elif len(inputimage.shape) == 2:
            imagePadding = np.pad(inputimage,
                                  pad_width=((kernelHeight // 2, kernelHeight // 2),
                                             (kernelWidth // 2, kernelWidth // 2)),
                                  mode='constant', constant_values=0).astype(np.float32)

        height = kernelHeight // 2
        width = kernelWidth // 2
        imageConvolution = np.zeros(imagePadding.shape)
        # Slide the kernel over every position and accumulate the weighted sum.
        for i in range(height, imagePadding.shape[0] - height):
            for j in range(width, imagePadding.shape[1] - width):
                x = imagePadding[i - height:i - height + kernelHeight, j - width:j - width + kernelWidth]
                x = x.flatten() * kernel.flatten()
                imageConvolution[i][j] = x.sum()

        heightmax = -height
        widthmax = -width
        if height == 0:
            return imageConvolution[height:, width:widthmax]
        if width == 0:
            return imageConvolution[height:heightmax, width:]
        return imageConvolution[height:heightmax, width:widthmax]

    # Handwritten Gaussian blur: build the kernel, then convolve each channel.
    def gaussianBlur(image, sigma):
        image = np.asarray(image)
        filter = 2 * int(4 * sigma + 0.5) + 1
        gaussianFilter = np.zeros((filter, filter), np.float32)
        f1 = filter // 2
        f2 = filter // 2
        # Evaluate G(x, y) = exp(-(x^2 + y^2) / (2 * sigma^2)) / (2 * pi * sigma^2).
        for x in range(-f1, f1 + 1):
            for y in range(-f2, f2 + 1):
                x1 = 2 * np.pi * (sigma ** 2)
                x2 = np.exp(-(x ** 2 + y ** 2) / (2 * sigma ** 2))
                gaussianFilter[x + f1, y + f2] = (1 / x1) * x2
        filterImage = np.zeros_like(image, dtype=np.float32)
        for c in range(3):
            filterImage[:, :, c] = convolution(image[:, :, c], gaussianFilter)
        return filterImage.astype(np.uint8)

    # Load the pre-trained model data
    mainFilePath = os.path.join("D:/research -app/2021-129/Backend/")
    modelFilePath = os.path.join(mainFilePath, "logs/")
    trainedModelPath = 'D:/research -app/2021-129/Backend/mask_rcnn_object_0010.h5'

    class InferenceConfig(Config):
        NAME = "object"
        NUM_CLASSES = 1 + 3  # Background + labels
        DETECTION_MIN_CONFIDENCE = 0.5
        GPU_COUNT = 1
        IMAGES_PER_GPU = 1

    config = InferenceConfig()
    model = modellib.MaskRCNN(mode="inference", model_dir=modelFilePath, config=config)
    model.load_weights(trainedModelPath, by_name=True)
    classes = ['BG', 'portrait', 'portrait_body', 'selfie_top']

    # Keep original pixels where the mask is set; use the blurred copy elsewhere.
    def applyMask(image, mask):
        blurredImage[:, :, 0] = np.where(
            mask == 0,
            blurredImage[:, :, 0],
            image[:, :, 0]
        )
        blurredImage[:, :, 1] = np.where(
            mask == 0,
            blurredImage[:, :, 1],
            image[:, :, 1]
        )
        blurredImage[:, :, 2] = np.where(
            mask == 0,
            blurredImage[:, :, 2],
            image[:, :, 2]
        )
        return blurredImage

    def applyBlur(image, boxes, masks, class_ids, classes, scores, viewMask=True):
        instances = boxes.shape[0]
        if not instances:
            print("No instances available!!!")
        maskedImage = image.astype(np.uint32).copy()
        for i in range(instances):
            mask = masks[:, :, i]
            if viewMask:
                maskedImage = applyMask(maskedImage, mask)
        return maskedImage

    # Run the Mask R-CNN model and apply the blur effect.
    detectionResults = model.detect([image], verbose=0)
    r = detectionResults[0]
    output = applyBlur(image, r['rois'], r['masks'], r['class_ids'], classes, r['scores'])
    # output = cv2.resize(np.float32(output), (466, 700))
    cv2.imwrite('static/blur.jpg', output)
    return "blurdone"
@@ -17,88 +17,84 @@ import PIL
from PIL import Image
def cutImage():
    # Input the original image name
    original_image = 'static/theimage.jpg'
    # Use OpenCV to read the original image
    image = cv2.imread(original_image)
    height, width, channels = image.shape
    dimensions = (width, height)
    # Resize the replacement background to match the input image.
    bg = cv2.imread('static/bg.jpg')
    bg = cv2.resize(bg, dimensions, interpolation=cv2.INTER_AREA)
    # cv2.imwrite('original_image.jpg', bg)
    # Load the pre-trained model data
    ROOT_DIR = os.path.join("D:/research -app/2021-129/Backend/")
    MODEL_DIR = os.path.join(ROOT_DIR, "logs")
    customModel = 'D:/research -app/2021-129/Backend/mask_rcnn_object_0010.h5'

    class InferenceConfig(Config):
        NAME = "object"
        GPU_COUNT = 1
        IMAGES_PER_GPU = 1
        NUM_CLASSES = 1 + 3  # Background and the total labels of the dataset
        DETECTION_MIN_CONFIDENCE = 0.9

    config = InferenceConfig()
    model = modellib.MaskRCNN(
        mode="inference", model_dir=MODEL_DIR, config=config
    )
    model.load_weights(customModel, by_name=True)
    class_names = ['BG', 'portrait', 'portrait_body', 'selfie_top']

    # Keep original pixels where the mask is set; use the new background elsewhere.
    def apply_mask(image, mask):
        bg[:, :, 0] = np.where(
            mask == 0,
            bg[:, :, 0],
            image[:, :, 0]
        )
        bg[:, :, 1] = np.where(
            mask == 0,
            bg[:, :, 1],
            image[:, :, 1]
        )
        bg[:, :, 2] = np.where(
            mask == 0,
            bg[:, :, 2],
            image[:, :, 2]
        )
        return bg

    def display_instances(image, boxes, masks, ids, names, scores, show_mask=True):
        instances = boxes.shape[0]
        if not instances:
            print('There are no instances available to display!!!')
        maskedImage = image.astype(np.uint32).copy()
        for i in range(instances):
            mask = masks[:, :, i]
            if show_mask:
                maskedImage = apply_mask(maskedImage, mask)
        return maskedImage
        """max_area = 0
        # n_instances saves the amount of all objects
        n_instances = boxes.shape[0]
@@ -131,18 +127,20 @@ def cutImage():
        # apply mask for the image
        save1 = apply_mask(image, mask)
        # save2 = apply_mask2(image, mask)
        return save1  # ,save2"""

    # run mask rcnn model
    results = model.detect([image], verbose=0)
    r = results[0]
    ## applying our effect
    frame1 = display_instances(
        image, r['rois'], r['masks'], r['class_ids'], class_names, r['scores']
    )
    ## save image
    cv2.imwrite("static/cut.jpg", frame1)
    return "done"
import cv2
import numpy as np
import os
import sys
import mrcnn.model as modellib
from mrcnn.model import log
from mrcnn.config import Config
import PIL
from PIL import Image
from matplotlib.pyplot import imread
import scipy as sc
from scipy import ndimage
from skimage import filters
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import math
from skimage.color import rgb2gray
rootLocation = os.path.join("D:/research -app/2021-129/Backend/")
modelLocation = os.path.join(rootLocation, "logs/")
customModel = 'D:/research -app/2021-129/Backend/mask_rcnn_object_0010.h5'
class InferenceConfig(Config):
    NAME = "object"
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
    NUM_CLASSES = 1 + 3  # Background and the total labels of the dataset
    DETECTION_MIN_CONFIDENCE = 0.9
config = InferenceConfig()
model = modellib.MaskRCNN(mode="inference", model_dir=modelLocation, config=config)
model.load_weights(customModel, by_name=True)
class_names = ['BG', 'portrait', 'portrait_body', 'selfie_top']
def medBlur():
def apply_mask(image, mask):
blurredImage[:, :, 0] = np.where(
mask == 0,
blurredImage[:, :, 0],
image[:, :, 0]
)
blurredImage[:, :, 1] = np.where(
mask == 0,
blurredImage[:, :, 1],
image[:, :, 1]
)
blurredImage[:, :, 2] = np.where(
mask == 0,
blurredImage[:, :, 2],
image[:, :, 2]
)
return blurredImage
def applyBlur(image, boxes, masks, show_mask=True):
instances = boxes.shape[0]
if not instances:
print('There are no instances available to display!!!')
maskedImage = image.astype(np.uint32).copy()
for i in range(instances):
mask = masks[:, :, i]
if show_mask:
maskedImage = apply_mask(maskedImage, mask)
return maskedImage
original_image = 'static/theimage.jpg'
image = cv2.imread(original_image)
intent = 15  # aperture for cv2.medianBlur; must be an odd integer greater than 1
blurredImage = cv2.medianBlur(image, intent)
results = model.detect([image], verbose=0)
r = results[0]
output = applyBlur(image, r['rois'], r['masks'])
#output = cv2.resize(output, (466, 700))
cv2.imwrite('static/medblur.jpg', output)
return "blurdone"
\ No newline at end of file
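medBlur applies the same subject-preserving composite as Blur(), with a median filter on the background instead of a Gaussian; a hedged sketch of swapping in an edge-preserving bilateral filter (parameter values here are illustrative, not taken from this repo):

    blurredImage = cv2.bilateralFilter(image, 15, 75, 75)  # d, sigmaColor, sigmaSpace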
@@ -16,50 +16,26 @@ import matplotlib.pyplot as plt
import matplotlib.image as mpimg
def Splash():
    # Input the original image name
    original_image = 'static/theimage.jpg'
    # Use OpenCV to read and show the original image
    image = cv2.imread(original_image)
    # Use cvtColor to transform the colour image into a grayscale copy
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Root directory of the project
    ROOT_DIR = os.path.join("D:/research -app/2021-129/Backend/")
    MODEL_DIR = os.path.join(ROOT_DIR, "logs")
    WEIGHTS_PATH = "D:/research -app/2021-129/Backend/mask_rcnn_object_0010.h5"

    class InferenceConfig(Config):
        NAME = "object"
        GPU_COUNT = 1
        IMAGES_PER_GPU = 1
        NUM_CLASSES = 1 + 3  # Background and the total labels of the dataset
        DETECTION_MIN_CONFIDENCE = 0.9

    config = InferenceConfig()
@@ -69,7 +45,7 @@ def Splash():
    model.load_weights(weights_path, by_name=True)

    class_names = ['BG', 'portrait', 'portrait_body', 'selfie_top']

    def apply_mask(image, mask):
        image[:, :, 0] = np.where(
@@ -89,9 +65,23 @@ def Splash():
        )
        return image
    def display_instances(image, boxes, masks, ids, names, scores, show_mask=True):
        instances = boxes.shape[0]
        if not instances:
            print('There are no instances available to display!!!')
        maskedImage = image.astype(np.uint32).copy()
        for i in range(instances):
            mask = masks[:, :, i]
            if show_mask:
                maskedImage = apply_mask(maskedImage, mask)
        return maskedImage
        """max_area = 0
        # n_instances saves the amount of all objects
        n_instances = boxes.shape[0]
@@ -125,16 +115,19 @@ def Splash():
        image = apply_mask(image, mask)
        return image"""
    results = model.detect([image], verbose=0)
    r = results[0]
    frame = display_instances(
        image, r['rois'], r['masks'], r['class_ids'], class_names, r['scores']
    )
    frame = cv2.resize(np.float32(frame), (466, 700))
    cv2.imwrite('static/splash.jpg', frame)
    return "splashdone"
import numpy as np
import cv2
def vignette():
    input_image = 'static/theimage.jpg'
    input_image = cv2.imread(input_image)
    rows, cols = input_image.shape[:2]
    # Build a 2-D radial falloff mask from the outer product of two 1-D Gaussian kernels.
    X_resultant_kernel = cv2.getGaussianKernel(cols, 200)
    Y_resultant_kernel = cv2.getGaussianKernel(rows, 200)
    resultant_kernel = Y_resultant_kernel * X_resultant_kernel.T
    mask = 255 * resultant_kernel / np.linalg.norm(resultant_kernel)
    output = np.copy(input_image)
    # Darken each channel progressively towards the image borders.
    for i in range(3):
        output[:, :, i] = output[:, :, i] * mask
    cv2.imwrite('static/vignette.jpg', output)
    return "vignettedone"
\ No newline at end of file
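The np.linalg.norm scaling leaves the centre of the mask only approximately at full brightness, and the exact peak varies with image size; a hedged alternative that pins the peak at exactly 1.0 before multiplying (a sketch, not part of this commit):

    mask = resultant_kernel / resultant_kernel.max()           # exactly 1.0 at the centre
    output = (input_image * mask[..., None]).astype(np.uint8)  # broadcast across channels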
@@ -4,6 +4,9 @@ from Cut import cutImage
from Splash import Splash
from Blur import Blur
from maskImage import mask
from bokeh import Bokeh
from MedBlur import medBlur
from Vignette import vignette
import os
import tensorflow as tf
from flask_ngrok import run_with_ngrok
@@ -20,7 +23,7 @@ graph = tf.get_default_graph()
@app.route("/")
def hello():
return "Checking Connectivity..."
return "Checking Connectivity..."
@app.route('/upload', methods = ['POST'])
@@ -70,25 +73,34 @@ def blurGet():
with graph.as_default():
Blur()
return "done"
@app.route('/bokeh', methods = ['GET'])
@cross_origin()
def bokehGet():
global graph
with graph.as_default():
Bokeh()
return "done"
@app.route('/vignette', methods = ['GET'])
@cross_origin()
def vignetteGet():
global graph
with graph.as_default():
vignette()
return "done"
# @app.route('/bokeh', methods = ['GET'])
# @cross_origin()
# def bokehGet():
# global graph
# with graph.as_default():
# Bokeh()
# return "done"
# @app.route('/medBlur', methods = ['GET'])
# @cross_origin()
# def medBlurGet():
# global graph
# with graph.as_default():
# MedBlur()
# return "done"
@app.route('/medblur', methods = ['GET'])
@cross_origin()
def medblurGet():
global graph
with graph.as_default():
medBlur()
return "done"
if __name__ == '__main__':
app.run()
\ No newline at end of file
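Every effect endpoint above follows the same pattern; a hedged sketch of wiring up one more (the Sepia module and function are hypothetical placeholders, not part of this commit):

    # from Sepia import Sepia  # hypothetical module
    @app.route('/sepia', methods=['GET'])
    @cross_origin()
    def sepiaGet():
        global graph
        # TF1-style models must run inside the graph they were loaded into,
        # since Flask serves each request on its own thread.
        with graph.as_default():
            Sepia()  # hypothetical effect function
        return "done"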
import cv2
import os
import mrcnn.model as modellib
from mrcnn.config import Config
import matplotlib.pyplot as plt
import numpy as np
def Bokeh():
input = 'static/theimage.jpg'
image = cv2.imread(input)
plt.rcParams["figure.figsize"] = (10, 10)
np.set_printoptions(precision=3)
# 11x11 binary aperture mask that widens towards the bottom.
Array = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0],
                  [0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0],
                  [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
                  [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
                  [0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
                  [0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
                  [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
                  [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
                  [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], dtype='float')
binaryMask = Array
gaussianKernel = cv2.getGaussianKernel(11, 5.)
convoKernel = gaussianKernel*gaussianKernel.transpose()*binaryMask
kernel = convoKernel / np.sum(convoKernel)
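# Dividing by np.sum(convoKernel) normalises the kernel weights to sum to 1,
# so filtering preserves overall brightness while spreading bright pixels
# into the aperture shape defined by the binary mask.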
print(kernel)
def bokeh(image):
# Scale each channel to [0, 1], boost near-saturated highlights so they
# bloom when convolved with the aperture-shaped kernel, then clip to [0, 1].
r, g, b = cv2.split(image)
r = r / 255.
r = np.where(r > 0.9, r * 2, r)
filteredR = cv2.filter2D(r, -1, kernel)
filteredR = np.where(filteredR > 1., 1., filteredR)
g = g / 255.
g = np.where(g > 0.9, g * 2, g)
filteredG = cv2.filter2D(g, -1, kernel)
filteredG = np.where(filteredG > 1., 1., filteredG)
b = b / 255.
b = np.where(b > 0.9, b * 2, b)
filteredB = cv2.filter2D(b, -1, kernel)
filteredB = np.where(filteredB > 1., 1., filteredB)
result = cv2.merge((filteredR, filteredG, filteredB))
return result
imageRGB = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
imageRGB = imageRGB / 255.
mainFilePath = os.path.join("D:/research -app/2021-129/Backend/")
modelFilePath = os.path.join(mainFilePath, "logs/")
trainedModelPath = 'D:/research -app/2021-129/Backend/mask_rcnn_object_0010.h5'
class InferenceConfig(Config):
NAME = "object"
NUM_CLASSES = 1 + 3
DETECTION_MIN_CONFIDENCE = 0.5
GPU_COUNT = 1
IMAGES_PER_GPU = 1
config = InferenceConfig()
model = modellib.MaskRCNN(mode="inference", model_dir=modelFilePath, config=config)
model.load_weights(trainedModelPath, by_name=True)
classes = ['BG', 'portrait', 'portrait_body', 'selfie_top']
def applyBokeh(imageInput, boxes, masks, class_ids, classes, scores, show_mask=True):
instances = boxes.shape[0]
if not instances:
print("No instances available!!!")
for i in range(instances):
mask = masks[:, :, i]
invertImage = np.abs(1. - mask)
r,g,b = cv2.split(imageInput)
maskedR = r * mask
maskedG = g * mask
maskedB = b * mask
mergedImage = cv2.merge((maskedR, maskedG, maskedB))
invertR = r * invertImage
invertG = g * invertImage
invertB = b * invertImage
backgroundMerged = cv2.merge((invertR, invertG, invertB))
mergedImage = np.asarray(mergedImage * 255., dtype='uint8')
backgroundMerged = np.asarray(backgroundMerged * 255., dtype='uint8')
backgroundBokeh = bokeh(backgroundMerged)
backgroundBokeh = np.asarray(backgroundBokeh * 255., dtype='uint8')
maskedImage = cv2.add(mergedImage, backgroundBokeh)
maskedImage = cv2.cvtColor(maskedImage, cv2.COLOR_BGR2RGB)
return maskedImage
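# applyBokeh splits the frame into subject (channels * mask) and background
# (channels * (1 - mask)), bokeh-filters only the background, then recombines
# the two with cv2.add so the detected person stays sharp.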
detectionResults = model.detect([image], verbose=0)
r = detectionResults[0]
output = applyBokeh(imageRGB, r['rois'], r['masks'], r['class_ids'], classes, r['scores'])
cv2.imwrite('static/bokeh.jpg', output)
return "bokehdone"
\ No newline at end of file
@@ -25,60 +25,41 @@ from mrcnn.model import log
from mrcnn.config import Config
from mrcnn import model as modellib, utils
# Root directory of the project
ROOT_DIR = "samples"
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
WEIGHTS_PATH = "D:/research -app/2021-129/Backend/mask_rcnn_object_0010.h5"
class CustomConfig(Config):
"""Configuration for training on the custom dataset.
Derives from the base Config class and overrides some values.
"""
# Give the configuration a recognizable name
NAME = "object"
IMAGES_PER_GPU = 1
NUM_CLASSES = 1 + 3 # Background + labels
# Number of training steps per epoch
STEPS_PER_EPOCH = 10
# Skip detections with < 90% confidence
DETECTION_MIN_CONFIDENCE = 0.9
config = CustomConfig()
class InferenceConfig(config.__class__):
# Run detection on one image at a time
GPU_COUNT = 1
IMAGES_PER_GPU = 1
DETECTION_MIN_CONFIDENCE = 0.7
def mask():
path_to_new_image = 'static/theimage.jpg'
#image1 = mpimg.imread(path_to_new_image)
image1 = cv2.imread(path_to_new_image)
ROOT_DIR = os.path.join("D:/research -app/2021-129/Backend/")
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
WEIGHTS_PATH = 'D:/research -app/2021-129/Backend/mask_rcnn_object_0010.h5'
class InferenceConfig(Config):
NAME = "object"
GPU_COUNT = 1
IMAGES_PER_GPU = 1
NUM_CLASSES = 1 + 3 #Background and the total labels of the dataset
DETECTION_MIN_CONFIDENCE = 0.5
IMAGES_PER_GPU = 1
config = InferenceConfig()
config.display()
#config.display()
#LOAD MODEL. Create model in inference mode
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
# Load COCO weights Or, load the last model you trained
weights_path = WEIGHTS_PATH
# Load weights
# print("Loading weights ", weights_path)
model.load_weights(weights_path, by_name=True)
class_names = ['BG', 'portrait','portrait_body', 'selfie_top']
class_names = ['BG', 'portrait', 'portrait_body', 'selfie_top']
# path = sys.argv[1]
path_to_new_image = 'static/theimage.jpg'
# path_to_new_image = path.split("/")[-1]
image1 = mpimg.imread(path_to_new_image)
# plt.imshow(image1)
results1 = model.detect([image1], verbose=1)
@@ -86,4 +67,5 @@ def mask():
visualize.display_instances(image1, r1['rois'], r1['masks'], r1['class_ids'],
class_names, r1['scores'], title="Predictions1")
return True
\ No newline at end of file
return True
import warnings
warnings.filterwarnings('ignore')
import os
import sys
import json
import datetime
import numpy as np
import skimage.draw
import cv2
import random
import math
import re
import time
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.image as mpimg
import colorsys
from skimage.measure import find_contours
from matplotlib.patches import Polygon
from mrcnn import utils
from mrcnn import visualize
from mrcnn.visualize import display_images
from mrcnn.visualize import display_instances
import mrcnn.model as modellib
from mrcnn.model import log
from mrcnn.config import Config
from mrcnn import model as modellib, utils
#import custom
# Root directory of the project
ROOT_DIR = ""
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
WEIGHTS_PATH = "mask_rcnn_object_0010.h5"
class CustomConfig(Config):
"""Configuration for training on the custom dataset.
Derives from the base Config class and overrides some values.
"""
# Give the configuration a recognizable name
NAME = "object"
IMAGES_PER_GPU = 1
NUM_CLASSES = 1 + 3 # Background + labels
# Number of training steps per epoch
STEPS_PER_EPOCH = 10
# Skip detections with < 90% confidence
DETECTION_MIN_CONFIDENCE = 0.9
config = CustomConfig()
class InferenceConfig(config.__class__):
# Run detection on one image at a time
GPU_COUNT = 1
IMAGES_PER_GPU = 1
DETECTION_MIN_CONFIDENCE = 0.7
config = InferenceConfig()
config.display()
#LOAD MODEL. Create model in inference mode
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
# Load COCO weights Or, load the last model you trained
weights_path = WEIGHTS_PATH
# Load weights
# print("Loading weights ", weights_path)
model.load_weights(weights_path, by_name=True)
class_names = ['BG', 'portrait','portrait_body', 'selfie_top']
path = sys.argv[1]
#path_to_new_image = 'D:/research/maskRcnn/images/portrait demo 1.jpg'
path_to_new_image = path.split("/")[-1]
image1 = mpimg.imread(path_to_new_image)
plt.imshow(image1)
def random_colors(N, bright=True):
"""
Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
"""
brightness = 1.0 if bright else 0.7
hsv = [(i / N, 1, brightness) for i in range(N)]
colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
random.shuffle(colors)
return colors
def apply_mask(image, mask, color, alpha=0.5):
"""Apply the given mask to the image.
"""
for c in range(3):
image[:, :, c] = np.where(mask == 1,
image[:, :, c] *
(1 - alpha) + alpha * color[c] * 255,
image[:, :, c])
return image
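# i.e. each masked pixel becomes (1 - alpha) * pixel + alpha * 255 * color:
# a standard convex alpha blend with the instance colour, scaled to 8-bit.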
def display_instances(image, boxes, masks, class_ids, class_names,
scores=None, title="",
figsize=(16, 16), ax=None,
show_mask=True, show_bbox=True,
colors=None, captions=None):
"""
boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.
masks: [height, width, num_instances]
class_ids: [num_instances]
class_names: list of class names of the dataset
scores: (optional) confidence scores for each box
title: (optional) Figure title
show_mask, show_bbox: To show masks and bounding boxes or not
figsize: (optional) the size of the image
colors: (optional) An array or colors to use with each object
captions: (optional) A list of strings to use as captions for each object
"""
# Number of instances
N = boxes.shape[0]
if not N:
print("\n*** No instances to display *** \n")
else:
assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]
# If no axis is passed, create one and automatically call show()
auto_show = False
if not ax:
_, ax = plt.subplots(1, figsize=figsize)
auto_show = True
# Generate random colors
colors = colors or random_colors(N)
# Show area outside image boundaries.
height, width = image.shape[:2]
ax.set_ylim(height + 10, -10)
ax.set_xlim(-10, width + 10)
ax.axis('off')
ax.set_title(title)
masked_image = image.astype(np.uint32).copy()
for i in range(N):
color = colors[i]
# Bounding box
if not np.any(boxes[i]):
# Skip this instance. Has no bbox. Likely lost in image cropping.
continue
y1, x1, y2, x2 = boxes[i]
if show_bbox:
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
alpha=0.7, linestyle="dashed",
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Label
if not captions:
class_id = class_ids[i]
score = scores[i] if scores is not None else None
label = class_names[class_id]
caption = "{} {:.3f}".format(label, score) if score else label
else:
caption = captions[i]
ax.text(x1, y1 + 8, caption,
color='w', size=11, backgroundcolor="none")
# Mask
mask = masks[:, :, i]
if show_mask:
masked_image = apply_mask(masked_image, mask, color)
# Mask Polygon
# Pad to ensure proper polygons for masks that touch image edges.
padded_mask = np.zeros(
(mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
padded_mask[1:-1, 1:-1] = mask
contours = find_contours(padded_mask, 0.5)
for verts in contours:
# Subtract the padding and flip (y, x) to (x, y)
verts = np.fliplr(verts) - 1
p = Polygon(verts, facecolor="none", edgecolor=color)
ax.add_patch(p)
ax.imshow(masked_image.astype(np.uint8))
if auto_show:
plt.show()
# print(len([image1]))
results1 = model.detect([image1], verbose=1)
r1 = results1[0]
display_instances(image1, r1['rois'], r1['masks'], r1['class_ids'],
class_names, r1['scores'], title="Predictions1")
\ No newline at end of file
@@ -164,7 +164,7 @@ def display_instances(image, boxes, masks, class_ids, class_names,
p = Polygon(verts, facecolor="none", edgecolor=color)
ax.add_patch(p)
# ax.imshow(masked_image.astype(np.uint8))
cv2.imwrite('masked2.jpg', masked_image.astype(np.uint8))
cv2.imwrite('static/masked.jpg', masked_image.astype(np.uint8))
# if auto_show:
# plt.show()
......
import cv2
import numpy as np
import os
import sys
from mrcnn import utils
from mrcnn import visualize
from mrcnn.visualize import display_images
from mrcnn.visualize import display_instances
import mrcnn.model as modellib
from mrcnn.model import log
from mrcnn.config import Config
from mrcnn import model as modellib, utils
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import PIL
from PIL import Image
MODEL_DIR = os.path.join("logs")
COCO_MODEL_PATH = 'D:/chrome/Y4S2/research/New folder (3)/Dinusha-IT18118346/Backend/mask_rcnn_object_0010.h5'
class CustomConfig(Config):
"""Configuration for training on the custom dataset.
Derives from the base Config class and overrides some values.
"""
# Give the configuration a recognizable name
NAME = "object"
IMAGES_PER_GPU = 1
NUM_CLASSES = 1 + 3 # Background + labels
# Number of training steps per epoch
STEPS_PER_EPOCH = 10
# Skip detections with < 90% confidence
DETECTION_MIN_CONFIDENCE = 0.9
config = CustomConfig()
# Change the config information
class InferenceConfig(config.__class__):
GPU_COUNT = 1
# Number of images to train with on each GPU. A 12GB GPU can typically
# handle 2 images of 1024x1024px.
# Adjust based on your GPU memory and image sizes. Use the highest
# number that your GPU can handle for best performance.
IMAGES_PER_GPU = 1
config = InferenceConfig()
# COCO dataset object names
model = modellib.MaskRCNN(
mode="inference", model_dir=MODEL_DIR, config=config
)
model.load_weights(COCO_MODEL_PATH, by_name=True)
class_names = ['BG', 'portrait', 'portrait_body', 'selfie_top']
"""def apply_mask2(image, mask):
blur1[:, :, 0] = np.where(
mask == 0,
image[:, :, 0],
blur1[:, :,0]
)
blur1[:, :, 1] = np.where(
mask == 0,
image[:, :, 1],
blur1[:, :,1]
)
blur1[:, :, 2] = np.where(
mask == 0,
image[:, :, 2],
blur1[:, :,2]
)
return blur1 """
def Blur():
# Input the original image name
original_image = 'static/theimage.jpg'
# Use OpenCV to read the original image
image = cv2.imread(original_image)
intent = 15
dimensions = image.shape
blur = cv2.GaussianBlur(image, (intent, intent), 0)
def apply_mask(image, mask):
blur[:, :, 0] = np.where(
mask == 0,
blur[:, :, 0],
image[:, :, 0]
)
blur[:, :, 1] = np.where(
mask == 0,
blur[:, :, 1],
image[:, :, 1]
)
blur[:, :, 2] = np.where(
mask == 0,
blur[:, :, 2],
image[:, :, 2]
)
return blur
# This function is used to show the object detection result in original image.
def display_instances(image, boxes, masks, ids, names, scores):
# max_area will save the largest object for all the detection results
max_area = 0
# n_instances saves the amount of all objects
n_instances = boxes.shape[0]
if not n_instances:
print('NO INSTANCES TO DISPLAY')
else:
assert boxes.shape[0] == masks.shape[-1] == ids.shape[0]
for i in range(n_instances):
if not np.any(boxes[i]):
continue
# compute the square of each object
y1, x1, y2, x2 = boxes[i]
square = (y2 - y1) * (x2 - x1)
# use label to select person object from all the 80 classes in COCO dataset
label = names[ids[i]]
if label == 'selfie_top' or label == 'portrait_body' or label == 'portrait':
# save the largest object in the image as main character
# other people will be regarded as background
if square > max_area:
max_area = square
mask = masks[:, :, i]
else:
continue
else:
continue
# apply mask for the image
save1 = apply_mask(image, mask)
# save2 = apply_mask2(image, mask)
return save1 # ,save2
## run mask rcnn model
results = model.detect([image], verbose=0)
r = results[0]
##applying our effect
frame1 = display_instances(
image, r['rois'], r['masks'], r['class_ids'], class_names, r['scores']
)
frame1 = cv2.resize(frame1, (466, 700))
cv2.imwrite('static/blur.jpg', frame1)
return "blurdone"
@@ -47,7 +47,6 @@ dependencies {
implementation 'com.github.bumptech.glide:glide:4.11.0'
implementation 'org.tensorflow:tensorflow-lite-metadata:0.1.0-rc1'
implementation 'org.tensorflow:tensorflow-lite-gpu:2.2.0'
implementation project(path: ':openCVLibrary343')
annotationProcessor 'com.github.bumptech.glide:compiler:4.11.0'
implementation 'de.hdodenhof:circleimageview:3.1.0'
implementation 'com.github.zjywill:roundedcornerimageview:1.1.0'
......
@@ -19,7 +19,7 @@
<activity android:name=".ImageProcessing.Cut" />
<activity android:name=".ImageProcessing.Blur" />
<activity android:name=".ImageProcessing.Selectedit" />
<activity android:name=".ImageProcessing.BackgroundCustomization" />
<!-- <activity android:name=".ImageProcessing.BackgroundCustomization" />-->
<activity android:name=".activities.DenoisingActivity" />
<activity android:name=".activities.MyGallery" />
<activity android:name=".activities.CropActivity" />
@@ -27,7 +27,7 @@
<activity android:name=".activities.CameraView" />
<activity android:name=".activities.EnvironmentChecker" />
<activity
android:name=".activities.SplashScreen"
android:name=".ImageProcessing.BackgroundCustomization"
android:screenOrientation="portrait">
<intent-filter>
<action android:name="android.intent.action.MAIN" />
......
@@ -29,4 +29,12 @@ public interface Api {
@GET("/cut")
Call<ResponseBody> cut();
@GET("/bokeh")
Call<ResponseBody> bokeh();
@GET("/vignette")
Call<ResponseBody> vignette();
@GET("/medblur")
Call<ResponseBody> medblur();
}
package com.app.smartphotoeditor.ImageProcessing;
import android.content.Intent;
import android.os.Bundle;
import android.view.View;
import android.widget.Button;
import android.widget.ImageView;
import androidx.appcompat.app.AppCompatActivity;
import com.app.smartphotoeditor.R;
import com.squareup.picasso.MemoryPolicy;
import com.squareup.picasso.NetworkPolicy;
import com.squareup.picasso.Picasso;
public class Bokeh extends AppCompatActivity {
Button btnBack;
ImageView imgView;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_bokeh);
btnBack=(Button) findViewById(R.id.back);
imgView=(ImageView) findViewById(R.id.imgView);
Picasso
.get()
.load("https://"+Constants.IMGLINK+".ngrok.io/static/bokeh.jpg")
.placeholder(R.drawable.progress_bar_material)
.networkPolicy(NetworkPolicy.NO_CACHE)
.memoryPolicy(MemoryPolicy.NO_CACHE)
.resize(300,300)
.into(imgView);
btnBack.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
openSelecteditActivity();
}
});
}
public void openSelecteditActivity() {
Intent intent = new Intent(this, Selectedit.class);
startActivity(intent);
}
}
\ No newline at end of file
@@ -3,6 +3,6 @@ package com.app.smartphotoeditor.ImageProcessing;
public class Constants {
// This should be replaced with the current ngrok subdomain whenever the
// server restarts; it must match the baseurl in RetrofitClient.
public static final String IMGLINK = "4e11-112-134-169-32";
}
package com.app.smartphotoeditor.ImageProcessing;
import androidx.appcompat.app.AppCompatActivity;
import android.content.Intent;
import android.os.Bundle;
import android.view.View;
import android.widget.Button;
import android.widget.ImageView;
import androidx.appcompat.app.AppCompatActivity;
import com.app.smartphotoeditor.R;
import com.squareup.picasso.MemoryPolicy;
import com.squareup.picasso.NetworkPolicy;
......
package com.app.smartphotoeditor.ImageProcessing;
import android.content.Intent;
import android.os.Bundle;
import android.view.View;
import android.widget.Button;
import android.widget.ImageView;
import androidx.appcompat.app.AppCompatActivity;
import com.app.smartphotoeditor.R;
import com.squareup.picasso.MemoryPolicy;
import com.squareup.picasso.NetworkPolicy;
import com.squareup.picasso.Picasso;
public class MedBlur extends AppCompatActivity {
Button btnBack;
ImageView imgView;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_med_blur);
btnBack=(Button) findViewById(R.id.back);
imgView=(ImageView) findViewById(R.id.imgView);
Picasso
.get()
.load("https://"+Constants.IMGLINK+".ngrok.io/static/medblur.jpg")
.placeholder(R.drawable.progress_bar_material)
.networkPolicy(NetworkPolicy.NO_CACHE)
.memoryPolicy(MemoryPolicy.NO_CACHE)
.resize(300,300)
.into(imgView);
btnBack.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
openSelecteditActivity();
}
});
}
public void openSelecteditActivity() {
Intent intent = new Intent(this, Selectedit.class);
startActivity(intent);
}
}
\ No newline at end of file
@@ -8,7 +8,8 @@ import retrofit2.converter.gson.GsonConverterFactory;
public class RetrofitClient {
private static final String baseurl="http://10.0.2.15:5000";
// private static final String baseurl="http://10.0.2.15:5000";
private static final String baseurl="http://4e11-112-134-169-32.ngrok.io";
private static RetrofitClient mInstance;
private Retrofit retrofit;
......
package com.app.smartphotoeditor.ImageProcessing;
import android.content.Intent;
import android.os.Bundle;
import android.view.View;
import android.widget.Button;
import android.widget.ImageView;
import androidx.appcompat.app.AppCompatActivity;
import com.app.smartphotoeditor.R;
import com.squareup.picasso.MemoryPolicy;
import com.squareup.picasso.NetworkPolicy;
import com.squareup.picasso.Picasso;
public class Vignette extends AppCompatActivity {
Button btnBack;
ImageView imgView;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_vignette);
btnBack=(Button) findViewById(R.id.back);
imgView=(ImageView) findViewById(R.id.imgView);
Picasso
.get()
.load("https://"+Constants.IMGLINK+".ngrok.io/static/vignette.jpg")
.placeholder(R.drawable.progress_bar_material)
.networkPolicy(NetworkPolicy.NO_CACHE)
.memoryPolicy(MemoryPolicy.NO_CACHE)
.resize(300,300)
.into(imgView);
btnBack.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
openSelecteditActivity();
}
});
}
public void openSelecteditActivity() {
Intent intent = new Intent(this, Selectedit.class);
startActivity(intent);
}
}
\ No newline at end of file
<?xml version="1.0" encoding="utf-8"?>
<androidx.constraintlayout.widget.ConstraintLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:app="http://schemas.android.com/apk/res-auto"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent"
tools:context=".ImageProcessing.Bokeh">
<ScrollView
android:layout_width="match_parent"
android:layout_height="match_parent">
<LinearLayout
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:orientation="vertical" >
<Button
android:id="@+id/back"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:text="Back" />
<ImageView
android:id="@+id/imgView"
android:layout_width="match_parent"
android:layout_height="383dp"
tools:srcCompat="@tools:sample/avatars" />
</LinearLayout>
</ScrollView>
</androidx.constraintlayout.widget.ConstraintLayout>
\ No newline at end of file
<?xml version="1.0" encoding="utf-8"?>
<androidx.constraintlayout.widget.ConstraintLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:app="http://schemas.android.com/apk/res-auto"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent"
tools:context=".ImageProcessing.MedBlur">
<ScrollView
android:layout_width="match_parent"
android:layout_height="match_parent">
<LinearLayout
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:orientation="vertical" >
<Button
android:id="@+id/back"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:text="Back" />
<ImageView
android:id="@+id/imgView"
android:layout_width="match_parent"
android:layout_height="383dp"
tools:srcCompat="@tools:sample/avatars" />
</LinearLayout>
</ScrollView>
</androidx.constraintlayout.widget.ConstraintLayout>
\ No newline at end of file
@@ -13,61 +13,120 @@
    <LinearLayout
        android:layout_width="match_parent"
        android:layout_height="match_parent"
        android:orientation="vertical">

        <Button
            android:id="@+id/newimage"
            android:layout_width="match_parent"
            android:layout_height="wrap_content"
            android:layout_marginLeft="20dp"
            android:layout_marginRight="20dp"
            android:text="New Image" />

        <TableLayout
            android:layout_width="match_parent"
            android:layout_height="wrap_content">

            <TableRow
                android:id="@+id/tableRow1"
                android:layout_width="wrap_content"
                android:layout_height="0dp"
                android:layout_weight="1" >

                <Button
                    android:id="@+id/blur"
                    android:layout_width="0dp"
                    android:layout_height="wrap_content"
                    android:layout_weight="1"
                    android:layout_marginLeft="20dp"
                    android:layout_marginRight="20dp"
                    android:layout_gravity="center"
                    android:text="Blur"/>

                <Button
                    android:id="@+id/cut"
                    android:layout_width="0dp"
                    android:layout_height="wrap_content"
                    android:layout_weight="1"
                    android:layout_marginLeft="20dp"
                    android:layout_marginRight="20dp"
                    android:layout_gravity="center"
                    android:text="Cut"/>
            </TableRow>

            <TableRow
                android:id="@+id/tableRow2"
                android:layout_width="wrap_content"
                android:layout_height="0dp"
                android:layout_weight="1" >

                <Button
                    android:id="@+id/splash"
                    android:layout_width="0dp"
                    android:layout_height="wrap_content"
                    android:layout_gravity="center"
                    android:layout_weight="1"
                    android:layout_marginLeft="20dp"
                    android:layout_marginRight="20dp"
                    android:text="Splash" />

                <Button
                    android:id="@+id/bokeh"
                    android:layout_width="0dp"
                    android:layout_height="wrap_content"
                    android:layout_marginLeft="20dp"
                    android:layout_marginRight="20dp"
                    android:layout_weight="1"
                    android:layout_gravity="center"
                    android:text="Bokeh" />
            </TableRow>

            <TableRow
                android:id="@+id/tableRow3"
                android:layout_width="wrap_content"
                android:layout_height="0dp"
                android:layout_weight="1" >

                <Button
                    android:id="@+id/medblur"
                    android:layout_width="0dp"
                    android:layout_height="wrap_content"
                    android:layout_weight="1"
                    android:layout_marginLeft="20dp"
                    android:layout_marginRight="20dp"
                    android:layout_gravity="center"
                    android:text="Medium Blur" />

                <Button
                    android:id="@+id/vignette"
                    android:layout_width="0dp"
                    android:layout_height="wrap_content"
                    android:layout_weight="1"
                    android:layout_marginLeft="20dp"
                    android:layout_marginRight="20dp"
                    android:layout_gravity="center"
                    android:text="vignette"/>
            </TableRow>
        </TableLayout>

        <ImageView
            android:id="@+id/imgview"
            android:layout_width="match_parent"
            android:layout_height="wrap_content"
            android:layout_marginTop="20dp"
            android:adjustViewBounds="true"
            app:srcCompat="@drawable/ic_launcher_background" />
    </LinearLayout>
</ScrollView>
</androidx.constraintlayout.widget.ConstraintLayout>
\ No newline at end of file
<?xml version="1.0" encoding="utf-8"?>
<androidx.constraintlayout.widget.ConstraintLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:app="http://schemas.android.com/apk/res-auto"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent"
tools:context=".ImageProcessing.Vignette">
<ScrollView
android:layout_width="match_parent"
android:layout_height="match_parent">
<LinearLayout
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:orientation="vertical" >
<Button
android:id="@+id/back"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:text="Back" />
<ImageView
android:id="@+id/imgView"
android:layout_width="match_parent"
android:layout_height="383dp"
tools:srcCompat="@tools:sample/avatars" />
</LinearLayout>
</ScrollView>
</androidx.constraintlayout.widget.ConstraintLayout>
\ No newline at end of file