Commit cb67cde4 authored by dilshan-98

finalized

parent 9ea93f93
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectRootManager" version="2" languageLevel="JDK_11" default="true" project-jdk-name="1.8" project-jdk-type="JavaSDK">
<component name="ProjectRootManager" version="2" languageLevel="JDK_1_8" default="true" project-jdk-name="1.8" project-jdk-type="JavaSDK">
<output url="file://$PROJECT_DIR$/build/classes" />
</component>
<component name="ProjectType">
@@ -11,11 +11,6 @@ def Blur():
intent = 15
dimensions = image.shape
# blurredImage = gaussianBlur(image, intent)
blurredImage = cv2.GaussianBlur(image, (intent, intent), 0)
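# Note: cv2.GaussianBlur needs an odd kernel size, and sigmaX=0 tells OpenCV to derive sigma from the kernel size; intent = 15 satisfies both.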
def convolution(inputimage, kernel):
kernelHeight = kernel.shape[0]
kernelWidth = kernel.shape[1]
@@ -75,6 +70,8 @@ def Blur():
return (filterImage.astype(np.uint8))
blurredImage = gaussianBlur(image, intent)
mainFilePath = os.path.join("D:/research -app/2021-129/Backend/")
modelFilePath = os.path.join(mainFilePath, "logs/")
trainedModelPath = 'D:/research -app/2021-129/Backend/mask_rcnn_object_0010.h5'
@@ -92,9 +89,7 @@ def Blur():
model.load_weights(trainedModelPath, by_name=True)
classes = ['BG', 'portrait', 'portrait_body', 'selfie_top']
def applyBlur(image, mask):
def blurImage(image, mask):
blurredImage[:, :, 0] = np.where(
mask == 0,
blurredImage[:, :, 0],
@@ -112,7 +107,7 @@ def Blur():
)
return blurredImage
def applyBlur(image, boxes, masks, class_ids, classes, scores, viewMask=True):
def applyBlur(image, boxes, masks, viewMask=True):
instances = boxes.shape[0]
if not instances:
@@ -123,7 +118,7 @@ def Blur():
for i in range(instances):
mask = masks[:, :, i]
if viewMask:
maskedImage = applyBlur(maskedImage, mask)
maskedImage = blurImage(maskedImage, mask)
return maskedImage
@@ -131,9 +126,8 @@ def Blur():
r = detectionResults[0]
output = applyBlur(image, r['rois'], r['masks'], r['class_ids'], classes, r['scores'])
output = applyBlur(image, r['rois'], r['masks'])
# output = cv2.resize(np.float32(output), (466, 700))
cv2.imwrite('static/blur.jpg', output)
return "blurdone"
import cv2
import numpy as np
import os
import sys
from mrcnn import utils
from mrcnn import visualize
from mrcnn.visualize import display_images
from mrcnn.visualize import display_instances
import mrcnn.model as modellib
from mrcnn.model import log
from mrcnn.config import Config
from mrcnn import model as modellib, utils
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import PIL
from PIL import Image
from mrcnn import model as modellib
def cutImage():
# Input the original image name
# original_image = 'samples/ex7.jpg'
original_image = 'static/theimage.jpg'
input = 'static/theimage.jpg'
# Use OpenCV to read the original image
image = cv2.imread(original_image)
image = cv2.imread(input)
height, width, channels = image.shape
print("Read original image successfully! The original image shape is:")
print("Input image shape is:")
print(image.shape)
dimensions = (width, height)
# blur = cv2.GaussianBlur(image ,(intent,intent),0)
bg = cv2.imread('static/bg.jpg')
bg = cv2.resize(bg, dimensions, interpolation = cv2.INTER_AREA)
background = cv2.imread('static/bg.jpg')
# blur1 = cv2.GaussianBlur(image ,(intent,intent),0)
# cv2.imwrite('original_image.jpg', bg)
background = cv2.resize(background, dimensions, interpolation = cv2.INTER_AREA)
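# cv2.INTER_AREA is a sensible interpolation choice when the background is shrunk to the photo's dimensions, since it averages source pixels and avoids aliasing.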
# Load the pre-trained model data
ROOT_DIR = os.path.join("D:/research -app/2021-129/Backend/")
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
customModel = 'D:/research -app/2021-129/Backend/mask_rcnn_object_0010.h5'
mainFilePath = os.path.join("D:/research -app/2021-129/Backend/")
modelFilePath = os.path.join(mainFilePath, "logs/")
trainedModelPath = 'D:/research -app/2021-129/Backend/mask_rcnn_object_0010.h5'
class InferenceConfig(Config):
NAME = "object"
NUM_CLASSES = 1 + 3
DETECTION_MIN_CONFIDENCE = 0.5
GPU_COUNT = 1
IMAGES_PER_GPU = 1
NUM_CLASSES = 1 + 3 #Background and the total labels of the dataset
DETECTION_MIN_CONFIDENCE = 0.9
IMAGES_PER_GPU = 1
config = InferenceConfig()
# COCO dataset object names
model = modellib.MaskRCNN(
mode="inference", model_dir=MODEL_DIR, config=config
)
model.load_weights(customModel, by_name=True)
class_names = ['BG', 'portrait', 'portrait_body', 'selfie_top']
def apply_mask(image, mask):
bg[:, :, 0] = np.where(
mask == 0,
bg[:, :, 0],
image[:, :, 0]
)
bg[:, :, 1] = np.where(
mask == 0,
bg[:, :, 1],
image[:, :, 1]
)
bg[:, :, 2] = np.where(
mask == 0,
bg[:, :, 2],
image[:, :, 2]
)
return bg
# This function is used to show the object detection result in original image.
def display_instances(image, boxes, masks, ids, names, scores, show_mask=True):
# max_area will save the largest object for all the detection results
model = modellib.MaskRCNN(mode="inference", model_dir=modelFilePath, config=config)
model.load_weights(trainedModelPath, by_name=True)
def cutImage(image, mask):
background[:, :, 0] = np.where(mask == 0, background[:, :, 0], image[:, :, 0])
background[:, :, 1] = np.where(mask == 0, background[:, :, 1], image[:, :, 1])
background[:, :, 2] = np.where(mask == 0, background[:, :, 2], image[:, :, 2])
return background
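# Per-channel compositing: keep the background pixel where mask == 0 and the subject pixel where mask == 1.
# A vectorised sketch of the same idea (assuming mask is a 2-D 0/1 array):
# composited = np.where(mask[..., None] == 0, background, image)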
def cutBackground(image, boxes, masks, show_mask=True):
instances = boxes.shape[0]
if not instances:
print('There are no instances available to display!!!')
print("No instances available!!!")
maskedImage = image.astype(np.uint32).copy()
@@ -90,57 +55,16 @@ def cutImage():
mask = masks[:, :, i]
if show_mask:
maskedImage = apply_mask(maskedImage, mask)
maskedImage = cutImage(maskedImage, mask)
return maskedImage
"""max_area = 0
# n_instances saves the amount of all objects
n_instances = boxes.shape[0]
if not n_instances:
print('NO INSTANCES TO DISPLAY')
else:
assert boxes.shape[0] == masks.shape[-1] == ids.shape[0]
for i in range(n_instances):
if not np.any(boxes[i]):
continue
# compute the square of each object
y1, x1, y2, x2 = boxes[i]
square = (y2 - y1) * (x2 - x1)
# use label to select person object from all the 80 classes in COCO dataset
label = names[ids[i]]
if label == 'selfie_top' or label == 'portrait_body' or label == 'portrait':
# save the largest object in the image as main character
# other people will be regarded as background
if square > max_area:
max_area = square
mask = masks[:, :, i]
else:
continue
else:
continue
# apply mask for the image
save1 = apply_mask(image, mask)
# save2 = apply_mask2(image, mask)
return save1 # ,save2"""
results = model.detect([image], verbose=0)
r = results[0]
detectionResults = model.detect([image], verbose=0)
##applying our effect
r = detectionResults[0]
frame1 = display_instances(
image, r['rois'], r['masks'], r['class_ids'], class_names, r['scores']
)
output = cutBackground(image, r['rois'], r['masks'])
## save image
cv2.imwrite("static/cut.jpg", output)
cv2.imwrite("static/cut.jpg", frame1)
return "done"
import cv2
import numpy as np
import os
import sys
import mrcnn.model as modellib
from mrcnn.model import log
from mrcnn.config import Config
import PIL
from PIL import Image
from matplotlib.pyplot import imread
import scipy as sc
from scipy import ndimage
from skimage import filters
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import math
from skimage.color import rgb2gray
rootLocation = os.path.join("D:/research -app/2021-129/Backend/")
modelLocation = os.path.join(rootLocation, "logs/")
customModel = 'D:/research -app/2021-129/Backend/mask_rcnn_object_0010.h5'
mainFilePath = os.path.join("D:/research -app/2021-129/Backend/")
modelFilePath = os.path.join(mainFilePath, "logs/")
trainedModelPath = 'D:/research -app/2021-129/Backend/mask_rcnn_object_0010.h5'
class InferenceConfig(Config):
NAME = "object"
NUM_CLASSES = 1 + 3
DETECTION_MIN_CONFIDENCE = 0.5
GPU_COUNT = 1
IMAGES_PER_GPU = 1
NUM_CLASSES = 1 + 3 #Background and the total labels of the dataset
DETECTION_MIN_CONFIDENCE = 0.9
IMAGES_PER_GPU = 1
config = InferenceConfig()
model = modellib.MaskRCNN(mode = "inference", model_dir = modelLocation, config = config)
model = modellib.MaskRCNN(mode="inference", model_dir=modelFilePath, config=config)
model.load_weights(customModel, by_name=True)
model.load_weights(trainedModelPath, by_name=True)
class_names = ['BG', 'portrait', 'portrait_body', 'selfie_top']
classes = ['BG', 'portrait', 'portrait_body', 'selfie_top']
def medBlur():
def apply_mask(image, mask):
blurredImage[:, :, 0] = np.where(
mask == 0,
blurredImage[:, :, 0],
image[:, :, 0]
)
blurredImage[:, :, 1] = np.where(
mask == 0,
blurredImage[:, :, 1],
image[:, :, 1]
)
blurredImage[:, :, 2] = np.where(
mask == 0,
blurredImage[:, :, 2],
image[:, :, 2]
)
return blurredImage
mediumBlurredImage[:, :, 0] = np.where(mask == 0, mediumBlurredImage[:, :, 0], image[:, :, 0])
mediumBlurredImage[:, :, 1] = np.where(mask == 0, mediumBlurredImage[:, :, 1], image[:, :, 1])
mediumBlurredImage[:, :, 2] = np.where(mask == 0, mediumBlurredImage[:, :, 2], image[:, :, 2])
return mediumBlurredImage
def applyBlur(image, boxes, masks, show_mask=True):
instances = boxes.shape[0]
if not instances:
print('There are no instances available to display!!!')
print("No instances available!!!")
maskedImage = image.astype(np.uint32).copy()
for i in range(instances):
mask = masks[:, :, i]
if show_mask:
maskedImage = apply_mask(maskedImage, mask)
return maskedImage
original_image = 'static/theimage.jpg'
image = cv2.imread(original_image)
intent= 15
blurredImage = cv2.medianBlur(image, intent)
intent = 15
mediumBlurredImage = cv2.medianBlur(image, intent)
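# Unlike GaussianBlur, cv2.medianBlur takes a single odd aperture size (here 15) rather than a (width, height) tuple.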
results = model.detect([image], verbose=0)
r = results[0]
output = applyBlur(image, r['rois'], r['masks'])
#output = cv2.resize(output, (466, 700))
cv2.imwrite('static/medblur.jpg', output)
return "blurdone"
\ No newline at end of file
import cv2
import numpy as np
import os
import sys
from mrcnn import utils
from mrcnn import visualize
from mrcnn.visualize import display_images
from mrcnn.visualize import display_instances
import mrcnn.model as modellib
from mrcnn.model import log
from mrcnn.config import Config
from mrcnn import model as modellib, utils
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from mrcnn.config import Config
from mrcnn import model as modellib
def Splash():
original_image = 'static/theimage.jpg'
image = cv2.imread(original_image)
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
ROOT_DIR = os.path.join("D:/research -app/2021-129/Backend/")
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
WEIGHTS_PATH = 'D:/research -app/2021-129/Backend/mask_rcnn_object_0010.h5'
class InferenceConfig(Config):
NAME = "object"
GPU_COUNT = 1
IMAGES_PER_GPU = 1
NUM_CLASSES = 1 + 3 #Background and the total labels of the dataset
NUM_CLASSES = 1 + 3
DETECTION_MIN_CONFIDENCE = 0.9
IMAGES_PER_GPU = 1
@@ -44,32 +31,18 @@ def Splash():
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
model.load_weights(weights_path, by_name=True)
class_names = ['BG', 'portrait', 'portrait_body', 'selfie_top']
def apply_mask(image, mask):
image[:, :, 0] = np.where(
mask == 0,
gray_image[:, :],
image[:, :, 0]
)
image[:, :, 1] = np.where(
mask == 0,
gray_image[:, :],
image[:, :, 1]
)
image[:, :, 2] = np.where(
mask == 0,
gray_image[:, :],
image[:, :, 2]
)
image[:, :, 0] = np.where(mask == 0, gray_image[:, :], image[:, :, 0])
image[:, :, 1] = np.where(mask == 0, gray_image[:, :], image[:, :, 1])
image[:, :, 2] = np.where(mask == 0, gray_image[:, :], image[:, :, 2])
return image
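# Colour-splash effect: wherever mask == 0 the pixel is replaced by its greyscale value, so only the detected subject keeps its colour.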
def display_instances(image, boxes, masks, ids, names, scores, show_mask=True):
def display_instances(image, boxes, masks, show_mask=True):
instances = boxes.shape[0]
if not instances:
print('There are no instances available to display!!!')
print("No instances available!!!")
maskedImage = image.astype(np.uint32).copy()
@@ -80,51 +53,16 @@ def Splash():
maskedImage = apply_mask(maskedImage, mask)
return maskedImage
"""max_area = 0
# n_instances saves the amount of all objects
n_instances = boxes.shape[0]
if not n_instances:
print('NO INSTANCES TO DISPLAY')
else:
assert boxes.shape[0] == masks.shape[-1] == ids.shape[0]
for i in range(n_instances):
if not np.any(boxes[i]):
continue
# compute the square of each object
y1, x1, y2, x2 = boxes[i]
square = (y2 - y1) * (x2 - x1)
# use label to select person object from all the 80 classes in COCO dataset
label = names[ids[i]]
if label == 'selfie_top' or label == 'portrait_body' or label == 'portrait':
# save the largest object in the image as main character
# other people will be regarded as background
if square > max_area:
max_area = square
mask = masks[:, :, i]
else:
continue
else:
continue
image = apply_mask(image, mask)
return image"""
results = model.detect([image], verbose=0)
r = results[0]
frame = display_instances(
image, r['rois'], r['masks'], r['class_ids'], class_names, r['scores']
)
output = display_instances(image, r['rois'], r['masks'])
frame = cv2.resize(np.float32(frame), (466, 700))
cv2.imwrite('static/splash.jpg', frame)
output = cv2.resize(np.float32(output), (466, 700))
cv2.imwrite('static/splash.jpg', output)
return "splashdone"
@@ -2,24 +2,24 @@ import numpy as np
import cv2
def vignette():
input_image = 'static/theimage.jpg'
input = 'static/theimage.jpg'
input_image = cv2.imread(input_image)
input = cv2.imread(input)
rows, cols = input_image.shape[:2]
rows, cols = input.shape[:2]
X_resultant_kernel = cv2.getGaussianKernel(cols,200)
xResultantKernel = cv2.getGaussianKernel(cols, 200)
Y_resultant_kernel = cv2.getGaussianKernel(rows,200)
yResultantKernel = cv2.getGaussianKernel(rows, 200)
resultant_kernel = Y_resultant_kernel * X_resultant_kernel.T
resultant_kernel = yResultantKernel * xResultantKernel.T
mask = 255 * resultant_kernel / np.linalg.norm(resultant_kernel)
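# The outer product of the two 1-D Gaussian kernels forms a 2-D weight map that peaks at the image centre and falls off towards the corners, which is what darkens the edges of the vignette.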
output = np.copy(input_image)
output = np.copy(input)
for i in range(3):
output[:, :, i] = output[:,:,i] * mask
output[:, :, i] = output[:, :, i] * mask
cv2.imwrite('static/vignette.jpg', output)
@@ -3,7 +3,6 @@ from flask_cors import CORS, cross_origin
from Cut import cutImage
from Splash import Splash
from Blur import Blur
from maskImage import mask
from bokeh import Bokeh
from MedBlur import medBlur
from Vignette import vignette
@@ -12,7 +11,6 @@ import tensorflow as tf
from flask_ngrok import run_with_ngrok
import base64
app = Flask(__name__)
run_with_ngrok(app)
cors = CORS(app)
@@ -27,10 +25,10 @@ def convert_and_save(b64_string):
with open("static/theimage.jpg", "wb") as fh:
fh.write(base64.decodebytes(b64_string.encode()))
@app.route("/")
def hello():
return "Checking Connectivity..."
@app.route('/upload', methods = ['POST'])
@cross_origin()
@@ -38,22 +36,10 @@ def upload_file():
if request.method == 'POST':
image = request.form.get('image')
convert_and_save(image)
#image.save(os.path.join(app.config["IMAGE_UPLOADS"], "static/theimage.jpg"))
return "done"
# @app.route('/upload', methods = ['POST'])
# @cross_origin()
# def upload_file():
# if request.method == 'POST':
# if request.files:
# image = request.files["image"]
# image.save(os.path.join(app.config["IMAGE_UPLOADS"], "static/theimage.jpg"))
# global graph
# with graph.as_default():
# mask()
# return "done"
@app.route('/uploadbg', methods = ['POST'])
@cross_origin()
def upload_bg():
@@ -5,7 +5,6 @@ from mrcnn.config import Config
import matplotlib.pyplot as plt
import numpy as np
def Bokeh():
input = 'static/theimage.jpg'
@@ -28,7 +27,7 @@ def Bokeh():
def bokeh(image):
r,g,b = cv2.split(image)
r = r / 255.
r = r / 255. #normalize
r = np.where(r > 0.9, r * 2, r)
filteredR = cv2.filter2D(r, -1, kernel)
filteredR = np.where(filteredR > 1., 1., filteredR)
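# `kernel` is presumably a disc-shaped averaging kernel defined in the elided lines above; boosting pixels above 0.9 before the convolution makes bright highlights bloom into bokeh-style discs, and the np.where clamp keeps the result in [0, 1].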
@@ -66,9 +65,7 @@ def Bokeh():
model.load_weights(trainedModelPath, by_name=True)
classes = ['BG', 'portrait', 'portrait_body', 'selfie_top']
def applyBokeh(imageInput, boxes, masks, class_ids, classes, scores, show_mask=True):
def applyBokeh(imageInput, boxes, masks):
instances = boxes.shape[0]
if not instances:
@@ -109,7 +106,7 @@ def Bokeh():
r = detectionResults[0]
output = applyBokeh(imageRGB, r['rois'], r['masks'], r['class_ids'], classes, r['scores'])
output = applyBokeh(imageRGB, r['rois'], r['masks'])
cv2.imwrite('static/bokeh.jpg', output)
import warnings
warnings.filterwarnings('ignore')
import os
import sys
import json
import datetime
import numpy as np
import skimage.draw
import cv2
import random
import math
import re
import time
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.image as mpimg
from mrcnn import utils
from mrcnn import visualize
from mrcnn.visualize import display_images
from mrcnn.visualize import display_instances
import mrcnn.model as modellib
from mrcnn.model import log
from mrcnn.config import Config
from mrcnn import model as modellib, utils
def mask():
path_to_new_image = 'static/theimage.jpg'
#image1 = mpimg.imread(path_to_new_image)
image1 = cv2.imread(path_to_new_image)
ROOT_DIR = os.path.join("D:/research -app/2021-129/Backend/")
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
WEIGHTS_PATH = 'D:/research -app/2021-129/Backend/mask_rcnn_object_0010.h5'
class InferenceConfig(Config):
NAME = "object"
GPU_COUNT = 1
IMAGES_PER_GPU = 1
NUM_CLASSES = 1 + 3 #Background and the total labels of the dataset
DETECTION_MIN_CONFIDENCE = 0.5
IMAGES_PER_GPU = 1
config = InferenceConfig()
#config.display()
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
weights_path = WEIGHTS_PATH
model.load_weights(weights_path, by_name=True)
class_names = ['BG', 'portrait', 'portrait_body', 'selfie_top']
results1 = model.detect([image1], verbose=1)
r1 = results1[0]
visualize.display_instances(image1, r1['rois'], r1['masks'], r1['class_ids'],
class_names, r1['scores'], title="Predictions1")
return True
import warnings
warnings.filterwarnings('ignore')
import os
import sys
import json
import datetime
import numpy as np
import skimage.draw
import cv2
import random
import math
import re
import time
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.image as mpimg
import colorsys
from skimage.measure import find_contours
from matplotlib.patches import Polygon
from mrcnn import utils
from mrcnn import visualize
from mrcnn.visualize import display_images
from mrcnn.visualize import display_instances
import mrcnn.model as modellib
from mrcnn.model import log
from mrcnn.config import Config
from mrcnn import model as modellib, utils
#import custom
# Root directory of the project
ROOT_DIR = ""
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
WEIGHTS_PATH = "mask_rcnn_object_0010.h5"
class CustomConfig(Config):
"""Configuration for training on the custom dataset.
Derives from the base Config class and overrides some values.
"""
# Give the configuration a recognizable name
NAME = "object"
IMAGES_PER_GPU = 1
NUM_CLASSES = 1 + 3 # Background + labels
# Number of training steps per epoch
STEPS_PER_EPOCH = 10
# Skip detections with < 90% confidence
DETECTION_MIN_CONFIDENCE = 0.9
config = CustomConfig()
class InferenceConfig(config.__class__):
# Run detection on one image at a time
GPU_COUNT = 1
IMAGES_PER_GPU = 1
DETECTION_MIN_CONFIDENCE = 0.7
config = InferenceConfig()
config.display()
#LOAD MODEL. Create model in inference mode
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
# Load COCO weights Or, load the last model you trained
weights_path = WEIGHTS_PATH
# Load weights
# print("Loading weights ", weights_path)
model.load_weights(weights_path, by_name=True)
class_names = ['BG', 'portrait','portrait_body', 'selfie_top']
# path = sys.argv[1]
#path_to_new_image = 'D:/research/maskRcnn/images/portrait demo 1.jpg'
path_to_new_image = path.split("/")[-1]
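# `path` is not defined in this module; it presumably comes from the commented-out `path = sys.argv[1]` above, so restore that line (or set `path` explicitly) before running this file standalone.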
image1 = mpimg.imread(path_to_new_image)
plt.imshow(image1)
def random_colors(N, bright=True):
"""
Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
"""
brightness = 1.0 if bright else 0.7
hsv = [(i / N, 1, brightness) for i in range(N)]
colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
random.shuffle(colors)
return colors
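# Usage sketch: random_colors(3) returns three (r, g, b) tuples with components in [0, 1], shuffled so that neighbouring instance indices get visually distinct hues.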
def apply_mask(image, mask, color, alpha=0.5):
"""Apply the given mask to the image.
"""
for c in range(3):
image[:, :, c] = np.where(mask == 1,
image[:, :, c] *
(1 - alpha) + alpha * color[c] * 255,
image[:, :, c])
return image
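# Worked example: with alpha=0.5 and color[c]=1.0, a masked pixel of value 100 blends to 100 * 0.5 + 0.5 * 255 = 177.5.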
def display_instances(image, boxes, masks, class_ids, class_names,
scores=None, title="",
figsize=(16, 16), ax=None,
show_mask=True, show_bbox=True,
colors=None, captions=None):
"""
boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.
masks: [height, width, num_instances]
class_ids: [num_instances]
class_names: list of class names of the dataset
scores: (optional) confidence scores for each box
title: (optional) Figure title
show_mask, show_bbox: To show masks and bounding boxes or not
figsize: (optional) the size of the image
colors: (optional) An array of colors to use with each object
captions: (optional) A list of strings to use as captions for each object
"""
# Number of instances
N = boxes.shape[0]
if not N:
print("\n*** No instances to display *** \n")
else:
assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]
# If no axis is passed, create one and automatically call show()
auto_show = False
if not ax:
_, ax = plt.subplots(1, figsize=figsize)
auto_show = True
# Generate random colors
colors = colors or random_colors(N)
# Show area outside image boundaries.
height, width = image.shape[:2]
ax.set_ylim(height + 10, -10)
ax.set_xlim(-10, width + 10)
ax.axis('off')
ax.set_title(title)
masked_image = image.astype(np.uint32).copy()
for i in range(N):
color = colors[i]
# Bounding box
if not np.any(boxes[i]):
# Skip this instance. Has no bbox. Likely lost in image cropping.
continue
y1, x1, y2, x2 = boxes[i]
if show_bbox:
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
alpha=0.7, linestyle="dashed",
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Label
if not captions:
class_id = class_ids[i]
score = scores[i] if scores is not None else None
label = class_names[class_id]
caption = "{} {:.3f}".format(label, score) if score else label
else:
caption = captions[i]
ax.text(x1, y1 + 8, caption,
color='w', size=11, backgroundcolor="none")
# Mask
mask = masks[:, :, i]
if show_mask:
masked_image = apply_mask(masked_image, mask, color)
# Mask Polygon
# Pad to ensure proper polygons for masks that touch image edges.
padded_mask = np.zeros(
(mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
padded_mask[1:-1, 1:-1] = mask
contours = find_contours(padded_mask, 0.5)
for verts in contours:
# Subtract the padding and flip (y, x) to (x, y)
verts = np.fliplr(verts) - 1
p = Polygon(verts, facecolor="none", edgecolor=color)
ax.add_patch(p)
ax.imshow(masked_image.astype(np.uint8))
if auto_show:
plt.show()
# print(len([image1]))
results1 = model.detect([image1], verbose=1)
r1 = results1[0]
display_instances(image1, r1['rois'], r1['masks'], r1['class_ids'],
class_names, r1['scores'], title="Predictions1")
\ No newline at end of file
import cv2
import numpy as np
import os
import sys
from mrcnn import utils
from mrcnn import visualize
from mrcnn.visualize import display_images
from mrcnn.visualize import display_instances
import mrcnn.model as modellib
from mrcnn.model import log
from mrcnn.config import Config
from mrcnn import model as modellib, utils
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import PIL
from PIL import Image
MODEL_DIR = os.path.join("logs")
COCO_MODEL_PATH = 'D:/chrome/Y4S2/research/New folder (3)/Dinusha-IT18118346/Backend/mask_rcnn_object_0010.h5'
class CustomConfig(Config):
"""Configuration for training on the custom dataset.
Derives from the base Config class and overrides some values.
"""
# Give the configuration a recognizable name
NAME = "object"
IMAGES_PER_GPU = 1
NUM_CLASSES = 1 + 3 # Background + labels
# Number of training steps per epoch
STEPS_PER_EPOCH = 10
# Skip detections with < 90% confidence
DETECTION_MIN_CONFIDENCE = 0.9
config = CustomConfig()
# Change the config information
class InferenceConfig(config.__class__):
GPU_COUNT = 1
# Number of images to train with on each GPU. A 12GB GPU can typically
# handle 2 images of 1024x1024px.
# Adjust based on your GPU memory and image sizes. Use the highest
# number that your GPU can handle for best performance.
IMAGES_PER_GPU = 1
config = InferenceConfig()
# COCO dataset object names
model = modellib.MaskRCNN(
mode="inference", model_dir=MODEL_DIR, config=config
)
model.load_weights(COCO_MODEL_PATH, by_name=True)
class_names = ['BG', 'portrait', 'portrait_body', 'selfie_top']
"""def apply_mask2(image, mask):
blur1[:, :, 0] = np.where(
mask == 0,
image[:, :, 0],
blur1[:, :,0]
)
blur1[:, :, 1] = np.where(
mask == 0,
image[:, :, 1],
blur1[:, :,1]
)
blur1[:, :, 2] = np.where(
mask == 0,
image[:, :, 2],
blur1[:, :,2]
)
return blur1 """
def Blur():
# Input the original image name
original_image = 'static/theimage.jpg'
# Use OpenCV to read the original image
image = cv2.imread(original_image)
intent= 15
dimensions = image.shape
blur = cv2.GaussianBlur(image ,(intent,intent),0)
def apply_mask(image, mask):
blur[:, :, 0] = np.where(
mask == 0,
blur[:, :, 0],
image[:, :, 0]
)
blur[:, :, 1] = np.where(
mask == 0,
blur[:, :, 1],
image[:, :, 1]
)
blur[:, :, 2] = np.where(
mask == 0,
blur[:, :, 2],
image[:, :, 2]
)
return blur
# This function is used to show the object detection result in original image.
def display_instances(image, boxes, masks, ids, names, scores):
# max_area will save the largest object for all the detection results
max_area = 0
# n_instances saves the amount of all objects
n_instances = boxes.shape[0]
if not n_instances:
print('NO INSTANCES TO DISPLAY')
else:
assert boxes.shape[0] == masks.shape[-1] == ids.shape[0]
for i in range(n_instances):
if not np.any(boxes[i]):
continue
# compute the square of each object
y1, x1, y2, x2 = boxes[i]
square = (y2 - y1) * (x2 - x1)
# use the label to pick the subject classes of our custom dataset (selfie_top, portrait_body, portrait)
label = names[ids[i]]
if label == 'selfie_top' or label == 'portrait_body' or label == 'portrait':
# save the largest object in the image as main character
# other people will be regarded as background
if square > max_area:
max_area = square
mask = masks[:, :, i]
else:
continue
else:
continue
# apply mask for the image
save1 = apply_mask(image, mask)
# save2 = apply_mask2(image, mask)
return save1 # ,save2
## run the Mask R-CNN model
results = model.detect([image], verbose=0)
r = results[0]
##applying our effect
frame1 = display_instances(
image, r['rois'], r['masks'], r['class_ids'], class_names, r['scores']
)
frame1 = cv2.resize(frame1, (466, 700))
cv2.imwrite('static/blur.jpg', frame1)
return "blurdone"
@@ -21,7 +21,7 @@ android {
}
python {
buildPython "C:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python39/python.exe"
buildPython "C:\\Users\\ACER\\AppData\\Local\\Programs\\Python\\Python36/python.exe"
pip {
install "opencv-contrib-python-headless"
install "pillow"
@@ -10,7 +10,7 @@ import retrofit2.http.Part;
public interface Api {
String baseurl="http://10.0.2.15:5000";
String baseurl="http://875a-112-134-175-167.ngrok.io";
@Multipart
@POST("upload/")
@@ -38,7 +38,7 @@ public class Backgroundupload extends AppCompatActivity {
private Button btncutprocess, btnselectimage;
ImageView image;
private static final String UPLOAD_IMAGE_URL="http://727a-112-134-169-152.ngrok.io/uploadbg";
private static final String UPLOAD_IMAGE_URL="http://875a-112-134-175-167.ngrok.io/uploadbg";
String f_path, filePath, fileExtn;
String f_extension;
ProgressDialog progress;
@@ -3,6 +3,6 @@ package com.app.smartphotoeditor.ImageProcessing;
public class Constants {
// This should be replaced with the server link when server starts running
public static final String IMGLINK = "727a-112-134-169-152";
public static final String IMGLINK = "875a-112-134-175-167";
}
@@ -9,7 +9,7 @@ import retrofit2.converter.gson.GsonConverterFactory;
public class RetrofitClient {
// private static final String baseurl="http://10.0.2.15:5000";
private static final String baseurl="http://727a-112-134-169-152.ngrok.io";
private static final String baseurl="http://875a-112-134-175-167.ngrok.io";
private static RetrofitClient mInstance;
private Retrofit retrofit;
@@ -76,7 +76,7 @@ public class EditorActivity extends AppCompatActivity
private ArrayList<View> viewsInDisplay = new ArrayList<>();
private static final String UPLOAD_IMAGE_URL = "http://727a-112-134-169-152.ngrok.io/upload";
private static final String UPLOAD_IMAGE_URL = "http://875a-112-134-175-167.ngrok.io/upload";
ProgressDialog progress;
@@ -228,13 +228,11 @@ public class EditorActivity extends AppCompatActivity
});
t.start();
// intent = new Intent(getApplicationContext(), MedBlur.class);
}
else if(clickedPos == 7) {
openCutActivity();
//intent = new Intent(getApplicationContext(), Backgroundupload.class);
}
else if(clickedPos == 8){
@@ -276,7 +274,6 @@ public class EditorActivity extends AppCompatActivity
});
t.start();
//intent = new Intent(getApplicationContext(), Blur.class);
}
else if(clickedPos == 9) {
@@ -321,47 +318,8 @@ public class EditorActivity extends AppCompatActivity
});
t.start();
//intent = new Intent(getApplicationContext(), Bokeh.class);
}
else if(clickedPos == 10) {
// progress = new ProgressDialog(EditorActivity.this);
// progress.setTitle("Loading");
// progress.setMessage("Please Wait...");
// progress.show();
// progress.setCancelable(false);
// progress.setCanceledOnTouchOutside(false);
//
// Thread t = new Thread(new Runnable() {
// @Override
// public void run() {
//
// Call<ResponseBody> call = RetrofitClient
// .getInstance()
// .getApi()
// .vignette();
//
// call.enqueue(new Callback<ResponseBody>() {
// @Override
// public void onResponse(Call<ResponseBody> call, Response<ResponseBody> response) {
// try {
// String s = response.body().string();
// Toast.makeText(EditorActivity.this, s, Toast.LENGTH_LONG).show();
// progress.dismiss();
// openVignetteActivity();
// } catch (IOException e) {
// e.printStackTrace();
// }
// }
//
// @Override
// public void onFailure(Call<ResponseBody> call, Throwable t) {
// progress.dismiss();
// Toast.makeText(EditorActivity.this, t.getMessage(), Toast.LENGTH_LONG).show();
// }
// });
// }
// });
// t.start();
progress = new ProgressDialog(EditorActivity.this);
progress.setTitle("Loading");
@@ -401,11 +359,48 @@ public class EditorActivity extends AppCompatActivity
});
t.start();
//intent = new Intent(getApplicationContext(), Splashedit.class);
}
if(clickedPos == 11)
{
openVignetteActivity();
progress = new ProgressDialog(EditorActivity.this);
progress.setTitle("Loading");
progress.setMessage("Please Wait...");
progress.show();
progress.setCancelable(false);
progress.setCanceledOnTouchOutside(false);
Thread t = new Thread(new Runnable() {
@Override
public void run() {
Call<ResponseBody> call = RetrofitClient
.getInstance()
.getApi()
.vignette();
call.enqueue(new Callback<ResponseBody>() {
@Override
public void onResponse(Call<ResponseBody> call, Response<ResponseBody> response) {
try {
String s = response.body().string();
Toast.makeText(EditorActivity.this, s, Toast.LENGTH_LONG).show();
progress.dismiss();
openVignetteActivity();
} catch (IOException e) {
e.printStackTrace();
}
}
@Override
public void onFailure(Call<ResponseBody> call, Throwable t) {
progress.dismiss();
Toast.makeText(EditorActivity.this, t.getMessage(), Toast.LENGTH_LONG).show();
}
});
}
});
t.start();
}
if(intent != null)