Commit 6ff0b856 authored by Givindu's avatar Givindu

V1.22

parent 0cc20836
......@@ -26,7 +26,7 @@
<h1 class="mx-auto my-0 text-uppercase">Abnormal Behavior Detection</h1>
<h2 class="text-white-50 mx-auto mt-2 mb-5">Detect suspects & abnormal behaviors accompanied by
Threatening Weapons (Hand Guns, Knives, Grenade)</h2>
<img src="assets/gun.png" alt="Girl in a jacket" width="200" height="200"
<img src="assets/gun_new.png" alt="Girl in a jacket" width="200" height="200"
style="position: relative; top: 15px; opacity:90%;">
<div class="row justify-content-md-center" style="padding: 36px;">
<div class="col col-lg-2">
......
......@@ -48,7 +48,7 @@ def capture_humans(path):
while True:
ret, frame = cap.read()
if ret != False:
frame = imutils.resize(frame, width=800)
frame = imutils.resize(frame, width=600)
total_frames = total_frames + 1
(H, W) = frame.shape[:2]
......
......@@ -13,7 +13,8 @@ def data(path):
path = path
elif file == "Input.avi":
path = "E:\\BACKBONE\\videos\\" + file
elif file == "Input.mp4":
path = "E:\\BACKBONE\\videos\\" + file
print("path ->" + path)
......
import glob
import cv2
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Batch person detection: run YOLOv3 over every frame image in the input
# folder, draw a box + class label around each detected person (COCO class 0),
# and save the annotated copies under a new path.
count = 1
path = glob.glob("C:/Users/Givindu/Desktop/New folder/frames/New/*.jpg")

# Load the class names and the network ONCE, outside the per-frame loop:
# re-reading coco.names and re-parsing the yolov3 weights for every single
# frame is pure loop-invariant overhead and does not change the results.
classes = None
with open('coco.names', 'r') as f:
    classes = [line.strip() for line in f.readlines()]
# read pre-trained model and config file
net = cv2.dnn.readNet('yolov3.weights', 'yolov3.cfg')

for file in path:
    image = plt.imread(file)
    Width = image.shape[1]
    Height = image.shape[0]

    # create input blob and set it as the network input
    net.setInput(cv2.dnn.blobFromImage(image, 0.00392, (416, 416), (0, 0, 0), True, crop=False))

    # run inference through the network and gather predictions from the
    # unconnected (YOLO output) layers
    layer_names = net.getLayerNames()
    output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
    outs = net.forward(output_layers)

    class_ids = []
    confidences = []
    boxes = []
    # create bounding boxes: decode each detection's normalized
    # centre/width/height back into pixel coordinates
    for out in outs:
        for detection in out:
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > 0.1:
                center_x = int(detection[0] * Width)
                center_y = int(detection[1] * Height)
                w = int(detection[2] * Width)
                h = int(detection[3] * Height)
                x = center_x - w / 2
                y = center_y - h / 2
                class_ids.append(class_id)
                confidences.append(float(confidence))
                boxes.append([x, y, w, h])

    # non-maximum suppression drops overlapping duplicate boxes
    indices = cv2.dnn.NMSBoxes(boxes, confidences, 0.1, 0.1)

    # keep only person detections (COCO class id 0) and annotate the frame
    for i in indices:
        i = i[0]
        box = boxes[i]
        if class_ids[i] == 0:
            # BUG FIX: label the detection that was actually kept, not
            # whichever detection happened to be decoded last (`class_id`
            # was a stale loop variable from the decode pass above).
            label = str(classes[class_ids[i]])
            cv2.rectangle(image, (round(box[0]), round(box[1])),
                          (round(box[0] + box[2]), round(box[1] + box[3])), (0, 0, 0), 2)
            cv2.putText(image, label, (round(box[0]) - 10, round(box[1]) - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)
    cv2.imwrite('C:/Users/Givindu/Desktop/frames/frame' + str(count) + '.jpg', image)
    count += 1
# plt.imshow(image)
......@@ -15,7 +15,7 @@ count = 1
start = time.time()
def weaponDetection():
cap = cv.VideoCapture('E:\\BACKBONE\\abnormal_behavior_detection\\frames\\Generated Video Output\\Output Video.avi')
cap = cv.VideoCapture('E:\\BACKBONE`\\abnormal_behavior_detection\\frames\\Generated Video Output\\Output Video.avi')
whT = 320
confThreshold = 0.1
nmsThreshold = 0.1
......@@ -72,7 +72,7 @@ def weaponDetection():
# Detect handguns
if classIds[i] == '0':
if classIds[i] == 0:
# print(x,y,w,h)
text = "{:.4f}%".format((confs[i])*100)
print("Suspect Detected : ", "[", text, "]")
......@@ -97,7 +97,7 @@ def weaponDetection():
# Detect knives
if classIds[i] == 1:
if classIds[i] == '1':
# print(x,y,w,h)
text = "{:.4f}%".format((confs[i])*100)
print("Suspect Detected : ", "[", text, "]")
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment