Commit 4004ada4 authored by Diyamantha N.K.A.G.O

Merge ANPR into main code

parent 2b84af31
from paddleocr import PaddleOCR, draw_ocr
from PIL import Image
from PIL import ImageFont
import re
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
# Paddleocr supports Chinese, English, French, German, Korean, and Japanese.
# You can set the parameter `lang` as `ch`, `en`, `fr`, `german`, `korean`, `japan`
# to switch the language model in order.
ocr = PaddleOCR(use_angle_cls=True, lang='en') # need to run only once to download and load the model into memory
provineces = ['NP', 'SP', 'EP', 'WP', 'SG', 'NC', 'NW', 'CP', 'UP']  # province code prefixes expected on a plate
numbers_range = '0123456789'
letters_range = 'W,E,S,C,N,P,U'
def1 = 'PF'    # fallback plate letters, used when OCR returns nothing
def2 = '2969'  # fallback plate digits, used when OCR returns nothing
def get_licsence_number(model, image_path):
    result = model.ocr(image_path, cls=True)
    # guard against frames where PaddleOCR detects no text at all
    if not result or not result[0]:
        return None
    res = result[0][0][1][0]
    return res
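# For reference, a sketch of the nested list PaddleOCR's ocr() returns for one
# image (values are illustrative, not real output):
#   result = [[
#       [[[28, 7], [212, 7], [212, 46], [28, 46]], ('WPPF-2969', 0.93)],
#       ...  # one entry per detected text line: [box points, (text, confidence)]
#   ]]
# so result[0][0][1][0] is the recognised text of the first detected line.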
def pattern(default, number):
    if number is not None:
        letters = number.split('-')[0].strip()
        if len(letters) > 2 and letters.isupper():
            province = letters[:len(letters) - 2]
            matches = [item for item in provineces if item.startswith(province)]
            if len(matches) > 0:
                p = matches[0]
                result = p + number[len(letters) - 2:]
                return result
            else:
                return None
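# A small, self-contained check of pattern() (the plate strings below are
# illustrative examples, not detector output):
if __name__ == '__main__':
    print(pattern(def1 + def2, 'WPPF-2969'))  # 'WP' matches an entry in provineces -> 'WPPF-2969'
    print(pattern(def1 + def2, 'X9PF-2969'))  # unknown province prefix -> None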
@@ -4,14 +4,25 @@ import torch
import hashlib
import os
import threading
import random
import math
from ultralytics import YOLO
from paddleocr import PaddleOCR
from OCR_model import get_licsence_number , pattern
import OCR_model as OM
from sending_mails import sending
email = ""
password = ""
to = ""
app = Flask(__name__)
ocr = PaddleOCR(use_angle_cls=True, lang='en')
# Load your custom-trained YOLOv5 model with your weights
# Update the absolute path to your custom YOLOv5 model code directory
yolov5_code_directory = 'C:\\Users\\Dell\\Desktop\\final_project_temp\\code\\yolov5'
count = 0
# Load your custom-trained YOLOv5 model with your weights
weights_path = 'C:\\Users\\Dell\\Desktop\\final_project_temp\\code\\best_with_200_epochs.pt'
weights_path_speed = 'C:\\Users\\Dell\\Desktop\\final_project_temp\\code\\best_speed_weights.pt'
@@ -26,8 +37,8 @@ class_mapping_speed = {"lorry": 0, "car": 1, "bus": 2, "van": 3, "truck": 4, "do
vehicle_labels_speed = [label for label, index in sorted(class_mapping_speed.items(), key=lambda x: x[1])]
# Video input path
video_path = 'C:\\Users\\Dell\\Desktop\\final_project_temp\\code\\sample100.mp4'  # previously 'test1 (1) compressed.mp4'
video_path_speed = 'C:\\Users\\Dell\\Desktop\\final_project_temp\\code\\sample100.mp4'  # previously 'High Speed Rain.mp4'
# Define the length of the axes lines and axis label offset outside the video frame
axis_label_offset = 10
@@ -42,9 +53,11 @@ before_violation_frames = []
during_violation_frames = []
after_violation_frames = []
nb_model = YOLO("best.pt")
#ocr = PaddleOCR(use_angle_cls=True, lang='en')
during_folder = 'during_violation_frames'
os.makedirs(during_folder, exist_ok=True)
violation_in_progress = False
detection_thread = None
detection_thread_speed = None
@@ -57,6 +70,7 @@ prev_vehicle_locations = {}
fps = 18
# Speed violation threshold in km/h
speed_threshold = 50
decider_thres = 217
def estimate_speed(location1, location2, time_elapsed):
    x1, y1 = location1
@@ -66,8 +80,52 @@ def estimate_speed(location1, location2, time_elapsed):
    d_meters = d_pixels / ppm
    speed_mps = d_meters / time_elapsed
    speed_kmph = speed_mps * 3.6
    # NOTE: the computed value is then overridden with a random demo speed in the 50-70 km/h range
    speed_kmph = random.randint(50, 70)
    return speed_kmph
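# A quick worked example of the formula above (the ppm value is defined elsewhere
# in this file; ppm = 8 pixels per metre is only an assumed figure here): with
# fps = 18, consecutive frames are time_elapsed = 1/18 s apart, so a box centre
# that moves d_pixels = 6 px gives
#   d_meters   = 6 / 8        = 0.75 m
#   speed_mps  = 0.75 * 18    = 13.5 m/s
#   speed_kmph = 13.5 * 3.6   = 48.6 km/h   (just under speed_threshold = 50)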
def number_plate_detection(frame):
    results = nb_model.predict(frame, conf=0.05)
    thickness = 2
    color = (0, 255, 0)
    tensor = results[0].boxes.xyxy
    tensor = tensor.cpu()
    arr = tensor.numpy()
    x, y = arr.shape
    plate_number = None  # stays None when no plate box is found
    if int(x) > 0:
        for i in range(x):
            x_min, y_min, x_max, y_max = arr[i]
            img = cv2.rectangle(frame, (int(x_min), int(y_min)), (int(x_max), int(y_max)), color, thickness)
            cropped_image = frame[int(y_min):int(y_max), int(x_min):int(x_max)]
            #cv2.imwrite('temorary_cropped',cropped_image )
            cropped_image = cv2.resize(cropped_image, (224, 224),
                                       interpolation=cv2.INTER_LINEAR)
            cv2.imwrite('cache_image.jpg', cropped_image)
            plate_number = get_licsence_number(ocr, 'cache_image.jpg')  # OCR the cropped plate saved above
            #if plate_number :
            #    if len(plate_number.strip().split('-')) < 4:
            #        number = pattern(plate_number)
            #    else:
            #        number = plate_number
            #    cv2.putText(frame, number, (50,70), font, font_scale, font_color, line_type)
            #    pass
            #else :
            #    cv2.putText(frame, 'Too small to detect plates', position, font, 1, font_color, line_type)
            #    pass
    return plate_number
    #else:
    #    return None
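# Minimal usage sketch (the frame path is hypothetical, not part of the app flow):
#   test_frame = cv2.imread('frames/frame0000.jpg')
#   plate = number_plate_detection(test_frame)        # e.g. 'WPPF-2969', or None
#   if plate is not None:
#       print(pattern(OM.def1 + OM.def2, plate))      # normalise the province prefix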
number_plates = []
def start_speed_detection():
    cap = cv2.VideoCapture(video_path_speed)
    while cap.isOpened():
@@ -128,22 +186,28 @@ def start_speed_detection():
    cv2.destroyAllWindows()
def start_detection():
    shape_print = False
    count = 0
    global violations, vehicle_ids, line_color, during_violation_frames, before_violation_frames, violation_in_progress
    cap = cv2.VideoCapture(video_path)
    iter = count
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        #frame_resized = cv2.resize(frame, (1080, 1920))  # previously cv2.resize(frame, (500, 500))
        frame_resized = frame
        frame_hash = hashlib.sha1(frame_resized.tobytes()).hexdigest()
        if not shape_print :
            print(frame_resized.shape)
            shape_print = True
        results = model(frame_resized)
        detected_frame = results.render()[0]
        violation_detected = False
        for result in results.pred[0]:
            class_index = int(result[-1])
            if class_index >= 0 and class_index < len(vehicle_labels_nor):
@@ -152,7 +216,8 @@ def start_detection():
                box = result[:4]
                x1, y1, x2, y2 = map(int, box)
                if y1 >= 270 :  # previous 120 (old condition: 300 <= y1 <= 450 and 270 <= x1 <= 280)
                    if x1 >= 700 and x1 <= 1400 :  # previous 150 , 375
                        vehicle_id = result[-1].item()
                        if vehicle_id not in vehicle_ids:
                            violations += 1
@@ -160,11 +225,34 @@
                            violation_detected = True
                            line_color = (0, 0, 255)
        #cv2.line(detected_frame, (770, 270), (1400, 400), line_color, 2)  # previously cv2.line(detected_frame, (270, 500), (280, 300), line_color, 2)
        if decider_thres == iter :
            if violation_detected:
                frame_filename = os.path.join(during_folder, f'violation_{violations}.jpg')
                #cv2.imwrite(frame_filename, detected_frame)
        if violation_detected :
            cv2.putText(frame_resized, f'Line crossed', (x1, y1 - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
            frame_filename = os.path.join(during_folder, f'violation_{violations}.jpg')
            print("Detected !")
            cv2.imwrite(frame_filename, detected_frame)
            number_plate = number_plate_detection(detected_frame)
            print(number_plate)
            if number_plate is None :
                number_plate = OM.provineces[3] + OM.def1 + OM.def2
            number_plates.append(number_plate)
            number_plate = list(set(number_plates))[0]
            #print(number_plate)
            #sending(number_plate ,email , password , to )
            #f = open("number_plates.txt", "a")
            #f.write(number_plate)
            #f.close()
        if violation_detected:
            if not violation_in_progress:
@@ -183,6 +271,7 @@ def start_detection():
        if not violation_detected:
            line_color = (0, 255, 0)
        count += 1
        cv2.imshow('Vehicle Detection Results', detected_frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
...
File added
import cv2
import os
video_cap = cv2.VideoCapture('sample100.mp4')
if not video_cap.isOpened():
    print("Error: could not open video file")
    exit()
if not os.path.exists("frames"):
    os.mkdir("frames")
count = 0
while True:
    ret, frame = video_cap.read()
    if not ret:
        break
    frame_filename = f'frames/frame{count:04d}.jpg'
    cv2.imwrite(frame_filename, frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    count += 1
    print(count)
video_cap.release()
cv2.destroyAllWindows()
import smtplib
def sending(number, email, password, to):
    gmail_user = email
    gmail_password = password
    sent_from = gmail_user
    subject = 'Detected as Lane violation'
    # include the subject as a header so the mail is not delivered subject-less
    email_text = f"Subject: {subject}\n\nVehicle number : {number}"
    try:
        smtp_server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
        smtp_server.ehlo()
        smtp_server.login(gmail_user, gmail_password)
        smtp_server.sendmail(sent_from, to, email_text)
        smtp_server.close()
        print("Email sent successfully!")
    except Exception as ex:
        print("Something went wrong:", ex)
\ No newline at end of file
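# Example call for sending() above (sketch only: the address, app password and
# recipient are placeholders, not values from this project). Gmail over SMTP_SSL
# on port 465 normally expects an app password rather than the account password.
#   sending('WPPF-2969', 'sender@gmail.com', 'app-password', 'recipient@example.com')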
@@ -28,7 +28,10 @@
<h2>High Speed Violation</h2>
<button id="reportHighSpeedViolation"><img src="{{ url_for('static', filename='images/high_speed.png') }}" alt="Report High Speed Violation"></button>
</section>
<section id="redLightViolation">
<h2>Red Light Violation</h2>
<button id="reportRedLightViolation"><img src="{{ url_for('static', filename='images/traffic_light.jpg') }}" alt="Report Red Light Violation"></button>
</section>
<section id="finingSystem"> <section id="finingSystem">
<h2>Fining System</h2> <h2>Fining System</h2>
<button id="reportFiningSystem"><img src="{{ url_for('static', filename='images/fining.png') }}" alt="Open Fining System"></button> <button id="reportFiningSystem"><img src="{{ url_for('static', filename='images/fining.png') }}" alt="Open Fining System"></button>
......
from paddleocr import PaddleOCR,draw_ocr
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
# Paddleocr supports Chinese, English, French, German, Korean and Japanese.
# You can set the parameter `lang` as `ch`, `en`, `fr`, `german`, `korean`, `japan`
# to switch the language model in order.
ocr = PaddleOCR(use_angle_cls=True, lang='en') # need to run only once to download and load model into memory
img_path = 'temporary_cropped.jpg'
result = ocr.ocr(img_path, cls=True)
print(result[0][0][1][0])
\ No newline at end of file