Commit 9bb92e52 authored by @Thilakasiri_M.D.T.S

Update component 4

parent e4821c01
@@ -11,74 +11,86 @@ mp_drawing = mp.solutions.drawing_utils
mp_holistic = mp.solutions.holistic
mp_pose = mp.solutions.pose
mp_drawing.DrawingSpec(color=(0, 0, 255), thickness=2, circle_radius=2)
threshold = 0.25  # nose-to-wrist distance threshold, in normalized image coordinates (shared by both functions below)

def calculate_angle(a, b, c):
    """Return the angle at point b (in degrees) formed by points a-b-c."""
    a = np.array(a)  # First
    b = np.array(b)  # Mid
    c = np.array(c)  # End
    radians = np.arctan2(c[1] - b[1], c[0] - b[0]) - np.arctan2(a[1] - b[1], a[0] - b[0])
    angle = np.abs(radians * 180.0 / np.pi)
    if angle > 180.0:
        angle = 360 - angle
    return angle
def distanceCalculate(p1, p2):
    """p1 and p2 in format (x1, y1) and (x2, y2) tuples; returns the Euclidean distance."""
    dis = ((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2) ** 0.5
    return dis
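Both helpers work on MediaPipe's normalized landmark coordinates (x and y in the 0-1 range), so the 0.25 threshold is a fraction of the frame, not pixels. A minimal sketch of how they behave; the sample coordinates below are made up for illustration and are not part of the committed file:

# Illustrative only: a right-angle elbow and a wrist raised near the nose.
sample_angle = calculate_angle((0.30, 0.40), (0.30, 0.60), (0.50, 0.60))  # -> 90.0 degrees
sample_distance = distanceCalculate((0.50, 0.30), (0.55, 0.45))           # -> ~0.158
print(sample_angle, sample_distance, sample_distance < threshold)         # 90.0 0.158... True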
def get_human_thred():
    # Read from a recorded clip; swap in cv2.VideoCapture(0) to use a live camera.
    cap = cv2.VideoCapture('input_videos/2.mp4')
    ## Setup mediapipe instance
    index_num = 0
    with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:
        while cap.isOpened():
            ret, frame = cap.read()
            if ret:
                # Save the raw frame, then recolor image to RGB for MediaPipe
                cv2.imwrite('images/' + str(index_num) + '.jpg', frame)
                index_num += 1
                image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                image.flags.writeable = False
                # Make detection
                results = pose.process(image)
                # Recolor back to BGR for OpenCV drawing
                image.flags.writeable = True
                image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
                # Extract landmarks
                try:
                    landmarks = results.pose_landmarks.landmark
                    nose = (landmarks[mp_pose.PoseLandmark.NOSE.value].x,
                            landmarks[mp_pose.PoseLandmark.NOSE.value].y)
                    left_wrist = (landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].x,
                                  landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].y)
                    right_wrist = (landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value].x,
                                   landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value].y)
                    distance_left = distanceCalculate(nose, left_wrist)
                    distance_right = distanceCalculate(nose, right_wrist)
                    print('distance - ', distance_left)
                    if distance_left < threshold or distance_right < threshold:
                        print('danger')
                        cv2.putText(image, 'Danger Detection',
                                    tuple(np.multiply(nose, [640, 480]).astype(int)),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2, cv2.LINE_AA
                                    )
                    else:
                        print('no danger')
                        cv2.putText(image, 'No Danger Detection',
                                    tuple(np.multiply(nose, [640, 480]).astype(int)),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2, cv2.LINE_AA
                                    )
                except:
                    pass
                # Render detections
                mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,
                                          mp_drawing.DrawingSpec(color=(245, 117, 66), thickness=2, circle_radius=2),
                                          mp_drawing.DrawingSpec(color=(245, 66, 230), thickness=2, circle_radius=2)
                                          )
                cv2.imshow('CCTV Feed', image)
                if cv2.waitKey(10) & 0xFF == ord('q'):
                    break
            else:
                break
    cap.release()
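The danger rule above reduces to a single comparison on normalized coordinates. A minimal sketch of that decision in isolation; the helper name is hypothetical and not part of the committed file:

def is_danger(nose, left_wrist, right_wrist, thresh=0.25):
    # Hypothetical helper, not in the commit: flags a frame when either wrist
    # comes within `thresh` (normalized units) of the nose.
    return (distanceCalculate(nose, left_wrist) < thresh or
            distanceCalculate(nose, right_wrist) < thresh)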
@@ -88,38 +100,52 @@ def get_human_thred():
def get_human_thred_temp():
    with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:
        # Pick a random sample image and keep its filename for the caller
        sample_imgs = os.listdir('component_4/sample_data')
        filename = 'component_4/sample_data/' + str(random.choice(sample_imgs))
        frame = cv2.imread(filename)
        try:
            # Recolor image to RGB
            image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            image.flags.writeable = False
            # Make detection
            results = pose.process(image)
            # Recolor back to BGR
            image.flags.writeable = True
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            # Extract landmarks
            landmarks = results.pose_landmarks.landmark
            nose = (landmarks[mp_pose.PoseLandmark.NOSE.value].x,
                    landmarks[mp_pose.PoseLandmark.NOSE.value].y)
            left_wrist = (landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].x,
                          landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].y)
            right_wrist = (landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value].x,
                           landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value].y)
            distance_left = distanceCalculate(nose, left_wrist)
            distance_right = distanceCalculate(nose, right_wrist)
            if distance_left < threshold or distance_right < threshold:
                cv2.putText(image, 'Danger Detection',
                            tuple(np.multiply(nose, [640, 480]).astype(int)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2, cv2.LINE_AA
                            )
                cv2.imwrite('component_4/out/out.png', image)
                return True, filename
            else:
                cv2.putText(image, 'No Danger Detection',
                            tuple(np.multiply(nose, [640, 480]).astype(int)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2, cv2.LINE_AA
                            )
                cv2.imwrite('component_4/out/out.png', image)
                return False, filename
        except:
            return False, filename
# get_human_thred()
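A quick way to exercise the sampled variant and its (flag, filename) return value; how the wider project actually consumes this pair is an assumption here:

# Illustrative driver, not part of the commit.
danger, sampled_file = get_human_thred_temp()
print('danger detected:', danger, 'from sample:', sampled_file)
# When a pose is found, the annotated image is saved to component_4/out/out.png.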