Commit cd07d474 authored by supundileepa00's avatar supundileepa00

feat: Added the implementation for the CNN model implemented using TensorFlow.

parent 3f692d5a
{
"cells": [
{
"cell_type": "code",
"execution_count": 9,
"id": "ade37944",
"metadata": {},
"outputs": [],
"source": [
"import tensorflow as tf\n",
"import os\n",
"import cv2\n",
"import numpy as np\n",
"from sklearn.model_selection import train_test_split\n",
"import mediapipe as mp"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "16176bf6",
"metadata": {},
"outputs": [],
"source": [
"IMG_SIZE = 224  # square side length every image is resized to\n",
"BATCH_SIZE = 32  # training batch size\n",
"EPOCHS = 2  # number of training epochs\n",
"DATASET_DIR = './data/Sn_sign_language_dataset'\n",
"# Keep only sub-directories and sort them so class indices are\n",
"# deterministic across runs and filesystems (os.listdir order is\n",
"# arbitrary and would also pick up stray files such as README.md).\n",
"CLASSES = sorted(d for d in os.listdir(DATASET_DIR)\n",
"                 if os.path.isdir(os.path.join(DATASET_DIR, d)))\n",
"NUM_CLASSES = len(CLASSES)  # number of classes\n"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "c9034cbe",
"metadata": {},
"outputs": [],
"source": [
"def load_dataset(dataset_path):\n",
"    \"\"\"Load every class image under dataset_path into memory.\n",
"\n",
"    Walks one sub-directory per entry in the global CLASSES, reads each\n",
"    image with OpenCV, converts BGR->RGB, resizes to IMG_SIZE x IMG_SIZE\n",
"    and normalizes pixel values to [0, 1].\n",
"\n",
"    Returns:\n",
"        data:   float32 ndarray of shape (N, IMG_SIZE, IMG_SIZE, 3)\n",
"        labels: one-hot float32 ndarray of shape (N, NUM_CLASSES)\n",
"    \"\"\"\n",
"    data = []\n",
"    labels = []\n",
"    for class_name in CLASSES:\n",
"        class_path = os.path.join(dataset_path, class_name)\n",
"        if not os.path.isdir(class_path):\n",
"            continue  # skip stray files (e.g. README.md) listed in CLASSES\n",
"        class_index = CLASSES.index(class_name)  # hoisted out of inner loop\n",
"        for img_name in os.listdir(class_path):\n",
"            img_path = os.path.join(class_path, img_name)\n",
"            # cv2.imread returns None (it does not raise) for unreadable\n",
"            # or non-image files; skip those explicitly instead of letting\n",
"            # cvtColor fail with an opaque assertion error.\n",
"            img = cv2.imread(img_path)\n",
"            if img is None:\n",
"                print(f\"Skipping unreadable image {img_path}\")\n",
"                continue\n",
"            try:\n",
"                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # convert color space\n",
"                img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))  # resize image\n",
"            except Exception as e:\n",
"                print(f\"Error loading image {img_path}: {e}\")\n",
"                continue\n",
"            data.append(img)\n",
"            labels.append(class_index)\n",
"    data = np.array(data, dtype=np.float32) / 255.0  # normalize pixel values\n",
"    labels = tf.keras.utils.to_categorical(labels, num_classes=NUM_CLASSES)  # one-hot encode labels\n",
"    return data, labels\n"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "7adb379e",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\eight: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n",
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\eleven_2: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n",
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\eleven_3: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n",
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\fifty_1: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n",
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\fifty_2: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n",
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\fifty_3: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n",
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\five: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n",
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\four & fourteen_2: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n",
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\fourteen_1: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n",
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\fourteen_3: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n",
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\nine: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n",
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\one & ten_2 & eleven_1: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n",
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\README.md: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n",
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\seven: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n",
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\six: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n",
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\ten_1: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n",
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\ten_3: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n",
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\thirteen_1: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n",
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\thirteen_3: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n",
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\thirty_1: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n",
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\thirty_2: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n",
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\thirty_3: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n",
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\three & thirteen_2: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n",
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\twenty_1: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n",
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\twenty_2: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n",
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\twenty_3: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n",
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\two: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n",
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\what: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n",
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\when_1: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n",
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\when_2: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n",
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\when_3: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n",
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\who: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n",
"Error loading image ./data/Sn_sign_language_dataset\\Sn_sign_language_dataset\\why: OpenCV(4.7.0) D:\\a\\opencv-python\\opencv-python\\opencv\\modules\\imgproc\\src\\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'\n",
"\n"
]
}
],
"source": [
"# Load the full dataset into memory, then hold out 20% for validation.\n",
"data, labels = load_dataset('./data/Sn_sign_language_dataset')\n",
"# random_state pins the shuffle so the split is reproducible across runs.\n",
"train_data, val_data, train_labels, val_labels = train_test_split(data, labels, test_size=0.2, random_state=42)\n"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "d44f7806",
"metadata": {},
"outputs": [],
"source": [
"# Simple CNN: three conv/max-pool stages that double the filter count\n",
"# (32 -> 64 -> 128), then a dense head ending in a softmax over the\n",
"# NUM_CLASSES sign-language classes.\n",
"model = tf.keras.Sequential([\n",
"    tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(IMG_SIZE, IMG_SIZE, 3)),\n",
"    tf.keras.layers.MaxPooling2D((2, 2)),\n",
"    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),\n",
"    tf.keras.layers.MaxPooling2D((2, 2)),\n",
"    tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),\n",
"    tf.keras.layers.MaxPooling2D((2, 2)),\n",
"    tf.keras.layers.Flatten(),\n",
"    tf.keras.layers.Dense(128, activation='relu'),\n",
"    tf.keras.layers.Dense(NUM_CLASSES, activation='softmax')\n",
"])\n"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "ff4f0d06",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/2\n",
"287/287 [==============================] - 464s 2s/step - loss: 0.5357 - accuracy: 0.8555 - val_loss: 0.0319 - val_accuracy: 0.9917\n",
"Epoch 2/2\n",
"287/287 [==============================] - 540s 2s/step - loss: 0.0248 - accuracy: 0.9915 - val_loss: 0.0313 - val_accuracy: 0.9900\n"
]
},
{
"data": {
"text/plain": [
"<keras.callbacks.History at 0x166153865b0>"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# categorical_crossentropy matches the one-hot labels produced by\n",
"# load_dataset; accuracy is tracked on both train and validation sets.\n",
"model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n",
"model.fit(train_data, train_labels, epochs=EPOCHS, batch_size=BATCH_SIZE, validation_data=(val_data, val_labels))\n"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "61d6a8d8",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _jit_compiled_convolution_op while saving (showing 3 of 3). These functions will not be directly callable after loading.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"INFO:tensorflow:Assets written to: ./models/model-new\\assets\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:tensorflow:Assets written to: ./models/model-new\\assets\n"
]
}
],
"source": [
"# Persist the trained model (TensorFlow SavedModel directory format).\n",
"model.save('./models/model-new')"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "885678c5",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"1/1 [==============================] - 0s 155ms/step\n",
"nine\n"
]
}
],
"source": [
"# Sanity-check the freshly trained model on a single image.\n",
"img = cv2.imread('./IMG_1132.jpeg')\n",
"# cv2.imread silently returns None when the file is missing or\n",
"# unreadable; fail fast instead of crashing inside cvtColor.\n",
"if img is None:\n",
"    raise FileNotFoundError('Could not read ./IMG_1132.jpeg')\n",
"img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # match the training color space\n",
"img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))\n",
"img = np.array([img], dtype=np.float32) / 255.0  # add batch dim + normalize\n",
"prediction = model.predict(img)\n",
"class_name = CLASSES[np.argmax(prediction)]  # most probable class\n",
"print(class_name)"
]
},
{
"cell_type": "markdown",
"id": "69b66fc1",
"metadata": {},
"source": [
"### Load saved model\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "50944c95",
"metadata": {},
"outputs": [],
"source": [
"# Load the saved model from disk.\n",
"# NOTE(review): the training cell above saved to './models/model-new',\n",
"# but this loads './models/model' -- confirm which path is intended.\n",
"model = tf.keras.models.load_model('./models/model')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "75cef5e3",
"metadata": {},
"outputs": [],
"source": [
"# Predict with the reloaded model on a single image.\n",
"img = cv2.imread('./IMG_14.jpg')\n",
"# cv2.imread silently returns None when the file is missing or\n",
"# unreadable; fail fast instead of crashing inside cvtColor.\n",
"if img is None:\n",
"    raise FileNotFoundError('Could not read ./IMG_14.jpg')\n",
"img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # match the training color space\n",
"img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))\n",
"img = np.array([img], dtype=np.float32) / 255.0  # add batch dim + normalize\n",
"prediction = model.predict(img)\n",
"class_name = CLASSES[np.argmax(prediction)]  # most probable class\n",
"print(class_name)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cea24baa",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import mediapipe as mp\n",
"import numpy as np\n",
"\n",
"# Initialize the MediaPipe hand detection object\n",
"mp_hands = mp.solutions.hands.Hands()\n",
"\n",
"# Lower and upper boundaries of the skin color in the HSV color space\n",
"lower_skin = np.array([0, 20, 70], dtype=np.uint8)\n",
"upper_skin = np.array([20, 255, 255], dtype=np.uint8)\n",
"\n",
"# Initialize the video capture object (default camera)\n",
"cap = cv2.VideoCapture(0)\n",
"\n",
"while True:\n",
"    # Capture a frame from the video feed\n",
"    ret, frame = cap.read()\n",
"    if not ret:\n",
"        # Camera disconnected or stream ended; without this check a None\n",
"        # frame would crash cvtColor below.\n",
"        break\n",
"\n",
"    # Convert the frame to RGB (MediaPipe expects RGB input)\n",
"    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n",
"\n",
"    # Detect hands in the frame using MediaPipe\n",
"    results = mp_hands.process(frame)\n",
"\n",
"    # Check if hands were detected\n",
"    if results.multi_hand_landmarks:\n",
"        for hand_landmarks in results.multi_hand_landmarks:\n",
"            # Convert normalized landmarks to pixel coordinates\n",
"            landmarks = [[int(l.x * frame.shape[1]), int(l.y * frame.shape[0])] for l in hand_landmarks.landmark]\n",
"\n",
"            # Bounding rectangle of the hand\n",
"            x, y, w, h = cv2.boundingRect(np.array(landmarks))\n",
"\n",
"            # Enlarge the bounding rectangle around the hand\n",
"            scale_factor = 1.5\n",
"            w = int(scale_factor * w)\n",
"            h = int(scale_factor * h)\n",
"            # Clamp the origin to >= 0: a negative start index would make\n",
"            # the numpy slice below wrap around and crop the wrong region.\n",
"            x = max(0, int(x - (scale_factor - 1) / 2 * w))\n",
"            y = max(0, int(y - (scale_factor - 1) / 2 * h))\n",
"\n",
"            # Crop the region of interest (ROI) containing the hand\n",
"            roi = frame[y:y+h, x:x+w]\n",
"            if roi.size == 0:\n",
"                continue  # degenerate box (hand at frame edge); cvtColor would fail\n",
"\n",
"            # Threshold the ROI in HSV space to keep only the skin color\n",
"            hsv = cv2.cvtColor(roi, cv2.COLOR_RGB2HSV)\n",
"            mask = cv2.inRange(hsv, lower_skin, upper_skin)\n",
"\n",
"            # Find contours in the mask and outline each candidate region\n",
"            contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n",
"            for contour in contours:\n",
"                cx, cy, cw, ch = cv2.boundingRect(contour)\n",
"                # Rectangle drawn in ROI-relative coords offset back to frame coords\n",
"                cv2.rectangle(frame, (x+cx, y+cy), (x+cx+cw, y+cy+ch), (0, 255, 0), 2)\n",
"\n",
"                # If the 'c' key is pressed, capture the image containing the hand\n",
"                if cv2.waitKey(1) & 0xFF == ord('c'):\n",
"                    # Histogram-equalize the ROI to enhance its quality\n",
"                    roi = cv2.cvtColor(roi, cv2.COLOR_RGB2GRAY)\n",
"                    roi = cv2.equalizeHist(roi)\n",
"                    roi = cv2.cvtColor(roi, cv2.COLOR_GRAY2RGB)\n",
"                    cv2.imwrite('hand_image.jpg', roi)\n",
"\n",
"    # Convert back to BGR for display (cv2.imshow expects BGR)\n",
"    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n",
"    cv2.imshow('Hand Detection', frame)\n",
"\n",
"    # If the 'q' key is pressed, exit the loop\n",
"    if cv2.waitKey(1) & 0xFF == ord('q'):\n",
"        break\n",
"\n",
"# Release the video capture object and close all windows\n",
"cap.release()\n",
"cv2.destroyAllWindows()\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "eb44c84e",
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"import mediapipe as mp\n",
"import numpy as np\n",
"\n",
"# Initialize Mediapipe hand detection model\n",
"mp_hands = mp.solutions.hands\n",
"\n",
"# Drawing utilities for visualizing hand landmarks\n",
"mp_drawing = mp.solutions.drawing_utils\n",
"\n",
"# Initialize camera capture\n",
"capture = cv2.VideoCapture(0)\n",
"\n",
"# Create the detector ONCE, outside the loop: constructing a Hands\n",
"# instance per frame reloads the model graph every iteration.\n",
"with mp_hands.Hands(\n",
"        static_image_mode=True,\n",
"        max_num_hands=1,\n",
"        min_detection_confidence=0.5) as hands:\n",
"\n",
"    # Continuously capture frames from the camera\n",
"    while True:\n",
"        ret, image = capture.read()\n",
"        if not ret:\n",
"            break  # camera gone; avoid passing None to cvtColor\n",
"\n",
"        # Convert image to RGB format (MediaPipe expects RGB)\n",
"        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n",
"\n",
"        # Process image\n",
"        results = hands.process(image)\n",
"\n",
"        # Get landmarks of hand\n",
"        if results.multi_hand_landmarks:\n",
"            landmarks = results.multi_hand_landmarks[0]\n",
"\n",
"            # Landmarks are normalized to [0, 1]; scale them to pixel\n",
"            # coordinates BEFORE taking a bounding box (casting the\n",
"            # normalized values straight to int collapses them to 0).\n",
"            img_h, img_w = image.shape[:2]\n",
"            landmarks_array = np.array(\n",
"                [(lm.x * img_w, lm.y * img_h) for lm in landmarks.landmark])\n",
"\n",
"            # np.int was removed from NumPy 1.24; use a concrete dtype.\n",
"            x, y, w, h = cv2.boundingRect(landmarks_array.astype(np.int32))\n",
"\n",
"            # Crop image to bounding box of the hand\n",
"            cropped_image = image[y:y+h, x:x+w]\n",
"\n",
"            # Draw landmarks on original image\n",
"            mp_drawing.draw_landmarks(\n",
"                image, landmarks, mp_hands.HAND_CONNECTIONS)\n",
"\n",
"            # Display cropped image (skip degenerate crops)\n",
"            if cropped_image.size:\n",
"                cv2.imshow(\"Hand Image\", cropped_image)\n",
"\n",
"        # Display original image (convert back to BGR for imshow)\n",
"        cv2.imshow(\"Hand Detection\", cv2.cvtColor(image, cv2.COLOR_RGB2BGR))\n",
"\n",
"        # Save cropped image to file on 's' key press\n",
"        key = cv2.waitKey(1) & 0xFF\n",
"        if key == ord('s'):\n",
"            cv2.imwrite(\"output.jpg\", cropped_image)\n",
"\n",
"        # Exit on 'q' key\n",
"        elif key == ord('q'):\n",
"            break\n",
"\n",
"# Release camera capture\n",
"capture.release()\n",
"\n",
"# Close all windows\n",
"cv2.destroyAllWindows()\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d0fe6fff",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment