Commit 2ec1d0b0 authored by thirani

initial commit

parent 29551612
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "gEeqwIvzURY6"
},
"outputs": [],
"source": [
"# Clone the tensorflow models repository from GitHub\n",
"!git clone --depth 1 https://github.com/tensorflow/models"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "fYfA5vpSUWJT"
},
"outputs": [],
"source": [
"# Copy setup files into models/research folder\n",
"%%bash\n",
"cd models/research/\n",
"protoc object_detection/protos/*.proto --python_out=.\n",
"#cp object_detection/packages/tf2/setup.py ."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "UYIhW1oiUYHK"
},
"outputs": [],
"source": [
"# Modify setup.py file to install the tf-models-official repository targeted at TF v2.8.0\n",
"import re\n",
"with open('/content/models/research/object_detection/packages/tf2/setup.py') as f:\n",
" s = f.read()\n",
"\n",
"with open('/content/models/research/setup.py', 'w') as f:\n",
" # Set fine_tune_checkpoint path\n",
" s = re.sub('tf-models-official>=2.5.1',\n",
" 'tf-models-official==2.8.0', s)\n",
" f.write(s)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "MGlN8BqlUZrB"
},
"outputs": [],
"source": [
"# Install the Object Detection API\n",
"!pip install /content/models/research/\n",
"\n",
"# Need to downgrade to TF v2.8.0 due to Colab compatibility bug with TF v2.10 (as of 10/03/22)\n",
"!pip install tensorflow==2.8.0"
]
},
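{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick added sanity check (not in the original notebook): confirm the installed TensorFlow version matches the 2.8.0 pin from the cell above. A runtime restart may be needed before the downgraded version is picked up."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional: verify the TensorFlow version after the downgrade\n",
"import tensorflow as tf\n",
"print(tf.__version__)"
]
},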
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "ubwR2PtKUbqx"
},
"outputs": [],
"source": [
"# Run Model Bulider Test file, just to verify everything's working properly\n",
"!python /content/models/research/object_detection/builders/model_builder_tf2_test.py"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "o1_h8yHBUeQp"
},
"outputs": [],
"source": [
"from google.colab import drive\n",
"drive.mount('/content/gdrive')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "fFznYiQbXJhB"
},
"outputs": [],
"source": [
"!cp /content/gdrive/MyDrive/object_detection_imageset/NEW/images.zip /content"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "4nO__NB8XKZg"
},
"outputs": [],
"source": [
"!unzip -q images.zip"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "P_pXWZH9Xwoh"
},
"outputs": [],
"source": [
"### This creates a a \"labelmap.txt\" file with a list of classes the object detection model will detect.\n",
"%%bash\n",
"cat <<EOF >> /content/labelmap.txt\n",
"cat\n",
"chair\n",
"dog\n",
"knife\n",
"person\n",
"table\n",
"EOF"
]
},
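{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optional added check: print the label map to confirm it contains exactly one class name per line."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional: display the label map contents\n",
"!cat /content/labelmap.txt"
]
},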
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "xPR5edAIX7fA"
},
"outputs": [],
"source": [
"# Download data conversion scripts\n",
"! wget https://raw.githubusercontent.com/th1ran13/object_detection_tensorflow_research/main/utils/create_csv.py"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "_uc7jMmwYjEA"
},
"outputs": [],
"source": [
"# Create CSV data files and TFRecord files\n",
"!python3 create_csv.py"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "XMEA00W09qKS"
},
"outputs": [],
"source": [
"! wget https://raw.githubusercontent.com/th1ran13/object_detection_tensorflow_research/main/utils/create_tfrecord.py"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "MjGecnkW9tS5"
},
"outputs": [],
"source": [
"!python3 create_tfrecord.py --csv_input=images/train_labels.csv --labelmap=labelmap.txt --image_dir=images/train --output_path=train.tfrecord"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "N798NSyc9ulp"
},
"outputs": [],
"source": [
"!python3 create_tfrecord.py --csv_input=images/validation_labels.csv --labelmap=labelmap.txt --image_dir=images/validation --output_path=val.tfrecord"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "YNjCecESupFJ"
},
"source": [
"Add locations for train.tfrecord , val.tfrecord and labelmap"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "cuDVU_U8YpF4"
},
"outputs": [],
"source": [
"train_record_fname = '/content/train.tfrecord'\n",
"val_record_fname = '/content/val.tfrecord'\n",
"label_map_pbtxt_fname = '/content/labelmap.pbtxt'"
]
},
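{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optional added check: confirm the TFRecord files and label map exist before moving on to model configuration. (labelmap.pbtxt is expected to have been generated by the create_tfrecord.py step.)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional: verify the dataset files referenced above actually exist\n",
"import os\n",
"for path in [train_record_fname, val_record_fname, label_map_pbtxt_fname]:\n",
"    print(path, '->', 'found' if os.path.exists(path) else 'MISSING')"
]
},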
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "SsxL_vGYY2Ix"
},
"outputs": [],
"source": [
"# Change the chosen_model variable to deploy different models available in the TF2 object detection zoo\n",
"chosen_model = 'ssd-mobilenet-v2-fpnlite-320'\n",
"\n",
"MODELS_CONFIG = {\n",
" 'ssd-mobilenet-v2': {\n",
" 'model_name': 'ssd_mobilenet_v2_320x320_coco17_tpu-8',\n",
" 'base_pipeline_file': 'ssd_mobilenet_v2_320x320_coco17_tpu-8.config',\n",
" 'pretrained_checkpoint': 'ssd_mobilenet_v2_320x320_coco17_tpu-8.tar.gz',\n",
" },\n",
" 'efficientdet-d0': {\n",
" 'model_name': 'efficientdet_d0_coco17_tpu-32',\n",
" 'base_pipeline_file': 'ssd_efficientdet_d0_512x512_coco17_tpu-8.config',\n",
" 'pretrained_checkpoint': 'efficientdet_d0_coco17_tpu-32.tar.gz',\n",
" },\n",
" 'ssd-mobilenet-v2-fpnlite-320': {\n",
" 'model_name': 'ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8',\n",
" 'base_pipeline_file': 'ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.config',\n",
" 'pretrained_checkpoint': 'ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.tar.gz',\n",
" },\n",
"}\n",
"\n",
"model_name = MODELS_CONFIG[chosen_model]['model_name']\n",
"pretrained_checkpoint = MODELS_CONFIG[chosen_model]['pretrained_checkpoint']\n",
"base_pipeline_file = MODELS_CONFIG[chosen_model]['base_pipeline_file']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "2UXocSoSZvwX"
},
"outputs": [],
"source": [
"# Create \"mymodel\" folder for holding pre-trained weights and configuration files\n",
"%mkdir /content/models/mymodel/\n",
"%cd /content/models/mymodel/\n",
"\n",
"# Download pre-trained model weights\n",
"import tarfile\n",
"download_tar = 'http://download.tensorflow.org/models/object_detection/tf2/20200711/' + pretrained_checkpoint\n",
"!wget {download_tar}\n",
"tar = tarfile.open(pretrained_checkpoint)\n",
"tar.extractall()\n",
"tar.close()\n",
"\n",
"# Download training configuration file for model\n",
"download_config = 'https://raw.githubusercontent.com/tensorflow/models/master/research/object_detection/configs/tf2/' + base_pipeline_file\n",
"!wget {download_config}"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "JIXdvq8JZ1d3"
},
"outputs": [],
"source": [
"# Set training parameters for the model\n",
"num_steps = 30000\n",
"\n",
"if chosen_model == 'efficientdet-d0':\n",
" batch_size = 4\n",
"else:\n",
" batch_size = 32"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "H0iYZ_HlaBdQ",
"outputId": "7a1003d3-6c3d-4076-e8bb-a59f88d570d3"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Total classes: 6\n"
]
}
],
"source": [
"# Set file locations and get number of classes for config file\n",
"pipeline_fname = '/content/models/mymodel/' + base_pipeline_file\n",
"fine_tune_checkpoint = '/content/models/mymodel/' + model_name + '/checkpoint/ckpt-0'\n",
"\n",
"def get_num_classes(pbtxt_fname):\n",
" from object_detection.utils import label_map_util\n",
" label_map = label_map_util.load_labelmap(pbtxt_fname)\n",
" categories = label_map_util.convert_label_map_to_categories(\n",
" label_map, max_num_classes=90, use_display_name=True)\n",
" category_index = label_map_util.create_category_index(categories)\n",
" return len(category_index.keys())\n",
"num_classes = get_num_classes(label_map_pbtxt_fname)\n",
"print('Total classes:', num_classes)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "kO0x6R71aCyf",
"outputId": "57681b59-d66b-484e-91c5-4badc37c7abf"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"/content/models/mymodel\n",
"writing custom configuration file\n"
]
}
],
"source": [
"# Create custom configuration file by writing the dataset, model checkpoint, and training parameters into the base pipeline file\n",
"import re\n",
"\n",
"%cd /content/models/mymodel\n",
"print('writing custom configuration file')\n",
"\n",
"with open(pipeline_fname) as f:\n",
" s = f.read()\n",
"with open('pipeline_file.config', 'w') as f:\n",
" \n",
" # Set fine_tune_checkpoint path\n",
" s = re.sub('fine_tune_checkpoint: \".*?\"',\n",
" 'fine_tune_checkpoint: \"{}\"'.format(fine_tune_checkpoint), s)\n",
" \n",
" # Set tfrecord files for train and test datasets\n",
" s = re.sub(\n",
" '(input_path: \".*?)(PATH_TO_BE_CONFIGURED/train)(.*?\")', 'input_path: \"{}\"'.format(train_record_fname), s)\n",
" s = re.sub(\n",
" '(input_path: \".*?)(PATH_TO_BE_CONFIGURED/val)(.*?\")', 'input_path: \"{}\"'.format(val_record_fname), s)\n",
"\n",
" # Set label_map_path\n",
" s = re.sub(\n",
" 'label_map_path: \".*?\"', 'label_map_path: \"{}\"'.format(label_map_pbtxt_fname), s)\n",
"\n",
" # Set batch_size\n",
" s = re.sub('batch_size: [0-9]+',\n",
" 'batch_size: {}'.format(batch_size), s)\n",
"\n",
" # Set training steps, num_steps\n",
" s = re.sub('num_steps: [0-9]+',\n",
" 'num_steps: {}'.format(num_steps), s)\n",
" \n",
" # Set number of classes num_classes\n",
" s = re.sub('num_classes: [0-9]+',\n",
" 'num_classes: {}'.format(num_classes), s)\n",
"\n",
" # Change fine-tune checkpoint type from \"classification\" to \"detection\"\n",
" s = re.sub(\n",
" 'fine_tune_checkpoint_type: \"classification\"', 'fine_tune_checkpoint_type: \"{}\"'.format('detection'), s)\n",
" \n",
" # If using ssd-mobilenet-v2, reduce learning rate (because it's too high in the default config file)\n",
" if chosen_model == 'ssd-mobilenet-v2':\n",
" s = re.sub('learning_rate_base: .8',\n",
" 'learning_rate_base: .08', s)\n",
" \n",
" s = re.sub('warmup_learning_rate: 0.13333',\n",
" 'warmup_learning_rate: .026666', s)\n",
" \n",
" # If using efficientdet-d0, use fixed_shape_resizer instead of keep_aspect_ratio_resizer (because it isn't supported by TFLite)\n",
" if chosen_model == 'efficientdet-d0':\n",
" s = re.sub('keep_aspect_ratio_resizer', 'fixed_shape_resizer', s)\n",
" s = re.sub('pad_to_max_dimension: true', '', s)\n",
" s = re.sub('min_dimension', 'height', s)\n",
" s = re.sub('max_dimension', 'width', s)\n",
"\n",
" f.write(s)\n"
]
},
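{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optional added check: display the generated config to verify the checkpoint path, TFRecord paths, batch size, and class count were substituted correctly."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional: display the custom configuration file's contents\n",
"!cat /content/models/mymodel/pipeline_file.config"
]
},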
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "wwLMSUgeaSen"
},
"outputs": [],
"source": [
"# Set the path to the custom config file and the directory to store training checkpoints in\n",
"pipeline_file = '/content/models/mymodel/pipeline_file.config'\n",
"model_dir = '/content/training/'"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "md8GnBHeafRx"
},
"outputs": [],
"source": [
"%load_ext tensorboard\n",
"%tensorboard --logdir '/content/training/train'"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "nR3rT1Wkaf4A"
},
"outputs": [],
"source": [
"# Run training!\n",
"!python /content/models/research/object_detection/model_main_tf2.py \\\n",
" --pipeline_config_path={pipeline_file} \\\n",
" --model_dir={model_dir} \\\n",
" --alsologtostderr \\\n",
" --num_train_steps={num_steps} \\\n",
" --sample_1_of_n_eval_examples=1"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "VL6SYpgvhyi2"
},
"outputs": [],
"source": [
"# Make a directory to store the trained TFLite model\n",
"!mkdir /content/custom_model_lite\n",
"output_directory = '/content/custom_model_lite'\n",
"\n",
"# Path to training directory (the conversion script automatically chooses the highest checkpoint file)\n",
"last_model_path = '/content/training'\n",
"\n",
"!python /content/models/research/object_detection/export_tflite_graph_tf2.py \\\n",
" --trained_checkpoint_dir {last_model_path} \\\n",
" --output_directory {output_directory} \\\n",
" --pipeline_config_path {pipeline_file}\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "FyF9giHiBmvb"
},
"outputs": [],
"source": [
"# Convert exported graph file into TFLite model file\n",
"import tensorflow as tf\n",
"\n",
"converter = tf.lite.TFLiteConverter.from_saved_model('/content/custom_model_lite/saved_model')\n",
"tflite_model = converter.convert()\n",
"\n",
"with open('/content/custom_model_lite/detect.tflite', 'wb') as f:\n",
" f.write(tflite_model)"
]
},
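{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optional extra (not part of the original workflow): a minimal sketch of dynamic-range quantization using the standard TFLiteConverter API, which typically shrinks the model file substantially at a small accuracy cost. The quantized model is written to a separate file (detect_quant.tflite, a name chosen here) so it doesn't overwrite detect.tflite."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional: re-convert with dynamic-range quantization for a smaller model file\n",
"import tensorflow as tf\n",
"\n",
"converter = tf.lite.TFLiteConverter.from_saved_model('/content/custom_model_lite/saved_model')\n",
"converter.optimizations = [tf.lite.Optimize.DEFAULT]  # enable dynamic-range quantization\n",
"tflite_quant_model = converter.convert()\n",
"\n",
"with open('/content/custom_model_lite/detect_quant.tflite', 'wb') as f:\n",
"    f.write(tflite_quant_model)"
]
},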
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "17n__h07CQrq"
},
"outputs": [],
"source": [
"# Script to run custom TFLite model on test images to detect objects\n",
"# Source: https://github.com/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi/blob/master/TFLite_detection_image.py\n",
"\n",
"# Import packages\n",
"import os\n",
"import cv2\n",
"import numpy as np\n",
"import sys\n",
"import glob\n",
"import random\n",
"import importlib.util\n",
"from tensorflow.lite.python.interpreter import Interpreter\n",
"\n",
"import matplotlib\n",
"import matplotlib.pyplot as plt\n",
"\n",
"%matplotlib inline\n",
"\n",
"### Define function for inferencing with TFLite model and displaying results\n",
"\n",
"def tflite_detect_images(modelpath, imgpath, lblpath, min_conf=0.5, num_test_images=10, savepath='/content/results', txt_only=False):\n",
"\n",
" # Grab filenames of all images in test folder\n",
" images = glob.glob(imgpath + '/*.jpg') + glob.glob(imgpath + '/*.JPG') + glob.glob(imgpath + '/*.png') + glob.glob(imgpath + '/*.bmp')\n",
"\n",
" # Load the label map into memory\n",
" with open(lblpath, 'r') as f:\n",
" labels = [line.strip() for line in f.readlines()]\n",
"\n",
" # Load the Tensorflow Lite model into memory\n",
" interpreter = Interpreter(model_path=modelpath)\n",
" interpreter.allocate_tensors()\n",
"\n",
" # Get model details\n",
" input_details = interpreter.get_input_details()\n",
" output_details = interpreter.get_output_details()\n",
" height = input_details[0]['shape'][1]\n",
" width = input_details[0]['shape'][2]\n",
"\n",
" float_input = (input_details[0]['dtype'] == np.float32)\n",
"\n",
" input_mean = 127.5\n",
" input_std = 127.5\n",
"\n",
" # Randomly select test images\n",
" images_to_test = random.sample(images, num_test_images)\n",
"\n",
" # Loop over every image and perform detection\n",
" for image_path in images_to_test:\n",
"\n",
" # Load image and resize to expected shape [1xHxWx3]\n",
" image = cv2.imread(image_path)\n",
" image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n",
" imH, imW, _ = image.shape \n",
" image_resized = cv2.resize(image_rgb, (width, height))\n",
" input_data = np.expand_dims(image_resized, axis=0)\n",
"\n",
" # Normalize pixel values if using a floating model (i.e. if model is non-quantized)\n",
" if float_input:\n",
" input_data = (np.float32(input_data) - input_mean) / input_std\n",
"\n",
" # Perform the actual detection by running the model with the image as input\n",
" interpreter.set_tensor(input_details[0]['index'],input_data)\n",
" interpreter.invoke()\n",
"\n",
" # Retrieve detection results\n",
" boxes = interpreter.get_tensor(output_details[1]['index'])[0] # Bounding box coordinates of detected objects\n",
" classes = interpreter.get_tensor(output_details[3]['index'])[0] # Class index of detected objects\n",
" scores = interpreter.get_tensor(output_details[0]['index'])[0] # Confidence of detected objects\n",
"\n",
" detections = []\n",
"\n",
" # Loop over all detections and draw detection box if confidence is above minimum threshold\n",
" for i in range(len(scores)):\n",
" if ((scores[i] > min_conf) and (scores[i] <= 1.0)):\n",
"\n",
" # Get bounding box coordinates and draw box\n",
" # Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()\n",
" ymin = int(max(1,(boxes[i][0] * imH)))\n",
" xmin = int(max(1,(boxes[i][1] * imW)))\n",
" ymax = int(min(imH,(boxes[i][2] * imH)))\n",
" xmax = int(min(imW,(boxes[i][3] * imW)))\n",
" \n",
" cv2.rectangle(image, (xmin,ymin), (xmax,ymax), (10, 255, 0), 2)\n",
"\n",
" # Draw label\n",
" object_name = labels[int(classes[i])] # Look up object name from \"labels\" array using class index\n",
" label = '%s: %d%%' % (object_name, int(scores[i]*100)) # Example: 'person: 72%'\n",
" labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size\n",
" label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window\n",
" cv2.rectangle(image, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in\n",
" cv2.putText(image, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text\n",
"\n",
" detections.append([object_name, scores[i], xmin, ymin, xmax, ymax])\n",
"\n",
" \n",
" # All the results have been drawn on the image, now display the image\n",
" if txt_only == False: # \"text_only\" controls whether we want to display the image results or just save them in .txt files\n",
" image = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)\n",
" plt.figure(figsize=(12,16))\n",
" plt.imshow(image)\n",
" plt.show()\n",
" \n",
" # Save detection results in .txt files (for calculating mAP)\n",
" elif txt_only == True:\n",
"\n",
" # Get filenames and paths\n",
" image_fn = os.path.basename(image_path) \n",
" base_fn, ext = os.path.splitext(image_fn)\n",
" txt_result_fn = base_fn +'.txt'\n",
" txt_savepath = os.path.join(savepath, txt_result_fn)\n",
"\n",
" # Write results to text file\n",
" # (Using format defined by https://github.com/Cartucho/mAP, which will make it easy to calculate mAP)\n",
" with open(txt_savepath,'w') as f:\n",
" for detection in detections:\n",
" f.write('%s %.4f %d %d %d %d\\n' % (detection[0], detection[1], detection[2], detection[3], detection[4], detection[5]))\n",
"\n",
" return"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "TIWjSM5XCk1Z"
},
"outputs": [],
"source": [
"# Set up variables for running user's model\n",
"PATH_TO_IMAGES='/content/images/test' # Path to test images folder\n",
"PATH_TO_MODEL='/content/custom_model_lite/detect.tflite' # Path to .tflite model file\n",
"PATH_TO_LABELS='/content/labelmap.txt' # Path to labelmap.txt file\n",
"min_conf_threshold=0.5 # Confidence threshold (try changing this to 0.01 if you don't see any detection results)\n",
"images_to_test = 10 # Number of images to run detection on"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "a5Ty1dh1CuQS"
},
"outputs": [],
"source": [
"# Run inferencing function!\n",
"tflite_detect_images(PATH_TO_MODEL, PATH_TO_IMAGES, PATH_TO_LABELS, min_conf_threshold, images_to_test)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "qgaXVn6lDTHy"
},
"outputs": [],
"source": [
"%%bash\n",
"git clone https://github.com/Cartucho/mAP /content/mAP\n",
"cd /content/mAP\n",
"rm input/detection-results/* \n",
"rm input/ground-truth/* \n",
"rm input/images-optional/* \n",
"wget https://raw.githubusercontent.com/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi/master/util_scripts/calculate_map_cartucho.py"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "ZDuB0Z4eFKBR"
},
"outputs": [],
"source": [
"!cp /content/images/test/* /content/mAP/input/images-optional # Copy images and xml files\n",
"!mv /content/mAP/input/images-optional/*.xml /content/mAP/input/ground-truth/ # Move xml files to the appropriate folder"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "wNslDG7bFPZi"
},
"outputs": [],
"source": [
"!python /content/mAP/scripts/extra/convert_gt_xml.py"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "KdMY0bEbFZBB"
},
"outputs": [],
"source": [
"# Set up variables for running inference, this time to get detection results saved as .txt files\n",
"PATH_TO_IMAGES='/content/images/test' # Path to test images folder\n",
"PATH_TO_MODEL='/content/custom_model_lite/detect.tflite' # Path to .tflite model file\n",
"PATH_TO_LABELS='/content/labelmap.txt' # Path to labelmap.txt file\n",
"PATH_TO_RESULTS='/content/mAP/input/detection-results' # Folder to save detection results in\n",
"min_conf_threshold=0.1 # Confidence threshold\n",
"\n",
"# Use all the images in the test folder\n",
"image_list = glob.glob(PATH_TO_IMAGES + '/*.jpg') + glob.glob(PATH_TO_IMAGES + '/*.JPG') + glob.glob(PATH_TO_IMAGES + '/*.png') + glob.glob(PATH_TO_IMAGES + '/*.bmp')\n",
"images_to_test = min(7, len(image_list)) # If there are more than 500 images in the folder, just use 500\n",
"\n",
"# Tell function to just save results and not display images\n",
"txt_only = True\n",
"\n",
"# Run inferencing function!\n",
"print('Starting inference on %d images...' % images_to_test)\n",
"tflite_detect_images(PATH_TO_MODEL, PATH_TO_IMAGES, PATH_TO_LABELS, min_conf_threshold, images_to_test, PATH_TO_RESULTS, txt_only)\n",
"print('Finished inferencing!')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "Ep3oC1OLF3Dp"
},
"outputs": [],
"source": [
"%cd /content/mAP\n",
"!python calculate_map_cartucho.py --labels=/content/labelmap.txt"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "ZtRdnccwF3oa"
},
"outputs": [],
"source": [
"# Move labelmap and pipeline config files into TFLite model folder and zip it up\n",
"!cp /content/labelmap.txt /content/custom_model_lite\n",
"!cp /content/labelmap.pbtxt /content/custom_model_lite\n",
"!cp /content/models/mymodel/pipeline_file.config /content/custom_model_lite\n",
"\n",
"%cd /content\n",
"!zip -r custom_model_lite.zip custom_model_lite"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "DF5DMd1PGGtC"
},
"outputs": [],
"source": [
"from google.colab import files\n",
"\n",
"files.download('/content/custom_model_lite.zip')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "UJ2f-ehdGL2R"
},
"outputs": [],
"source": [
"!zip -r training.zip training"
]
}
],
"metadata": {
"accelerator": "GPU",
"colab": {
"provenance": []
},
"gpuClass": "standard",
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 0
}