From 1334efaaf6ebeb3b2309a86afeaeb3d3877347c2 Mon Sep 17 00:00:00 2001
From: Ben Hoff
Date: Mon, 19 Aug 2019 08:09:47 -0400
Subject: [PATCH] added in ability to visually see auto_annotation command line runner (#647)

---
 cvat/apps/auto_annotation/model_manager.py |  4 +-
 utils/auto_annotation/README.md            | 52 ++++++++++++++
 utils/auto_annotation/run_model.py         | 81 +++++++++++++++++++---
 3 files changed, 124 insertions(+), 13 deletions(-)
 create mode 100644 utils/auto_annotation/README.md

diff --git a/cvat/apps/auto_annotation/model_manager.py b/cvat/apps/auto_annotation/model_manager.py
index b15e1fe0..e88689a5 100644
--- a/cvat/apps/auto_annotation/model_manager.py
+++ b/cvat/apps/auto_annotation/model_manager.py
@@ -315,7 +315,7 @@ def run_inference_engine_annotation(data, model_file, weights_file,
             if shape["label"] not in labels_mapping:
                 continue
             db_label = labels_mapping[shape["label"]]
-
+            label_attr_spec = attribute_spec.get(db_label)
             target_container.append({
                 "label_id": db_label,
                 "frame": shape["frame"],
@@ -324,7 +324,7 @@ def run_inference_engine_annotation(data, model_file, weights_file,
                 "z_order": 0,
                 "group": None,
                 "occluded": False,
-                "attributes": process_attributes(shape["attributes"], attribute_spec[db_label]),
+                "attributes": process_attributes(shape["attributes"], label_attr_spec),
             })
 
     result = {
diff --git a/utils/auto_annotation/README.md b/utils/auto_annotation/README.md
new file mode 100644
index 00000000..5dd3ea31
--- /dev/null
+++ b/utils/auto_annotation/README.md
@@ -0,0 +1,52 @@
+# Auto Annotation Runner
+
+A small command-line program for testing and running auto annotation scripts.
+
+## Instructions
+
+Change into the root of the project directory and run:
+
+```shell
+$ python utils/auto_annotation/run_model.py --py /path/to/python/interp.py \
+    --xml /path/to/xml/file.xml \
+    --bin /path/to/bin/file.bin \
+    --json /path/to/json/mapping/mapping.json
+```
+
+Some scripts need to run unrestricted or with administrator privileges. Use the `--unrestricted` flag to simulate this.
+
+You can pass in image files to test the model on real data. Images are passed in as a space-separated list:
+
+```shell
+$ python utils/auto_annotation/run_model.py --py /path/to/python/interp.py \
+    --xml /path/to/xml/file.xml \
+    --bin /path/to/bin/file.bin \
+    --json /path/to/json/mapping/mapping.json \
+    --image-files /path/to/img.jpg /path2/to/img2.png /path/to/img3.jpg
+```
+
+Additionally, it's sometimes useful to visualize the results.
+Use the `--show-images` flag to display each image with its annotations in a pop-up window.
+
+```shell
+$ python utils/auto_annotation/run_model.py --py /path/to/python/interp.py \
+    --xml /path/to/xml/file.xml \
+    --bin /path/to/bin/file.bin \
+    --json /path/to/json/mapping/mapping.json \
+    --image-files /path/to/img.jpg /path2/to/img2.png /path/to/img3.jpg \
+    --show-images
+```
+
+If you want to scan through the results quickly, you can set how long each image is displayed (in milliseconds).
+Use the `--show-image-delay` flag and set the appropriate time.
+
+```shell
+# Display each image in a window for 2 seconds
+$ python utils/auto_annotation/run_model.py --py /path/to/python/interp.py \
+    --xml /path/to/xml/file.xml \
+    --bin /path/to/bin/file.bin \
+    --json /path/to/json/mapping/mapping.json \
+    --image-files /path/to/img.jpg /path2/to/img2.png /path/to/img3.jpg \
+    --show-images \
+    --show-image-delay 2000
+```
diff --git a/utils/auto_annotation/run_model.py b/utils/auto_annotation/run_model.py
index 60499c71..9543a2e5 100644
--- a/utils/auto_annotation/run_model.py
+++ b/utils/auto_annotation/run_model.py
@@ -1,8 +1,8 @@
 import os
-import sys
 import json
 import argparse
-import traceback
+import random
+import logging
 
 os.environ['DJANGO_SETTINGS_MODULE'] = 'cvat.settings.production'
 
@@ -24,12 +24,24 @@ def _get_kwargs():
     parser.add_argument('--restricted', dest='restricted', action='store_true')
     parser.add_argument('--unrestricted', dest='restricted', action='store_false')
     parser.add_argument('--image-files', nargs='*', help='Paths to image files you want to test')
+
+    parser.add_argument('--show-images', action='store_true', help='Show the results of the annotation in a window')
+    parser.add_argument('--show-image-delay', default=0, type=int, help='Displays the images for a set duration in milliseconds, default is until a key is pressed')
 
     return vars(parser.parse_args())
 
 
-class InterpreterError(Exception):
-    pass
+def random_color():
+    rgbl = [255, 0, 0]
+    random.shuffle(rgbl)
+    return tuple(rgbl)
+
+
+def pairwise(iterable):
+    result = []
+    for i in range(0, len(iterable) - 1, 2):
+        result.append((iterable[i], iterable[i+1]))
+    return np.array(result, dtype=np.int32)
 
 
 def main():
@@ -41,27 +53,35 @@ def main():
     xml_file = kwargs['xml']
 
     if not os.path.isfile(py_file):
-        print('Py file not found! Check the path')
+        logging.critical('Py file not found! Check the path')
         return
 
     if not os.path.isfile(bin_file):
-        print('Bin file is not found! Check path!')
+        logging.critical('Bin file is not found! Check path!')
         return
 
     if not os.path.isfile(xml_file):
-        print('XML File not found! Check path!')
+        logging.critical('XML File not found! Check path!')
         return
 
     if not os.path.isfile(mapping_file):
-        print('JSON file is not found! Check path!')
+        logging.critical('JSON file is not found! Check path!')
         return
 
     with open(mapping_file) as json_file:
         mapping = json.load(json_file)
 
+    try:
+        mapping = mapping['label_map']
+    except KeyError:
+        logging.critical("JSON Mapping file must contain key `label_map`!")
+        logging.critical("Exiting")
+        return
+
+    mapping = {int(k): v for k, v in mapping.items()}
+
     restricted = kwargs['restricted']
     image_files = kwargs.get('image_files')
-    print(image_files, kwargs.keys())
 
     if image_files:
         image_data = [cv2.imread(f) for f in image_files]
@@ -69,8 +89,47 @@ def main():
         test_image = np.ones((1024, 1980, 3), np.uint8) * 255
         image_data = [test_image,]
     attribute_spec = {}
-    results = run_inference_engine_annotation(image_data, xml_file, bin_file, mapping, attribute_spec, py_file, restricted=restricted)
-    print('Program Worked!')
+
+    results = run_inference_engine_annotation(image_data,
+                                              xml_file,
+                                              bin_file,
+                                              mapping,
+                                              attribute_spec,
+                                              py_file,
+                                              restricted=restricted)
+
+    logging.warning('Program didn\'t have any errors.')
+    show_images = kwargs.get('show_images', False)
+
+    if show_images:
+        if image_files is None:
+            logging.critical("Warning, no images provided!")
+            logging.critical('Exiting without presenting results')
+            return
+
+        if not results['shapes']:
+            logging.warning(str(results))
+            logging.critical("No objects detected!")
+            return
+
+        show_image_delay = kwargs['show_image_delay']
+        for index, data in enumerate(image_data):
+            for detection in results['shapes']:
+                if not detection['frame'] == index:
+                    continue
+                points = detection['points']
+                # Cv2 doesn't like floats for drawing
+                points = [int(p) for p in points]
+                color = random_color()
+                if detection['type'] == 'rectangle':
+                    cv2.rectangle(data, (points[0], points[1]), (points[2], points[3]), color, 3)
+                elif detection['type'] in ('polygon', 'polyline'):
+                    # polylines is picky about datatypes
+                    points = pairwise(points)
+                    cv2.polylines(data, [points], 1, color)
+            cv2.imshow(str(index), data)
+            cv2.waitKey(show_image_delay)
+            cv2.destroyWindow(str(index))
 
 if __name__ == '__main__':
     main()
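
A note for anyone exercising the new runner: `run_model.py` now requires the `--json` mapping file to carry a top-level `label_map` key, whose keys it converts to integers. The sketch below (Python, to match the project's code) writes a minimal mapping file of that shape; the label IDs and names are illustrative placeholders, not values taken from this patch.

```python
import json

# Hypothetical mapping file in the shape run_model.py expects:
# a top-level "label_map" whose keys the runner converts with int(k).
# The IDs and label names below are placeholders for illustration only.
example_mapping = {
    "label_map": {
        "1": "person",
        "2": "car",
    }
}

with open("mapping.json", "w") as f:
    json.dump(example_mapping, f, indent=2)
```

The resulting `mapping.json` can then be passed to the runner via the `--json` flag shown in the README above.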