fix auto annotation to not eat all RAM (#1328)

Co-authored-by: Nikita Manovich <40690625+nmanovic@users.noreply.github.com>
Ben Hoff authored 6 years ago, committed by GitHub
parent 176dc718a2
commit 8a2efa4da3

@@ -55,6 +55,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - React UI is the primary UI

 ### Fixed
+- Cleaned up memory in Auto Annotation to enable long running tasks on videos
 - New shape is added when press ``esc`` when drawing instead of cancellation
 - Dextr segmentation doesn't work.
 - `FileNotFoundError` during dump after moving format files
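The fix below replaces the single-pass `run_inference_engine_annotation` function with an `InferenceAnnotationRunner` class that pulls frames from an iterator and infers them in chunks of 128, handing each partial result back to the caller instead of accumulating detections for the whole video. A minimal sketch of that chunking pattern, independent of the CVAT classes (the `infer` and `handle_chunk` callables are placeholders):

```python
# Sketch of the chunked-consumption pattern used by the new runner:
# only `chunk_size` frames' worth of detections are alive at any time.
import itertools

def run_in_chunks(frames, infer, handle_chunk, chunk_size=128):
    frames = iter(frames)
    while True:
        chunk = list(itertools.islice(frames, chunk_size))
        if not chunk:
            break
        handle_chunk([infer(frame) for frame in chunk])  # persist, then drop

# hypothetical usage:
#   run_in_chunks(video_frames, infer=model.infer, handle_chunk=save_chunk)
```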

@@ -1,6 +1,6 @@
-import itertools
 from .model_loader import ModelLoader
 from cvat.apps.engine.utils import import_modules, execute_python_code
+import itertools

 def _process_detections(detections, path_to_conv_script, restricted=True):
     results = Results()
@@ -31,6 +31,17 @@ def _process_detections(detections, path_to_conv_script, restricted=True):

     return results

+def _process_attributes(shape_attributes, label_attr_spec):
+    attributes = []
+    for attr_text, attr_value in shape_attributes.items():
+        if attr_text in label_attr_spec:
+            attributes.append({
+                "spec_id": label_attr_spec[attr_text],
+                "value": attr_value,
+            })
+
+    return attributes
+
 class Results():
     def __init__(self):
         self._results = {
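`_process_attributes` is the old nested `process_attributes` helper hoisted to module level so the new runner class can reuse it. It keeps only the attributes that exist in the label's spec. For example (values below are made up; the import path is an assumption):

```python
from cvat.apps.auto_annotation.inference import _process_attributes  # assumed module path

# Hypothetical spec: attribute name -> attribute spec id in the CVAT database
label_attr_spec = {"color": 17, "quality": 18}

# Attributes reported by the interpretation script for one shape
shape_attributes = {"color": "red", "licence_plate": "x123"}

print(_process_attributes(shape_attributes, label_attr_spec))
# -> [{'spec_id': 17, 'value': 'red'}]  (unknown attributes are dropped silently)
```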
@@ -84,25 +95,62 @@ class Results():
            "attributes": attributes or {},
        }

-def run_inference_engine_annotation(data, model_file, weights_file,
-    labels_mapping, attribute_spec, convertation_file, job=None, update_progress=None, restricted=True):
-    def process_attributes(shape_attributes, label_attr_spec):
-        attributes = []
-        for attr_text, attr_value in shape_attributes.items():
-            if attr_text in label_attr_spec:
-                attributes.append({
-                    "spec_id": label_attr_spec[attr_text],
-                    "value": attr_value,
-                })
-
-        return attributes
-
-    def add_shapes(shapes, target_container):
+class InferenceAnnotationRunner:
+    def __init__(self, data, model_file, weights_file, labels_mapping,
+        attribute_spec, convertation_file):
+        self.data = iter(data)
+        self.data_len = len(data)
+        self.model = ModelLoader(model=model_file, weights=weights_file)
+        self.frame_counter = 0
+        self.attribute_spec = attribute_spec
+        self.convertation_file = convertation_file
+        self.iteration_size = 128
+        self.labels_mapping = labels_mapping
+
+    def run(self, job=None, update_progress=None, restricted=True):
+        result = {
+            "shapes": [],
+            "tracks": [],
+            "tags": [],
+            "version": 0
+        }
+
+        detections = []
+        for _ in range(self.iteration_size):
+            try:
+                frame = next(self.data)
+            except StopIteration:
+                break
+
+            orig_rows, orig_cols = frame.shape[:2]
+
+            detections.append({
+                "frame_id": self.frame_counter,
+                "frame_height": orig_rows,
+                "frame_width": orig_cols,
+                "detections": self.model.infer(frame),
+            })
+
+            self.frame_counter += 1
+            if job and update_progress and not update_progress(job, self.frame_counter * 100 / self.data_len):
+                return None, False
+
+        processed_detections = _process_detections(detections, self.convertation_file, restricted=restricted)
+
+        self._add_shapes(processed_detections.get_shapes(), result["shapes"])
+
+        more_items = self.frame_counter != self.data_len
+
+        return result, more_items
+
+    def _add_shapes(self, shapes, target_container):
         for shape in shapes:
-            if shape["label"] not in labels_mapping:
+            if shape["label"] not in self.labels_mapping:
                 continue

-            db_label = labels_mapping[shape["label"]]
-            label_attr_spec = attribute_spec.get(db_label)
+            db_label = self.labels_mapping[shape["label"]]
+            label_attr_spec = self.attribute_spec.get(db_label)
             target_container.append({
                 "label_id": db_label,
                 "frame": shape["frame"],
@@ -111,38 +159,5 @@ def run_inference_engine_annotation(data, model_file, weights_file,
                 "z_order": 0,
                 "group": None,
                 "occluded": False,
-                "attributes": process_attributes(shape["attributes"], label_attr_spec),
+                "attributes": _process_attributes(shape["attributes"], label_attr_spec),
             })
-
-    result = {
-        "shapes": [],
-        "tracks": [],
-        "tags": [],
-        "version": 0
-    }
-
-    data_len = len(data)
-    model = ModelLoader(model=model_file, weights=weights_file)
-
-    frame_counter = 0
-
-    detections = []
-    for frame in data:
-        orig_rows, orig_cols = frame.shape[:2]
-
-        detections.append({
-            "frame_id": frame_counter,
-            "frame_height": orig_rows,
-            "frame_width": orig_cols,
-            "detections": model.infer(frame),
-        })
-
-        frame_counter += 1
-        if job and update_progress and not update_progress(job, frame_counter * 100 / data_len):
-            return None
-
-    processed_detections = _process_detections(detections, convertation_file, restricted=restricted)
-
-    add_shapes(processed_detections.get_shapes(), result["shapes"])
-
-    return result
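Taken together, the new API is: construct `InferenceAnnotationRunner` once, then call `run()` repeatedly until it reports that no frames are left. A hedged usage sketch (the import path, file paths, labels, and frame source are placeholders; the real caller is `run_inference_thread`, shown further below):

```python
from cvat.apps.auto_annotation.inference import InferenceAnnotationRunner  # assumed path

frames = ...  # sized iterable of numpy images (e.g. the ImageLoader used below)

runner = InferenceAnnotationRunner(
    data=frames,
    model_file="/path/to/model.xml",        # placeholder paths
    weights_file="/path/to/model.bin",
    labels_mapping={"person": 1},           # model label -> DB label id
    attribute_spec={1: {}},                 # DB label id -> {attr name: spec id}
    convertation_file="/path/to/interp.py")

more_data = True
while more_data:
    # each call infers at most runner.iteration_size (128) frames
    result, more_data = runner.run(restricted=True)
    if result is None:                      # progress callback asked to cancel
        break
    # persist result["shapes"] here so the finished chunk can be freed
```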

@@ -10,7 +10,6 @@ import platform

 _IE_PLUGINS_PATH = os.getenv("IE_PLUGINS_PATH", None)

-
 def _check_instruction(instruction):
     return instruction == str.strip(
         subprocess.check_output(
@@ -24,7 +23,7 @@ def make_plugin_or_core():
     use_core_openvino = False
     try:
         major, minor, reference = [int(x) for x in version.split('.')]
-        if major >= 2 and minor >= 1 and reference >= 37988:
+        if major >= 2 and minor >= 1:
             use_core_openvino = True
     except Exception:
         pass
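The version gate in `make_plugin_or_core` is relaxed: any 2.1+ OpenVINO build now sets `use_core_openvino`, instead of additionally requiring build number 37988 or later. A small illustration of the comparison (the helper name is made up; the parsing mirrors the code above):

```python
def _core_supported(version):  # hypothetical helper mirroring the check above
    try:
        major, minor, reference = [int(x) for x in version.split('.')]
        return major >= 2 and minor >= 1
    except Exception:
        return False

print(_core_supported("2.1.37988"))  # True before and after the change
print(_core_supported("2.1.100"))    # True only after the change
print(_core_supported("1.5.12"))     # False either way
```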

@@ -23,7 +23,7 @@ from cvat.apps.engine.frame_provider import FrameProvider
 from .models import AnnotationModel, FrameworkChoice
 from .model_loader import load_labelmap
 from .image_loader import ImageLoader
-from .inference import run_inference_engine_annotation
+from .inference import InferenceAnnotationRunner


 def _remove_old_file(model_file_field):
@@ -44,15 +44,15 @@ def _update_dl_model_thread(dl_model_id, name, is_shared, model_file, weights_fi
    test_image = np.ones((1024, 1980, 3), np.uint8) * 255
    try:
        dummy_labelmap = {key: key for key in load_labelmap(labelmap_file).keys()}
-        run_inference_engine_annotation(
+        runner = InferenceAnnotationRunner(
            data=[test_image,],
            model_file=model_file,
            weights_file=weights_file,
            labels_mapping=dummy_labelmap,
            attribute_spec={},
-            convertation_file=interpretation_file,
-            restricted=restricted
-        )
+            convertation_file=interpretation_file)
+
+        runner.run(restricted=restricted)
    except Exception as e:
        return False, str(e)
@@ -227,30 +227,32 @@ def run_inference_thread(tid, model_file, weights_file, labels_mapping, attribut
        result = None
        slogger.glob.info("auto annotation with openvino toolkit for task {}".format(tid))
-        result = run_inference_engine_annotation(
+        more_data = True
+        runner = InferenceAnnotationRunner(
            data=ImageLoader(FrameProvider(db_task.data)),
            model_file=model_file,
            weights_file=weights_file,
            labels_mapping=labels_mapping,
            attribute_spec=attributes,
-            convertation_file= convertation_file,
-            job=job,
-            update_progress=update_progress,
-            restricted=restricted
-        )
-
-        if result is None:
-            slogger.glob.info("auto annotation for task {} canceled by user".format(tid))
-            return
-
-        serializer = LabeledDataSerializer(data = result)
-        if serializer.is_valid(raise_exception=True):
-            if reset:
-                put_task_data(tid, user, result)
-            else:
-                patch_task_data(tid, user, result, "create")
+            convertation_file= convertation_file)
+        while more_data:
+            result, more_data = runner.run(
+                job=job,
+                update_progress=update_progress,
+                restricted=restricted)
+
+            if result is None:
+                slogger.glob.info("auto annotation for task {} canceled by user".format(tid))
+                return
+
+            serializer = LabeledDataSerializer(data = result)
+            if serializer.is_valid(raise_exception=True):
+                if reset:
+                    put_task_data(tid, user, result)
+                else:
+                    patch_task_data(tid, user, result, "create")

        slogger.glob.info("auto annotation for task {} done".format(tid))
    except Exception as e:
        try:
            slogger.task[tid].exception("exception was occurred during auto annotation of the task", exc_info=True)
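`run()` keeps the same cancellation contract as the old function: it calls `update_progress(job, percent)` after every inferred frame and stops as soon as the callback returns a falsy value, now yielding `(None, False)` so the `while more_data` loop above logs the cancellation and returns. A hedged sketch of a compatible callback (the job metadata keys are assumptions, not CVAT's actual ones):

```python
def update_progress(job, progress):
    # Re-read metadata that another process (e.g. a cancel request handler)
    # may have written; refresh()/meta/save_meta() are standard RQ job APIs.
    job.refresh()
    if job.meta.get("cancel_requested"):    # assumed key, not CVAT's real one
        return False                        # falsy -> runner returns (None, False)
    job.meta["progress"] = progress         # percentage of frames processed
    job.save_meta()
    return True
```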
