diff --git a/CHANGELOG.md b/CHANGELOG.md
index cabd689f..f3bac4bc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -14,10 +14,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Shortcuts for outside/keyframe properties
 - OpenVINO for accelerated model inference
 - Tensorflow annotation now works without CUDA. It can use CPU only. OpenVINO and CUDA are supported optionally.
+- Incremental saving, client ID field for all annotated objects.

 ### Changed
 - Polyshape editing method has been improved. You can redraw part of shape instead of points cloning.
 - Unified shortcut (Esc) for close any mode instead of different shortcuts (Alt+N, Alt+G, Alt+M etc.).
+- Dump file contains information about the data source (e.g. video name, archive name, ...)

 ### Fixed
 - Performance bottleneck has been fixed during you create new objects (draw, copy, merge etc).
diff --git a/cvat/apps/engine/annotation.py b/cvat/apps/engine/annotation.py
index 4488e245..1e51a9c0 100644
--- a/cvat/apps/engine/annotation.py
+++ b/cvat/apps/engine/annotation.py
@@ -1437,7 +1437,7 @@ def _dump(tid, data_format, scheme, host):
     db_task = models.Task.objects.select_for_update().get(id=tid)
     annotation = _AnnotationForTask(db_task)
     annotation.init_from_db()
-    annotation.dump(data_format, db_task, scheme, host)
+    annotation.dump(data_format, scheme, host)

 def _calc_box_area(box):
     return (box.xbr - box.xtl) * (box.ybr - box.ytl)
@@ -1816,7 +1816,7 @@
                 # We don't have old boxes on the frame. Let's add all new ones.
                 self.boxes.extend(int_boxes_by_frame[frame])

-    def dump(self, data_format, db_task, scheme, host):
+    def dump(self, data_format, scheme, host):
         def _flip_box(box, im_w, im_h):
             box.xbr, box.xtl = im_w - box.xtl, im_w - box.xbr
             box.ybr, box.ytl = im_h - box.ytl, im_h - box.ybr
@@ -1836,6 +1836,7 @@
                 shape.points = ' '.join(['{},{}'.format(point['x'], point['y'])
                     for point in points])

+        db_task = self.db_task
         db_segments = db_task.segment_set.all().prefetch_related('job_set')
         db_labels = db_task.label_set.all().prefetch_related('attributespec_set')
         im_meta_data = get_image_meta_cache(db_task)
@@ -1851,6 +1852,7 @@
                 ("flipped", str(db_task.flipped)),
                 ("created", str(timezone.localtime(db_task.created_date))),
                 ("updated", str(timezone.localtime(db_task.updated_date))),
+                ("source", db_task.source),

                 ("labels", [
                     ("label", OrderedDict([
@@ -1878,19 +1880,19 @@
             ("dumped", str(timezone.localtime(timezone.now())))
         ])

-        if self.db_task.mode == "interpolation":
+        if db_task.mode == "interpolation":
             meta["task"]["original_size"] = OrderedDict([
                 ("width", str(im_meta_data["original_size"][0]["width"])),
                 ("height", str(im_meta_data["original_size"][0]["height"]))
             ])

-        dump_path = self.db_task.get_dump_path()
+        dump_path = db_task.get_dump_path()
         with open(dump_path, "w") as dump_file:
             dumper = _XmlAnnotationWriter(dump_file)
             dumper.open_root()
             dumper.add_meta(meta)

-        if self.db_task.mode == "annotation":
+        if db_task.mode == "annotation":
             shapes = {}
             shapes["boxes"] = {}
             shapes["polygons"] = {}
@@ -1925,7 +1927,7 @@
                 list(shapes["polylines"].keys()) +
                 list(shapes["points"].keys()))):

-                link = get_frame_path(self.db_task.id, frame)
+                link = get_frame_path(db_task.id, frame)
                 path = os.readlink(link)
                 rpath = path.split(os.path.sep)
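Note: with the new ("source", db_task.source) entry in the meta OrderedDict, the dumped XML exposes the data source under the task metadata. A minimal sketch of how a consumer might read it back; the <annotations>/<meta>/<task> layout is assumed from the usual CVAT dump format, and read_dump_source is a hypothetical helper, not CVAT code:

    import xml.etree.ElementTree as ET

    def read_dump_source(dump_path):
        # Parse a dumped annotation file and return the new <source> value,
        # or None for dumps produced before this change.
        root = ET.parse(dump_path).getroot()
        node = root.find("./meta/task/source")
        return node.text if node is not None else None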
diff --git a/cvat/apps/engine/migrations/0011_add_task_source_and_safecharfield.py b/cvat/apps/engine/migrations/0011_add_task_source_and_safecharfield.py
new file mode 100644
index 00000000..bb96c1b5
--- /dev/null
+++ b/cvat/apps/engine/migrations/0011_add_task_source_and_safecharfield.py
@@ -0,0 +1,74 @@
+# Generated by Django 2.0.9 on 2018-10-24 10:50
+
+import cvat.apps.engine.models
+from django.db import migrations
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('engine', '0010_auto_20181011_1517'),
+    ]
+
+    operations = [
+        migrations.AddField(
+            model_name='task',
+            name='source',
+            field=cvat.apps.engine.models.SafeCharField(default='unknown', max_length=256),
+        ),
+        migrations.AlterField(
+            model_name='label',
+            name='name',
+            field=cvat.apps.engine.models.SafeCharField(max_length=64),
+        ),
+        migrations.AlterField(
+            model_name='labeledboxattributeval',
+            name='value',
+            field=cvat.apps.engine.models.SafeCharField(max_length=64),
+        ),
+        migrations.AlterField(
+            model_name='labeledpointsattributeval',
+            name='value',
+            field=cvat.apps.engine.models.SafeCharField(max_length=64),
+        ),
+        migrations.AlterField(
+            model_name='labeledpolygonattributeval',
+            name='value',
+            field=cvat.apps.engine.models.SafeCharField(max_length=64),
+        ),
+        migrations.AlterField(
+            model_name='labeledpolylineattributeval',
+            name='value',
+            field=cvat.apps.engine.models.SafeCharField(max_length=64),
+        ),
+        migrations.AlterField(
+            model_name='objectpathattributeval',
+            name='value',
+            field=cvat.apps.engine.models.SafeCharField(max_length=64),
+        ),
+        migrations.AlterField(
+            model_name='task',
+            name='name',
+            field=cvat.apps.engine.models.SafeCharField(max_length=256),
+        ),
+        migrations.AlterField(
+            model_name='trackedboxattributeval',
+            name='value',
+            field=cvat.apps.engine.models.SafeCharField(max_length=64),
+        ),
+        migrations.AlterField(
+            model_name='trackedpointsattributeval',
+            name='value',
+            field=cvat.apps.engine.models.SafeCharField(max_length=64),
+        ),
+        migrations.AlterField(
+            model_name='trackedpolygonattributeval',
+            name='value',
+            field=cvat.apps.engine.models.SafeCharField(max_length=64),
+        ),
+        migrations.AlterField(
+            model_name='trackedpolylineattributeval',
+            name='value',
+            field=cvat.apps.engine.models.SafeCharField(max_length=64),
+        ),
+    ]
diff --git a/cvat/apps/engine/models.py b/cvat/apps/engine/models.py
index 608cdafb..3e16f8fa 100644
--- a/cvat/apps/engine/models.py
+++ b/cvat/apps/engine/models.py
@@ -14,9 +14,15 @@ from io import StringIO
 import re
 import os

+class SafeCharField(models.CharField):
+    def get_prep_value(self, value):
+        value = super().get_prep_value(value)
+        if value:
+            return value[:self.max_length]
+        return value

 class Task(models.Model):
-    name = models.CharField(max_length=256)
+    name = SafeCharField(max_length=256)
     size = models.PositiveIntegerField()
     path = models.CharField(max_length=256)
     mode = models.CharField(max_length=32)
@@ -28,6 +34,7 @@ class Task(models.Model):
     overlap = models.PositiveIntegerField(default=0)
     z_order = models.BooleanField(default=False)
     flipped = models.BooleanField(default=False)
+    source = SafeCharField(max_length=256, default="unknown")

     # Extend default permission model
     class Meta:
@@ -78,7 +85,7 @@ class Job(models.Model):

 class Label(models.Model):
     task = models.ForeignKey(Task, on_delete=models.CASCADE)
-    name = models.CharField(max_length=64)
+    name = SafeCharField(max_length=64)

     def __str__(self):
         return self.name
@@ -130,7 +137,7 @@ class AttributeVal(models.Model):
     # TODO: add a validator here to be sure that it corresponds to self.label
     id = models.BigAutoField(primary_key=True)
     spec = models.ForeignKey(AttributeSpec, on_delete=models.CASCADE)
-    value = models.CharField(max_length=64)
+    value = SafeCharField(max_length=64)

     class Meta:
         abstract = True
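Note: SafeCharField overrides get_prep_value(), the hook Django calls when converting a Python value into a query parameter, so over-long task names, label names, and attribute values are silently clipped to max_length instead of failing at the database. A self-contained sketch of that behaviour; TruncatingField is a stand-in defined here so the snippet runs without a configured Django project:

    class TruncatingField:
        # Stand-in mirroring SafeCharField.get_prep_value() from models.py above.
        def __init__(self, max_length):
            self.max_length = max_length

        def get_prep_value(self, value):
            if value:
                return value[:self.max_length]
            return value

    field = TruncatingField(max_length=64)
    assert field.get_prep_value("x" * 100) == "x" * 64  # clipped instead of raising a DB error
    assert field.get_prep_value("") == ""               # falsy values pass through unchanged
    assert field.get_prep_value(None) is None

The trade-off is silent truncation on write: values longer than max_length are stored clipped rather than rejected.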
diff --git a/cvat/apps/engine/task.py b/cvat/apps/engine/task.py
index a242cf76..c8e165b9 100644
--- a/cvat/apps/engine/task.py
+++ b/cvat/apps/engine/task.py
@@ -498,6 +498,8 @@ def _find_and_unpack_archive(upload_dir):
     else:
         raise Exception('Type defined as archive, but archives were not found.')

+    return archive
+
 '''
     Search a video in upload dir and split it by frames. Copy frames to target dirs
 '''
@@ -525,6 +527,8 @@ def _find_and_extract_video(upload_dir, output_dir, db_task, compress_quality, f
     else:
         raise Exception("Video files were not found")

+    return video
+
 '''
     Recursive search for all images in upload dir and compress it to RGB jpg with specified quality. Create symlinks for them.
 '''
@@ -565,11 +569,14 @@
     else:
         raise Exception("Image files were not found")

+    return filenames
+
 def _save_task_to_db(db_task, task_params):
     db_task.overlap = min(db_task.size, task_params['overlap'])
     db_task.mode = task_params['mode']
     db_task.z_order = task_params['z_order']
     db_task.flipped = task_params['flip']
+    db_task.source = task_params['data']

     segment_step = task_params['segment'] - db_task.overlap
     for x in range(0, db_task.size, segment_step):
@@ -638,10 +645,11 @@ def _create_thread(tid, params):
         job.save_meta()
         _copy_data_from_share(share_files_mapping, share_dirs_mapping)

+    archive = None
     if counters['archive']:
         job.meta['status'] = 'Archive is being unpacked..'
         job.save_meta()
-        _find_and_unpack_archive(upload_dir)
+        archive = _find_and_unpack_archive(upload_dir)

     # Define task mode and other parameters
     task_params = {
@@ -657,9 +665,18 @@
     slogger.glob.info("Task #{} parameters: {}".format(tid, task_params))

     if task_params['mode'] == 'interpolation':
-        _find_and_extract_video(upload_dir, output_dir, db_task, task_params['compress'], task_params['flip'], job)
+        video = _find_and_extract_video(upload_dir, output_dir, db_task,
+            task_params['compress'], task_params['flip'], job)
+        task_params['data'] = os.path.relpath(video, upload_dir)
     else:
-        _find_and_compress_images(upload_dir, output_dir, db_task, task_params['compress'], task_params['flip'], job)
+        files = _find_and_compress_images(upload_dir, output_dir, db_task,
+            task_params['compress'], task_params['flip'], job)
+        if archive:
+            task_params['data'] = os.path.relpath(archive, upload_dir)
+        else:
+            task_params['data'] = '{} images: {}, ...'.format(len(files),
+                ", ".join([os.path.relpath(x, upload_dir) for x in files[0:2]]))
+
     slogger.glob.info("Founded frames {} for task #{}".format(db_task.size, tid))

     job.meta['status'] = 'Task is being saved in database'
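Note: the _create_thread() changes above build the human-readable source string that _save_task_to_db() now stores in db_task.source: the video path for interpolation tasks, the archive path when one was unpacked, otherwise a short summary of the image files. A standalone sketch of that formatting; describe_source and the paths are hypothetical, not CVAT code:

    import os

    def describe_source(upload_dir, video=None, archive=None, files=()):
        # Mirrors the three branches added to _create_thread() above.
        if video:
            return os.path.relpath(video, upload_dir)
        if archive:
            return os.path.relpath(archive, upload_dir)
        return '{} images: {}, ...'.format(len(files),
            ", ".join(os.path.relpath(x, upload_dir) for x in files[0:2]))

    # Example with made-up paths:
    print(describe_source("/data/upload",
        files=["/data/upload/a/img_001.jpg", "/data/upload/a/img_002.jpg",
               "/data/upload/a/img_003.jpg"]))
    # -> "3 images: a/img_001.jpg, a/img_002.jpg, ..."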