Add LFW format (#3770)

main
Kirill Sizov 4 years ago committed by GitHub
parent 4bdaf3c083
commit cc801b21ed

@@ -16,6 +16,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Add a tutorial on attaching cloud storage AWS-S3 (<https://github.com/openvinotoolkit/cvat/pull/3745>)
  and Azure Blob Container (<https://github.com/openvinotoolkit/cvat/pull/3778>)
- The feature to remove annotations in a specified range of frames (<https://github.com/openvinotoolkit/cvat/pull/3617>)
- Add LFW format (<https://github.com/openvinotoolkit/cvat/pull/3770>)
- Add Cityscapes format (<https://github.com/openvinotoolkit/cvat/pull/3758>)
- Add Open Images V6 format (<https://github.com/openvinotoolkit/cvat/pull/3679>)

@@ -0,0 +1,32 @@
# Copyright (C) 2021 Intel Corporation
#
# SPDX-License-Identifier: MIT

from tempfile import TemporaryDirectory

from datumaro.components.dataset import Dataset
from pyunpack import Archive

from cvat.apps.dataset_manager.bindings import (GetCVATDataExtractor,
    import_dm_annotations)
from cvat.apps.dataset_manager.util import make_zip_archive

from .registry import dm_env, exporter, importer


@importer(name='LFW', ext='ZIP', version='1.0')
def _import(src_file, instance_data):
    # Unpack the uploaded archive, load it as a Datumaro LFW dataset and
    # convert the annotations into CVAT's internal representation.
    with TemporaryDirectory() as tmp_dir:
        Archive(src_file.name).extractall(tmp_dir)

        dataset = Dataset.import_from(tmp_dir, 'lfw')
        import_dm_annotations(dataset, instance_data)


@exporter(name='LFW', ext='ZIP', version='1.0')
def _exporter(dst_file, instance_data, save_images=False):
    # Export the task data in the LFW format and pack the result into a zip archive.
    dataset = Dataset.from_extractors(GetCVATDataExtractor(instance_data,
        include_images=save_images), env=dm_env)
    with TemporaryDirectory() as tmp_dir:
        dataset.export(tmp_dir, format='lfw', save_images=save_images)

        make_zip_archive(tmp_dir, dst_file)
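For reference, a minimal standalone sketch of the same Datumaro round trip these handlers perform; the input path is a placeholder, and the snippet assumes a Datumaro installation that provides the 'lfw' plugin:

```python
# Hypothetical round trip outside CVAT: read an unpacked LFW archive with
# Datumaro and re-export it to a temporary directory in the same format.
from tempfile import TemporaryDirectory

from datumaro.components.dataset import Dataset

with TemporaryDirectory() as tmp_dir:
    # 'path/to/extracted/lfw' is a placeholder for an unpacked LFW archive
    dataset = Dataset.import_from('path/to/extracted/lfw', 'lfw')
    dataset.export(tmp_dir, format='lfw', save_images=False)
```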

@@ -121,6 +121,6 @@ import cvat.apps.dataset_manager.formats.market1501
import cvat.apps.dataset_manager.formats.icdar
import cvat.apps.dataset_manager.formats.velodynepoint
import cvat.apps.dataset_manager.formats.pointcloud
import cvat.apps.dataset_manager.formats.lfw
import cvat.apps.dataset_manager.formats.cityscapes
import cvat.apps.dataset_manager.formats.openimages

@@ -1198,6 +1198,32 @@
        ],
        "tracks": []
    },
    "LFW 1.0": {
        "version": 0,
        "tags": [
            {
                "frame": 0,
                "label_id": null,
                "group": 0,
                "source": "manual",
                "attributes": []
            }
        ],
        "shapes": [
            {
                "type": "points",
                "occluded": false,
                "z_order": 0,
                "points": [18.0, 8.0, 26.5, 17.7, 26.5, 23.7, 16.9, 23.2, 30.3, 28.0],
                "frame": 0,
                "label_id": null,
                "group": 0,
                "source": "manual",
                "attributes": []
            }
        ],
        "tracks": []
    },
    "Cityscapes 1.0": {
        "version": 0,
        "tags": [],

@@ -296,6 +296,7 @@ class TaskExportTest(_DbTestBase):
            'ICDAR Segmentation 1.0',
            'Kitti Raw Format 1.0',
            'Sly Point Cloud Format 1.0',
            'LFW 1.0',
            'Cityscapes 1.0',
            'Open Images V6 1.0'
        })
@@ -324,10 +325,11 @@ class TaskExportTest(_DbTestBase):
            'ICDAR Segmentation 1.0',
            'Kitti Raw Format 1.0',
            'Sly Point Cloud Format 1.0',
            'LFW 1.0',
            'Cityscapes 1.0',
            'Open Images V6 1.0',
            'Datumaro 1.0',
            'Datumaro 3D 1.0',
        })

    def test_exports(self):
@@ -374,6 +376,7 @@ class TaskExportTest(_DbTestBase):
            ('ICDAR Recognition 1.0', 'icdar_word_recognition'),
            ('ICDAR Localization 1.0', 'icdar_text_localization'),
            ('ICDAR Segmentation 1.0', 'icdar_text_segmentation'),
            ('LFW 1.0', 'lfw'),
            # ('Cityscapes 1.0', 'cityscapes'), does not support, empty annotations
        ]:
            with self.subTest(format=format_name):

@@ -1030,11 +1030,11 @@ class TaskDumpUploadTest(_DbTestBase):
        # create annotations
        if dump_format_name in [
            "MOT 1.1", "MOTS PNG 1.0",
            "PASCAL VOC 1.1", "Segmentation mask 1.1",
            "TFRecord 1.0", "YOLO 1.1", "ImageNet 1.0",
            "WiderFace 1.0", "VGGFace2 1.0", "LFW 1.0",
            "Open Images V6 1.0", "Datumaro 1.0"
        ]:
            self._create_annotations(task, dump_format_name, "default")
        else:

@@ -4818,6 +4818,10 @@ class TaskAnnotationAPITestCase(JobAnnotationAPITestCase):
            annotations["shapes"] = rectangle_shapes_wo_attrs \
                + polygon_shapes_wo_attrs
        elif annotation_format == "LFW 1.0":
            annotations["shapes"] = points_wo_attrs \
                + tags_wo_attrs
        elif annotation_format == "Market-1501 1.0":
            tags_with_attrs = [{
                "frame": 1,

@@ -0,0 +1,77 @@
---
linkTitle: 'LFW'
weight: 17
---
# [LFW](http://vis-www.cs.umass.edu/lfw/)
- The format specification is available [here](http://vis-www.cs.umass.edu/lfw/README.txt)
- Supported annotations: tags, points.
- Supported attributes (an example label configuration is shown after this list):
  - `negative_pairs` (should be defined for labels as a `text` attribute):
    a list of image names of mismatched persons.
  - `positive_pairs` (should be defined for labels as a `text` attribute):
    a list of image names of matched persons.
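For illustration, a label with these attributes could be defined through CVAT's raw label editor roughly as follows; the label name `name1` is a placeholder, and the exact set of fields is an assumption about the raw label schema rather than part of the LFW specification:

```json
[
  {
    "name": "name1",
    "attributes": [
      {
        "name": "positive_pairs",
        "input_type": "text",
        "mutable": false,
        "default_value": "",
        "values": [""]
      },
      {
        "name": "negative_pairs",
        "input_type": "text",
        "mutable": false,
        "default_value": "",
        "values": [""]
      }
    ]
  }
]
```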
# Import LFW annotation
The uploaded annotations file should be a zip file with the following structure:
```bash
<archive_name>.zip/
└── annotations/
    ├── landmarks.txt # list of landmark points for each image
    ├── pairs.txt # list of matched and mismatched pairs of persons
    └── people.txt # optional file with a list of person names
```
Full information about the contents of the annotation files is available
[here](http://vis-www.cs.umass.edu/lfw/README.txt).
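As a rough sketch of what `pairs.txt` contains (the names and indices below are invented; the LFW README linked above is the authoritative reference): in the original LFW layout the file starts with a header giving the number of splits and pairs per split, matched pairs are written as `name idx1 idx2`, and mismatched pairs as `name1 idx1 name2 idx2`, with tab-separated fields:

```text
10	300
name1	1	2
name2	3	4
name1	1	name2	1
name3	2	name4	1
```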
# Export LFW annotation
Downloaded file: a zip archive with the following structure:
```bash
<archive_name>.zip/
├── images/ # if the Save images option was selected
│   ├── name1/
│   │   ├── name1_0001.jpg
│   │   ├── name1_0002.jpg
│   │   ├── ...
│   ├── name2/
│   │   ├── name2_0001.jpg
│   │   ├── name2_0002.jpg
│   │   ├── ...
│   ├── ...
├── landmarks.txt
├── pairs.txt
└── people.txt
```
# Example: create a task with images and upload LFW annotations into it
This is one of the possible ways to create a task and add LFW annotations to it.
- On the task creation page:
  - Add labels that correspond to the names of the persons.
  - For each label, define `text` attributes named `positive_pairs` and
    `negative_pairs` (see the example label configuration above).
  - Add images using a zip archive from the local repository:
```bash
images.zip/
├── name1_0001.jpg
├── name1_0002.jpg
├── ...
├── name1_<N>.jpg
├── name2_0001.jpg
├── ...
```
- On the annotation page:
  Upload annotation -> LFW 1.0 -> choose the archive with the structure
  described in the [import section](#import-lfw-annotation).