Merge remote-tracking branch 'origin/develop' into dk/user-search

main
Dmitry Kalinin 5 years ago
commit 8c2200356d

@ -5,11 +5,36 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [1.2.0] - Unreleased ## [1.2.0-beta] - Unreleased
### Added
-
### Changed
-
### Deprecated
-
### Removed
-
### Fixed
-
### Security
-
## [1.2.0-alpha] - 2020-11-09
### Added ### Added
- Removed Z-Order flag from task creation process
- Ability to login into CVAT-UI with token from api/v1/auth/login (<https://github.com/openvinotoolkit/cvat/pull/2234>) - Ability to login into CVAT-UI with token from api/v1/auth/login (<https://github.com/openvinotoolkit/cvat/pull/2234>)
- Added layout grids toggling ('ctrl + alt + Enter') - Added layout grids toggling ('ctrl + alt + Enter')
- Added password reset functionality (<https://github.com/opencv/cvat/pull/2058>) - Added password reset functionality (<https://github.com/opencv/cvat/pull/2058>)
@ -29,6 +54,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Ability to upload prepared meta information along with a video when creating a task (<https://github.com/openvinotoolkit/cvat/pull/2217>) - Ability to upload prepared meta information along with a video when creating a task (<https://github.com/openvinotoolkit/cvat/pull/2217>)
- Optional chaining plugin for cvat-canvas and cvat-ui (<https://github.com/openvinotoolkit/cvat/pull/2249>) - Optional chaining plugin for cvat-canvas and cvat-ui (<https://github.com/openvinotoolkit/cvat/pull/2249>)
- MOTS png mask format support (<https://github.com/openvinotoolkit/cvat/pull/2198>) - MOTS png mask format support (<https://github.com/openvinotoolkit/cvat/pull/2198>)
- Ability to correctly upload a video with a rotation record in the metadata (<https://github.com/openvinotoolkit/cvat/pull/2218>)
- User search field for assignee fields (<https://github.com/openvinotoolkit/cvat/pull/2370>) - User search field for assignee fields (<https://github.com/openvinotoolkit/cvat/pull/2370>)
### Changed ### Changed
@ -41,14 +67,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Updated `docker-compose` file version from `2.3` to `3.3`(<https://github.com/openvinotoolkit/cvat/pull/2235>) - Updated `docker-compose` file version from `2.3` to `3.3`(<https://github.com/openvinotoolkit/cvat/pull/2235>)
- Added auto inference of url schema from host in CLI, if provided (<https://github.com/openvinotoolkit/cvat/pull/2240>) - Added auto inference of url schema from host in CLI, if provided (<https://github.com/openvinotoolkit/cvat/pull/2240>)
- Track frames in skips between annotation is presented in MOT and MOTS formats are marked `outside` (<https://github.com/openvinotoolkit/cvat/pull/2198>) - Track frames in skips between annotation is presented in MOT and MOTS formats are marked `outside` (<https://github.com/openvinotoolkit/cvat/pull/2198>)
- UI packages installation with `npm ci` instead of `npm install` (<https://github.com/openvinotoolkit/cvat/pull/2350>)
### Deprecated
-
### Removed ### Removed
- - Removed Z-Order flag from task creation process
### Fixed ### Fixed
@ -70,10 +93,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Cannot read property 'label' of undefined (Fixed?) (<https://github.com/openvinotoolkit/cvat/pull/2311>) - Cannot read property 'label' of undefined (Fixed?) (<https://github.com/openvinotoolkit/cvat/pull/2311>)
- Excluded track frames marked `outside` in `CVAT for Images` export (<https://github.com/openvinotoolkit/cvat/pull/2345>) - Excluded track frames marked `outside` in `CVAT for Images` export (<https://github.com/openvinotoolkit/cvat/pull/2345>)
- 'List of tasks' Kibana visualization (<https://github.com/openvinotoolkit/cvat/pull/2361>) - 'List of tasks' Kibana visualization (<https://github.com/openvinotoolkit/cvat/pull/2361>)
- An error on exporting not `jpg` or `png` images in TF Detection API format (<https://github.com/openvinotoolkit/datumaro/issues/35>)
### Security
-
## [1.1.0] - 2020-08-31 ## [1.1.0] - 2020-08-31

@ -67,9 +67,9 @@ patches and features.
- Install npm packages for UI and start UI debug server (run the following command from CVAT root directory): - Install npm packages for UI and start UI debug server (run the following command from CVAT root directory):
```sh ```sh
npm install && \ npm ci && \
cd cvat-core && npm install && \ cd cvat-core && npm ci && \
cd ../cvat-ui && npm install && npm start cd ../cvat-ui && npm ci && npm start
``` ```
> Note for Mac users > Note for Mac users

@ -25,19 +25,19 @@ RUN npm config set loglevel info
# Install cvat-data dependencies # Install cvat-data dependencies
WORKDIR /tmp/cvat-data/ WORKDIR /tmp/cvat-data/
RUN npm install RUN npm ci
# Install cvat-core dependencies # Install cvat-core dependencies
WORKDIR /tmp/cvat-core/ WORKDIR /tmp/cvat-core/
RUN npm install RUN npm ci
# Install cvat-canvas dependencies # Install cvat-canvas dependencies
WORKDIR /tmp/cvat-canvas/ WORKDIR /tmp/cvat-canvas/
RUN npm install RUN npm ci
# Install cvat-ui dependencies # Install cvat-ui dependencies
WORKDIR /tmp/cvat-ui/ WORKDIR /tmp/cvat-ui/
RUN npm install RUN npm ci
# Build source code # Build source code
COPY cvat-data/ /tmp/cvat-data/ COPY cvat-data/ /tmp/cvat-data/

@ -122,3 +122,7 @@ Other ways to ask questions and get our support:
- [Intel AI blog: New Computer Vision Tool Accelerates Annotation of Digital Images and Video](https://www.intel.ai/introducing-cvat) - [Intel AI blog: New Computer Vision Tool Accelerates Annotation of Digital Images and Video](https://www.intel.ai/introducing-cvat)
- [Intel Software: Computer Vision Annotation Tool: A Universal Approach to Data Annotation](https://software.intel.com/en-us/articles/computer-vision-annotation-tool-a-universal-approach-to-data-annotation) - [Intel Software: Computer Vision Annotation Tool: A Universal Approach to Data Annotation](https://software.intel.com/en-us/articles/computer-vision-annotation-tool-a-universal-approach-to-data-annotation)
- [VentureBeat: Intel open-sources CVAT, a toolkit for data labeling](https://venturebeat.com/2019/03/05/intel-open-sources-cvat-a-toolkit-for-data-labeling/) - [VentureBeat: Intel open-sources CVAT, a toolkit for data labeling](https://venturebeat.com/2019/03/05/intel-open-sources-cvat-a-toolkit-for-data-labeling/)
## Projects using CVAT
- [Onepanel](https://github.com/onepanelio/core) - Onepanel is an open source vision AI platform that fully integrates CVAT with scalable data processing and parallelized training pipelines.

@ -18,7 +18,7 @@ If you make changes in this package, please do following:
- Dependencies installation - Dependencies installation
```bash ```bash
npm install npm ci
``` ```
- Building the module from sources in the `dist` directory: - Building the module from sources in the `dist` directory:

@ -2822,10 +2822,6 @@
}, },
"cvat-data": { "cvat-data": {
"version": "file:../cvat-data", "version": "file:../cvat-data",
"requires": {
"async-mutex": "^0.2.4",
"jszip": "3.5.0"
},
"dependencies": { "dependencies": {
"@babel/cli": { "@babel/cli": {
"version": "7.6.4", "version": "7.6.4",
@ -9637,9 +9633,9 @@
} }
}, },
"detect-browser": { "detect-browser": {
"version": "5.1.1", "version": "5.2.0",
"resolved": "https://registry.npmjs.org/detect-browser/-/detect-browser-5.1.1.tgz", "resolved": "https://registry.npmjs.org/detect-browser/-/detect-browser-5.2.0.tgz",
"integrity": "sha512-5n2aWI57qC3kZaK4j2zYsG6L1LrxgLptGCNhMQgdKhVn6cSdcq43pp6xHPfTHG3TYM6myF4tIPWiZtfdVDgb9w==" "integrity": "sha512-tr7XntDAu50BVENgQfajMLzacmSe34D+qZc4zjnniz0ZVuw/TZcLcyxHQjYpJTM36sGEkZZlYLnIM1hH7alTMA=="
}, },
"detect-file": { "detect-file": {
"version": "1.0.0", "version": "1.0.0",

@ -36,7 +36,7 @@
"axios": "^0.20.0", "axios": "^0.20.0",
"browser-or-node": "^1.2.1", "browser-or-node": "^1.2.1",
"cvat-data": "../cvat-data", "cvat-data": "../cvat-data",
"detect-browser": "^5.0.0", "detect-browser": "^5.2.0",
"error-stack-parser": "^2.0.2", "error-stack-parser": "^2.0.2",
"form-data": "^2.5.0", "form-data": "^2.5.0",
"jest-config": "^24.8.0", "jest-config": "^24.8.0",

@ -20,7 +20,7 @@ you also need to do `npm install` to update `package-lock.json`
- Installing dependencies: - Installing dependencies:
```bash ```bash
cd ../cvat-core && npm install && cd - && npm install cd ../cvat-core && npm ci && cd - && npm ci
``` ```
- Running development UI server with autorebuild on change - Running development UI server with autorebuild on change

@ -1213,9 +1213,9 @@
"dev": true "dev": true
}, },
"@types/react": { "@types/react": {
"version": "16.9.51", "version": "16.9.53",
"resolved": "https://registry.npmjs.org/@types/react/-/react-16.9.51.tgz", "resolved": "https://registry.npmjs.org/@types/react/-/react-16.9.53.tgz",
"integrity": "sha512-lQa12IyO+DMlnSZ3+AGHRUiUcpK47aakMMoBG8f7HGxJT8Yfe+WE128HIXaHOHVPReAW0oDS3KAI0JI2DDe1PQ==", "integrity": "sha512-4nW60Sd4L7+WMXH1D6jCdVftuW7j4Za6zdp6tJ33Rqv0nk1ZAmQKML9ZLD4H0dehA3FZxXR/GM8gXplf82oNGw==",
"requires": { "requires": {
"@types/prop-types": "*", "@types/prop-types": "*",
"csstype": "^3.0.2" "csstype": "^3.0.2"
@ -12879,7 +12879,6 @@
"requires": { "requires": {
"axios": "^0.20.0", "axios": "^0.20.0",
"browser-or-node": "^1.2.1", "browser-or-node": "^1.2.1",
"cvat-data": "file:../cvat-data",
"detect-browser": "^5.0.0", "detect-browser": "^5.0.0",
"error-stack-parser": "^2.0.2", "error-stack-parser": "^2.0.2",
"form-data": "^2.5.0", "form-data": "^2.5.0",
@ -28856,9 +28855,9 @@
} }
}, },
"react": { "react": {
"version": "16.13.1", "version": "16.14.0",
"resolved": "https://registry.npmjs.org/react/-/react-16.13.1.tgz", "resolved": "https://registry.npmjs.org/react/-/react-16.14.0.tgz",
"integrity": "sha512-YMZQQq32xHLX0bz5Mnibv1/LHb3Sqzngu7xstSM+vrkE5Kzr9xE0yMByK5kMoTK30YVJE61WfbxIFFvfeDKT1w==", "integrity": "sha512-0X2CImDkJGApiAlcf0ODKIneSwBPhqJawOa5wCtKbu7ZECrmS26NvtSILynQ66cgkT/RJ4LidJOc3bUESwmU8g==",
"requires": { "requires": {
"loose-envify": "^1.1.0", "loose-envify": "^1.1.0",
"object-assign": "^4.1.1", "object-assign": "^4.1.1",
@ -28889,9 +28888,9 @@
} }
}, },
"react-dom": { "react-dom": {
"version": "16.13.1", "version": "16.14.0",
"resolved": "https://registry.npmjs.org/react-dom/-/react-dom-16.13.1.tgz", "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-16.14.0.tgz",
"integrity": "sha512-81PIMmVLnCNLO/fFOQxdQkvEq/+Hfpv24XNJfpyZhTRfO0QcmQIF/PgCa1zCOj2w1hrn12MFLyaJ/G0+Mxtfag==", "integrity": "sha512-1gCeQXDLoIqMgqD3IO2Ah9bnf0w9kzhwN5q4FGnHZ67hBm9yePzB5JJAIQCc8x3pFnNlwFq4RidZggNAAkzWWw==",
"requires": { "requires": {
"loose-envify": "^1.1.0", "loose-envify": "^1.1.0",
"object-assign": "^4.1.1", "object-assign": "^4.1.1",

@ -49,7 +49,7 @@
"dependencies": { "dependencies": {
"@types/lodash": "^4.14.165", "@types/lodash": "^4.14.165",
"@types/platform": "^1.3.3", "@types/platform": "^1.3.3",
"@types/react": "^16.9.51", "@types/react": "^16.9.53",
"@types/react-color": "^3.0.4", "@types/react-color": "^3.0.4",
"@types/react-dom": "^16.9.0", "@types/react-dom": "^16.9.0",
"@types/react-redux": "^7.1.2", "@types/react-redux": "^7.1.2",
@ -67,10 +67,10 @@
"moment": "^2.29.1", "moment": "^2.29.1",
"platform": "^1.3.6", "platform": "^1.3.6",
"prop-types": "^15.7.2", "prop-types": "^15.7.2",
"react": "^16.13.1", "react": "^16.14.0",
"react-color": "^2.18.1", "react-color": "^2.18.1",
"react-cookie": "^4.0.3", "react-cookie": "^4.0.3",
"react-dom": "^16.13.1", "react-dom": "^16.14.0",
"react-hotkeys": "^2.0.0", "react-hotkeys": "^2.0.0",
"react-redux": "^7.1.1", "react-redux": "^7.1.1",
"react-router": "^5.1.0", "react-router": "^5.1.0",

@ -4,6 +4,6 @@
from cvat.utils.version import get_version from cvat.utils.version import get_version
VERSION = (1, 2, 0, 'alpha', 0) VERSION = (1, 2, 0, 'beta', 0)
__version__ = get_version(VERSION) __version__ = get_version(VERSION)

@ -18,6 +18,7 @@
- [PASCAL VOC and mask](#voc) - [PASCAL VOC and mask](#voc)
- [YOLO](#yolo) - [YOLO](#yolo)
- [TF detection API](#tfrecord) - [TF detection API](#tfrecord)
- [ImageNet](#imagenet)
## How to add a new annotation format support<a id="how-to-add"></a> ## How to add a new annotation format support<a id="how-to-add"></a>
@ -802,3 +803,35 @@ taskname.zip/
``` ```
- supported annotations: Rectangles, Polygons, Masks (as polygons) - supported annotations: Rectangles, Polygons, Masks (as polygons)
### [ImageNet](http://www.image-net.org)<a id="imagenet" />
#### ImageNet Dumper
Downloaded file: a zip archive of the following structure:
```bash
# if we save images:
taskname.zip/
└── label1/
├── label1_image1.jpg
└── label1_image2.jpg
└── label2/
├── label2_image1.jpg
├── label2_image3.jpg
└── label2_image4.jpg
# if we keep only annotation:
taskname.zip/
└── <any_subset_name>.txt
└── synsets.txt
```
- supported annotations: Labels
#### ImageNet Loader
Uploaded file: a zip archive of the structure above
- supported annotations: Labels

@ -0,0 +1,41 @@
# Copyright (C) 2020 Intel Corporation
#
# SPDX-License-Identifier: MIT
import os.path as osp
from glob import glob
import zipfile
from tempfile import TemporaryDirectory
from datumaro.components.project import Dataset
from cvat.apps.dataset_manager.bindings import CvatTaskDataExtractor, \
import_dm_annotations
from cvat.apps.dataset_manager.util import make_zip_archive
from .registry import dm_env, exporter, importer
@exporter(name='ImageNet', ext='ZIP', version='1.0')
def _export(dst_file, task_data, save_images=False):
    """Export task annotations as a zipped ImageNet dataset.

    When images are saved, the folder-per-label 'imagenet' converter is
    used; otherwise the annotation-only 'imagenet_txt' converter is used.
    """
    source = CvatTaskDataExtractor(task_data, include_images=save_images)
    dataset = Dataset.from_extractors(source)  # apply lazy transform
    converter_name = 'imagenet' if save_images else 'imagenet_txt'
    with TemporaryDirectory() as temp_dir:
        converter = dm_env.converters.get(converter_name)
        converter.convert(dataset, save_dir=temp_dir, save_images=save_images)
        make_zip_archive(temp_dir, dst_file)
@importer(name='ImageNet', ext='ZIP', version='1.0')
def _import(src_file, task_data):
    """Import annotations from a zipped ImageNet dataset into the task.

    A .txt file at the archive root indicates the annotation-only
    'imagenet_txt' layout; otherwise the image-folder layout is assumed.
    """
    with TemporaryDirectory() as tmp_dir:
        zipfile.ZipFile(src_file).extractall(tmp_dir)
        has_txt = bool(glob(osp.join(tmp_dir, '*.txt')))
        importer_name = 'imagenet_txt' if has_txt else 'imagenet'
        dataset = dm_env.make_importer(importer_name)(tmp_dir).make_dataset()
        import_dm_annotations(dataset, task_data)

@ -90,4 +90,5 @@ import cvat.apps.dataset_manager.formats.mot
import cvat.apps.dataset_manager.formats.mots import cvat.apps.dataset_manager.formats.mots
import cvat.apps.dataset_manager.formats.pascal_voc import cvat.apps.dataset_manager.formats.pascal_voc
import cvat.apps.dataset_manager.formats.tfrecord import cvat.apps.dataset_manager.formats.tfrecord
import cvat.apps.dataset_manager.formats.yolo import cvat.apps.dataset_manager.formats.yolo
import cvat.apps.dataset_manager.formats.imagenet

@ -269,6 +269,7 @@ class TaskExportTest(_DbTestBase):
'Segmentation mask 1.1', 'Segmentation mask 1.1',
'TFRecord 1.0', 'TFRecord 1.0',
'YOLO 1.1', 'YOLO 1.1',
'ImageNet 1.0',
}) })
def test_import_formats_query(self): def test_import_formats_query(self):
@ -285,6 +286,7 @@ class TaskExportTest(_DbTestBase):
'Segmentation mask 1.1', 'Segmentation mask 1.1',
'TFRecord 1.0', 'TFRecord 1.0',
'YOLO 1.1', 'YOLO 1.1',
'ImageNet 1.0',
}) })
def test_exports(self): def test_exports(self):
@ -320,6 +322,7 @@ class TaskExportTest(_DbTestBase):
('Segmentation mask 1.1', 'voc'), ('Segmentation mask 1.1', 'voc'),
('TFRecord 1.0', 'tf_detection_api'), ('TFRecord 1.0', 'tf_detection_api'),
('YOLO 1.1', 'yolo'), ('YOLO 1.1', 'yolo'),
('ImageNet 1.0', 'imagenet_txt'),
]: ]:
with self.subTest(format=format_name): with self.subTest(format=format_name):
if not dm.formats.registry.EXPORT_FORMATS[format_name].ENABLED: if not dm.formats.registry.EXPORT_FORMATS[format_name].ENABLED:

@ -14,6 +14,7 @@ import av
import numpy as np import numpy as np
from pyunpack import Archive from pyunpack import Archive
from PIL import Image, ImageFile from PIL import Image, ImageFile
from cvat.apps.engine.utils import rotate_image
# fixes: "OSError:broken data stream" when executing line 72 while loading images downloaded from the web # fixes: "OSError:broken data stream" when executing line 72 while loading images downloaded from the web
# see: https://stackoverflow.com/questions/42462431/oserror-broken-data-stream-when-reading-image-file # see: https://stackoverflow.com/questions/42462431/oserror-broken-data-stream-when-reading-image-file
@ -228,6 +229,16 @@ class VideoReader(IMediaReader):
for image in packet.decode(): for image in packet.decode():
frame_num += 1 frame_num += 1
if self._has_frame(frame_num - 1): if self._has_frame(frame_num - 1):
if packet.stream.metadata.get('rotate'):
old_image = image
image = av.VideoFrame().from_ndarray(
rotate_image(
image.to_ndarray(format='bgr24'),
360 - int(container.streams.video[0].metadata.get('rotate'))
),
format ='bgr24'
)
image.pts = old_image.pts
yield (image, self._source_path[0], image.pts) yield (image, self._source_path[0], image.pts)
def __iter__(self): def __iter__(self):
@ -252,7 +263,15 @@ class VideoReader(IMediaReader):
container = self._get_av_container() container = self._get_av_container()
stream = container.streams.video[0] stream = container.streams.video[0]
preview = next(container.decode(stream)) preview = next(container.decode(stream))
return self._get_preview(preview.to_image()) return self._get_preview(preview.to_image() if not stream.metadata.get('rotate') \
else av.VideoFrame().from_ndarray(
rotate_image(
preview.to_ndarray(format='bgr24'),
360 - int(container.streams.video[0].metadata.get('rotate'))
),
format ='bgr24'
).to_image()
)
def get_image_size(self, i): def get_image_size(self, i):
image = (next(iter(self)))[0] image = (next(iter(self)))[0]

@ -6,6 +6,7 @@ import av
from collections import OrderedDict from collections import OrderedDict
import hashlib import hashlib
import os import os
from cvat.apps.engine.utils import rotate_image
class WorkWithVideo: class WorkWithVideo:
def __init__(self, **kwargs): def __init__(self, **kwargs):
@ -24,7 +25,6 @@ class WorkWithVideo:
video_stream.thread_type = 'AUTO' video_stream.thread_type = 'AUTO'
return video_stream return video_stream
class AnalyzeVideo(WorkWithVideo): class AnalyzeVideo(WorkWithVideo):
def check_type_first_frame(self): def check_type_first_frame(self):
container = self._open_video_container(self.source_path, mode='r') container = self._open_video_container(self.source_path, mode='r')
@ -76,7 +76,17 @@ class PrepareInfo(WorkWithVideo):
@property @property
def frame_sizes(self): def frame_sizes(self):
container = self._open_video_container(self.source_path, 'r')
frame = next(iter(self.key_frames.values())) frame = next(iter(self.key_frames.values()))
if container.streams.video[0].metadata.get('rotate'):
frame = av.VideoFrame().from_ndarray(
rotate_image(
frame.to_ndarray(format='bgr24'),
360 - int(container.streams.video[0].metadata.get('rotate'))
),
format ='bgr24'
)
self._close_video_container(container)
return (frame.width, frame.height) return (frame.width, frame.height)
def check_key_frame(self, container, video_stream, key_frame): def check_key_frame(self, container, video_stream, key_frame):
@ -150,6 +160,14 @@ class PrepareInfo(WorkWithVideo):
if frame_number < start_chunk_frame_number: if frame_number < start_chunk_frame_number:
continue continue
elif frame_number < end_chunk_frame_number and not ((frame_number - start_chunk_frame_number) % step): elif frame_number < end_chunk_frame_number and not ((frame_number - start_chunk_frame_number) % step):
if video_stream.metadata.get('rotate'):
frame = av.VideoFrame().from_ndarray(
rotate_image(
frame.to_ndarray(format='bgr24'),
360 - int(container.streams.video[0].metadata.get('rotate'))
),
format ='bgr24'
)
yield frame yield frame
elif (frame_number - start_chunk_frame_number) % step: elif (frame_number - start_chunk_frame_number) % step:
continue continue
@ -177,6 +195,14 @@ class UploadedMeta(PrepareInfo):
container.seek(offset=next(iter(self.key_frames.values())), stream=video_stream) container.seek(offset=next(iter(self.key_frames.values())), stream=video_stream)
for packet in container.demux(video_stream): for packet in container.demux(video_stream):
for frame in packet.decode(): for frame in packet.decode():
if video_stream.metadata.get('rotate'):
frame = av.VideoFrame().from_ndarray(
rotate_image(
frame.to_ndarray(format='bgr24'),
360 - int(container.streams.video[0].metadata.get('rotate'))
),
format ='bgr24'
)
self._close_video_container(container) self._close_video_container(container)
return (frame.width, frame.height) return (frame.width, frame.height)

@ -294,6 +294,7 @@ def _create_thread(tid, data):
if settings.USE_CACHE and db_data.storage_method == StorageMethodChoice.CACHE: if settings.USE_CACHE and db_data.storage_method == StorageMethodChoice.CACHE:
for media_type, media_files in media.items(): for media_type, media_files in media.items():
if not media_files: if not media_files:
continue continue

@ -1553,6 +1553,16 @@ class TaskDataAPITestCase(APITestCase):
video.write(data.read()) video.write(data.read())
cls._image_sizes[filename] = img_sizes cls._image_sizes[filename] = img_sizes
filename = "test_rotated_90_video.mp4"
path = os.path.join(os.path.dirname(__file__), 'assets', 'test_rotated_90_video.mp4')
container = av.open(path, 'r')
for frame in container.decode(video=0):
# pyav ignores rotation record in metadata when decoding frames
img_sizes = [(frame.height, frame.width)] * container.streams.video[0].frames
break
container.close()
cls._image_sizes[filename] = img_sizes
filename = os.path.join("videos", "test_video_1.mp4") filename = os.path.join("videos", "test_video_1.mp4")
path = os.path.join(settings.SHARE_ROOT, filename) path = os.path.join(settings.SHARE_ROOT, filename)
os.makedirs(os.path.dirname(path)) os.makedirs(os.path.dirname(path))
@ -2008,7 +2018,7 @@ class TaskDataAPITestCase(APITestCase):
os.path.join(settings.SHARE_ROOT, "videos") os.path.join(settings.SHARE_ROOT, "videos")
) )
task_spec = { task_spec = {
"name": "my video with meta info task #11", "name": "my video with meta info task #13",
"overlap": 0, "overlap": 0,
"segment_size": 0, "segment_size": 0,
"labels": [ "labels": [
@ -2027,6 +2037,47 @@ class TaskDataAPITestCase(APITestCase):
self._test_api_v1_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.VIDEO, self._test_api_v1_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.VIDEO,
self.ChunkType.VIDEO, image_sizes, StorageMethodChoice.CACHE) self.ChunkType.VIDEO, image_sizes, StorageMethodChoice.CACHE)
task_spec = {
"name": "my cached video task #14",
"overlap": 0,
"segment_size": 0,
"labels": [
{"name": "car"},
{"name": "person"},
]
}
task_data = {
"client_files[0]": open(os.path.join(os.path.dirname(__file__), 'assets', 'test_rotated_90_video.mp4'), 'rb'),
"image_quality": 70,
"use_zip_chunks": True
}
image_sizes = self._image_sizes['test_rotated_90_video.mp4']
self._test_api_v1_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET,
self.ChunkType.VIDEO, image_sizes, StorageMethodChoice.FILE_SYSTEM)
task_spec = {
"name": "my video task #15",
"overlap": 0,
"segment_size": 0,
"labels": [
{"name": "car"},
{"name": "person"},
]
}
task_data = {
"client_files[0]": open(os.path.join(os.path.dirname(__file__), 'assets', 'test_rotated_90_video.mp4'), 'rb'),
"image_quality": 70,
"use_cache": True,
"use_zip_chunks": True
}
image_sizes = self._image_sizes['test_rotated_90_video.mp4']
self._test_api_v1_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET,
self.ChunkType.VIDEO, image_sizes, StorageMethodChoice.CACHE)
def test_api_v1_tasks_id_data_admin(self): def test_api_v1_tasks_id_data_admin(self):
self._test_api_v1_tasks_id_data(self.admin) self._test_api_v1_tasks_id_data(self.admin)
@ -3360,6 +3411,9 @@ class TaskAnnotationAPITestCase(JobAnnotationAPITestCase):
+ polygon_shapes_with_attrs + polygon_shapes_with_attrs
annotations["tags"] = tags_with_attrs + tags_wo_attrs annotations["tags"] = tags_with_attrs + tags_wo_attrs
elif annotation_format == "ImageNet 1.0":
annotations["tags"] = tags_wo_attrs
else: else:
raise Exception("Unknown format {}".format(annotation_format)) raise Exception("Unknown format {}".format(annotation_format))

@ -3,6 +3,7 @@
# SPDX-License-Identifier: MIT # SPDX-License-Identifier: MIT
import ast import ast
import cv2 as cv
from collections import namedtuple from collections import namedtuple
import importlib import importlib
import sys import sys
@ -74,3 +75,16 @@ def av_scan_paths(*paths):
res = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) res = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if res.returncode: if res.returncode:
raise ValidationError(res.stdout) raise ValidationError(res.stdout)
def rotate_image(image, angle):
    """Rotate an image by `angle` degrees, expanding the canvas to fit.

    The output is sized to the rotated image's bounding box so no pixels
    are clipped. Positive angles rotate counter-clockwise (the OpenCV
    convention for getRotationMatrix2D).
    """
    height, width = image.shape[:2]
    center = (width / 2, height / 2)
    transform = cv.getRotationMatrix2D(center, angle, 1.)
    cos = abs(transform[0, 0])
    sin = abs(transform[0, 1])
    # Size of the axis-aligned bounding box of the rotated image.
    bound_w = int(height * sin + width * cos)
    bound_h = int(height * cos + width * sin)
    # Shift the transform so the rotation center maps to the new canvas center.
    transform[0, 2] += bound_w / 2 - center[0]
    transform[1, 2] += bound_h / 2 - center[1]
    return cv.warpAffine(image, transform, (bound_w, bound_h))

@ -44,4 +44,4 @@ tensorflow==2.2.1 # Optional requirement of Datumaro
# archives. Don't use as a python module because it has GPL license. # archives. Don't use as a python module because it has GPL license.
patool==1.12 patool==1.12
diskcache==5.0.2 diskcache==5.0.2
git+https://github.com/openvinotoolkit/datumaro@v0.1.2 git+https://github.com/openvinotoolkit/datumaro@v0.1.3

@ -0,0 +1,155 @@
// Copyright (C) 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
/// <reference types="cypress" />
import { taskName } from '../../support/const';
// Scenario: draw two rectangles on different frames, merge them into one
// track, exercise the keyframe/outside flags, then split the track again.
context('Merge/split features', () => {
    const caseId = '13';
    // Rectangle drawn on the first frame.
    const createRectangleShape2Points = {
        points: 'By 2 Points',
        type: 'Shape',
        switchLabel: false,
        firstX: 250,
        firstY: 350,
        secondX: 350,
        secondY: 450,
    };
    // Rectangle drawn on the third frame, shifted 300px to the right so the
    // interpolated position between keyframes is measurably different.
    const createRectangleShape2PointsSecond = {
        points: 'By 2 Points',
        type: 'Shape',
        switchLabel: false,
        firstX: createRectangleShape2Points.firstX + 300,
        firstY: createRectangleShape2Points.firstY,
        secondX: createRectangleShape2Points.secondX + 300,
        secondY: createRectangleShape2Points.secondY,
    };
    const frameNum = 0;
    // Check the 'X' coordinate. 'Y' coordinate is the same.
    let xCoordinatesObjectFirstFrame = 0;
    let xCoordinatesObjectThirdFrame = 0;

    before(() => {
        cy.openTaskJob(taskName);
    });

    // Navigate the player to a frame and confirm the selector shows it.
    function goCheckFrameNumber(frameNum) {
        cy.get('.cvat-player-frame-selector').within(() => {
            cy.get('input[role="spinbutton"]').clear().type(`${frameNum}{Enter}`).should('have.value', frameNum);
        });
    }

    describe(`Testing case "${caseId}"`, () => {
        it('Create rectangle shape on first frame', () => {
            goCheckFrameNumber(frameNum);
            cy.createRectangle(createRectangleShape2Points);
            cy.get('#cvat_canvas_shape_1')
                .should('have.attr', 'x')
                .then((xCoords) => {
                    // Remember the X position for the interpolation check later.
                    xCoordinatesObjectFirstFrame = Math.floor(xCoords);
                });
        });

        it('Create rectangle shape on third frame with another position', () => {
            goCheckFrameNumber(frameNum + 2);
            cy.createRectangle(createRectangleShape2PointsSecond);
            cy.get('#cvat_canvas_shape_2')
                .should('have.attr', 'x')
                .then((xCoords) => {
                    xCoordinatesObjectThirdFrame = Math.floor(xCoords);
                });
        });

        it('Merge the objects with "Merge button"', () => {
            cy.get('.cvat-merge-control').click();
            cy.get('#cvat_canvas_shape_2').click();
            goCheckFrameNumber(frameNum);
            cy.get('#cvat_canvas_shape_1').click();
            // Second click on the merge control completes the merge.
            cy.get('.cvat-merge-control').click();
        });

        it('Get a track with keyframes on first and third frame', () => {
            // The merged object gets a new id (3) and becomes a track.
            cy.get('#cvat_canvas_shape_3').should('exist').and('be.visible');
            cy.get('#cvat-objects-sidebar-state-item-3')
                .should('contain', '3')
                .and('contain', 'RECTANGLE TRACK')
                .within(() => {
                    cy.get('.cvat-object-item-button-keyframe-enabled').should('exist');
                });
            goCheckFrameNumber(frameNum + 2);
            cy.get('#cvat_canvas_shape_3').should('exist').and('be.visible');
            cy.get('#cvat-objects-sidebar-state-item-3')
                .should('contain', '3')
                .and('contain', 'RECTANGLE TRACK')
                .within(() => {
                    cy.get('.cvat-object-item-button-keyframe-enabled').should('exist');
                });
        });

        it('On the second frame and on the fourth frame the track is invisible', () => {
            goCheckFrameNumber(frameNum + 1);
            cy.get('#cvat_canvas_shape_3').should('exist').and('be.hidden');
            goCheckFrameNumber(frameNum + 3);
            cy.get('#cvat_canvas_shape_3').should('exist').and('be.hidden');
        });

        it('Go to the second frame and remove "outside" flag from the track. The track now visible.', () => {
            goCheckFrameNumber(frameNum + 1);
            cy.get('#cvat-objects-sidebar-state-item-3')
                .should('contain', '3')
                .and('contain', 'RECTANGLE TRACK')
                .within(() => {
                    cy.get('.cvat-object-item-button-outside').click();
                    cy.get('.cvat-object-item-button-outside-enabled').should('not.exist');
                });
            cy.get('#cvat_canvas_shape_3').should('exist').and('be.visible');
        });

        it('Remove "keyframe" flag from the track. Track now interpolated between position on the first and the third frames.', () => {
            cy.get('#cvat-objects-sidebar-state-item-3')
                .should('contain', '3')
                .and('contain', 'RECTANGLE TRACK')
                .within(() => {
                    cy.get('.cvat-object-item-button-keyframe').click();
                    cy.get('.cvat-object-item-button-keyframe-enabled').should('not.exist');
                });
            cy.get('#cvat_canvas_shape_3')
                .should('have.attr', 'x')
                .then((xCoords) => {
                    // The interpolated X must lie between the two keyframe
                    // positions (e.g. expected 9785 to be within 9642..9928).
                    expect(Math.floor(xCoords)).to.be.within(
                        xCoordinatesObjectFirstFrame,
                        xCoordinatesObjectThirdFrame,
                    );
                });
        });

        it('On the fourth frame remove "keyframe" flag from the track. The track now visible and "outside" flag is disabled.', () => {
            goCheckFrameNumber(frameNum + 3);
            cy.get('#cvat-objects-sidebar-state-item-3')
                .should('contain', '3')
                .and('contain', 'RECTANGLE TRACK')
                .within(() => {
                    cy.get('.cvat-object-item-button-keyframe').click();
                    cy.get('.cvat-object-item-button-keyframe-enabled').should('not.exist');
                    cy.get('.cvat-object-item-button-outside-enabled').should('not.exist');
                });
            cy.get('#cvat_canvas_shape_3').should('exist').and('be.visible');
        });

        it('Split a track with "split" button. Previous track became invisible (has "outside" flag). One more track and it is visible.', () => {
            cy.get('.cvat-split-track-control').click();
            // A single click does not reproduce the split-a-track scenario in
            // the Cypress test, so the shape is clicked twice.
            cy.get('#cvat_canvas_shape_3').click().click();
            cy.get('#cvat_canvas_shape_4').should('exist').and('be.hidden');
            cy.get('#cvat-objects-sidebar-state-item-4')
                .should('contain', '4')
                .and('contain', 'RECTANGLE TRACK')
                .within(() => {
                    cy.get('.cvat-object-item-button-outside-enabled').should('exist');
                });
            cy.get('#cvat_canvas_shape_5').should('exist').and('be.visible');
            cy.get('#cvat-objects-sidebar-state-item-5')
                .should('contain', '5')
                .and('contain', 'RECTANGLE TRACK')
                .within(() => {
                    cy.get('.cvat-object-item-button-outside-enabled').should('not.exist');
                    cy.get('.cvat-object-item-button-keyframe-enabled').should('exist');
                });
        });
    });
});

@ -1,8 +1,6 @@
/* // Copyright (C) 2020 Intel Corporation
* Copyright (C) 2020 Intel Corporation //
* // SPDX-License-Identifier: MIT
* SPDX-License-Identifier: MIT
*/
/// <reference types="cypress" /> /// <reference types="cypress" />
@ -30,12 +28,7 @@ context('The highlighted attribute in AAM should correspond to the chosen attrib
cy.createRectangle(createRectangleShape2Points); cy.createRectangle(createRectangleShape2Points);
}); });
it('Go to AAM', () => { it('Go to AAM', () => {
cy.changeAnnotationMode('Attribute annotation'); cy.changeWorkspace('Attribute annotation', labelName);
// Select the necessary label in any case
cy.get('.attribute-annotation-sidebar-basics-editor').within(() => {
cy.get('.ant-select-selection').click();
});
cy.get('.ant-select-dropdown-menu-item').contains(labelName).click();
}); });
it('Check if highlighted attribute correspond to the chosen attribute in right panel', () => { it('Check if highlighted attribute correspond to the chosen attribute in right panel', () => {
cy.get('.cvat_canvas_text').within(() => { cy.get('.cvat_canvas_text').within(() => {

@ -17,11 +17,11 @@ context('Check if the UI not to crash after remove a tag', () => {
describe(`Testing issue "${issueId}"`, () => { describe(`Testing issue "${issueId}"`, () => {
it('Add a tag', () => { it('Add a tag', () => {
cy.changeAnnotationMode('Tag annotation'); cy.changeWorkspace('Tag annotation');
cy.get('.cvat-tag-annotation-sidebar-buttons').within(() => { cy.get('.cvat-tag-annotation-sidebar-buttons').within(() => {
cy.get('button').contains('Add tag').click({ force: true }); cy.get('button').contains('Add tag').click({ force: true });
}); });
cy.changeAnnotationMode('Standard'); cy.changeWorkspace('Standard');
}); });
it('Remove the tag', () => { it('Remove the tag', () => {
cy.get('#cvat-objects-sidebar-state-item-1') cy.get('#cvat-objects-sidebar-state-item-1')

@ -1,8 +1,6 @@
/* // Copyright (C) 2020 Intel Corporation
* Copyright (C) 2020 Intel Corporation //
* // SPDX-License-Identifier: MIT
* SPDX-License-Identifier: MIT
*/
/// <reference types="cypress" /> /// <reference types="cypress" />
@ -39,11 +37,7 @@ context('An error occurs in AAM when switching to 2 frames, if the frames have o
cy.createRectangle(createRectangleShape2PointsSecond); cy.createRectangle(createRectangleShape2PointsSecond);
}); });
it('Go to AAM', () => { it('Go to AAM', () => {
cy.get('.cvat-workspace-selector').click(); cy.changeWorkspace('Attribute annotation', labelName);
cy.get('.ant-select-dropdown-menu-item')
.contains('Attribute annotation')
.click()
.should('contain.text', 'Attribute annotation');
}); });
it('Go to next frame', () => { it('Go to next frame', () => {
cy.get('.cvat-player-next-button').click(); cy.get('.cvat-player-next-button').click();
@ -65,6 +59,7 @@ context('An error occurs in AAM when switching to 2 frames, if the frames have o
}); });
it('Page with the error is missing', () => { it('Page with the error is missing', () => {
cy.contains('Oops, something went wrong').should('not.exist'); cy.contains('Oops, something went wrong').should('not.exist');
cy.changeLabelAAM(labelName);
cy.get('.attribute-annotation-sidebar-object-switcher').should('contain', `${labelName} 2 [2/2]`); cy.get('.attribute-annotation-sidebar-object-switcher').should('contain', `${labelName} 2 [2/2]`);
}); });
}); });

@ -16,5 +16,13 @@ module.exports = (on, config) => {
return null; return null;
}, },
}); });
// Try to resolve "Cypress failed to make a connection to the Chrome DevTools Protocol"
// https://github.com/cypress-io/cypress/issues/7450
on('before:browser:launch', (browser, launchOptions) => {
if (browser.name === 'chrome' && browser.isHeadless) {
launchOptions.args.push('--disable-gpu');
return launchOptions;
}
});
return config; return config;
}; };

@ -1,8 +1,6 @@
/* // Copyright (C) 2020 Intel Corporation
* Copyright (C) 2020 Intel Corporation //
* // SPDX-License-Identifier: MIT
* SPDX-License-Identifier: MIT
*/
/// <reference types="cypress" /> /// <reference types="cypress" />
@ -102,13 +100,14 @@ Cypress.Commands.add('createRectangle', (createRectangleParams) => {
cy.switchLabel(createRectangleParams.labelName); cy.switchLabel(createRectangleParams.labelName);
} }
cy.contains('Draw new rectangle') cy.contains('Draw new rectangle')
.parents('.cvat-draw-shape-popover-content').within(() => { .parents('.cvat-draw-shape-popover-content')
cy.get('.ant-select-selection-selected-value').then(($labelValue) => { .within(() => {
selectedValueGlobal = $labelValue.text(); cy.get('.ant-select-selection-selected-value').then(($labelValue) => {
selectedValueGlobal = $labelValue.text();
});
cy.get('.ant-radio-wrapper').contains(createRectangleParams.points).click();
cy.get('button').contains(createRectangleParams.type).click({ force: true });
}); });
cy.get('.ant-radio-wrapper').contains(createRectangleParams.points).click();
cy.get('button').contains(createRectangleParams.type).click({ force: true });
})
cy.get('.cvat-canvas-container').click(createRectangleParams.firstX, createRectangleParams.firstY); cy.get('.cvat-canvas-container').click(createRectangleParams.firstX, createRectangleParams.firstY);
cy.get('.cvat-canvas-container').click(createRectangleParams.secondX, createRectangleParams.secondY); cy.get('.cvat-canvas-container').click(createRectangleParams.secondX, createRectangleParams.secondY);
if (createRectangleParams.points === 'By 4 Points') { if (createRectangleParams.points === 'By 4 Points') {
@ -124,12 +123,20 @@ Cypress.Commands.add('switchLabel', (labelName) => {
}); });
Cypress.Commands.add('checkObjectParameters', (objectParameters, objectType) => { Cypress.Commands.add('checkObjectParameters', (objectParameters, objectType) => {
cy.get('.cvat-objects-sidebar-state-item').then((objectSidebar) => { let listCanvasShapeId = [];
cy.get(`#cvat_canvas_shape_${objectSidebar.length}`).should('exist').and('be.visible'); cy.document().then((doc) => {
cy.get(`#cvat-objects-sidebar-state-item-${objectSidebar.length}`) const listCanvasShape = Array.from(doc.querySelectorAll('.cvat_canvas_shape'));
.should('contain', objectSidebar.length).and('contain', `${objectType} ${objectParameters.type.toUpperCase()}`).within(() => { for (let i = 0; i < listCanvasShape.length; i++) {
cy.get('.ant-select-selection-selected-value').should('have.text', selectedValueGlobal); listCanvasShapeId.push(listCanvasShape[i].id.match(/\d+$/));
}); }
const maxId = Math.max(...listCanvasShapeId);
cy.get(`#cvat_canvas_shape_${maxId}`).should('exist').and('be.visible');
cy.get(`#cvat-objects-sidebar-state-item-${maxId}`)
.should('contain', maxId)
.and('contain', `${objectType} ${objectParameters.type.toUpperCase()}`)
.within(() => {
cy.get('.ant-select-selection-selected-value').should('have.text', selectedValueGlobal);
});
}); });
}); });
@ -217,10 +224,23 @@ Cypress.Commands.add('closeSettings', () => {
}); });
}); });
Cypress.Commands.add('changeAnnotationMode', (mode) => { Cypress.Commands.add('changeWorkspace', (mode, labelName) => {
cy.get('.cvat-workspace-selector').click(); cy.get('.cvat-workspace-selector').click();
cy.get('.ant-select-dropdown-menu-item').contains(mode).click(); cy.get('.ant-select-dropdown-menu-item').contains(mode).click();
cy.get('.cvat-workspace-selector').should('contain.text', mode); cy.get('.cvat-workspace-selector').should('contain.text', mode);
cy.changeLabelAAM(labelName);
});
Cypress.Commands.add('changeLabelAAM', (labelName) => {
cy.get('.cvat-workspace-selector').then((value) => {
const cvatWorkspaceSelectorValue = value.text();
if (cvatWorkspaceSelectorValue === 'Attribute annotation') {
cy.get('.attribute-annotation-sidebar-basics-editor').within(() => {
cy.get('.ant-select-selection').click();
});
cy.get('.ant-select-dropdown-menu-item').contains(labelName).click();
}
});
}); });
Cypress.Commands.add('createCuboid', (createCuboidParams) => { Cypress.Commands.add('createCuboid', (createCuboidParams) => {

Loading…
Cancel
Save