Project: export as a dataset (#3365)

main
Dmitry Kalinin 5 years ago committed by GitHub
parent 59af610f12
commit f18b1cb82d

@ -5,8 +5,7 @@ on:
- 'master'
- 'develop'
pull_request:
branches:
- '*'
jobs:
Unit_testing:
runs-on: ubuntu-latest

@ -10,6 +10,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Added
- Notification if the browser does not support the necessary API
- Added ability to export project as a dataset (<https://github.com/openvinotoolkit/cvat/pull/3365>)
- Additional inline tips in interactors with demo gifs (<https://github.com/openvinotoolkit/cvat/pull/3473>)
### Changed

@ -1,6 +1,6 @@
{
"name": "cvat-core",
"version": "3.13.3",
"version": "3.14.0",
"lockfileVersion": 1,
"requires": true,
"dependencies": {

@ -1,6 +1,6 @@
{
"name": "cvat-core",
"version": "3.13.3",
"version": "3.14.0",
"description": "Part of Computer Vision Tool which presents an interface for client-side integration",
"main": "babel.config.js",
"scripts": {

@ -1,4 +1,4 @@
// Copyright (C) 2019-2020 Intel Corporation
// Copyright (C) 2019-2021 Intel Corporation
//
// SPDX-License-Identifier: MIT
@ -8,8 +8,9 @@
const AnnotationsSaver = require('./annotations-saver');
const AnnotationsHistory = require('./annotations-history');
const { checkObjectType } = require('./common');
const { Task } = require('./session');
const { Loader, Dumper } = require('./annotation-formats');
const { Project } = require('./project');
const { Task, Job } = require('./session');
const { Loader } = require('./annotation-formats');
const { ScriptingError, DataError, ArgumentError } = require('./exceptions');
const jobCache = new WeakMap();
@ -50,6 +51,7 @@
stopFrame,
frameMeta,
});
// eslint-disable-next-line no-unsanitized/method
collection.import(rawAnnotations);
const saver = new AnnotationsSaver(rawAnnotations.version, collection, session);
@ -232,27 +234,12 @@
await serverProxy.annotations.uploadAnnotations(sessionType, session.id, file, loader.name);
}
async function dumpAnnotations(session, name, dumper) {
if (!(dumper instanceof Dumper)) {
throw new ArgumentError('A dumper must be instance of Dumper class');
}
let result = null;
const sessionType = session instanceof Task ? 'task' : 'job';
if (sessionType === 'job') {
result = await serverProxy.annotations.dumpAnnotations(session.task.id, name, dumper.name);
} else {
result = await serverProxy.annotations.dumpAnnotations(session.id, name, dumper.name);
}
return result;
}
function importAnnotations(session, data) {
const sessionType = session instanceof Task ? 'task' : 'job';
const cache = getCache(sessionType);
if (cache.has(session)) {
// eslint-disable-next-line no-unsanitized/method
return cache.get(session).collection.import(data);
}
@ -274,16 +261,25 @@
);
}
async function exportDataset(session, format) {
async function exportDataset(instance, format, name, saveImages = false) {
if (!(format instanceof String || typeof format === 'string')) {
throw new ArgumentError('Format must be a string');
}
if (!(session instanceof Task)) {
throw new ArgumentError('A dataset can only be created from a task');
if (!(instance instanceof Task || instance instanceof Project || instance instanceof Job)) {
throw new ArgumentError('A dataset can only be created from a job, task or project');
}
if (typeof saveImages !== 'boolean') {
throw new ArgumentError('Save images parameter must be a boolean');
}
let result = null;
result = await serverProxy.tasks.exportDataset(session.id, format);
if (instance instanceof Task) {
result = await serverProxy.tasks.exportDataset(instance.id, format, name, saveImages);
} else if (instance instanceof Job) {
result = await serverProxy.tasks.exportDataset(instance.task.id, format, name, saveImages);
} else {
result = await serverProxy.projects.exportDataset(instance.id, format, name, saveImages);
}
return result;
}
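For orientation, a minimal usage sketch of the reworked helper; the task id and format name below are placeholders, not part of the diff:

```ts
// Sketch under assumed names: core is an initialized cvat-core instance.
const [task] = await core.tasks.get({ id: 42 }); // hypothetical task id
// saveImages = false hits the /annotations endpoint (labels only)...
const annoUrl = await task.annotations.exportDataset('CVAT for images 1.1', false, 'my-export');
// ...while saveImages = true hits /dataset and bundles the images too.
const datasetUrl = await task.annotations.exportDataset('CVAT for images 1.1', true, 'my-export');
```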
@ -367,7 +363,6 @@
annotationsStatistics,
selectObject,
uploadAnnotations,
dumpAnnotations,
importAnnotations,
exportAnnotations,
exportDataset,

@ -18,6 +18,7 @@ function build() {
const Review = require('./review');
const { Job, Task } = require('./session');
const { Project } = require('./project');
const implementProject = require('./project-implementation');
const { Attribute, Label } = require('./labels');
const MLModel = require('./ml-model');
const { FrameData } = require('./frames');
@ -754,7 +755,7 @@ function build() {
*/
classes: {
User,
Project,
Project: implementProject(Project),
Task,
Job,
Log,

@ -0,0 +1,74 @@
// Copyright (C) 2021 Intel Corporation
//
// SPDX-License-Identifier: MIT
(() => {
const serverProxy = require('./server-proxy');
const { getPreview } = require('./frames');
const { Project } = require('./project');
const { exportDataset } = require('./annotations');
function implementProject(projectClass) {
projectClass.prototype.save.implementation = async function () {
const trainingProjectCopy = this.trainingProject;
if (typeof this.id !== 'undefined') {
// the project has already been created; we only need to update some data
const projectData = {
name: this.name,
assignee_id: this.assignee ? this.assignee.id : null,
bug_tracker: this.bugTracker,
labels: [...this._internalData.labels.map((el) => el.toJSON())],
};
if (trainingProjectCopy) {
projectData.training_project = trainingProjectCopy;
}
await serverProxy.projects.save(this.id, projectData);
return this;
}
// initial creation
const projectSpec = {
name: this.name,
labels: [...this.labels.map((el) => el.toJSON())],
};
if (this.bugTracker) {
projectSpec.bug_tracker = this.bugTracker;
}
if (trainingProjectCopy) {
projectSpec.training_project = trainingProjectCopy;
}
const project = await serverProxy.projects.create(projectSpec);
return new Project(project);
};
projectClass.prototype.delete.implementation = async function () {
const result = await serverProxy.projects.delete(this.id);
return result;
};
projectClass.prototype.preview.implementation = async function () {
if (!this._internalData.task_ids.length) {
return '';
}
const frameData = await getPreview(this._internalData.task_ids[0]);
return frameData;
};
projectClass.prototype.annotations.exportDataset.implementation = async function (
format, saveImages, customName,
) {
const result = exportDataset(this, format, customName, saveImages);
return result;
};
return projectClass;
}
module.exports = implementProject;
})();
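The implementation lives in this separate module and is attached in cvat-core.js (`Project: implementProject(Project)`), presumably to avoid a circular require between project.js and annotations.js. A sketch of the resulting API, with a hypothetical project id:

```ts
const [project] = await core.projects.get({ id: 7 }); // hypothetical project id
// After this change a project exports exactly like a task or job does:
const url = await project.annotations.exportDataset('COCO 1.0', true, 'project-7-dataset');
```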

@ -4,11 +4,9 @@
(() => {
const PluginRegistry = require('./plugins');
const serverProxy = require('./server-proxy');
const { ArgumentError } = require('./exceptions');
const { Task } = require('./session');
const { Label } = require('./labels');
const { getPreview } = require('./frames');
const User = require('./user');
/**
@ -203,7 +201,7 @@
},
},
/**
* Tasks linked with the project
* Tasks related to the project
* @name tasks
* @type {module:API.cvat.classes.Task[]}
* @memberof module:API.cvat.classes.Project
@ -214,7 +212,7 @@
get: () => [...data.tasks],
},
/**
* Subsets array for linked tasks
* Subsets array for related tasks
* @name subsets
* @type {string[]}
* @memberof module:API.cvat.classes.Project
@ -254,6 +252,13 @@
},
}),
);
// When we call a function such as project.annotations.exportDataset(),
// `this` inside the method no longer refers to the project instance,
// so we bind the method back to the instance here
this.annotations = {
exportDataset: Object.getPrototypeOf(this).annotations.exportDataset.bind(this),
};
}
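A tiny illustration (not from the diff) of the context problem this binding solves:

```ts
// Without the bind above, detaching the method would lose `this`:
const { exportDataset } = project.annotations;
await exportDataset('COCO 1.0', true); // `this` would be undefined here
// With the bind, both detached and direct calls keep the project context:
await project.annotations.exportDataset('COCO 1.0', true);
```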
/**
@ -289,7 +294,7 @@
}
/**
* Method deletes a task from a server
* Method deletes a project from a server
* @method delete
* @memberof module:API.cvat.classes.Project
* @readonly
@ -304,57 +309,28 @@
}
}
Object.defineProperties(
Project.prototype,
Object.freeze({
annotations: Object.freeze({
value: {
async exportDataset(format, saveImages, customName = '') {
const result = await PluginRegistry.apiWrapper.call(
this,
Project.prototype.annotations.exportDataset,
format,
saveImages,
customName,
);
return result;
},
},
writable: true,
}),
}),
);
module.exports = {
Project,
};
Project.prototype.save.implementation = async function () {
const trainingProjectCopy = this.trainingProject;
if (typeof this.id !== 'undefined') {
// project has been already created, need to update some data
const projectData = {
name: this.name,
assignee_id: this.assignee ? this.assignee.id : null,
bug_tracker: this.bugTracker,
labels: [...this._internalData.labels.map((el) => el.toJSON())],
};
if (trainingProjectCopy) {
projectData.training_project = trainingProjectCopy;
}
await serverProxy.projects.save(this.id, projectData);
return this;
}
// initial creating
const projectSpec = {
name: this.name,
labels: [...this.labels.map((el) => el.toJSON())],
};
if (this.bugTracker) {
projectSpec.bug_tracker = this.bugTracker;
}
if (trainingProjectCopy) {
projectSpec.training_project = trainingProjectCopy;
}
const project = await serverProxy.projects.create(projectSpec);
return new Project(project);
};
Project.prototype.delete.implementation = async function () {
const result = await serverProxy.projects.delete(this.id);
return result;
};
Project.prototype.preview.implementation = async function () {
if (!this._internalData.task_ids.length) {
return '';
}
const frameData = await getPreview(this._internalData.task_ids[0]);
return frameData;
};
})();

@ -465,29 +465,39 @@
}
}
async function exportDataset(id, format) {
const { backendAPI } = config;
let url = `${backendAPI}/tasks/${id}/dataset?format=${format}`;
function exportDataset(instanceType) {
return async function (id, format, name, saveImages) {
const { backendAPI } = config;
const baseURL = `${backendAPI}/${instanceType}/${id}/${saveImages ? 'dataset' : 'annotations'}`;
let query = `format=${encodeURIComponent(format)}`;
if (name) {
const filename = name.replace(/\//g, '_');
query += `&filename=${encodeURIComponent(filename)}`;
}
let url = `${baseURL}?${query}`;
return new Promise((resolve, reject) => {
async function request() {
try {
const response = await Axios.get(`${url}`, {
return new Promise((resolve, reject) => {
async function request() {
Axios.get(`${url}`, {
proxy: config.proxy,
});
if (response.status === 202) {
setTimeout(request, 3000);
} else {
url = `${url}&action=download`;
resolve(url);
}
} catch (errorData) {
reject(generateError(errorData));
})
.then((response) => {
if (response.status === 202) {
setTimeout(request, 3000);
} else {
query = `${query}&action=download`;
url = `${baseURL}?${query}`;
resolve(url);
}
})
.catch((errorData) => {
reject(generateError(errorData));
});
}
}
setTimeout(request);
});
setTimeout(request);
});
};
}
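The polling contract inferred from this function, restated as a standalone sketch (Axios and config as in the module above):

```ts
// HTTP 202 means the server is still preparing the export; any other success
// status means the file is ready at the same URL with action=download added.
async function pollExport(url: string): Promise<string> {
    const response = await Axios.get(url, { proxy: config.proxy });
    if (response.status === 202) {
        await new Promise((resolve) => setTimeout(resolve, 3000)); // retry in 3 s
        return pollExport(url);
    }
    return `${url}&action=download`;
}
```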
async function exportTask(id) {
@ -1135,7 +1145,9 @@
const closureId = Date.now();
predictAnnotations.latestRequest.id = closureId;
const predicate = () => !predictAnnotations.latestRequest.fetching || predictAnnotations.latestRequest.id !== closureId;
const predicate = () => (
!predictAnnotations.latestRequest.fetching || predictAnnotations.latestRequest.id !== closureId
);
if (predictAnnotations.latestRequest.fetching) {
waitFor(5, predicate).then(() => {
if (predictAnnotations.latestRequest.id !== closureId) {
@ -1199,6 +1211,7 @@
save: saveProject,
create: createProject,
delete: deleteProject,
exportDataset: exportDataset('projects'),
}),
writable: false,
},
@ -1209,7 +1222,7 @@
saveTask,
createTask,
deleteTask,
exportDataset,
exportDataset: exportDataset('tasks'),
exportTask,
importTask,
}),

@ -42,16 +42,6 @@
return result;
},
async dump(dumper, name = null) {
const result = await PluginRegistry.apiWrapper.call(
this,
prototype.annotations.dump,
dumper,
name,
);
return result;
},
async statistics() {
const result = await PluginRegistry.apiWrapper.call(this, prototype.annotations.statistics);
return result;
@ -148,11 +138,13 @@
return result;
},
async exportDataset(format) {
async exportDataset(format, saveImages, customName = '') {
const result = await PluginRegistry.apiWrapper.call(
this,
prototype.annotations.exportDataset,
format,
saveImages,
customName,
);
return result;
},
@ -329,21 +321,6 @@
* @instance
* @async
*/
/**
* Dump of annotations to a file.
* Method always dumps annotations for a whole task.
* @method dump
* @memberof Session.annotations
* @param {module:API.cvat.classes.Dumper} dumper - a dumper
* @param {string} [name = null] - a name of a file with annotations
* which will be used to dump
* @returns {string} URL which can be used in order to get a dump file
* @throws {module:API.cvat.exceptions.PluginError}
* @throws {module:API.cvat.exceptions.ServerError}
* @throws {module:API.cvat.exceptions.ArgumentError}
* @instance
* @async
*/
/**
* Collect short statistics about a task or a job.
* @method statistics
@ -877,7 +854,6 @@
get: Object.getPrototypeOf(this).annotations.get.bind(this),
put: Object.getPrototypeOf(this).annotations.put.bind(this),
save: Object.getPrototypeOf(this).annotations.save.bind(this),
dump: Object.getPrototypeOf(this).annotations.dump.bind(this),
merge: Object.getPrototypeOf(this).annotations.merge.bind(this),
split: Object.getPrototypeOf(this).annotations.split.bind(this),
group: Object.getPrototypeOf(this).annotations.group.bind(this),
@ -1575,7 +1551,6 @@
get: Object.getPrototypeOf(this).annotations.get.bind(this),
put: Object.getPrototypeOf(this).annotations.put.bind(this),
save: Object.getPrototypeOf(this).annotations.save.bind(this),
dump: Object.getPrototypeOf(this).annotations.dump.bind(this),
merge: Object.getPrototypeOf(this).annotations.merge.bind(this),
split: Object.getPrototypeOf(this).annotations.split.bind(this),
group: Object.getPrototypeOf(this).annotations.group.bind(this),
@ -1715,7 +1690,6 @@
selectObject,
annotationsStatistics,
uploadAnnotations,
dumpAnnotations,
importAnnotations,
exportAnnotations,
exportDataset,
@ -1948,13 +1922,8 @@
return result;
};
Job.prototype.annotations.dump.implementation = async function (dumper, name) {
const result = await dumpAnnotations(this, name, dumper);
return result;
};
Job.prototype.annotations.exportDataset.implementation = async function (format) {
const result = await exportDataset(this.task, format);
Job.prototype.annotations.exportDataset.implementation = async function (format, saveImages, customName) {
const result = await exportDataset(this.task, format, customName, saveImages);
return result;
};
@ -2252,11 +2221,6 @@
return result;
};
Task.prototype.annotations.dump.implementation = async function (dumper, name) {
const result = await dumpAnnotations(this, name, dumper);
return result;
};
Task.prototype.annotations.import.implementation = function (data) {
const result = importAnnotations(this, data);
return result;
@ -2267,8 +2231,8 @@
return result;
};
Task.prototype.annotations.exportDataset.implementation = async function (format) {
const result = await exportDataset(this, format);
Task.prototype.annotations.exportDataset.implementation = async function (format, saveImages, customName) {
const result = await exportDataset(this, format, customName, saveImages);
return result;
};

@ -0,0 +1,49 @@
// Copyright (C) 2021 Intel Corporation
//
// SPDX-License-Identifier: MIT
import { ActionUnion, createAction, ThunkAction } from 'utils/redux';
export enum ExportActionTypes {
OPEN_EXPORT_MODAL = 'OPEN_EXPORT_MODAL',
CLOSE_EXPORT_MODAL = 'CLOSE_EXPORT_MODAL',
EXPORT_DATASET = 'EXPORT_DATASET',
EXPORT_DATASET_SUCCESS = 'EXPORT_DATASET_SUCCESS',
EXPORT_DATASET_FAILED = 'EXPORT_DATASET_FAILED',
}
export const exportActions = {
openExportModal: (instance: any) => createAction(ExportActionTypes.OPEN_EXPORT_MODAL, { instance }),
closeExportModal: () => createAction(ExportActionTypes.CLOSE_EXPORT_MODAL),
exportDataset: (instance: any, format: string) =>
createAction(ExportActionTypes.EXPORT_DATASET, { instance, format }),
exportDatasetSuccess: (instance: any, format: string) =>
createAction(ExportActionTypes.EXPORT_DATASET_SUCCESS, { instance, format }),
exportDatasetFailed: (instance: any, format: string, error: any) =>
createAction(ExportActionTypes.EXPORT_DATASET_FAILED, {
instance,
format,
error,
}),
};
export const exportDatasetAsync = (
instance: any,
format: string,
name: string,
saveImages: boolean,
): ThunkAction => async (dispatch) => {
dispatch(exportActions.exportDataset(instance, format));
try {
const url = await instance.annotations.exportDataset(format, saveImages, name);
const downloadAnchor = window.document.getElementById('downloadAnchor') as HTMLAnchorElement;
downloadAnchor.href = url;
downloadAnchor.click();
dispatch(exportActions.exportDatasetSuccess(instance, format));
} catch (error) {
dispatch(exportActions.exportDatasetFailed(instance, format, error));
}
};
export type ExportActions = ActionUnion<typeof exportActions>;
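A hedged usage sketch: dispatching the new thunk from a component (`dispatch` obtained via `useDispatch()`; the instance and format names are placeholders):

```ts
dispatch(exportDatasetAsync(
    taskInstance,     // a task or project object from the store
    'COCO 1.0',       // format name as reported by the server
    'my-dataset.zip', // custom file name; '' falls back to the server default
    true,             // saveImages: bundle images, not only annotations
));
```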

@ -18,12 +18,6 @@ export enum TasksActionTypes {
LOAD_ANNOTATIONS = 'LOAD_ANNOTATIONS',
LOAD_ANNOTATIONS_SUCCESS = 'LOAD_ANNOTATIONS_SUCCESS',
LOAD_ANNOTATIONS_FAILED = 'LOAD_ANNOTATIONS_FAILED',
DUMP_ANNOTATIONS = 'DUMP_ANNOTATIONS',
DUMP_ANNOTATIONS_SUCCESS = 'DUMP_ANNOTATIONS_SUCCESS',
DUMP_ANNOTATIONS_FAILED = 'DUMP_ANNOTATIONS_FAILED',
EXPORT_DATASET = 'EXPORT_DATASET',
EXPORT_DATASET_SUCCESS = 'EXPORT_DATASET_SUCCESS',
EXPORT_DATASET_FAILED = 'EXPORT_DATASET_FAILED',
DELETE_TASK = 'DELETE_TASK',
DELETE_TASK_SUCCESS = 'DELETE_TASK_SUCCESS',
DELETE_TASK_FAILED = 'DELETE_TASK_FAILED',
@ -108,60 +102,6 @@ export function getTasksAsync(query: TasksQuery): ThunkAction<Promise<void>, {},
};
}
function dumpAnnotation(task: any, dumper: any): AnyAction {
const action = {
type: TasksActionTypes.DUMP_ANNOTATIONS,
payload: {
task,
dumper,
},
};
return action;
}
function dumpAnnotationSuccess(task: any, dumper: any): AnyAction {
const action = {
type: TasksActionTypes.DUMP_ANNOTATIONS_SUCCESS,
payload: {
task,
dumper,
},
};
return action;
}
function dumpAnnotationFailed(task: any, dumper: any, error: any): AnyAction {
const action = {
type: TasksActionTypes.DUMP_ANNOTATIONS_FAILED,
payload: {
task,
dumper,
error,
},
};
return action;
}
export function dumpAnnotationsAsync(task: any, dumper: any): ThunkAction<Promise<void>, {}, {}, AnyAction> {
return async (dispatch: ActionCreator<Dispatch>): Promise<void> => {
try {
dispatch(dumpAnnotation(task, dumper));
const url = await task.annotations.dump(dumper);
const downloadAnchor = window.document.getElementById('downloadAnchor') as HTMLAnchorElement;
downloadAnchor.href = url;
downloadAnchor.click();
} catch (error) {
dispatch(dumpAnnotationFailed(task, dumper, error));
return;
}
dispatch(dumpAnnotationSuccess(task, dumper));
};
}
function loadAnnotations(task: any, loader: any): AnyAction {
const action = {
type: TasksActionTypes.LOAD_ANNOTATIONS,
@ -263,60 +203,6 @@ export function importTaskAsync(file: File): ThunkAction<Promise<void>, {}, {},
};
}
function exportDataset(task: any, exporter: any): AnyAction {
const action = {
type: TasksActionTypes.EXPORT_DATASET,
payload: {
task,
exporter,
},
};
return action;
}
function exportDatasetSuccess(task: any, exporter: any): AnyAction {
const action = {
type: TasksActionTypes.EXPORT_DATASET_SUCCESS,
payload: {
task,
exporter,
},
};
return action;
}
function exportDatasetFailed(task: any, exporter: any, error: any): AnyAction {
const action = {
type: TasksActionTypes.EXPORT_DATASET_FAILED,
payload: {
task,
exporter,
error,
},
};
return action;
}
export function exportDatasetAsync(task: any, exporter: any): ThunkAction<Promise<void>, {}, {}, AnyAction> {
return async (dispatch: ActionCreator<Dispatch>): Promise<void> => {
dispatch(exportDataset(task, exporter));
try {
const url = await task.annotations.exportDataset(exporter.name);
const downloadAnchor = window.document.getElementById('downloadAnchor') as HTMLAnchorElement;
downloadAnchor.href = url;
downloadAnchor.click();
} catch (error) {
dispatch(exportDatasetFailed(task, exporter, error));
}
dispatch(exportDatasetSuccess(task, exporter));
};
}
function exportTask(taskID: number): AnyAction {
const action = {
type: TasksActionTypes.EXPORT_TASK,

@ -9,9 +9,7 @@ import Modal from 'antd/lib/modal';
import { LoadingOutlined } from '@ant-design/icons';
// eslint-disable-next-line import/no-extraneous-dependencies
import { MenuInfo } from 'rc-menu/lib/interface';
import DumpSubmenu from './dump-submenu';
import LoadSubmenu from './load-submenu';
import ExportSubmenu from './export-submenu';
import { DimensionType } from '../../reducers/interfaces';
interface Props {
@ -21,8 +19,6 @@ interface Props {
loaders: any[];
dumpers: any[];
loadActivity: string | null;
dumpActivities: string[] | null;
exportActivities: string[] | null;
inferenceIsActive: boolean;
taskDimension: DimensionType;
onClickMenu: (params: MenuInfo, file?: File) => void;
@ -30,7 +26,6 @@ interface Props {
}
export enum Actions {
DUMP_TASK_ANNO = 'dump_task_anno',
LOAD_TASK_ANNO = 'load_task_anno',
EXPORT_TASK_DATASET = 'export_task_dataset',
DELETE_TASK = 'delete_task',
@ -43,14 +38,10 @@ export enum Actions {
export default function ActionsMenuComponent(props: Props): JSX.Element {
const {
taskID,
taskMode,
bugTracker,
inferenceIsActive,
dumpers,
loaders,
onClickMenu,
dumpActivities,
exportActivities,
loadActivity,
taskDimension,
exportIsActive,
@ -106,13 +97,6 @@ export default function ActionsMenuComponent(props: Props): JSX.Element {
return (
<Menu selectable={false} className='cvat-actions-menu' onClick={onClickMenuWrapper}>
{DumpSubmenu({
taskMode,
dumpers,
dumpActivities,
menuKey: Actions.DUMP_TASK_ANNO,
taskDimension,
})}
{LoadSubmenu({
loaders,
loadActivity,
@ -122,19 +106,14 @@ export default function ActionsMenuComponent(props: Props): JSX.Element {
menuKey: Actions.LOAD_TASK_ANNO,
taskDimension,
})}
{ExportSubmenu({
exporters: dumpers,
exportActivities,
menuKey: Actions.EXPORT_TASK_DATASET,
taskDimension,
})}
<Menu.Item key={Actions.EXPORT_TASK_DATASET}>Export task dataset</Menu.Item>
{!!bugTracker && <Menu.Item key={Actions.OPEN_BUG_TRACKER}>Open bug tracker</Menu.Item>}
<Menu.Item disabled={inferenceIsActive} key={Actions.RUN_AUTO_ANNOTATION}>
Automatic annotation
</Menu.Item>
<Menu.Item key={Actions.EXPORT_TASK} disabled={exportIsActive}>
{exportIsActive && <LoadingOutlined id='cvat-export-task-loading' />}
Export Task
Export task
</Menu.Item>
<hr />
<Menu.Item key={Actions.MOVE_TASK_TO_PROJECT}>Move to project</Menu.Item>

@ -1,54 +0,0 @@
// Copyright (C) 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
import React from 'react';
import Menu from 'antd/lib/menu';
import { DownloadOutlined, LoadingOutlined } from '@ant-design/icons';
import Text from 'antd/lib/typography/Text';
import { DimensionType } from '../../reducers/interfaces';
function isDefaultFormat(dumperName: string, taskMode: string): boolean {
return (
(dumperName === 'CVAT for video 1.1' && taskMode === 'interpolation') ||
(dumperName === 'CVAT for images 1.1' && taskMode === 'annotation')
);
}
interface Props {
taskMode: string;
menuKey: string;
dumpers: any[];
dumpActivities: string[] | null;
taskDimension: DimensionType;
}
export default function DumpSubmenu(props: Props): JSX.Element {
const {
taskMode, menuKey, dumpers, dumpActivities, taskDimension,
} = props;
return (
<Menu.SubMenu key={menuKey} title='Dump annotations'>
{dumpers
.sort((a: any, b: any) => a.name.localeCompare(b.name))
.filter((dumper: any): boolean => dumper.dimension === taskDimension)
.map(
(dumper: any): JSX.Element => {
const pending = (dumpActivities || []).includes(dumper.name);
const disabled = !dumper.enabled || pending;
const isDefault = isDefaultFormat(dumper.name, taskMode);
return (
<Menu.Item key={dumper.name} disabled={disabled} className='cvat-menu-dump-submenu-item'>
<DownloadOutlined />
<Text strong={isDefault} disabled={disabled}>
{dumper.name}
</Text>
{pending && <LoadingOutlined style={{ marginLeft: 10 }} />}
</Menu.Item>
);
},
)}
</Menu.SubMenu>
);
}

@ -1,47 +0,0 @@
// Copyright (C) 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
import React from 'react';
import Menu from 'antd/lib/menu';
import Text from 'antd/lib/typography/Text';
import { ExportOutlined, LoadingOutlined } from '@ant-design/icons';
import { DimensionType } from '../../reducers/interfaces';
interface Props {
menuKey: string;
exporters: any[];
exportActivities: string[] | null;
taskDimension: DimensionType;
}
export default function ExportSubmenu(props: Props): JSX.Element {
const {
menuKey, exporters, exportActivities, taskDimension,
} = props;
return (
<Menu.SubMenu key={menuKey} title='Export as a dataset'>
{exporters
.sort((a: any, b: any) => a.name.localeCompare(b.name))
.filter((exporter: any): boolean => exporter.dimension === taskDimension)
.map(
(exporter: any): JSX.Element => {
const pending = (exportActivities || []).includes(exporter.name);
const disabled = !exporter.enabled || pending;
return (
<Menu.Item
key={exporter.name}
disabled={disabled}
className='cvat-menu-export-submenu-item'
>
<ExportOutlined />
<Text disabled={disabled}>{exporter.name}</Text>
{pending && <LoadingOutlined style={{ marginLeft: 10 }} />}
</Menu.Item>
);
},
)}
</Menu.SubMenu>
);
}

@ -60,11 +60,7 @@ function mapDispatchToProps(dispatch: Dispatch<AnyAction>): DispatchToProps {
function ObjectsSideBar(props: StateToProps & DispatchToProps & OwnProps): JSX.Element {
const {
sidebarCollapsed,
canvasInstance,
collapseSidebar,
objectsList,
jobInstance,
sidebarCollapsed, canvasInstance, collapseSidebar, objectsList, jobInstance,
} = props;
const collapse = (): void => {
@ -119,13 +115,11 @@ function ObjectsSideBar(props: StateToProps & DispatchToProps & OwnProps): JSX.E
<LabelsList />
</Tabs.TabPane>
{is2D ?
(
<Tabs.TabPane tab={<Text strong>Issues</Text>} key='issues'>
<IssuesListComponent />
</Tabs.TabPane>
) : null}
{is2D ? (
<Tabs.TabPane tab={<Text strong>Issues</Text>} key='issues'>
<IssuesListComponent />
</Tabs.TabPane>
) : null}
</Tabs>
{!sidebarCollapsed && <AppearanceBlock />}

@ -8,9 +8,8 @@ import Modal from 'antd/lib/modal';
// eslint-disable-next-line import/no-extraneous-dependencies
import { MenuInfo } from 'rc-menu/lib/interface';
import DumpSubmenu from 'components/actions-menu/dump-submenu';
import ExportDatasetModal from 'components/export-dataset/export-dataset-modal';
import LoadSubmenu from 'components/actions-menu/load-submenu';
import ExportSubmenu from 'components/actions-menu/export-submenu';
import { DimensionType } from '../../../reducers/interfaces';
interface Props {
@ -18,8 +17,6 @@ interface Props {
loaders: any[];
dumpers: any[];
loadActivity: string | null;
dumpActivities: string[] | null;
exportActivities: string[] | null;
isReviewer: boolean;
jobInstance: any;
onClickMenu(params: MenuInfo, file?: File): void;
@ -28,7 +25,6 @@ interface Props {
}
export enum Actions {
DUMP_TASK_ANNO = 'dump_task_anno',
LOAD_JOB_ANNO = 'load_job_anno',
EXPORT_TASK_DATASET = 'export_task_dataset',
REMOVE_ANNO = 'remove_anno',
@ -41,12 +37,8 @@ export enum Actions {
export default function AnnotationMenuComponent(props: Props): JSX.Element {
const {
taskMode,
loaders,
dumpers,
loadActivity,
dumpActivities,
exportActivities,
isReviewer,
jobInstance,
onClickMenu,
@ -163,13 +155,6 @@ export default function AnnotationMenuComponent(props: Props): JSX.Element {
return (
<Menu onClick={onClickMenuWrapper} className='cvat-annotation-menu' selectable={false}>
{DumpSubmenu({
taskMode,
dumpers,
dumpActivities,
menuKey: Actions.DUMP_TASK_ANNO,
taskDimension: jobInstance.task.dimension,
})}
{LoadSubmenu({
loaders,
loadActivity,
@ -179,13 +164,7 @@ export default function AnnotationMenuComponent(props: Props): JSX.Element {
menuKey: Actions.LOAD_JOB_ANNO,
taskDimension: jobInstance.task.dimension,
})}
{ExportSubmenu({
exporters: dumpers,
exportActivities,
menuKey: Actions.EXPORT_TASK_DATASET,
taskDimension: jobInstance.task.dimension,
})}
<Menu.Item key={Actions.EXPORT_TASK_DATASET}>Export task dataset</Menu.Item>
<Menu.Item key={Actions.REMOVE_ANNO}>Remove annotations</Menu.Item>
<Menu.Item key={Actions.OPEN_TASK}>
<a href={`/tasks/${taskID}`} onClick={(e: React.MouseEvent) => e.preventDefault()}>
@ -198,6 +177,7 @@ export default function AnnotationMenuComponent(props: Props): JSX.Element {
<Menu.Item key={Actions.SUBMIT_REVIEW}>Submit the review</Menu.Item>
)}
{jobStatus === 'completed' && <Menu.Item key={Actions.RENEW_JOB}>Renew the job</Menu.Item>}
<ExportDatasetModal />
</Menu>
);
}

@ -0,0 +1,145 @@
// Copyright (C) 2021 Intel Corporation
//
// SPDX-License-Identifier: MIT
import './styles.scss';
import React, { useState, useEffect, useCallback } from 'react';
import Modal from 'antd/lib/modal';
import Notification from 'antd/lib/notification';
import { useSelector, useDispatch } from 'react-redux';
import { DownloadOutlined, LoadingOutlined } from '@ant-design/icons';
import Text from 'antd/lib/typography/Text';
import Select from 'antd/lib/select';
import Checkbox from 'antd/lib/checkbox';
import Input from 'antd/lib/input';
import Form from 'antd/lib/form';
import { CombinedState } from 'reducers/interfaces';
import { exportActions, exportDatasetAsync } from 'actions/export-actions';
import getCore from 'cvat-core-wrapper';
const core = getCore();
type FormValues = {
selectedFormat: string | undefined;
saveImages: boolean;
customName: string | undefined;
};
function ExportDatasetModal(): JSX.Element {
const [instanceType, setInstanceType] = useState('');
const [activities, setActivities] = useState<string[]>([]);
const [form] = Form.useForm();
const dispatch = useDispatch();
const instance = useSelector((state: CombinedState) => state.export.instance);
const modalVisible = useSelector((state: CombinedState) => state.export.modalVisible);
const dumpers = useSelector((state: CombinedState) => state.formats.annotationFormats.dumpers);
const {
tasks: taskExportActivities, projects: projectExportActivities,
} = useSelector((state: CombinedState) => state.export);
const initActivities = (): void => {
if (instance instanceof core.classes.Project) {
setInstanceType('project');
setActivities(projectExportActivities[instance.id] || []);
} else if (instance instanceof core.classes.Task) {
setInstanceType('task');
setActivities(taskExportActivities[instance.id] || []);
if (instance.mode === 'interpolation' && instance.dimension === '2d') {
form.setFieldsValue({ selectedFormat: 'CVAT for video 1.1' });
} else if (instance.mode === 'annotation' && instance.dimension === '2d') {
form.setFieldsValue({ selectedFormat: 'CVAT for images 1.1' });
}
}
};
useEffect(() => {
initActivities();
}, [instance?.id, instance instanceof core.classes.Project]);
const closeModal = (): void => {
form.resetFields();
dispatch(exportActions.closeExportModal());
};
const handleExport = useCallback((values: FormValues): void => {
// the form has already validated the format, so it cannot be undefined here
dispatch(
exportDatasetAsync(instance, values.selectedFormat as string, values.customName ? `${values.customName}.zip` : '', values.saveImages),
);
closeModal();
Notification.info({
message: 'Dataset export started',
description: `Dataset export was started for ${instanceType} #${instance?.id}. ` +
'Download will start automatically as soon as the dataset is ready.',
className: `cvat-notification-notice-export-${instanceType}-start`,
});
}, [instance?.id, instance instanceof core.classes.Project, instanceType]);
return (
<Modal
title={`Export ${instanceType} #${instance?.id} as a dataset`}
visible={modalVisible}
onCancel={closeModal}
onOk={() => form.submit()}
className={`cvat-modal-export-${instanceType}`}
>
<Form
name='Export dataset'
form={form}
labelCol={{ span: 8 }}
wrapperCol={{ span: 16 }}
initialValues={
{
selectedFormat: undefined,
saveImages: false,
customName: undefined,
} as FormValues
}
onFinish={handleExport}
>
<Form.Item
name='selectedFormat'
label='Export format'
rules={[{ required: true, message: 'Format must be selected' }]}
>
<Select placeholder='Select dataset format' className='cvat-modal-export-select'>
{dumpers
.sort((a: any, b: any) => a.name.localeCompare(b.name))
.filter(
(dumper: any): boolean =>
!(instance instanceof core.classes.Task) ||
dumper.dimension === instance?.dimension,
)
.map(
(dumper: any): JSX.Element => {
const pending = (activities || []).includes(dumper.name);
const disabled = !dumper.enabled || pending;
return (
<Select.Option
value={dumper.name}
key={dumper.name}
disabled={disabled}
className='cvat-modal-export-option-item'
>
<DownloadOutlined />
<Text disabled={disabled}>{dumper.name}</Text>
{pending && <LoadingOutlined style={{ marginLeft: 10 }} />}
</Select.Option>
);
},
)}
</Select>
</Form.Item>
<Form.Item name='saveImages' valuePropName='checked' wrapperCol={{ offset: 8, span: 16 }}>
<Checkbox>Save images</Checkbox>
</Form.Item>
<Form.Item label='Custom name' name='customName'>
<Input placeholder='Custom name for a dataset' suffix='.zip' />
</Form.Item>
</Form>
</Modal>
);
}
export default React.memo(ExportDatasetModal);
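The modal reads its target instance from the store, so, as the page components below show, it is mounted once per page and opened from anywhere via an action. A sketch:

```tsx
// Mounted once somewhere in the page tree:
<ExportDatasetModal />
// Opened from a menu handler (instance is a task or project object):
dispatch(exportActions.openExportModal(instance));
```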

@ -0,0 +1,13 @@
// Copyright (C) 2021 Intel Corporation
//
// SPDX-License-Identifier: MIT
@import '../../base.scss';
.cvat-modal-export-option-item > .ant-select-item-option-content,
.cvat-modal-export-select .ant-select-selection-item {
> span[role='img'] {
color: $info-icon-color;
margin-right: $grid-unit-size;
}
}

@ -16,6 +16,7 @@ import { PlusOutlined } from '@ant-design/icons';
import { CombinedState, Task } from 'reducers/interfaces';
import { getProjectsAsync } from 'actions/projects-actions';
import { cancelInferenceAsync } from 'actions/models-actions';
import ExportDatasetModal from 'components/export-dataset/export-dataset-modal';
import TaskItem from 'components/tasks-page/task-item';
import MoveTaskModal from 'components/move-task-modal/move-task-modal';
import ModelRunnerDialog from 'components/model-runner-modal/model-runner-dialog';
@ -111,6 +112,7 @@ export default function ProjectPageComponent(): JSX.Element {
</React.Fragment>
))}
</Col>
<ExportDatasetModal />
<MoveTaskModal />
<ModelRunnerDialog />
</Row>

@ -1,4 +1,4 @@
// Copyright (C) 2020 Intel Corporation
// Copyright (C) 2020-2021 Intel Corporation
//
// SPDX-License-Identifier: MIT
@ -8,6 +8,7 @@ import Modal from 'antd/lib/modal';
import Menu from 'antd/lib/menu';
import { deleteProjectAsync } from 'actions/projects-actions';
import { exportActions } from 'actions/export-actions';
interface Props {
projectInstance: any;
@ -37,6 +38,11 @@ export default function ProjectActionsMenuComponent(props: Props): JSX.Element {
return (
<Menu className='cvat-project-actions-menu'>
<Menu.Item onClick={onDeleteProject}>Delete</Menu.Item>
<Menu.Item
onClick={() => dispatch(exportActions.openExportModal(projectInstance))}
>
Export project dataset
</Menu.Item>
</Menu>
);
}

@ -1,4 +1,4 @@
// Copyright (C) 2020 Intel Corporation
// Copyright (C) 2020-2021 Intel Corporation
//
// SPDX-License-Identifier: MIT
@ -8,9 +8,10 @@ import { useDispatch, useSelector } from 'react-redux';
import { useLocation, useHistory } from 'react-router';
import Spin from 'antd/lib/spin';
import FeedbackComponent from 'components/feedback/feedback';
import { CombinedState, ProjectsQuery } from 'reducers/interfaces';
import { getProjectsAsync } from 'actions/projects-actions';
import FeedbackComponent from 'components/feedback/feedback';
import ExportDatasetModal from 'components/export-dataset/export-dataset-modal';
import EmptyListComponent from './empty-list';
import TopBarComponent from './top-bar';
import ProjectListComponent from './project-list';
@ -55,6 +56,7 @@ export default function ProjectsPageComponent(): JSX.Element {
<TopBarComponent />
{projectsCount ? <ProjectListComponent /> : <EmptyListComponent notFound={anySearchQuery} />}
<FeedbackComponent />
<ExportDatasetModal />
</div>
);
}

@ -14,6 +14,7 @@ import DetailsContainer from 'containers/task-page/details';
import JobListContainer from 'containers/task-page/job-list';
import ModelRunnerModal from 'components/model-runner-modal/model-runner-dialog';
import MoveTaskModal from 'components/move-task-modal/move-task-modal';
import ExportDatasetModal from 'components/export-dataset/export-dataset-modal';
import { Task } from 'reducers/interfaces';
import TopBarComponent from './top-bar';
@ -85,6 +86,7 @@ class TaskPageComponent extends React.PureComponent<Props> {
</Row>
<ModelRunnerModal />
<MoveTaskModal />
<ExportDatasetModal />
{updating && <Spin size='large' className='cvat-spinner' />}
</>
);

@ -14,6 +14,7 @@ import Text from 'antd/lib/typography/Text';
import { TasksQuery } from 'reducers/interfaces';
import FeedbackComponent from 'components/feedback/feedback';
import TaskListContainer from 'containers/tasks-page/tasks-list';
import ExportDatasetModal from 'components/export-dataset/export-dataset-modal';
import TopBar from './top-bar';
import EmptyListComponent from './empty-list';
@ -221,6 +222,7 @@ class TasksPageComponent extends React.PureComponent<TasksPageProps & RouteCompo
<EmptyListComponent />
)}
<FeedbackComponent />
<ExportDatasetModal />
</div>
);
}

@ -12,13 +12,12 @@ import { CombinedState } from 'reducers/interfaces';
import { modelsActions } from 'actions/models-actions';
import {
dumpAnnotationsAsync,
loadAnnotationsAsync,
exportDatasetAsync,
deleteTaskAsync,
exportTaskAsync,
switchMoveTaskModalVisible,
} from 'actions/tasks-actions';
import { exportActions } from 'actions/export-actions';
interface OwnProps {
taskInstance: any;
@ -27,16 +26,13 @@ interface OwnProps {
interface StateToProps {
annotationFormats: any;
loadActivity: string | null;
dumpActivities: string[] | null;
exportActivities: string[] | null;
inferenceIsActive: boolean;
exportIsActive: boolean;
}
interface DispatchToProps {
loadAnnotations: (taskInstance: any, loader: any, file: File) => void;
dumpAnnotations: (taskInstance: any, dumper: any) => void;
exportDataset: (taskInstance: any, exporter: any) => void;
showExportModal: (taskInstance: any) => void;
deleteTask: (taskInstance: any) => void;
openRunModelWindow: (taskInstance: any) => void;
exportTask: (taskInstance: any) => void;
@ -52,14 +48,12 @@ function mapStateToProps(state: CombinedState, own: OwnProps): StateToProps {
formats: { annotationFormats },
tasks: {
activities: {
dumps, loads, exports: activeExports, backups,
loads, backups,
},
},
} = state;
return {
dumpActivities: tid in dumps ? dumps[tid] : null,
exportActivities: tid in activeExports ? activeExports[tid] : null,
loadActivity: tid in loads ? loads[tid] : null,
annotationFormats,
inferenceIsActive: tid in state.models.inferences,
@ -72,11 +66,8 @@ function mapDispatchToProps(dispatch: any): DispatchToProps {
loadAnnotations: (taskInstance: any, loader: any, file: File): void => {
dispatch(loadAnnotationsAsync(taskInstance, loader, file));
},
dumpAnnotations: (taskInstance: any, dumper: any): void => {
dispatch(dumpAnnotationsAsync(taskInstance, dumper));
},
exportDataset: (taskInstance: any, exporter: any): void => {
dispatch(exportDatasetAsync(taskInstance, exporter));
showExportModal: (taskInstance: any): void => {
dispatch(exportActions.openExportModal(taskInstance));
},
deleteTask: (taskInstance: any): void => {
dispatch(deleteTaskAsync(taskInstance));
@ -98,14 +89,11 @@ function ActionsMenuContainer(props: OwnProps & StateToProps & DispatchToProps):
taskInstance,
annotationFormats: { loaders, dumpers },
loadActivity,
dumpActivities,
exportActivities,
inferenceIsActive,
exportIsActive,
loadAnnotations,
dumpAnnotations,
exportDataset,
showExportModal,
deleteTask,
openRunModelWindow,
exportTask,
@ -115,28 +103,18 @@ function ActionsMenuContainer(props: OwnProps & StateToProps & DispatchToProps):
function onClickMenu(params: MenuInfo, file?: File): void {
if (params.keyPath.length > 1) {
const [additionalKey, action] = params.keyPath;
if (action === Actions.DUMP_TASK_ANNO) {
const format = additionalKey;
const [dumper] = dumpers.filter((_dumper: any): boolean => _dumper.name === format);
if (dumper) {
dumpAnnotations(taskInstance, dumper);
}
} else if (action === Actions.LOAD_TASK_ANNO) {
if (action === Actions.LOAD_TASK_ANNO) {
const format = additionalKey;
const [loader] = loaders.filter((_loader: any): boolean => _loader.name === format);
if (loader && file) {
loadAnnotations(taskInstance, loader, file);
}
} else if (action === Actions.EXPORT_TASK_DATASET) {
const format = additionalKey;
const [exporter] = dumpers.filter((_exporter: any): boolean => _exporter.name === format);
if (exporter) {
exportDataset(taskInstance, exporter);
}
}
} else {
const [action] = params.keyPath;
if (action === Actions.DELETE_TASK) {
if (action === Actions.EXPORT_TASK_DATASET) {
showExportModal(taskInstance);
} else if (action === Actions.DELETE_TASK) {
deleteTask(taskInstance);
} else if (action === Actions.OPEN_BUG_TRACKER) {
window.open(`${taskInstance.bugTracker}`, '_blank');
@ -158,8 +136,6 @@ function ActionsMenuContainer(props: OwnProps & StateToProps & DispatchToProps):
loaders={loaders}
dumpers={dumpers}
loadActivity={loadActivity}
dumpActivities={dumpActivities}
exportActivities={exportActivities}
inferenceIsActive={inferenceIsActive}
onClickMenu={onClickMenu}
taskDimension={taskInstance.dimension}

@ -1,4 +1,4 @@
// Copyright (C) 2020 Intel Corporation
// Copyright (C) 2020-2021 Intel Corporation
//
// SPDX-License-Identifier: MIT
@ -10,7 +10,7 @@ import { MenuInfo } from 'rc-menu/lib/interface';
import { CombinedState, TaskStatus } from 'reducers/interfaces';
import AnnotationMenuComponent, { Actions } from 'components/annotation-page/top-bar/annotation-menu';
import { dumpAnnotationsAsync, exportDatasetAsync, updateJobAsync } from 'actions/tasks-actions';
import { updateJobAsync } from 'actions/tasks-actions';
import {
uploadJobAnnotationsAsync,
removeAnnotationsAsync,
@ -19,20 +19,18 @@ import {
switchSubmitReviewDialog as switchSubmitReviewDialogAction,
setForceExitAnnotationFlag as setForceExitAnnotationFlagAction,
} from 'actions/annotation-actions';
import { exportActions } from 'actions/export-actions';
interface StateToProps {
annotationFormats: any;
jobInstance: any;
loadActivity: string | null;
dumpActivities: string[] | null;
exportActivities: string[] | null;
user: any;
}
interface DispatchToProps {
loadAnnotations(job: any, loader: any, file: File): void;
dumpAnnotations(task: any, dumper: any): void;
exportDataset(task: any, exporter: any): void;
showExportModal(task: any): void;
removeAnnotations(sessionInstance: any): void;
switchRequestReviewDialog(visible: boolean): void;
switchSubmitReviewDialog(visible: boolean): void;
@ -49,7 +47,7 @@ function mapStateToProps(state: CombinedState): StateToProps {
},
formats: { annotationFormats },
tasks: {
activities: { dumps, loads, exports: activeExports },
activities: { loads },
},
auth: { user },
} = state;
@ -58,8 +56,6 @@ function mapStateToProps(state: CombinedState): StateToProps {
const jobID = jobInstance.id;
return {
dumpActivities: taskID in dumps ? dumps[taskID] : null,
exportActivities: taskID in activeExports ? activeExports[taskID] : null,
loadActivity: taskID in loads || jobID in jobLoads ? loads[taskID] || jobLoads[jobID] : null,
jobInstance,
annotationFormats,
@ -72,11 +68,8 @@ function mapDispatchToProps(dispatch: any): DispatchToProps {
loadAnnotations(job: any, loader: any, file: File): void {
dispatch(uploadJobAnnotationsAsync(job, loader, file));
},
dumpAnnotations(task: any, dumper: any): void {
dispatch(dumpAnnotationsAsync(task, dumper));
},
exportDataset(task: any, exporter: any): void {
dispatch(exportDatasetAsync(task, exporter));
showExportModal(task: any): void {
dispatch(exportActions.openExportModal(task));
},
removeAnnotations(sessionInstance: any): void {
dispatch(removeAnnotationsAsync(sessionInstance));
@ -108,11 +101,8 @@ function AnnotationMenuContainer(props: Props): JSX.Element {
annotationFormats: { loaders, dumpers },
history,
loadActivity,
dumpActivities,
exportActivities,
loadAnnotations,
dumpAnnotations,
exportDataset,
showExportModal,
removeAnnotations,
switchRequestReviewDialog,
switchSubmitReviewDialog,
@ -124,28 +114,18 @@ function AnnotationMenuContainer(props: Props): JSX.Element {
const onClickMenu = (params: MenuInfo, file?: File): void => {
if (params.keyPath.length > 1) {
const [additionalKey, action] = params.keyPath;
if (action === Actions.DUMP_TASK_ANNO) {
const format = additionalKey;
const [dumper] = dumpers.filter((_dumper: any): boolean => _dumper.name === format);
if (dumper) {
dumpAnnotations(jobInstance.task, dumper);
}
} else if (action === Actions.LOAD_JOB_ANNO) {
if (action === Actions.LOAD_JOB_ANNO) {
const format = additionalKey;
const [loader] = loaders.filter((_loader: any): boolean => _loader.name === format);
if (loader && file) {
loadAnnotations(jobInstance, loader, file);
}
} else if (action === Actions.EXPORT_TASK_DATASET) {
const format = additionalKey;
const [exporter] = dumpers.filter((_exporter: any): boolean => _exporter.name === format);
if (exporter) {
exportDataset(jobInstance.task, exporter);
}
}
} else {
const [action] = params.keyPath;
if (action === Actions.REMOVE_ANNO) {
if (action === Actions.EXPORT_TASK_DATASET) {
showExportModal(jobInstance.task);
} else if (action === Actions.REMOVE_ANNO) {
removeAnnotations(jobInstance);
} else if (action === Actions.REQUEST_REVIEW) {
switchRequestReviewDialog(true);
@ -173,8 +153,6 @@ function AnnotationMenuContainer(props: Props): JSX.Element {
loaders={loaders}
dumpers={dumpers}
loadActivity={loadActivity}
dumpActivities={dumpActivities}
exportActivities={exportActivities}
onClickMenu={onClickMenu}
setForceExitAnnotationFlag={setForceExitAnnotationFlag}
saveAnnotations={saveAnnotations}

@ -0,0 +1,67 @@
// Copyright (C) 2021 Intel Corporation
//
// SPDX-License-Identifier: MIT
import { ExportActions, ExportActionTypes } from 'actions/export-actions';
import getCore from 'cvat-core-wrapper';
import deepCopy from 'utils/deep-copy';
import { ExportState } from './interfaces';
const core = getCore();
const defaultState: ExportState = {
tasks: {},
projects: {},
instance: null,
modalVisible: false,
};
export default (state: ExportState = defaultState, action: ExportActions): ExportState => {
switch (action.type) {
case ExportActionTypes.OPEN_EXPORT_MODAL:
return {
...state,
modalVisible: true,
instance: action.payload.instance,
};
case ExportActionTypes.CLOSE_EXPORT_MODAL:
return {
...state,
modalVisible: false,
instance: null,
};
case ExportActionTypes.EXPORT_DATASET: {
const { instance, format } = action.payload;
const activities = deepCopy(instance instanceof core.classes.Project ? state.projects : state.tasks);
activities[instance.id] =
instance.id in activities && !activities[instance.id].includes(format) ?
[...activities[instance.id], format] :
activities[instance.id] || [format];
return {
...state,
tasks: instance instanceof core.classes.Task ? activities : state.tasks,
projects: instance instanceof core.classes.Project ? activities : state.projects,
};
}
case ExportActionTypes.EXPORT_DATASET_FAILED:
case ExportActionTypes.EXPORT_DATASET_SUCCESS: {
const { instance, format } = action.payload;
const activities = deepCopy(instance instanceof core.classes.Project ? state.projects : state.tasks);
activities[instance.id] = activities[instance.id].filter(
(exporterName: string): boolean => exporterName !== format,
);
return {
...state,
tasks: instance instanceof core.classes.Task ? activities : state.tasks,
projects: instance instanceof core.classes.Project ? activities : state.projects,
};
}
default:
return state;
}
};
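An illustrative run of the bookkeeping above, with a hypothetical task object `task5` whose id is 5:

```ts
let state = exportReducer(undefined, exportActions.exportDataset(task5, 'COCO 1.0'));
// state.tasks[5] is ['COCO 1.0'] while the export is in flight
state = exportReducer(state, exportActions.exportDatasetSuccess(task5, 'COCO 1.0'));
// state.tasks[5] is [] again once the download URL has been produced
```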

@ -1,4 +1,4 @@
// Copyright (C) 2020 Intel Corporation
// Copyright (C) 2020-2021 Intel Corporation
//
// SPDX-License-Identifier: MIT

@ -86,14 +86,6 @@ export interface TasksState {
count: number;
current: Task[];
activities: {
dumps: {
// dumps in different formats at the same time
[tid: number]: string[]; // dumper names
};
exports: {
// exports in different formats at the same time
[tid: number]: string[]; // dumper names
};
loads: {
// only one loading simultaneously
[tid: number]: string; // loader name
@ -112,6 +104,17 @@ export interface TasksState {
};
}
export interface ExportState {
tasks: {
[tid: number]: string[];
};
projects: {
[pid: number]: string[];
};
instance: any;
modalVisible: boolean;
}
export interface FormatsState {
annotationFormats: any;
fetching: boolean;
@ -621,6 +624,7 @@ export interface CombinedState {
settings: SettingsState;
shortcuts: ShortcutsState;
review: ReviewState;
export: ExportState;
}
export enum DimensionType {

@ -16,9 +16,13 @@ import { NotificationsActionType } from 'actions/notification-actions';
import { BoundariesActionTypes } from 'actions/boundaries-actions';
import { UserAgreementsActionTypes } from 'actions/useragreements-actions';
import { ReviewActionTypes } from 'actions/review-actions';
import { ExportActionTypes } from 'actions/export-actions';
import getCore from 'cvat-core-wrapper';
import { NotificationsState } from './interfaces';
const core = getCore();
const defaultState: NotificationsState = {
errors: {
auth: {
@ -308,8 +312,9 @@ export default function (state = defaultState, action: AnyAction): Notifications
},
};
}
case TasksActionTypes.EXPORT_DATASET_FAILED: {
const taskID = action.payload.task.id;
case ExportActionTypes.EXPORT_DATASET_FAILED: {
const instanceID = action.payload.instance.id;
const instanceType = action.payload.instance instanceof core.classes.Project ? 'project' : 'task';
return {
...state,
errors: {
@ -319,7 +324,8 @@ export default function (state = defaultState, action: AnyAction): Notifications
exportingAsDataset: {
message:
'Could not export dataset for the ' +
`<a href="/tasks/${taskID}" target="_blank">task ${taskID}</a>`,
`<a href="/${instanceType}s/${instanceID}" target="_blank">` +
`${instanceType} ${instanceID}</a>`,
reason: action.payload.error.toString(),
},
},
@ -392,24 +398,6 @@ export default function (state = defaultState, action: AnyAction): Notifications
},
};
}
case TasksActionTypes.DUMP_ANNOTATIONS_FAILED: {
const taskID = action.payload.task.id;
return {
...state,
errors: {
...state.errors,
tasks: {
...state.errors.tasks,
dumping: {
message:
'Could not dump annotations for the ' +
`<a href="/tasks/${taskID}" target="_blank">task ${taskID}</a>`,
reason: action.payload.error.toString(),
},
},
},
};
}
case TasksActionTypes.DELETE_TASK_FAILED: {
const { taskID } = action.payload;
return {

@ -1,4 +1,4 @@
// Copyright (C) 2020 Intel Corporation
// Copyright (C) 2020-2021 Intel Corporation
//
// SPDX-License-Identifier: MIT
@ -17,6 +17,7 @@ import settingsReducer from './settings-reducer';
import shortcutsReducer from './shortcuts-reducer';
import userAgreementsReducer from './useragreements-reducer';
import reviewReducer from './review-reducer';
import exportReducer from './export-reducer';
export default function createRootReducer(): Reducer {
return combineReducers({
@ -34,5 +35,6 @@ export default function createRootReducer(): Reducer {
shortcuts: shortcutsReducer,
userAgreements: userAgreementsReducer,
review: reviewReducer,
export: exportReducer,
});
}

@ -32,8 +32,6 @@ const defaultState: TasksState = {
mode: null,
},
activities: {
dumps: {},
exports: {},
loads: {},
deletes: {},
creates: {
@ -85,84 +83,6 @@ export default (state: TasksState = defaultState, action: AnyAction): TasksState
initialized: true,
fetching: false,
};
case TasksActionTypes.DUMP_ANNOTATIONS: {
const { task } = action.payload;
const { dumper } = action.payload;
const { dumps } = state.activities;
dumps[task.id] =
task.id in dumps && !dumps[task.id].includes(dumper.name) ?
[...dumps[task.id], dumper.name] :
dumps[task.id] || [dumper.name];
return {
...state,
activities: {
...state.activities,
dumps: {
...dumps,
},
},
};
}
case TasksActionTypes.DUMP_ANNOTATIONS_FAILED:
case TasksActionTypes.DUMP_ANNOTATIONS_SUCCESS: {
const { task } = action.payload;
const { dumper } = action.payload;
const { dumps } = state.activities;
dumps[task.id] = dumps[task.id].filter((dumperName: string): boolean => dumperName !== dumper.name);
return {
...state,
activities: {
...state.activities,
dumps: {
...dumps,
},
},
};
}
case TasksActionTypes.EXPORT_DATASET: {
const { task } = action.payload;
const { exporter } = action.payload;
const { exports: activeExports } = state.activities;
activeExports[task.id] =
task.id in activeExports && !activeExports[task.id].includes(exporter.name) ?
[...activeExports[task.id], exporter.name] :
activeExports[task.id] || [exporter.name];
return {
...state,
activities: {
...state.activities,
exports: {
...activeExports,
},
},
};
}
case TasksActionTypes.EXPORT_DATASET_FAILED:
case TasksActionTypes.EXPORT_DATASET_SUCCESS: {
const { task } = action.payload;
const { exporter } = action.payload;
const { exports: activeExports } = state.activities;
activeExports[task.id] = activeExports[task.id].filter(
(exporterName: string): boolean => exporterName !== exporter.name,
);
return {
...state,
activities: {
...state.activities,
exports: {
...activeExports,
},
},
};
}
case TasksActionTypes.LOAD_ANNOTATIONS: {
const { task } = action.payload;
const { loader } = action.payload;

@ -0,0 +1,21 @@
// Copyright (C) 2021 Intel Corporation
//
// SPDX-License-Identifier: MIT
function deepCopy<T>(obj: T): T {
if (typeof obj !== 'object') {
return obj;
}
if (!obj) {
return obj;
}
const container: any = (obj instanceof Array) ? [] : {};
for (const i in obj) {
if (Object.prototype.hasOwnProperty.call(obj, i)) {
container[i] = deepCopy(obj[i]);
}
}
return container;
}
export default deepCopy;
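A quick note on this helper: it copies only enumerable own properties of plain objects and arrays (class instances, Dates and Maps are not special-cased), which is all the export reducer needs. Example:

```ts
const original = { tasks: { 5: ['COCO 1.0'] } };
const clone = deepCopy(original);
clone.tasks[5].push('PASCAL VOC 1.1');
// original.tasks[5] is still ['COCO 1.0']; nested arrays were cloned too
```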

@ -42,6 +42,9 @@ class AnnotationIR:
def __getitem__(self, key):
return getattr(self, key)
def __setitem__(self, key, value):
return setattr(self, key, value)
@data.setter
def data(self, data):
self.version = data['version']

@ -3,52 +3,34 @@
#
# SPDX-License-Identifier: MIT
import sys
import os.path as osp
from collections import OrderedDict, namedtuple
from collections import namedtuple
from typing import Any, Callable, DefaultDict, Dict, List, Literal, Mapping, NamedTuple, OrderedDict, Tuple, Union
from pathlib import Path
from django.utils import timezone
import datumaro.components.extractor as datumaro
from cvat.apps.engine.frame_provider import FrameProvider
from cvat.apps.engine.models import AttributeType, ShapeType, DimensionType, Image as Img
from cvat.apps.engine.models import AttributeType, ShapeType, Project, Task, Label, DimensionType, Image as Img
from datumaro.util import cast
from datumaro.util.image import ByteImage, Image
from .annotation import AnnotationManager, TrackManager
from .annotation import AnnotationManager, TrackManager, AnnotationIR
class TaskData:
Attribute = namedtuple('Attribute', 'name, value')
Shape = namedtuple("Shape", 'id, label_id') # 3d
LabeledShape = namedtuple(
'LabeledShape', 'type, frame, label, points, occluded, attributes, source, group, z_order')
LabeledShape.__new__.__defaults__ = (0, 0)
TrackedShape = namedtuple(
'TrackedShape', 'type, frame, points, occluded, outside, keyframe, attributes, source, group, z_order, label, track_id')
TrackedShape.__new__.__defaults__ = ('manual', 0, 0, None, 0)
Track = namedtuple('Track', 'label, group, source, shapes')
Tag = namedtuple('Tag', 'frame, label, attributes, source, group')
Tag.__new__.__defaults__ = (0, )
Frame = namedtuple(
'Frame', 'idx, id, frame, name, width, height, labeled_shapes, tags, shapes, labels')
Labels = namedtuple('Label', 'id, name, color')
class InstanceLabelData:
Attribute = NamedTuple('Attribute', [('name', str), ('value', Any)])
def __init__(self, annotation_ir, db_task, host='', create_callback=None):
self._annotation_ir = annotation_ir
self._db_task = db_task
self._host = host
self._create_callback = create_callback
self._MAX_ANNO_SIZE = 30000
self._frame_info = {}
self._frame_mapping = {}
self._frame_step = db_task.data.get_frame_step()
def __init__(self, instance: Union[Task, Project]) -> None:
instance = instance.project if isinstance(instance, Task) and instance.project_id is not None else instance
db_labels = (self._db_task.project if self._db_task.project_id else self._db_task).label_set.all().prefetch_related(
'attributespec_set').order_by('pk')
db_labels = instance.label_set.all().prefetch_related('attributespec_set').order_by('pk')
self._label_mapping = OrderedDict(
(db_label.id, db_label) for db_label in db_labels)
self._label_mapping = OrderedDict[int, Label](
((db_label.id, db_label) for db_label in db_labels),
)
self._attribute_mapping = {db_label.id: {
'mutable': {}, 'immutable': {}, 'spec': {}}
@ -69,9 +51,6 @@ class TaskData:
**attr_mapping['immutable'],
}
self._init_frame_info()
self._init_meta()
def _get_label_id(self, label_name):
for db_label in self._label_mapping.values():
if label_name == db_label.name:
@ -103,6 +82,71 @@ class TaskData:
def _get_immutable_attribute_id(self, label_id, attribute_name):
return self._get_attribute_id(label_id, attribute_name, 'immutable')
def _import_attribute(self, label_id, attribute):
spec_id = self._get_attribute_id(label_id, attribute.name)
value = attribute.value
if spec_id:
spec = self._attribute_mapping[label_id]['spec'][spec_id]
try:
if spec.input_type == AttributeType.NUMBER:
pass # no extra processing required
elif spec.input_type == AttributeType.CHECKBOX:
if isinstance(value, str):
value = value.lower()
assert value in {'true', 'false'}
elif isinstance(value, (bool, int, float)):
value = 'true' if value else 'false'
else:
raise ValueError("Unexpected attribute value")
except Exception as e:
raise Exception("Failed to convert attribute '%s'='%s': %s" %
(self._get_label_name(label_id), value, e))
return { 'spec_id': spec_id, 'value': value }
def _export_attributes(self, attributes):
exported_attributes = []
for attr in attributes:
attribute_name = self._get_attribute_name(attr["spec_id"])
exported_attributes.append(InstanceLabelData.Attribute(
name=attribute_name,
value=attr["value"],
))
return exported_attributes
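# InstanceLabelData holds the label/attribute bookkeeping shared by TaskData
# and ProjectData below; a task that belongs to a project resolves its labels
# through the parent project.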
class TaskData(InstanceLabelData):
Shape = namedtuple("Shape", 'id, label_id') # 3d
LabeledShape = namedtuple(
'LabeledShape', 'type, frame, label, points, occluded, attributes, source, group, z_order')
LabeledShape.__new__.__defaults__ = (0, 0)
TrackedShape = namedtuple(
'TrackedShape', 'type, frame, points, occluded, outside, keyframe, attributes, source, group, z_order, label, track_id')
TrackedShape.__new__.__defaults__ = ('manual', 0, 0, None, 0)
Track = namedtuple('Track', 'label, group, source, shapes')
Tag = namedtuple('Tag', 'frame, label, attributes, source, group')
Tag.__new__.__defaults__ = (0, )
Frame = namedtuple(
'Frame', 'idx, id, frame, name, width, height, labeled_shapes, tags, shapes, labels')
Labels = namedtuple('Label', 'id, name, color')
def __init__(self, annotation_ir, db_task, host='', create_callback=None):
self._annotation_ir = annotation_ir
self._db_task = db_task
self._host = host
self._create_callback = create_callback
self._MAX_ANNO_SIZE = 30000
self._frame_info = {}
self._frame_mapping = {}
self._frame_step = db_task.data.get_frame_step()
InstanceLabelData.__init__(self, db_task)
self._init_frame_info()
self._init_meta()
def abs_frame_id(self, relative_id):
if relative_id not in range(0, self._db_task.data.size):
raise ValueError("Unknown internal frame id %s" % relative_id)
@ -135,79 +179,80 @@ class TaskData:
for frame_number, info in self._frame_info.items()
}
def _init_meta(self):
db_segments = self._db_task.segment_set.all().prefetch_related('job_set')
self._meta = OrderedDict([
("task", OrderedDict([
("id", str(self._db_task.id)),
("name", self._db_task.name),
("size", str(self._db_task.data.size)),
("mode", self._db_task.mode),
("overlap", str(self._db_task.overlap)),
("bugtracker", self._db_task.bug_tracker),
("created", str(timezone.localtime(self._db_task.created_date))),
("updated", str(timezone.localtime(self._db_task.updated_date))),
("start_frame", str(self._db_task.data.start_frame)),
("stop_frame", str(self._db_task.data.stop_frame)),
("frame_filter", self._db_task.data.frame_filter),
("labels", [
("label", OrderedDict([
("name", db_label.name),
("color", db_label.color),
("attributes", [
("attribute", OrderedDict([
("name", db_attr.name),
("mutable", str(db_attr.mutable)),
("input_type", db_attr.input_type),
("default_value", db_attr.default_value),
("values", db_attr.values)]))
for db_attr in db_label.attributespec_set.all()])
])) for db_label in self._label_mapping.values()
]),
@staticmethod
def meta_for_task(db_task, host, label_mapping=None):
db_segments = db_task.segment_set.all().prefetch_related('job_set')
meta = OrderedDict([
("id", str(db_task.id)),
("name", db_task.name),
("size", str(db_task.data.size)),
("mode", db_task.mode),
("overlap", str(db_task.overlap)),
("bugtracker", db_task.bug_tracker),
("created", str(timezone.localtime(db_task.created_date))),
("updated", str(timezone.localtime(db_task.updated_date))),
("subset", db_task.subset or datumaro.DEFAULT_SUBSET_NAME),
("start_frame", str(db_task.data.start_frame)),
("stop_frame", str(db_task.data.stop_frame)),
("frame_filter", db_task.data.frame_filter),
("segments", [
("segment", OrderedDict([
("id", str(db_segment.id)),
("start", str(db_segment.start_frame)),
("stop", str(db_segment.stop_frame)),
("url", "{}/?id={}".format(
host, db_segment.job_set.all()[0].id))]
)) for db_segment in db_segments
]),
("owner", OrderedDict([
("username", db_task.owner.username),
("email", db_task.owner.email)
]) if db_task.owner else ""),
("assignee", OrderedDict([
("username", db_task.assignee.username),
("email", db_task.assignee.email)
]) if db_task.assignee else ""),
])
("segments", [
("segment", OrderedDict([
("id", str(db_segment.id)),
("start", str(db_segment.start_frame)),
("stop", str(db_segment.stop_frame)),
("url", "{}/?id={}".format(
self._host, db_segment.job_set.all()[0].id))]
)) for db_segment in db_segments
]),
if label_mapping is not None:
meta['labels'] = [
("label", OrderedDict([
("name", db_label.name),
("color", db_label.color),
("attributes", [
("attribute", OrderedDict([
("name", db_attr.name),
("mutable", str(db_attr.mutable)),
("input_type", db_attr.input_type),
("default_value", db_attr.default_value),
("values", db_attr.values)]))
for db_attr in db_label.attributespec_set.all()])
])) for db_label in label_mapping.values()
]
if hasattr(db_task.data, "video"):
meta["original_size"] = OrderedDict([
("width", str(db_task.data.video.width)),
("height", str(db_task.data.video.height))
])
("owner", OrderedDict([
("username", self._db_task.owner.username),
("email", self._db_task.owner.email)
]) if self._db_task.owner else ""),
return meta
("assignee", OrderedDict([
("username", self._db_task.assignee.username),
("email", self._db_task.assignee.email)
]) if self._db_task.assignee else ""),
])),
def _init_meta(self):
self._meta = OrderedDict([
("task", self.meta_for_task(self._db_task, self._host, self._label_mapping)),
("dumped", str(timezone.localtime(timezone.now())))
])
if hasattr(self._db_task.data, "video"):
self._meta["task"]["original_size"] = OrderedDict([
("width", str(self._db_task.data.video.width)),
("height", str(self._db_task.data.video.height))
])
# Add source to dumped file
self._meta["source"] = str(
osp.basename(self._db_task.data.video.path))
def _export_attributes(self, attributes):
exported_attributes = []
for attr in attributes:
attribute_name = self._get_attribute_name(attr["spec_id"])
exported_attributes.append(TaskData.Attribute(
name=attribute_name,
value=attr["value"],
))
return exported_attributes
def _export_tracked_shape(self, shape):
return TaskData.TrackedShape(
type=shape["type"],
@ -356,30 +401,6 @@ class TaskData:
if self._get_attribute_id(label_id, attrib.name)]
return _tag
def _import_attribute(self, label_id, attribute):
spec_id = self._get_attribute_id(label_id, attribute.name)
value = attribute.value
if spec_id:
spec = self._attribute_mapping[label_id]['spec'][spec_id]
try:
if spec.input_type == AttributeType.NUMBER:
pass # no extra processing required
elif spec.input_type == AttributeType.CHECKBOX:
if isinstance(value, str):
value = value.lower()
assert value in {'true', 'false'}
elif isinstance(value, (bool, int, float)):
value = 'true' if value else 'false'
else:
raise ValueError("Unexpected attribute value")
except Exception as e:
raise Exception("Failed to convert attribute '%s'='%s': %s" %
(self._get_label_name(label_id), value, e))
return { 'spec_id': spec_id, 'value': value }
def _import_shape(self, shape):
_shape = shape._asdict()
label_id = self._get_label_id(_shape.pop('label'))
@ -482,7 +503,328 @@ class TaskData:
return v
return None
class CvatTaskDataExtractor(datumaro.SourceExtractor):
class ProjectData(InstanceLabelData):
    LabeledShape = NamedTuple('LabeledShape', [('type', str), ('frame', int), ('label', str), ('points', List[float]), ('occluded', bool), ('attributes', List[InstanceLabelData.Attribute]), ('source', str), ('group', int), ('z_order', int), ('task_id', int)])
    LabeledShape.__new__.__defaults__ = (0, 0)
TrackedShape = NamedTuple('TrackedShape',
[('type', str), ('frame', int), ('points', List[float]), ('occluded', bool), ('outside', bool), ('keyframe', bool), ('attributes', List[InstanceLabelData.Attribute]), ('source', str), ('group', int), ('z_order', int), ('label', str), ('track_id', int)],
)
TrackedShape.__new__.__defaults__ = ('manual', 0, 0, None, 0)
Track = NamedTuple('Track', [('label', str), ('group', int), ('source', str), ('shapes', List[TrackedShape]), ('task_id', int)])
Tag = NamedTuple('Tag', [('frame', int), ('label', str), ('attributes', List[InstanceLabelData.Attribute]), ('source', str), ('group', int), ('task_id', int)])
Tag.__new__.__defaults__ = (0, )
Frame = NamedTuple('Frame', [('task_id', int), ('subset', str), ('idx', int), ('frame', int), ('name', str), ('width', int), ('height', int), ('labeled_shapes', List[Union[LabeledShape, TrackedShape]]), ('tags', List[Tag])])
def __init__(self, annotation_irs: Mapping[str, AnnotationIR], db_project: Project, host: str, create_callback: Callable = None):
self._annotation_irs = annotation_irs
self._db_project = db_project
self._db_tasks: OrderedDict[int, Task] = OrderedDict(
((db_task.id, db_task) for db_task in db_project.tasks.order_by("subset","id").all())
)
self._subsets = set()
self._host = host
self._create_callback = create_callback
self._MAX_ANNO_SIZE = 30000
        self._frame_info: Dict[Tuple[int, int], Dict[Literal["path", "width", "height", "subset"], Any]] = dict()
self._frame_mapping: Dict[Tuple[str, str], Tuple[str, str]] = dict()
self._frame_steps: Dict[int, int] = {task.id: task.data.get_frame_step() for task in self._db_tasks.values()}
for task in self._db_tasks.values():
self._subsets.add(task.subset)
self._subsets: List[str] = list(self._subsets)
InstanceLabelData.__init__(self, db_project)
self._init_task_frame_offsets()
self._init_frame_info()
self._init_meta()
def abs_frame_id(self, task_id: int, relative_id: int) -> int:
task = self._db_tasks[task_id]
if relative_id not in range(0, task.data.size):
raise ValueError(f"Unknown internal frame id {relative_id}")
return relative_id * task.data.get_frame_step() + task.data.start_frame + self._task_frame_offsets[task_id]
def rel_frame_id(self, task_id: int, absolute_id: int) -> int:
task = self._db_tasks[task_id]
d, m = divmod(
absolute_id - task.data.start_frame, task.data.get_frame_step())
if m or d not in range(0, task.data.size):
raise ValueError(f"Unknown frame {absolute_id}")
return d
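    # Note: abs_frame_id adds the per-task subset offset (see
    # _init_task_frame_offsets below), while rel_frame_id maps a task-local
    # absolute frame number back to an index and ignores that offset.
    # Sketch with hypothetical numbers: start_frame=10, step=5, offset=100
    # gives abs_frame_id(task_id, 2) == 2*5 + 10 + 100 == 120, and
    # rel_frame_id(task_id, 20) == (20 - 10) // 5 == 2.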
def _init_task_frame_offsets(self):
self._task_frame_offsets: Dict[int, int] = dict()
s = 0
subset = None
for task in self._db_tasks.values():
if subset != task.subset:
s = 0
subset = task.subset
self._task_frame_offsets[task.id] = s
s += task.data.start_frame + task.data.get_frame_step() * task.data.size
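    # Offsets accumulate the frame spans of the preceding tasks within the
    # same subset (the counter resets when the subset changes), keeping
    # absolute frame ids unique across a subset's tasks.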
def _init_frame_info(self):
self._frame_info = dict()
original_names = DefaultDict[Tuple[str, str], int](int)
for task in self._db_tasks.values():
defaulted_subset = get_defaulted_subset(task.subset, self._subsets)
if hasattr(task.data, 'video'):
self._frame_info.update({(task.id, frame): {
"path": "frame_{:06d}".format(self.abs_frame_id(task.id, frame)),
"width": task.data.video.width,
"height": task.data.video.height,
"subset": defaulted_subset,
} for frame in range(task.data.size)})
else:
self._frame_info.update({(task.id, self.rel_frame_id(task.id, db_image.frame)): {
"path": mangle_image_name(db_image.path, defaulted_subset, original_names),
"width": db_image.width,
"height": db_image.height,
"subset": defaulted_subset
} for db_image in task.data.images.all()})
self._frame_mapping = {
(self._db_tasks[frame_ident[0]].subset, self._get_filename(info["path"])): frame_ident
for frame_ident, info in self._frame_info.items()
}
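    # _frame_mapping goes from (subset, filename without extension) back to
    # the (task_id, frame) pair, so imported items can be matched to frames.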
def _init_meta(self):
self._meta = OrderedDict([
('project', OrderedDict([
('id', str(self._db_project.id)),
('name', self._db_project.name),
("bugtracker", self._db_project.bug_tracker),
("created", str(timezone.localtime(self._db_project.created_date))),
("updated", str(timezone.localtime(self._db_project.updated_date))),
("tasks", [
('task',
TaskData.meta_for_task(db_task, self._host)
) for db_task in self._db_tasks.values()
]),
("labels", [
("label", OrderedDict([
("name", db_label.name),
("color", db_label.color),
("attributes", [
("attribute", OrderedDict([
("name", db_attr.name),
("mutable", str(db_attr.mutable)),
("input_type", db_attr.input_type),
("default_value", db_attr.default_value),
("values", db_attr.values)]))
for db_attr in db_label.attributespec_set.all()])
])) for db_label in self._label_mapping.values()
]),
("owner", OrderedDict([
("username", self._db_project.owner.username),
("email", self._db_project.owner.email),
]) if self._db_project.owner else ""),
("assignee", OrderedDict([
("username", self._db_project.assignee.username),
("email", self._db_project.assignee.email),
]) if self._db_project.assignee else ""),
])),
("dumped", str(timezone.localtime(timezone.now())))
])
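    # Project meta reuses TaskData.meta_for_task for the per-task entries
    # (without their label lists) and records labels, owner and assignee once
    # at the project level.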
def _export_tracked_shape(self, shape: dict, task_id: int):
return ProjectData.TrackedShape(
type=shape["type"],
frame=self.abs_frame_id(task_id, shape["frame"]),
label=self._get_label_name(shape["label_id"]),
points=shape["points"],
occluded=shape["occluded"],
z_order=shape.get("z_order", 0),
group=shape.get("group", 0),
outside=shape.get("outside", False),
keyframe=shape.get("keyframe", True),
track_id=shape["track_id"],
source=shape.get("source", "manual"),
attributes=self._export_attributes(shape["attributes"]),
)
def _export_labeled_shape(self, shape: dict, task_id: int):
return ProjectData.LabeledShape(
type=shape["type"],
label=self._get_label_name(shape["label_id"]),
frame=self.abs_frame_id(task_id, shape["frame"]),
points=shape["points"],
occluded=shape["occluded"],
z_order=shape.get("z_order", 0),
group=shape.get("group", 0),
source=shape["source"],
attributes=self._export_attributes(shape["attributes"]),
task_id=task_id,
)
def _export_tag(self, tag: dict, task_id: int):
return ProjectData.Tag(
frame=self.abs_frame_id(task_id, tag["frame"]),
label=self._get_label_name(tag["label_id"]),
group=tag.get("group", 0),
source=tag["source"],
attributes=self._export_attributes(tag["attributes"]),
task_id=task_id
)
def group_by_frame(self, include_empty=False):
frames: Dict[Tuple[str, int], ProjectData.Frame] = {}
def get_frame(task_id: int, idx: int) -> ProjectData.Frame:
frame_info = self._frame_info[(task_id, idx)]
abs_frame = self.abs_frame_id(task_id, idx)
if (frame_info["subset"], abs_frame) not in frames:
frames[(frame_info["subset"], abs_frame)] = ProjectData.Frame(
task_id=task_id,
subset=frame_info["subset"],
idx=idx,
frame=abs_frame,
name=frame_info["path"],
height=frame_info["height"],
width=frame_info["width"],
labeled_shapes=[],
tags=[],
)
return frames[(frame_info["subset"], abs_frame)]
if include_empty:
for ident in self._frame_info:
get_frame(*ident)
for task in self._db_tasks.values():
anno_manager = AnnotationManager(self._annotation_irs[task.id])
for shape in sorted(anno_manager.to_shapes(task.data.size),
key=lambda shape: shape.get("z_order", 0)):
if (task.id, shape['frame']) not in self._frame_info:
continue
if 'track_id' in shape:
if shape['outside']:
continue
exported_shape = self._export_tracked_shape(shape, task.id)
else:
exported_shape = self._export_labeled_shape(shape, task.id)
get_frame(task.id, shape['frame']).labeled_shapes.append(exported_shape)
for tag in self._annotation_irs[task.id].tags:
get_frame(task.id, tag['frame']).tags.append(self._export_tag(tag, task.id))
return iter(frames.values())
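    # Frames are keyed by (subset, absolute frame id), so tasks sharing a
    # subset merge into one frame sequence; shapes are emitted in z_order and
    # "outside" tracked shapes are skipped.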
@property
def shapes(self):
for task in self._db_tasks.values():
for shape in self._annotation_irs[task.id].shapes:
yield self._export_labeled_shape(shape, task.id)
@property
def tracks(self):
idx = 0
for task in self._db_tasks.values():
for track in self._annotation_irs[task.id].tracks:
tracked_shapes = TrackManager.get_interpolated_shapes(
track, 0, task.data.size
)
for tracked_shape in tracked_shapes:
tracked_shape["attributes"] += track["attributes"]
tracked_shape["track_id"] = idx
tracked_shape["group"] = track["group"]
tracked_shape["source"] = track["source"]
tracked_shape["label_id"] = track["label_id"]
yield ProjectData.Track(
label=self._get_label_name(track["label_id"]),
group=track["group"],
source=track["source"],
shapes=[self._export_tracked_shape(shape, task.id)
for shape in tracked_shapes],
task_id=task.id
)
                idx += 1
@property
def tags(self):
for task in self._db_tasks.values():
for tag in self._annotation_irs[task.id].tags:
yield self._export_tag(tag, task.id)
@property
def meta(self):
return self._meta
@property
def data(self):
raise NotImplementedError()
@property
def frame_info(self):
return self._frame_info
@property
def frame_step(self):
return self._frame_steps
@property
def db_project(self):
return self._db_project
@property
def subsets(self) -> List[str]:
return self._subsets
@property
def tasks(self):
return list(self._db_tasks.values())
@property
def task_data(self):
for task_id, task in self._db_tasks.items():
yield TaskData(self._annotation_irs[task_id], task, self._host)
@staticmethod
def _get_filename(path):
return osp.splitext(path)[0]
class CVATDataExtractorMixin:
def __init__(self):
super().__init__()
def categories(self) -> dict:
raise NotImplementedError()
@staticmethod
def _load_categories(labels: list):
categories: Dict[datumaro.AnnotationType, datumaro.Categories] = {}
label_categories = datumaro.LabelCategories(attributes=['occluded'])
for _, label in labels:
label_categories.add(label['name'])
for _, attr in label['attributes']:
label_categories.attributes.add(attr['name'])
categories[datumaro.AnnotationType.label] = label_categories
return categories
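    # Builds a single datumaro label catalog from the (name, attributes)
    # pairs stored in instance meta; 'occluded' is always registered as an
    # attribute since every exported shape carries it.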
def _read_cvat_anno(self, cvat_frame_anno: Union[ProjectData.Frame, TaskData.Frame], labels: list):
categories = self.categories()
label_cat = categories[datumaro.AnnotationType.label]
def map_label(name): return label_cat.find(name)[0]
label_attrs = {
label['name']: label['attributes']
for _, label in labels
}
return convert_cvat_anno_to_dm(cvat_frame_anno, label_attrs, map_label)
class CvatTaskDataExtractor(datumaro.SourceExtractor, CVATDataExtractorMixin):
def __init__(self, task_data, include_images=False, format_type=None, dimension=DimensionType.DIM_2D):
super().__init__()
self._categories, self._user = self._load_categories(task_data, dimension=dimension)
@ -537,12 +879,13 @@ class CvatTaskDataExtractor(datumaro.SourceExtractor):
dm_image = _make_image(frame_data.idx, **image_args)
else:
dm_image = Image(**image_args)
dm_anno = self._read_cvat_anno(frame_data, task_data)
dm_anno = self._read_cvat_anno(frame_data, task_data.meta['task']['labels'])
if dimension == DimensionType.DIM_2D:
dm_item = datumaro.DatasetItem(id=osp.splitext(frame_data.name)[0],
annotations=dm_anno, image=dm_image,
attributes={'frame': frame_data.frame})
annotations=dm_anno, image=dm_image,
attributes={'frame': frame_data.frame
})
elif dimension == DimensionType.DIM_3D:
attributes = {'frame': frame_data.frame}
if format_type == "sly_pointcloud":
@ -564,18 +907,8 @@ class CvatTaskDataExtractor(datumaro.SourceExtractor):
self._items = dm_items
def __iter__(self):
for item in self._items:
yield item
def __len__(self):
return len(self._items)
def categories(self):
return self._categories
@staticmethod
def _load_categories(cvat_anno, dimension):
def _load_categories(cvat_anno, dimension): # pylint: disable=arguments-differ
categories = {}
label_categories = datumaro.LabelCategories(attributes=['occluded'])
@ -595,102 +928,209 @@ class CvatTaskDataExtractor(datumaro.SourceExtractor):
return categories, user_info
def _read_cvat_anno(self, cvat_frame_anno, task_data):
item_anno = []
def _read_cvat_anno(self, cvat_frame_anno: TaskData.Frame, labels: list):
categories = self.categories()
label_cat = categories[datumaro.AnnotationType.label]
def map_label(name): return label_cat.find(name)[0]
label_attrs = {
label['name']: label['attributes']
for _, label in task_data.meta['task']['labels']
for _, label in labels
}
def convert_attrs(label, cvat_attrs):
cvat_attrs = {a.name: a.value for a in cvat_attrs}
dm_attr = dict()
for _, a_desc in label_attrs[label]:
a_name = a_desc['name']
a_value = cvat_attrs.get(a_name, a_desc['default_value'])
try:
if a_desc['input_type'] == AttributeType.NUMBER:
a_value = float(a_value)
elif a_desc['input_type'] == AttributeType.CHECKBOX:
a_value = (a_value.lower() == 'true')
dm_attr[a_name] = a_value
except Exception as e:
raise Exception(
"Failed to convert attribute '%s'='%s': %s" %
(a_name, a_value, e))
if self._format_type == "sly_pointcloud" and (a_desc.get('input_type') == 'select' or a_desc.get('input_type') == 'radio'):
dm_attr[f"{a_name}__values"] = a_desc["values"]
return dm_attr
for tag_obj in cvat_frame_anno.tags:
anno_group = tag_obj.group or 0
anno_label = map_label(tag_obj.label)
anno_attr = convert_attrs(tag_obj.label, tag_obj.attributes)
anno = datumaro.Label(label=anno_label,
attributes=anno_attr, group=anno_group)
item_anno.append(anno)
shapes = []
for shape in cvat_frame_anno.shapes:
shapes.append({"id": shape.id, "label_id": shape.label_id})
return convert_cvat_anno_to_dm(cvat_frame_anno, label_attrs, map_label, self._format_type, self._dimension)
for index, shape_obj in enumerate(cvat_frame_anno.labeled_shapes):
anno_group = shape_obj.group or 0
anno_label = map_label(shape_obj.label)
anno_attr = convert_attrs(shape_obj.label, shape_obj.attributes)
anno_attr['occluded'] = shape_obj.occluded
if hasattr(shape_obj, 'track_id'):
anno_attr['track_id'] = shape_obj.track_id
anno_attr['keyframe'] = shape_obj.keyframe
anno_points = shape_obj.points
if shape_obj.type == ShapeType.POINTS:
anno = datumaro.Points(anno_points,
label=anno_label, attributes=anno_attr, group=anno_group,
z_order=shape_obj.z_order)
elif shape_obj.type == ShapeType.POLYLINE:
anno = datumaro.PolyLine(anno_points,
label=anno_label, attributes=anno_attr, group=anno_group,
z_order=shape_obj.z_order)
elif shape_obj.type == ShapeType.POLYGON:
anno = datumaro.Polygon(anno_points,
label=anno_label, attributes=anno_attr, group=anno_group,
z_order=shape_obj.z_order)
elif shape_obj.type == ShapeType.RECTANGLE:
x0, y0, x1, y1 = anno_points
anno = datumaro.Bbox(x0, y0, x1 - x0, y1 - y0,
label=anno_label, attributes=anno_attr, group=anno_group,
z_order=shape_obj.z_order)
elif shape_obj.type == ShapeType.CUBOID:
if self._dimension == DimensionType.DIM_3D:
if self._format_type == "sly_pointcloud":
anno_id = shapes[index]["id"]
anno_attr["label_id"] = shapes[index]["label_id"]
else:
anno_id = index
position, rotation, scale = anno_points[0:3], anno_points[3:6], anno_points[6:9]
anno = datumaro.Cuboid3d(id=anno_id, position=position, rotation=rotation, scale=scale,
label=anno_label, attributes=anno_attr, group=anno_group
)
class CVATProjectDataExtractor(datumaro.Extractor, CVATDataExtractorMixin):
def __init__(self, project_data: ProjectData, include_images: bool = False):
super().__init__()
self._categories = self._load_categories(project_data.meta['project']['labels'])
dm_items: List[datumaro.DatasetItem] = []
ext_per_task: Dict[int, str] = {}
image_maker_per_task: Dict[int, Callable] = {}
for task in project_data.tasks:
is_video = task.mode == 'interpolation'
ext_per_task[task.id] = FrameProvider.VIDEO_FRAME_EXT if is_video else ''
if include_images:
frame_provider = FrameProvider(task.data)
if is_video:
# optimization for videos: use numpy arrays instead of bytes
# some formats or transforms can require image data
def image_maker_factory(frame_provider):
def _make_image(i, **kwargs):
loader = lambda _: frame_provider.get_frame(i,
quality=frame_provider.Quality.ORIGINAL,
out_type=frame_provider.Type.NUMPY_ARRAY)[0]
return Image(loader=loader, **kwargs)
return _make_image
else:
continue
# for images use encoded data to avoid recoding
def image_maker_factory(frame_provider):
def _make_image(i, **kwargs):
loader = lambda _: frame_provider.get_frame(i,
quality=frame_provider.Quality.ORIGINAL,
out_type=frame_provider.Type.BUFFER)[0].getvalue()
return ByteImage(data=loader, **kwargs)
return _make_image
image_maker_per_task[task.id] = image_maker_factory(frame_provider)
for frame_data in project_data.group_by_frame(include_empty=True):
image_args = {
'path': frame_data.name + ext_per_task[frame_data.task_id],
'size': (frame_data.height, frame_data.width),
}
if include_images:
dm_image = image_maker_per_task[frame_data.task_id](frame_data.idx, **image_args)
else:
raise Exception("Unknown shape type '%s'" % shape_obj.type)
dm_image = Image(**image_args)
dm_anno = self._read_cvat_anno(frame_data, project_data.meta['project']['labels'])
dm_item = datumaro.DatasetItem(id=osp.splitext(frame_data.name)[0],
annotations=dm_anno, image=dm_image,
subset=frame_data.subset,
attributes={'frame': frame_data.frame}
)
dm_items.append(dm_item)
item_anno.append(anno)
self._items = dm_items
return item_anno
def categories(self):
return self._categories
def __iter__(self):
yield from self._items
def __len__(self):
return len(self._items)
def GetCVATDataExtractor(instance_data: Union[ProjectData, TaskData], include_images: bool=False):
if isinstance(instance_data, ProjectData):
return CVATProjectDataExtractor(instance_data, include_images)
else:
return CvatTaskDataExtractor(instance_data, include_images)
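# This factory keeps the format plugins instance-agnostic: they build datasets
# the same way for tasks and projects, e.g. (as the exporters below do):
#   dataset = Dataset.from_extractors(GetCVATDataExtractor(
#       instance_data, include_images=save_images), env=dm_env)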
class CvatImportError(Exception):
pass
def mangle_image_name(name: str, subset: str, names: DefaultDict[Tuple[str, str], int]) -> str:
name, ext = name.rsplit(osp.extsep, maxsplit=1)
if not names[(subset, name)]:
names[(subset, name)] += 1
return osp.extsep.join([name, ext])
else:
image_name = f"{name}_{names[(subset, name)]}"
if not names[(subset, image_name)]:
names[(subset, name)] += 1
return osp.extsep.join([image_name, ext])
else:
i = 1
while i < sys.maxsize:
new_image_name = f"{image_name}_{i}"
if not names[(subset, new_image_name)]:
names[(subset, name)] += 1
return osp.extsep.join([new_image_name, ext])
i += 1
raise Exception('Cannot mangle image name')
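# Assumes names contain an extension separator; clashes within a subset get
# numeric suffixes. With a fresh counter, e.g. names = defaultdict(int):
#   mangle_image_name('img.png', 'train', names)  # -> 'img.png'
#   mangle_image_name('img.png', 'train', names)  # -> 'img_1.png'
#   mangle_image_name('img.png', 'train', names)  # -> 'img_2.png'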
def get_defaulted_subset(subset: str, subsets: List[str]) -> str:
if subset:
return subset
else:
if datumaro.DEFAULT_SUBSET_NAME not in subsets:
return datumaro.DEFAULT_SUBSET_NAME
else:
i = 1
while i < sys.maxsize:
if f'{datumaro.DEFAULT_SUBSET_NAME}_{i}' not in subsets:
return f'{datumaro.DEFAULT_SUBSET_NAME}_{i}'
i += 1
raise Exception('Cannot find default name for subset')
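# E.g. get_defaulted_subset('train', subsets) returns 'train'; an empty
# subset falls back to datumaro.DEFAULT_SUBSET_NAME, with a numeric suffix if
# that name is already taken by another subset.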
def convert_cvat_anno_to_dm(cvat_frame_anno, label_attrs, map_label, format_name=None, dimension=DimensionType.DIM_2D):
item_anno = []
def convert_attrs(label, cvat_attrs):
cvat_attrs = {a.name: a.value for a in cvat_attrs}
dm_attr = dict()
for _, a_desc in label_attrs[label]:
a_name = a_desc['name']
a_value = cvat_attrs.get(a_name, a_desc['default_value'])
try:
if a_desc['input_type'] == AttributeType.NUMBER:
a_value = float(a_value)
elif a_desc['input_type'] == AttributeType.CHECKBOX:
a_value = (a_value.lower() == 'true')
dm_attr[a_name] = a_value
except Exception as e:
raise Exception(
"Failed to convert attribute '%s'='%s': %s" %
(a_name, a_value, e))
return dm_attr
for tag_obj in cvat_frame_anno.tags:
anno_group = tag_obj.group or 0
anno_label = map_label(tag_obj.label)
anno_attr = convert_attrs(tag_obj.label, tag_obj.attributes)
anno = datumaro.Label(label=anno_label,
attributes=anno_attr, group=anno_group)
item_anno.append(anno)
shapes = []
if hasattr(cvat_frame_anno, 'shapes'):
for shape in cvat_frame_anno.shapes:
shapes.append({"id": shape.id, "label_id": shape.label_id})
for index, shape_obj in enumerate(cvat_frame_anno.labeled_shapes):
anno_group = shape_obj.group or 0
anno_label = map_label(shape_obj.label)
anno_attr = convert_attrs(shape_obj.label, shape_obj.attributes)
anno_attr['occluded'] = shape_obj.occluded
if hasattr(shape_obj, 'track_id'):
anno_attr['track_id'] = shape_obj.track_id
anno_attr['keyframe'] = shape_obj.keyframe
anno_points = shape_obj.points
if shape_obj.type == ShapeType.POINTS:
anno = datumaro.Points(anno_points,
label=anno_label, attributes=anno_attr, group=anno_group,
z_order=shape_obj.z_order)
elif shape_obj.type == ShapeType.POLYLINE:
anno = datumaro.PolyLine(anno_points,
label=anno_label, attributes=anno_attr, group=anno_group,
z_order=shape_obj.z_order)
elif shape_obj.type == ShapeType.POLYGON:
anno = datumaro.Polygon(anno_points,
label=anno_label, attributes=anno_attr, group=anno_group,
z_order=shape_obj.z_order)
elif shape_obj.type == ShapeType.RECTANGLE:
x0, y0, x1, y1 = anno_points
anno = datumaro.Bbox(x0, y0, x1 - x0, y1 - y0,
label=anno_label, attributes=anno_attr, group=anno_group,
z_order=shape_obj.z_order)
elif shape_obj.type == ShapeType.CUBOID:
if dimension == DimensionType.DIM_3D:
if format_name == "sly_pointcloud":
anno_id = shapes[index]["id"]
else:
anno_id = index
position, rotation, scale = anno_points[0:3], anno_points[3:6], anno_points[6:9]
anno = datumaro.Cuboid3d(id=anno_id, position=position, rotation=rotation, scale=scale,
label=anno_label, attributes=anno_attr, group=anno_group
)
else:
continue
else:
raise Exception("Unknown shape type '%s'" % shape_obj.type)
item_anno.append(anno)
return item_anno
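# convert_cvat_anno_to_dm is now the single CVAT-to-datumaro conversion path
# used by both extractors: tags become Labels, and shapes map to Points,
# PolyLine, Polygon, Bbox or Cuboid3d by type (cuboids only in 3D mode).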
def match_dm_item(item, task_data, root_hint=None):
is_video = task_data.meta['task']['mode'] == 'interpolation'

@ -7,7 +7,7 @@ from tempfile import TemporaryDirectory
from datumaro.components.dataset import Dataset
from pyunpack import Archive
from cvat.apps.dataset_manager.bindings import (CvatTaskDataExtractor,
from cvat.apps.dataset_manager.bindings import (GetCVATDataExtractor,
import_dm_annotations)
from cvat.apps.dataset_manager.util import make_zip_archive
@ -16,13 +16,13 @@ from .utils import make_colormap
@exporter(name='CamVid', ext='ZIP', version='1.0')
def _export(dst_file, task_data, save_images=False):
dataset = Dataset.from_extractors(CvatTaskDataExtractor(
task_data, include_images=save_images), env=dm_env)
def _export(dst_file, instance_data, save_images=False):
dataset = Dataset.from_extractors(GetCVATDataExtractor(
instance_data, include_images=save_images), env=dm_env)
dataset.transform('polygons_to_masks')
dataset.transform('boxes_to_masks')
dataset.transform('merge_instance_segments')
label_map = make_colormap(task_data)
label_map = make_colormap(instance_data)
with TemporaryDirectory() as temp_dir:
dataset.export(temp_dir, 'camvid',
save_images=save_images, apply_colormap=True,
@ -31,10 +31,10 @@ def _export(dst_file, task_data, save_images=False):
make_zip_archive(temp_dir, dst_file)
@importer(name='CamVid', ext='ZIP', version='1.0')
def _import(src_file, task_data):
def _import(src_file, instance_data):
with TemporaryDirectory() as tmp_dir:
Archive(src_file.name).extractall(tmp_dir)
dataset = Dataset.import_from(tmp_dir, 'camvid', env=dm_env)
dataset.transform('masks_to_polygons')
import_dm_annotations(dataset, task_data)
import_dm_annotations(dataset, instance_data)

@ -7,7 +7,7 @@ from tempfile import TemporaryDirectory
from datumaro.components.dataset import Dataset
from cvat.apps.dataset_manager.bindings import CvatTaskDataExtractor, \
from cvat.apps.dataset_manager.bindings import GetCVATDataExtractor, \
import_dm_annotations
from cvat.apps.dataset_manager.util import make_zip_archive
@ -15,9 +15,9 @@ from .registry import dm_env, exporter, importer
@exporter(name='COCO', ext='ZIP', version='1.0')
def _export(dst_file, task_data, save_images=False):
dataset = Dataset.from_extractors(CvatTaskDataExtractor(
task_data, include_images=save_images), env=dm_env)
def _export(dst_file, instance_data, save_images=False):
dataset = Dataset.from_extractors(GetCVATDataExtractor(
instance_data, include_images=save_images), env=dm_env)
with TemporaryDirectory() as temp_dir:
dataset.export(temp_dir, 'coco_instances', save_images=save_images,
merge_images=True)
@ -25,14 +25,14 @@ def _export(dst_file, task_data, save_images=False):
make_zip_archive(temp_dir, dst_file)
@importer(name='COCO', ext='JSON, ZIP', version='1.0')
def _import(src_file, task_data):
def _import(src_file, instance_data):
if zipfile.is_zipfile(src_file):
with TemporaryDirectory() as tmp_dir:
zipfile.ZipFile(src_file).extractall(tmp_dir)
dataset = Dataset.import_from(tmp_dir, 'coco', env=dm_env)
import_dm_annotations(dataset, task_data)
import_dm_annotations(dataset, instance_data)
else:
dataset = Dataset.import_from(src_file.name,
'coco_instances', env=dm_env)
import_dm_annotations(dataset, task_data)
import_dm_annotations(dataset, instance_data)

@ -2,8 +2,10 @@
#
# SPDX-License-Identifier: MIT
from io import BufferedWriter
import os
import os.path as osp
from typing import Callable
import zipfile
from collections import OrderedDict
from glob import glob
@ -11,7 +13,7 @@ from tempfile import TemporaryDirectory
from datumaro.components.extractor import DatasetItem
from cvat.apps.dataset_manager.bindings import match_dm_item
from cvat.apps.dataset_manager.bindings import TaskData, match_dm_item, ProjectData, get_defaulted_subset
from cvat.apps.dataset_manager.util import make_zip_archive
from cvat.apps.engine.frame_provider import FrameProvider
@ -42,8 +44,10 @@ def create_xml_dumper(file_object):
self.xmlgen.characters(self.version)
self.xmlgen.endElement("version")
def open_root(self):
def open_document(self):
self.xmlgen.startDocument()
def open_root(self):
self.xmlgen.startElement("annotations", {})
self._level += 1
self._add_version()
@ -168,23 +172,34 @@ def create_xml_dumper(file_object):
self._level -= 1
self._indent()
self.xmlgen.endElement("annotations")
self._indent()
def close_document(self):
self.xmlgen.endDocument()
return XmlAnnotationWriter(file_object)
def dump_as_cvat_annotation(file_object, annotations):
dumper = create_xml_dumper(file_object)
def dump_as_cvat_annotation(dumper, annotations):
dumper.open_root()
dumper.add_meta(annotations.meta)
for frame_annotation in annotations.group_by_frame(include_empty=True):
frame_id = frame_annotation.frame
dumper.open_image(OrderedDict([
image_attrs = OrderedDict([
("id", str(frame_id)),
("name", frame_annotation.name),
])
if isinstance(annotations, ProjectData):
image_attrs.update(OrderedDict([
("subset", frame_annotation.subset),
("task_id", str(frame_annotation.task_id)),
]))
image_attrs.update(OrderedDict([
("width", str(frame_annotation.width)),
("height", str(frame_annotation.height))
]))
dumper.open_image(image_attrs)
for shape in frame_annotation.labeled_shapes:
dump_data = OrderedDict([
@ -286,8 +301,7 @@ def dump_as_cvat_annotation(file_object, annotations):
dumper.close_image()
dumper.close_root()
def dump_as_cvat_interpolation(file_object, annotations):
dumper = create_xml_dumper(file_object)
def dump_as_cvat_interpolation(dumper, annotations):
dumper.open_root()
dumper.add_meta(annotations.meta)
def dump_track(idx, track):
@ -298,6 +312,13 @@ def dump_as_cvat_interpolation(file_object, annotations):
("source", track.source),
])
if hasattr(track, 'task_id'):
task, = filter(lambda task: task.id == track.task_id, annotations.tasks)
dump_data.update(OrderedDict([
('task_id', str(track.task_id)),
('subset', get_defaulted_subset(task.subset, annotations.subsets)),
]))
if track.group:
dump_data['group_id'] = str(track.group)
dumper.open_track(dump_data)
@ -383,11 +404,17 @@ def dump_as_cvat_interpolation(file_object, annotations):
counter += 1
for shape in annotations.shapes:
dump_track(counter, annotations.Track(
label=shape.label,
group=shape.group,
source=shape.source,
shapes=[annotations.TrackedShape(
frame_step = annotations.frame_step if isinstance(annotations, TaskData) else annotations.frame_step[shape.task_id]
if isinstance(annotations, TaskData):
stop_frame = int(annotations.meta['task']['stop_frame'])
else:
task_meta = list(filter(lambda task: int(task[1]['id']) == shape.task_id, annotations.meta['project']['tasks']))[0][1]
stop_frame = int(task_meta['stop_frame'])
track = {
'label': shape.label,
'group': shape.group,
'source': shape.source,
'shapes': [annotations.TrackedShape(
type=shape.type,
points=shape.points,
occluded=shape.occluded,
@ -405,13 +432,15 @@ def dump_as_cvat_interpolation(file_object, annotations):
outside=True,
keyframe=True,
z_order=shape.z_order,
frame=shape.frame + annotations.frame_step,
frame=shape.frame + frame_step,
attributes=shape.attributes,
)] if shape.frame + annotations.frame_step < \
int(annotations.meta['task']['stop_frame']) \
)] if shape.frame + frame_step < \
stop_frame \
else []
),
))
}
if isinstance(annotations, ProjectData): track['task_id'] = shape.task_id
dump_track(counter, annotations.Track(**track))
counter += 1
dumper.close_root()
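# Plain shapes are wrapped as single-keyframe tracks: the shape itself plus,
# when it still fits before the stop frame, an "outside" copy one frame step
# later that closes the track.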
@ -527,39 +556,76 @@ def load(file_object, annotations):
tag = None
el.clear()
def _export(dst_file, task_data, anno_callback, save_images=False):
def dump_task_anno(dst_file, task_data, callback):
dumper = create_xml_dumper(dst_file)
dumper.open_document()
callback(dumper, task_data)
dumper.close_document()
def dump_project_anno(dst_file: BufferedWriter, project_data: ProjectData, callback: Callable):
dumper = create_xml_dumper(dst_file)
dumper.open_document()
callback(dumper, project_data)
dumper.close_document()
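# Task and project dumps share the dumper lifecycle: the XML document is
# opened and closed here, while the callback (dump_as_cvat_annotation or
# dump_as_cvat_interpolation) writes only the <annotations> root.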
def dump_media_files(task_data: TaskData, img_dir: str, project_data: ProjectData = None):
ext = ''
if task_data.meta['task']['mode'] == 'interpolation':
ext = FrameProvider.VIDEO_FRAME_EXT
frame_provider = FrameProvider(task_data.db_task.data)
frames = frame_provider.get_frames(
frame_provider.Quality.ORIGINAL,
frame_provider.Type.BUFFER)
for frame_id, (frame_data, _) in enumerate(frames):
frame_name = task_data.frame_info[frame_id]['path'] if project_data is None \
else project_data.frame_info[(task_data.db_task.id, frame_id)]['path']
img_path = osp.join(img_dir, frame_name + ext)
os.makedirs(osp.dirname(img_path), exist_ok=True)
with open(img_path, 'wb') as f:
f.write(frame_data.getvalue())
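# Frames are written under img_dir with the names recorded in frame_info (the
# project-wide names when project_data is given), appending the video frame
# extension for interpolation-mode tasks.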
def _export_task(dst_file, task_data, anno_callback, save_images=False):
with TemporaryDirectory() as temp_dir:
with open(osp.join(temp_dir, 'annotations.xml'), 'wb') as f:
anno_callback(f, task_data)
dump_task_anno(f, task_data, anno_callback)
if save_images:
ext = ''
if task_data.meta['task']['mode'] == 'interpolation':
ext = FrameProvider.VIDEO_FRAME_EXT
img_dir = osp.join(temp_dir, 'images')
frame_provider = FrameProvider(task_data.db_task.data)
frames = frame_provider.get_frames(
frame_provider.Quality.ORIGINAL,
frame_provider.Type.BUFFER)
for frame_id, (frame_data, _) in enumerate(frames):
frame_name = task_data.frame_info[frame_id]['path']
img_path = osp.join(img_dir, frame_name + ext)
os.makedirs(osp.dirname(img_path), exist_ok=True)
with open(img_path, 'wb') as f:
f.write(frame_data.getvalue())
dump_media_files(task_data, osp.join(temp_dir, 'images'))
make_zip_archive(temp_dir, dst_file)
def _export_project(dst_file: str, project_data: ProjectData, anno_callback: Callable, save_images: bool=False):
with TemporaryDirectory() as temp_dir:
with open(osp.join(temp_dir, 'annotations.xml'), 'wb') as f:
dump_project_anno(f, project_data, anno_callback)
if save_images:
for task_data in project_data.task_data:
subset = get_defaulted_subset(task_data.db_task.subset, project_data.subsets)
subset_dir = osp.join(temp_dir, 'images', subset)
os.makedirs(subset_dir, exist_ok=True)
dump_media_files(task_data, subset_dir, project_data)
make_zip_archive(temp_dir, dst_file)
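# The project archive mirrors the task layout: annotations.xml at the root
# and, when images are requested, media grouped per subset under
# images/<subset>/.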
@exporter(name='CVAT for video', ext='ZIP', version='1.1')
def _export_video(dst_file, task_data, save_images=False):
_export(dst_file, task_data,
anno_callback=dump_as_cvat_interpolation, save_images=save_images)
def _export_video(dst_file, instance_data, save_images=False):
if isinstance(instance_data, ProjectData):
_export_project(dst_file, instance_data,
anno_callback=dump_as_cvat_interpolation, save_images=save_images)
else:
_export_task(dst_file, instance_data,
anno_callback=dump_as_cvat_interpolation, save_images=save_images)
@exporter(name='CVAT for images', ext='ZIP', version='1.1')
def _export_images(dst_file, task_data, save_images=False):
_export(dst_file, task_data,
anno_callback=dump_as_cvat_annotation, save_images=save_images)
def _export_images(dst_file, instance_data, save_images=False):
if isinstance(instance_data, ProjectData):
_export_project(dst_file, instance_data,
anno_callback=dump_as_cvat_annotation, save_images=save_images)
else:
_export_task(dst_file, instance_data,
anno_callback=dump_as_cvat_annotation, save_images=save_images)
@importer(name='CVAT', ext='XML, ZIP', version='1.1')
def _import(src_file, task_data):

@ -8,8 +8,8 @@ import os.path as osp
import shutil
from tempfile import TemporaryDirectory
from cvat.apps.dataset_manager.bindings import (CvatTaskDataExtractor,
import_dm_annotations)
from cvat.apps.dataset_manager.bindings import (GetCVATDataExtractor,
import_dm_annotations, ProjectData)
from cvat.apps.dataset_manager.util import make_zip_archive
from cvat.settings.base import BASE_DIR
from datumaro.components.project import Project
@ -23,23 +23,28 @@ class DatumaroProjectExporter:
_TEMPLATES_DIR = osp.join(osp.dirname(__file__), 'export_templates')
@staticmethod
def _save_image_info(save_dir, task_data):
def _save_image_info(save_dir, instance_data):
os.makedirs(save_dir, exist_ok=True)
config = {
'server_url': task_data._host or 'localhost',
'task_id': task_data.db_task.id,
'server_url': instance_data._host or 'localhost'
}
if isinstance(instance_data, ProjectData):
config['project_id'] = instance_data.db_project.id
else:
config['task_id'] = instance_data.db_task.id
images = []
images_meta = { 'images': images, }
for frame_id, frame in task_data.frame_info.items():
images.append({
for frame_id, frame in enumerate(instance_data.frame_info.values()):
image_info = {
'id': frame_id,
'name': osp.basename(frame['path']),
'width': frame['width'],
'height': frame['height'],
})
}
if isinstance(instance_data, ProjectData):
                image_info['subset'] = frame['subset']
            images.append(image_info)
with open(osp.join(save_dir, 'config.json'),
'w', encoding='utf-8') as config_file:
@ -48,11 +53,12 @@ class DatumaroProjectExporter:
'w', encoding='utf-8') as images_file:
json.dump(images_meta, images_file)
def _export(self, task_data, save_dir, save_images=False):
dataset = CvatTaskDataExtractor(task_data, include_images=save_images)
def _export(self, instance_data, save_dir, save_images=False):
dataset = GetCVATDataExtractor(instance_data, include_images=save_images)
db_instance = instance_data.db_project if isinstance(instance_data, ProjectData) else instance_data.db_task
dm_env.converters.get('datumaro_project').convert(dataset,
save_dir=save_dir, save_images=save_images,
project_config={ 'project_name': task_data.db_task.name, }
project_config={ 'project_name': db_instance.name, }
)
project = Project.load(save_dir)
@ -64,13 +70,16 @@ class DatumaroProjectExporter:
if not save_images:
# add remote links to images
source_name = 'task_%s_images' % task_data.db_task.id
source_name = '{}_{}_images'.format(
'project' if isinstance(instance_data, ProjectData) else 'task',
db_instance.id,
)
project.add_source(source_name, {
'format': self._REMOTE_IMAGES_EXTRACTOR,
})
self._save_image_info(
osp.join(save_dir, project.local_source_dir(source_name)),
task_data)
instance_data)
project.save()
templates_dir = osp.join(self._TEMPLATES_DIR, 'plugins')
@ -87,7 +96,7 @@ class DatumaroProjectExporter:
shutil.copytree(osp.join(BASE_DIR, 'utils', 'cli'),
osp.join(cvat_utils_dst_dir, 'cli'))
def __call__(self, dst_file, task_data, save_images=False):
def __call__(self, dst_file, instance_data, save_images=False):
with TemporaryDirectory() as temp_dir:
self._export(task_data, save_dir=temp_dir, save_images=save_images)
self._export(instance_data, save_dir=temp_dir, save_images=save_images)
make_zip_archive(temp_dir, dst_file)

@ -9,7 +9,7 @@ from datumaro.components.dataset import Dataset
from datumaro.components.extractor import (AnnotationType, Caption, Label,
LabelCategories, ItemTransform)
from cvat.apps.dataset_manager.bindings import (CvatTaskDataExtractor,
from cvat.apps.dataset_manager.bindings import (GetCVATDataExtractor,
import_dm_annotations)
from cvat.apps.dataset_manager.util import make_zip_archive
@ -75,45 +75,45 @@ class LabelToCaption(ItemTransform):
return item.wrap(annotations=annotations)
@exporter(name='ICDAR Recognition', ext='ZIP', version='1.0')
def _export_recognition(dst_file, task_data, save_images=False):
dataset = Dataset.from_extractors(CvatTaskDataExtractor(
task_data, include_images=save_images), env=dm_env)
def _export_recognition(dst_file, instance_data, save_images=False):
dataset = Dataset.from_extractors(GetCVATDataExtractor(
instance_data, include_images=save_images), env=dm_env)
dataset.transform(LabelToCaption)
with TemporaryDirectory() as temp_dir:
dataset.export(temp_dir, 'icdar_word_recognition', save_images=save_images)
make_zip_archive(temp_dir, dst_file)
@importer(name='ICDAR Recognition', ext='ZIP', version='1.0')
def _import(src_file, task_data):
def _import(src_file, instance_data):
with TemporaryDirectory() as tmp_dir:
zipfile.ZipFile(src_file).extractall(tmp_dir)
dataset = Dataset.import_from(tmp_dir, 'icdar_word_recognition', env=dm_env)
dataset.transform(CaptionToLabel, 'icdar')
import_dm_annotations(dataset, task_data)
import_dm_annotations(dataset, instance_data)
@exporter(name='ICDAR Localization', ext='ZIP', version='1.0')
def _export_localization(dst_file, task_data, save_images=False):
dataset = Dataset.from_extractors(CvatTaskDataExtractor(
task_data, include_images=save_images), env=dm_env)
def _export_localization(dst_file, instance_data, save_images=False):
dataset = Dataset.from_extractors(GetCVATDataExtractor(
instance_data, include_images=save_images), env=dm_env)
with TemporaryDirectory() as temp_dir:
dataset.export(temp_dir, 'icdar_text_localization', save_images=save_images)
make_zip_archive(temp_dir, dst_file)
@importer(name='ICDAR Localization', ext='ZIP', version='1.0')
def _import(src_file, task_data):
def _import(src_file, instance_data):
with TemporaryDirectory() as tmp_dir:
zipfile.ZipFile(src_file).extractall(tmp_dir)
dataset = Dataset.import_from(tmp_dir, 'icdar_text_localization', env=dm_env)
dataset.transform(AddLabelToAnns, 'icdar')
import_dm_annotations(dataset, task_data)
import_dm_annotations(dataset, instance_data)
@exporter(name='ICDAR Segmentation', ext='ZIP', version='1.0')
def _export_segmentation(dst_file, task_data, save_images=False):
dataset = Dataset.from_extractors(CvatTaskDataExtractor(
task_data, include_images=save_images), env=dm_env)
def _export_segmentation(dst_file, instance_data, save_images=False):
dataset = Dataset.from_extractors(GetCVATDataExtractor(
instance_data, include_images=save_images), env=dm_env)
with TemporaryDirectory() as temp_dir:
dataset.transform('polygons_to_masks')
dataset.transform('boxes_to_masks')
@ -122,10 +122,10 @@ def _export_segmentation(dst_file, task_data, save_images=False):
make_zip_archive(temp_dir, dst_file)
@importer(name='ICDAR Segmentation', ext='ZIP', version='1.0')
def _import(src_file, task_data):
def _import(src_file, instance_data):
with TemporaryDirectory() as tmp_dir:
zipfile.ZipFile(src_file).extractall(tmp_dir)
dataset = Dataset.import_from(tmp_dir, 'icdar_text_segmentation', env=dm_env)
dataset.transform(AddLabelToAnns, 'icdar')
dataset.transform('masks_to_polygons')
import_dm_annotations(dataset, task_data)
import_dm_annotations(dataset, instance_data)

@ -9,7 +9,7 @@ from tempfile import TemporaryDirectory
from datumaro.components.dataset import Dataset
from cvat.apps.dataset_manager.bindings import CvatTaskDataExtractor, \
from cvat.apps.dataset_manager.bindings import GetCVATDataExtractor, \
import_dm_annotations
from cvat.apps.dataset_manager.util import make_zip_archive
@ -17,9 +17,9 @@ from .registry import dm_env, exporter, importer
@exporter(name='ImageNet', ext='ZIP', version='1.0')
def _export(dst_file, task_data, save_images=False):
dataset = Dataset.from_extractors(CvatTaskDataExtractor(
task_data, include_images=save_images), env=dm_env)
def _export(dst_file, instance_data, save_images=False):
dataset = Dataset.from_extractors(GetCVATDataExtractor(
instance_data, include_images=save_images), env=dm_env)
with TemporaryDirectory() as temp_dir:
if save_images:
dataset.export(temp_dir, 'imagenet', save_images=save_images)
@ -29,11 +29,11 @@ def _export(dst_file, task_data, save_images=False):
make_zip_archive(temp_dir, dst_file)
@importer(name='ImageNet', ext='ZIP', version='1.0')
def _import(src_file, task_data):
def _import(src_file, instance_data):
with TemporaryDirectory() as tmp_dir:
zipfile.ZipFile(src_file).extractall(tmp_dir)
if glob(osp.join(tmp_dir, '*.txt')):
dataset = Dataset.import_from(tmp_dir, 'imagenet_txt', env=dm_env)
else:
dataset = Dataset.import_from(tmp_dir, 'imagenet', env=dm_env)
import_dm_annotations(dataset, task_data)
import_dm_annotations(dataset, instance_data)

@ -7,7 +7,7 @@ from tempfile import TemporaryDirectory
from datumaro.components.dataset import Dataset
from pyunpack import Archive
from cvat.apps.dataset_manager.bindings import (CvatTaskDataExtractor,
from cvat.apps.dataset_manager.bindings import (GetCVATDataExtractor,
import_dm_annotations)
from cvat.apps.dataset_manager.util import make_zip_archive
@ -15,19 +15,19 @@ from .registry import dm_env, exporter, importer
@exporter(name='LabelMe', ext='ZIP', version='3.0')
def _export(dst_file, task_data, save_images=False):
dataset = Dataset.from_extractors(CvatTaskDataExtractor(
task_data, include_images=save_images), env=dm_env)
def _export(dst_file, instance_data, save_images=False):
dataset = Dataset.from_extractors(GetCVATDataExtractor(
instance_data, include_images=save_images), env=dm_env)
with TemporaryDirectory() as temp_dir:
dataset.export(temp_dir, 'label_me', save_images=save_images)
make_zip_archive(temp_dir, dst_file)
@importer(name='LabelMe', ext='ZIP', version='3.0')
def _import(src_file, task_data):
def _import(src_file, instance_data):
with TemporaryDirectory() as tmp_dir:
Archive(src_file.name).extractall(tmp_dir)
dataset = Dataset.import_from(tmp_dir, 'label_me', env=dm_env)
dataset.transform('masks_to_polygons')
import_dm_annotations(dataset, task_data)
import_dm_annotations(dataset, instance_data)

@ -9,7 +9,7 @@ from datumaro.components.dataset import Dataset
from datumaro.components.extractor import (AnnotationType, Label,
LabelCategories, ItemTransform)
from cvat.apps.dataset_manager.bindings import (CvatTaskDataExtractor,
from cvat.apps.dataset_manager.bindings import (GetCVATDataExtractor,
import_dm_annotations)
from cvat.apps.dataset_manager.util import make_zip_archive
@ -60,19 +60,19 @@ class LabelAttrToAttr(ItemTransform):
@exporter(name='Market-1501', ext='ZIP', version='1.0')
def _export(dst_file, task_data, save_images=False):
dataset = Dataset.from_extractors(CvatTaskDataExtractor(
task_data, include_images=save_images), env=dm_env)
def _export(dst_file, instance_data, save_images=False):
dataset = Dataset.from_extractors(GetCVATDataExtractor(
instance_data, include_images=save_images), env=dm_env)
with TemporaryDirectory() as temp_dir:
dataset.transform(LabelAttrToAttr, 'market-1501')
dataset.export(temp_dir, 'market1501', save_images=save_images)
make_zip_archive(temp_dir, dst_file)
@importer(name='Market-1501', ext='ZIP', version='1.0')
def _import(src_file, task_data):
def _import(src_file, instance_data):
with TemporaryDirectory() as tmp_dir:
zipfile.ZipFile(src_file).extractall(tmp_dir)
dataset = Dataset.import_from(tmp_dir, 'market1501', env=dm_env)
dataset.transform(AttrToLabelAttr, 'market-1501')
import_dm_annotations(dataset, task_data)
import_dm_annotations(dataset, instance_data)

@ -7,7 +7,7 @@ from tempfile import TemporaryDirectory
from datumaro.components.dataset import Dataset
from pyunpack import Archive
from cvat.apps.dataset_manager.bindings import (CvatTaskDataExtractor,
from cvat.apps.dataset_manager.bindings import (GetCVATDataExtractor,
import_dm_annotations)
from cvat.apps.dataset_manager.util import make_zip_archive
@ -16,23 +16,23 @@ from .utils import make_colormap
@exporter(name='Segmentation mask', ext='ZIP', version='1.1')
def _export(dst_file, task_data, save_images=False):
dataset = Dataset.from_extractors(CvatTaskDataExtractor(
task_data, include_images=save_images), env=dm_env)
def _export(dst_file, instance_data, save_images=False):
dataset = Dataset.from_extractors(GetCVATDataExtractor(
instance_data, include_images=save_images), env=dm_env)
dataset.transform('polygons_to_masks')
dataset.transform('boxes_to_masks')
dataset.transform('merge_instance_segments')
with TemporaryDirectory() as temp_dir:
dataset.export(temp_dir, 'voc_segmentation', save_images=save_images,
apply_colormap=True, label_map=make_colormap(task_data))
apply_colormap=True, label_map=make_colormap(instance_data))
make_zip_archive(temp_dir, dst_file)
@importer(name='Segmentation mask', ext='ZIP', version='1.1')
def _import(src_file, task_data):
def _import(src_file, instance_data):
with TemporaryDirectory() as tmp_dir:
Archive(src_file.name).extractall(tmp_dir)
dataset = Dataset.import_from(tmp_dir, 'voc', env=dm_env)
dataset.transform('masks_to_polygons')
import_dm_annotations(dataset, task_data)
import_dm_annotations(dataset, instance_data)

@ -8,16 +8,16 @@ import datumaro.components.extractor as datumaro
from datumaro.components.dataset import Dataset
from pyunpack import Archive
from cvat.apps.dataset_manager.bindings import CvatTaskDataExtractor
from cvat.apps.dataset_manager.bindings import GetCVATDataExtractor
from cvat.apps.dataset_manager.util import make_zip_archive
from .registry import dm_env, exporter, importer
@exporter(name='MOT', ext='ZIP', version='1.1')
def _export(dst_file, task_data, save_images=False):
dataset = Dataset.from_extractors(CvatTaskDataExtractor(
task_data, include_images=save_images), env=dm_env)
def _export(dst_file, instance_data, save_images=False):
dataset = Dataset.from_extractors(GetCVATDataExtractor(
instance_data, include_images=save_images), env=dm_env)
with TemporaryDirectory() as temp_dir:
dataset.export(temp_dir, 'mot_seq_gt', save_images=save_images)

@ -8,7 +8,7 @@ from datumaro.components.dataset import Dataset
from datumaro.components.extractor import AnnotationType, ItemTransform
from pyunpack import Archive
from cvat.apps.dataset_manager.bindings import (CvatTaskDataExtractor,
from cvat.apps.dataset_manager.bindings import (GetCVATDataExtractor,
find_dataset_root, match_dm_item)
from cvat.apps.dataset_manager.util import make_zip_archive
@ -21,9 +21,9 @@ class KeepTracks(ItemTransform):
if 'track_id' in a.attributes])
@exporter(name='MOTS PNG', ext='ZIP', version='1.0')
def _export(dst_file, task_data, save_images=False):
dataset = Dataset.from_extractors(CvatTaskDataExtractor(
task_data, include_images=save_images), env=dm_env)
def _export(dst_file, instance_data, save_images=False):
dataset = Dataset.from_extractors(GetCVATDataExtractor(
instance_data, include_images=save_images), env=dm_env)
dataset.transform(KeepTracks) # can only export tracks
dataset.transform('polygons_to_masks')
dataset.transform('boxes_to_masks')

@ -11,17 +11,17 @@ from tempfile import TemporaryDirectory
from datumaro.components.dataset import Dataset
from pyunpack import Archive
from cvat.apps.dataset_manager.bindings import (CvatTaskDataExtractor,
import_dm_annotations)
from cvat.apps.dataset_manager.bindings import (GetCVATDataExtractor,
ProjectData, import_dm_annotations)
from cvat.apps.dataset_manager.util import make_zip_archive
from .registry import dm_env, exporter, importer
@exporter(name='PASCAL VOC', ext='ZIP', version='1.1')
def _export(dst_file, task_data, save_images=False):
dataset = Dataset.from_extractors(CvatTaskDataExtractor(
task_data, include_images=save_images), env=dm_env)
def _export(dst_file, instance_data, save_images=False):
dataset = Dataset.from_extractors(GetCVATDataExtractor(
instance_data, include_images=save_images), env=dm_env)
with TemporaryDirectory() as temp_dir:
dataset.export(temp_dir, 'voc', save_images=save_images,
label_map='source')
@ -29,15 +29,16 @@ def _export(dst_file, task_data, save_images=False):
make_zip_archive(temp_dir, dst_file)
@importer(name='PASCAL VOC', ext='ZIP', version='1.1')
def _import(src_file, task_data):
def _import(src_file, instance_data):
with TemporaryDirectory() as tmp_dir:
Archive(src_file.name).extractall(tmp_dir)
# put a label map from the instance (task or project) if not present
labelmap_file = osp.join(tmp_dir, 'labelmap.txt')
if not osp.isfile(labelmap_file):
labels = (label['name'] + ':::'
for _, label in task_data.meta['task']['labels'])
labels_meta = instance_data.meta['project']['labels'] \
if isinstance(instance_data, ProjectData) else instance_data.meta['task']['labels']
labels = (label['name'] + ':::' for _, label in labels_meta)
with open(labelmap_file, 'w') as f:
f.write('\n'.join(labels))
@ -57,4 +58,4 @@ def _import(src_file, task_data):
dataset = Dataset.import_from(tmp_dir, 'voc', env=dm_env)
dataset.transform('masks_to_polygons')
import_dm_annotations(dataset, task_data)
import_dm_annotations(dataset, instance_data)

@ -7,7 +7,7 @@ from tempfile import TemporaryDirectory
from datumaro.components.dataset import Dataset
from cvat.apps.dataset_manager.bindings import (CvatTaskDataExtractor,
from cvat.apps.dataset_manager.bindings import (CvatTaskDataExtractor, TaskData,
import_dm_annotations)
from cvat.apps.dataset_manager.util import make_zip_archive
from cvat.apps.engine.models import DimensionType
@ -18,6 +18,9 @@ from .registry import dm_env, exporter, importer
@exporter(name='Sly Point Cloud Format', ext='ZIP', version='1.0', dimension=DimensionType.DIM_3D)
def _export_images(dst_file, task_data, save_images=False):
if not isinstance(task_data, TaskData):
raise Exception("Export to \"Sly Point Cloud\" format is working only with tasks temporarily")
dataset = Dataset.from_extractors(CvatTaskDataExtractor(
task_data, include_images=save_images, format_type='sly_pointcloud', dimension=DimensionType.DIM_3D), env=dm_env)

@ -6,7 +6,7 @@ from tempfile import TemporaryDirectory
from pyunpack import Archive
from cvat.apps.dataset_manager.bindings import (CvatTaskDataExtractor,
from cvat.apps.dataset_manager.bindings import (GetCVATDataExtractor,
import_dm_annotations)
from cvat.apps.dataset_manager.util import make_zip_archive
from datumaro.components.project import Dataset
@ -23,18 +23,18 @@ except ImportError:
@exporter(name='TFRecord', ext='ZIP', version='1.0', enabled=tf_available)
def _export(dst_file, task_data, save_images=False):
dataset = Dataset.from_extractors(CvatTaskDataExtractor(
task_data, include_images=save_images), env=dm_env)
def _export(dst_file, instance_data, save_images=False):
dataset = Dataset.from_extractors(GetCVATDataExtractor(
instance_data, include_images=save_images), env=dm_env)
with TemporaryDirectory() as temp_dir:
dataset.export(temp_dir, 'tf_detection_api', save_images=save_images)
make_zip_archive(temp_dir, dst_file)
@importer(name='TFRecord', ext='ZIP', version='1.0', enabled=tf_available)
def _import(src_file, task_data):
def _import(src_file, instance_data):
with TemporaryDirectory() as tmp_dir:
Archive(src_file.name).extractall(tmp_dir)
dataset = Dataset.import_from(tmp_dir, 'tf_detection_api', env=dm_env)
import_dm_annotations(dataset, task_data)
import_dm_annotations(dataset, instance_data)

@ -48,8 +48,9 @@ def rgb2hex(color):
def hex2rgb(color):
return tuple(int(color.lstrip('#')[i:i+2], 16) for i in (0, 2, 4))
def make_colormap(task_data):
labels = [label for _, label in task_data.meta['task']['labels']]
def make_colormap(instance_data):
instance_name = 'project' if 'project' in instance_data.meta.keys() else 'task'
labels = [label for _, label in instance_data.meta[instance_name]['labels']]
label_names = [label['name'] for label in labels]
if 'background' not in label_names:

@ -7,7 +7,7 @@ from tempfile import TemporaryDirectory
from datumaro.components.dataset import Dataset
from cvat.apps.dataset_manager.bindings import CvatTaskDataExtractor, \
from cvat.apps.dataset_manager.bindings import CvatTaskDataExtractor, TaskData, \
import_dm_annotations
from .registry import dm_env
@ -20,6 +20,9 @@ from .registry import exporter, importer
@exporter(name='Kitti Raw Format', ext='ZIP', version='1.0', dimension=DimensionType.DIM_3D)
def _export_images(dst_file, task_data, save_images=False):
if not isinstance(task_data, TaskData):
raise Exception("Export to \"Kitti raw\" format is working only with tasks temporarily")
dataset = Dataset.from_extractors(CvatTaskDataExtractor(
task_data, include_images=save_images, format_type="kitti_raw", dimension=DimensionType.DIM_3D), env=dm_env)

@ -7,7 +7,7 @@ from tempfile import TemporaryDirectory
from datumaro.components.dataset import Dataset
from cvat.apps.dataset_manager.bindings import CvatTaskDataExtractor, \
from cvat.apps.dataset_manager.bindings import GetCVATDataExtractor, \
import_dm_annotations
from cvat.apps.dataset_manager.util import make_zip_archive
@ -15,19 +15,19 @@ from .registry import dm_env, exporter, importer
@exporter(name='VGGFace2', ext='ZIP', version='1.0')
def _export(dst_file, task_data, save_images=False):
dataset = Dataset.from_extractors(CvatTaskDataExtractor(
task_data, include_images=save_images), env=dm_env)
def _export(dst_file, instance_data, save_images=False):
dataset = Dataset.from_extractors(GetCVATDataExtractor(
instance_data, include_images=save_images), env=dm_env)
with TemporaryDirectory() as temp_dir:
dataset.export(temp_dir, 'vgg_face2', save_images=save_images)
make_zip_archive(temp_dir, dst_file)
@importer(name='VGGFace2', ext='ZIP', version='1.0')
def _import(src_file, task_data):
def _import(src_file, instance_data):
with TemporaryDirectory() as tmp_dir:
zipfile.ZipFile(src_file).extractall(tmp_dir)
dataset = Dataset.import_from(tmp_dir, 'vgg_face2', env=dm_env)
dataset.transform('rename', r"|([^/]+/)?(.+)|\2|")
import_dm_annotations(dataset, task_data)
import_dm_annotations(dataset, instance_data)

@ -7,7 +7,7 @@ from tempfile import TemporaryDirectory
from datumaro.components.dataset import Dataset
from cvat.apps.dataset_manager.bindings import CvatTaskDataExtractor, \
from cvat.apps.dataset_manager.bindings import GetCVATDataExtractor, \
import_dm_annotations
from cvat.apps.dataset_manager.util import make_zip_archive
@ -15,18 +15,18 @@ from .registry import dm_env, exporter, importer
@exporter(name='WiderFace', ext='ZIP', version='1.0')
def _export(dst_file, task_data, save_images=False):
dataset = Dataset.from_extractors(CvatTaskDataExtractor(
task_data, include_images=save_images), env=dm_env)
def _export(dst_file, instance_data, save_images=False):
dataset = Dataset.from_extractors(GetCVATDataExtractor(
instance_data, include_images=save_images), env=dm_env)
with TemporaryDirectory() as temp_dir:
dataset.export(temp_dir, 'wider_face', save_images=save_images)
make_zip_archive(temp_dir, dst_file)
@importer(name='WiderFace', ext='ZIP', version='1.0')
def _import(src_file, task_data):
def _import(src_file, instance_data):
with TemporaryDirectory() as tmp_dir:
zipfile.ZipFile(src_file).extractall(tmp_dir)
dataset = Dataset.import_from(tmp_dir, 'wider_face', env=dm_env)
import_dm_annotations(dataset, task_data)
import_dm_annotations(dataset, instance_data)

@ -8,7 +8,7 @@ from tempfile import TemporaryDirectory
from pyunpack import Archive
from cvat.apps.dataset_manager.bindings import (CvatTaskDataExtractor,
from cvat.apps.dataset_manager.bindings import (GetCVATDataExtractor,
import_dm_annotations, match_dm_item, find_dataset_root)
from cvat.apps.dataset_manager.util import make_zip_archive
from datumaro.components.extractor import DatasetItem
@ -19,9 +19,9 @@ from .registry import dm_env, exporter, importer
@exporter(name='YOLO', ext='ZIP', version='1.1')
def _export(dst_file, task_data, save_images=False):
dataset = Dataset.from_extractors(CvatTaskDataExtractor(
task_data, include_images=save_images), env=dm_env)
def _export(dst_file, instance_data, save_images=False):
dataset = Dataset.from_extractors(GetCVATDataExtractor(
instance_data, include_images=save_images), env=dm_env)
with TemporaryDirectory() as temp_dir:
dataset.export(temp_dir, 'yolo', save_images=save_images)

@ -0,0 +1,71 @@
# Copyright (C) 2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
from typing import Callable, Dict
from django.db import transaction
from cvat.apps.engine import models
from cvat.apps.dataset_manager.task import TaskAnnotation
from .annotation import AnnotationIR
from .bindings import ProjectData
from .formats.registry import make_exporter
def export_project(project_id, dst_file, format_name,
server_url=None, save_images=False):
# For big tasks the dump function may run for a long time, and
# we don't need to hold the lock after the task has been initialized from the DB.
# But there is a bug that leads to a corrupted dump file when two or
# more dump requests are received at the same time:
# https://github.com/opencv/cvat/issues/217
with transaction.atomic():
project = ProjectAnnotation(project_id)
project.init_from_db()
exporter = make_exporter(format_name)
with open(dst_file, 'wb') as f:
project.export(f, exporter, host=server_url, save_images=save_images)
class ProjectAnnotation:
def __init__(self, pk: int):
self.db_project = models.Project.objects.get(id=pk)
self.db_tasks = models.Task.objects.filter(project__id=pk).order_by('id')
self.annotation_irs: Dict[int, AnnotationIR] = dict()
def reset(self):
for annotation_ir in self.annotation_irs.values():
annotation_ir.reset()
def put(self, data):
raise NotImplementedError()
def create(self, data):
raise NotImplementedError()
def update(self, data):
raise NotImplementedError()
def delete(self, data=None):
raise NotImplementedError()
def init_from_db(self):
self.reset()
for task in self.db_tasks:
annotation = TaskAnnotation(pk=task.id)
annotation.init_from_db()
self.annotation_irs[task.id] = annotation.ir_data
def export(self, dst_file, exporter: Callable, host: str = '', **options):
project_data = ProjectData(
annotation_irs=self.annotation_irs,
db_project=self.db_project,
host=host
)
exporter(dst_file, project_data, **options)
@property
def data(self) -> dict:
raise NotImplementedError()
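
A minimal usage sketch of the new entry point, assuming a project with id 1 exists and that the format name is registered in the formats registry:

from cvat.apps.dataset_manager.project import export_project

# Dump annotations of every task in project 1 into one archive;
# save_images=True turns the dump into a full dataset export.
export_project(1, '/tmp/project_1_dataset.zip', 'CVAT for images 1.1',
    server_url='http://localhost:8080', save_images=True)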

@ -0,0 +1,55 @@
{
"main": {
"name": "Main project",
"owner_id": 1,
"assignee_id": 2,
"labels": [
{
"name": "car",
"color": "#2080c0",
"attributes": [
{
"name": "select_name",
"mutable": false,
"input_type": "select",
"default_value": "bmw",
"values": ["bmw", "mazda", "renault"]
},
{
"name": "radio_name",
"mutable": false,
"input_type": "radio",
"default_value": "x1",
"values": ["x1", "x2", "x3"]
},
{
"name": "check_name",
"mutable": true,
"input_type": "checkbox",
"default_value": "false",
"values": ["false"]
},
{
"name": "text_name",
"mutable": false,
"input_type": "text",
"default_value": "qwerty",
"values": ["qwerty"]
},
{
"name": "number_name",
"mutable": false,
"input_type": "number",
"default_value": "-4",
"values": ["-4", "4", "1"]
}
]
},
{
"name": "person",
"color": "#c06060",
"attributes": []
}
]
}
}

@ -282,17 +282,20 @@
}
]
},
"many jobs": {
"name": "many jobs",
"task in project #1": {
"name": "First task in project",
"project_id": 1,
"overlap": 0,
"segment_size": 5,
"segment_size": 100,
"owner_id": 1,
"labels": [
{
"name": "car",
"color": "#2080c0",
"attributes": []
}
]
"assignee_id": 2
},
"task in project #2": {
"name": "Second task in project",
"project_id": 1,
"overlap": 0,
"segment_size": 100,
"owner_id": 1,
"assignee_id": 2
}
}

@ -7,6 +7,7 @@ import json
import os.path as osp
import os
import av
from django.http import response
import numpy as np
import random
import xml.etree.ElementTree as ET
@ -26,6 +27,10 @@ from cvat.apps.dataset_manager.bindings import CvatTaskDataExtractor, TaskData
from cvat.apps.dataset_manager.task import TaskAnnotation
from cvat.apps.engine.models import Task
projects_path = osp.join(osp.dirname(__file__), 'assets', 'projects.json')
with open(projects_path) as file:
projects = json.load(file)
tasks_path = osp.join(osp.dirname(__file__), 'assets', 'tasks.json')
with open(tasks_path) as file:
tasks = json.load(file)
@ -133,8 +138,8 @@ class _DbTestBase(APITestCase):
return response
@staticmethod
def _generate_task_images(count): # pylint: disable=no-self-use
images = {"client_files[%d]" % i: generate_image_file("image_%d.jpg" % i) for i in range(count)}
def _generate_task_images(count, name_offset=0): # pylint: disable=no-self-use
images = {"client_files[%d]" % i: generate_image_file("image_%d.jpg" % (i + name_offset)) for i in range(count)}
images["image_quality"] = 75
return images
@ -159,6 +164,14 @@ class _DbTestBase(APITestCase):
return task
def _create_project(self, data):
with ForceLogin(self.user, self.client):
response = self.client.post('/api/v1/projects', data=data, format="json")
assert response.status_code == status.HTTP_201_CREATED, response.status_code
project = response.data
return project
def _get_jobs(self, task_id):
with ForceLogin(self.admin, self.client):
response = self.client.get("/api/v1/tasks/{}/jobs".format(task_id))
@ -297,14 +310,25 @@ class _DbTestBase(APITestCase):
def _generate_url_upload_job_annotations(self, job_id, upload_format_name):
return f"/api/v1/jobs/{job_id}/annotations?format={upload_format_name}"
def _generate_url_dump_dataset(self, task_id):
def _generate_url_dump_task_dataset(self, task_id):
return f"/api/v1/tasks/{task_id}/dataset"
def _generate_url_dump_project_annotations(self, project_id, format_name):
return f"/api/v1/projects/{project_id}/annotations?format={format_name}"
def _generate_url_dump_project_dataset(self, project_id, format_name):
return f"/api/v1/projects/{project_id}/dataset?format={format_name}"
def _remove_annotations(self, url, user):
response = self._delete_request(url, user)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
return response
def _delete_project(self, project_id, user):
response = self._delete_request(f'/api/v1/projects/{project_id}', user)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
return response
class TaskDumpUploadTest(_DbTestBase):
def test_api_v1_dump_and_upload_annotations_with_objects_type_is_shape(self):
@ -789,7 +813,7 @@ class TaskDumpUploadTest(_DbTestBase):
task = self._create_task(tasks["main"], images)
task_id = task["id"]
# dump annotations
url = self._generate_url_dump_dataset(task_id)
url = self._generate_url_dump_task_dataset(task_id)
for user, edata in list(expected.items()):
user_name = edata['name']
file_zip_name = osp.join(test_dir, f'{test_name}_{user_name}_{dump_format_name}.zip')
@ -1147,3 +1171,108 @@ class TaskDumpUploadTest(_DbTestBase):
# equals annotations
data_from_task_after_upload = self._get_data_from_task(task_id, include_images)
compare_datasets(self, data_from_task_before_upload, data_from_task_after_upload)
class ProjectDump(_DbTestBase):
def test_api_v1_export_dataset(self):
test_name = self._testMethodName
dump_formats = dm.views.get_export_formats()
expected = {
self.admin: {'name': 'admin', 'code': status.HTTP_200_OK, 'create code': status.HTTP_201_CREATED,
'accept code': status.HTTP_202_ACCEPTED, 'file_exists': True},
self.user: {'name': 'user', 'code': status.HTTP_200_OK, 'create code': status.HTTP_201_CREATED,
'accept code': status.HTTP_202_ACCEPTED, 'file_exists': True},
None: {'name': 'none', 'code': status.HTTP_401_UNAUTHORIZED, 'create code': status.HTTP_401_UNAUTHORIZED,
'accept code': status.HTTP_401_UNAUTHORIZED, 'file_exists': False},
}
with TestDir() as test_dir:
for dump_format in dump_formats:
if not dump_format.ENABLED or dump_format.DIMENSION == dm.bindings.DimensionType.DIM_3D:
continue
dump_format_name = dump_format.DISPLAY_NAME
with self.subTest(format=dump_format_name):
project = self._create_project(projects['main'])
pid = project['id']
images = self._generate_task_images(3)
tasks['task in project #1']['project_id'] = pid
self._create_task(tasks['task in project #1'], images)
images = self._generate_task_images(3, 3)
tasks['task in project #2']['project_id'] = pid
self._create_task(tasks['task in project #2'], images)
url = self._generate_url_dump_project_dataset(project['id'], dump_format_name)
for user, edata in list(expected.items()):
user_name = edata['name']
file_zip_name = osp.join(test_dir, f'{test_name}_{user_name}_{dump_format_name}.zip')
data = {
"format": dump_format_name,
}
response = self._get_request_with_data(url, data, user)
self.assertEqual(response.status_code, edata["accept code"])
response = self._get_request_with_data(url, data, user)
self.assertEqual(response.status_code, edata["create code"])
data = {
"format": dump_format_name,
"action": "download",
}
response = self._get_request_with_data(url, data, user)
self.assertEqual(response.status_code, edata["code"])
if response.status_code == status.HTTP_200_OK:
content = BytesIO(b"".join(response.streaming_content))
with open(file_zip_name, "wb") as f:
f.write(content.getvalue())
self.assertEqual(response.status_code, edata['code'])
self.assertEqual(osp.exists(file_zip_name), edata['file_exists'])
def test_api_v1_export_annotations(self):
test_name = self._testMethodName
dump_formats = dm.views.get_export_formats()
expected = {
self.admin: {'name': 'admin', 'code': status.HTTP_200_OK, 'create code': status.HTTP_201_CREATED,
'accept code': status.HTTP_202_ACCEPTED, 'file_exists': True},
self.user: {'name': 'user', 'code': status.HTTP_200_OK, 'create code': status.HTTP_201_CREATED,
'accept code': status.HTTP_202_ACCEPTED, 'file_exists': True},
None: {'name': 'none', 'code': status.HTTP_401_UNAUTHORIZED, 'create code': status.HTTP_401_UNAUTHORIZED,
'accept code': status.HTTP_401_UNAUTHORIZED, 'file_exists': False},
}
with TestDir() as test_dir:
for dump_format in dump_formats:
if not dump_format.ENABLED or dump_format.DIMENSION == dm.bindings.DimensionType.DIM_3D:
continue
dump_format_name = dump_format.DISPLAY_NAME
with self.subTest(format=dump_format_name):
project = self._create_project(projects['main'])
pid = project['id']
images = self._generate_task_images(3)
tasks['task in project #1']['project_id'] = pid
self._create_task(tasks['task in project #1'], images)
images = self._generate_task_images(3, 3)
tasks['task in project #2']['project_id'] = pid
self._create_task(tasks['task in project #2'], images)
url = self._generate_url_dump_project_annotations(project['id'], dump_format_name)
for user, edata in list(expected.items()):
user_name = edata['name']
file_zip_name = osp.join(test_dir, f'{test_name}_{user_name}_{dump_format_name}.zip')
data = {
"format": dump_format_name,
}
response = self._get_request_with_data(url, data, user)
self.assertEqual(response.status_code, edata["accept code"])
response = self._get_request_with_data(url, data, user)
self.assertEqual(response.status_code, edata["create code"])
data = {
"format": dump_format_name,
"action": "download",
}
response = self._get_request_with_data(url, data, user)
self.assertEqual(response.status_code, edata["code"])
if response.status_code == status.HTTP_200_OK:
content = BytesIO(b"".join(response.streaming_content))
with open(file_zip_name, "wb") as f:
f.write(content.getvalue())
self.assertEqual(response.status_code, edata['code'])
self.assertEqual(osp.exists(file_zip_name), edata['file_exists'])

@ -13,9 +13,10 @@ from datumaro.util import to_snake_case
from django.utils import timezone
import cvat.apps.dataset_manager.task as task
from cvat.apps.engine.backup import TaskExporter
import cvat.apps.dataset_manager.project as project
from cvat.apps.engine.log import slogger
from cvat.apps.engine.models import Task
from cvat.apps.engine.models import Project, Task
from cvat.apps.engine.backup import TaskExporter
from .formats.registry import EXPORT_FORMATS, IMPORT_FORMATS
from .util import current_function_name
@ -29,22 +30,32 @@ def log_exception(logger=None, exc_info=True):
exc_info=exc_info)
def get_export_cache_dir(db_task):
task_dir = osp.abspath(db_task.get_task_dirname())
if osp.isdir(task_dir):
return osp.join(task_dir, 'export_cache')
def get_export_cache_dir(db_instance):
base_dir = osp.abspath(db_instance.get_project_dirname() if isinstance(db_instance, Project) else db_instance.get_task_dirname())
if osp.isdir(base_dir):
return osp.join(base_dir, 'export_cache')
else:
raise Exception('Task dir {} does not exist'.format(task_dir))
raise Exception('{} dir {} does not exist'.format("Project" if isinstance(db_instance, Project) else "Task", base_dir))
DEFAULT_CACHE_TTL = timedelta(hours=10)
CACHE_TTL = DEFAULT_CACHE_TTL
TASK_CACHE_TTL = DEFAULT_CACHE_TTL
PROJECT_CACHE_TTL = DEFAULT_CACHE_TTL / 3
def export_task(task_id, dst_format, server_url=None, save_images=False):
def export(dst_format, task_id=None, project_id=None, server_url=None, save_images=False):
try:
db_task = Task.objects.get(pk=task_id)
cache_dir = get_export_cache_dir(db_task)
if task_id is not None:
db_instance = Task.objects.get(pk=task_id)
logger = slogger.task[task_id]
cache_ttl = TASK_CACHE_TTL
export_fn = task.export_task
else:
db_instance = Project.objects.get(pk=project_id)
logger = slogger.project[project_id]
cache_ttl = PROJECT_CACHE_TTL
export_fn = project.export_project
cache_dir = get_export_cache_dir(db_instance)
exporter = EXPORT_FORMATS[dst_format]
output_base = '%s_%s' % ('dataset' if save_images else 'annotations',
@ -52,39 +63,51 @@ def export_task(task_id, dst_format, server_url=None, save_images=False):
output_path = '%s.%s' % (output_base, exporter.EXT)
output_path = osp.join(cache_dir, output_path)
task_time = timezone.localtime(db_task.updated_date).timestamp()
instance_time = timezone.localtime(db_instance.updated_date).timestamp()
if isinstance(db_instance, Project):
tasks_update = list(map(lambda db_task: timezone.localtime(db_task.updated_date).timestamp(), db_instance.tasks.all()))
instance_time = max(tasks_update + [instance_time])
if not (osp.exists(output_path) and \
task_time <= osp.getmtime(output_path)):
instance_time <= osp.getmtime(output_path)):
os.makedirs(cache_dir, exist_ok=True)
with tempfile.TemporaryDirectory(dir=cache_dir) as temp_dir:
temp_file = osp.join(temp_dir, 'result')
task.export_task(task_id, temp_file, dst_format,
export_fn(db_instance.id, temp_file, dst_format,
server_url=server_url, save_images=save_images)
os.replace(temp_file, output_path)
archive_ctime = osp.getctime(output_path)
scheduler = django_rq.get_scheduler()
cleaning_job = scheduler.enqueue_in(time_delta=CACHE_TTL,
cleaning_job = scheduler.enqueue_in(time_delta=cache_ttl,
func=clear_export_cache,
task_id=task_id,
file_path=output_path, file_ctime=archive_ctime)
slogger.task[task_id].info(
"The task '{}' is exported as '{}' at '{}' "
logger.info(
"The {} '{}' is exported as '{}' at '{}' "
"and available for downloading for the next {}. "
"Export cache cleaning job is enqueued, id '{}'".format(
db_task.name, dst_format, output_path, CACHE_TTL,
cleaning_job.id))
"project" if isinstance(db_instance, Project) else 'task',
db_instance.name, dst_format, output_path, cache_ttl,
cleaning_job.id
))
return output_path
except Exception:
log_exception(slogger.task[task_id])
log_exception(logger)
raise
def export_task_as_dataset(task_id, dst_format=None, server_url=None):
return export_task(task_id, dst_format, server_url=server_url, save_images=True)
return export(dst_format, task_id=task_id, server_url=server_url, save_images=True)
def export_task_annotations(task_id, dst_format=None, server_url=None):
return export_task(task_id, dst_format, server_url=server_url, save_images=False)
return export(dst_format, task_id=task_id, server_url=server_url, save_images=False)
def export_project_as_dataset(project_id, dst_format=None, server_url=None):
return export(dst_format, project_id=project_id, server_url=server_url, save_images=True)
def export_project_annotations(project_id, dst_format=None, server_url=None):
return export(dst_format, project_id=project_id, server_url=server_url, save_images=False)
def clear_export_cache(task_id, file_path, file_ctime):
try:
@ -116,7 +139,7 @@ def backup_task(task_id, output_path):
archive_ctime = osp.getctime(output_path)
scheduler = django_rq.get_scheduler()
cleaning_job = scheduler.enqueue_in(time_delta=CACHE_TTL,
cleaning_job = scheduler.enqueue_in(time_delta=TASK_CACHE_TTL,
func=clear_export_cache,
task_id=task_id,
file_path=output_path, file_ctime=archive_ctime)
@ -124,7 +147,7 @@ def backup_task(task_id, output_path):
"The task '{}' is backuped at '{}' "
"and available for downloading for the next {}. "
"Export cache cleaning job is enqueued, id '{}'".format(
db_task.name, output_path, CACHE_TTL,
db_task.name, output_path, TASK_CACHE_TTL,
cleaning_job.id))
return output_path
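
For a project, the staleness check above compares the cached archive's mtime with the newest of the project's own updated_date and those of all its tasks, and PROJECT_CACHE_TTL is deliberately a third of the 10-hour task TTL (3 hours 20 minutes), since a project changes whenever any of its tasks does. A sketch of an equivalent ORM-side way to compute the same timestamp, shown only as a design alternative to the list/map version in the patch:

from django.db.models import Max
from django.utils import timezone

def project_updated_timestamp(db_project):
    # Latest of the project's own update time and its tasks' update times.
    latest_task_update = db_project.tasks.aggregate(m=Max('updated_date'))['m']
    candidates = [db_project.updated_date]
    if latest_task_update is not None:
        candidates.append(latest_task_update)
    return timezone.localtime(max(candidates)).timestamp()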

@ -411,6 +411,8 @@ class TaskSerializer(WriteOnceMixin, serializers.ModelSerializer):
validated_project_id = validated_data.get('project_id', None)
if validated_project_id is not None and validated_project_id != instance.project_id:
project = models.Project.objects.get(id=validated_data.get('project_id', None))
if project.tasks.count() and project.tasks.first().dimension != instance.dimension:
raise serializers.ValidationError(f'Dimension ({instance.dimension}) of the task must be the same as that of the other tasks in the project ({project.tasks.first().dimension})')
if instance.project_id is None:
for old_label in instance.label_set.all():
try:
@ -453,8 +455,10 @@ class TaskSerializer(WriteOnceMixin, serializers.ModelSerializer):
# When moving task labels can be mapped to one, but when not names must be unique
if 'project_id' in attrs.keys() and self.instance is not None:
project_id = attrs.get('project_id')
if project_id is not None and not models.Project.objects.filter(id=project_id).count():
raise serializers.ValidationError(f'Cannot find project with ID {project_id}')
if project_id is not None:
project = models.Project.objects.filter(id=project_id).first()
if project is None:
raise serializers.ValidationError(f'Cannot find project with ID {project_id}')
# Check that all labels can be mapped
new_label_names = set()
old_labels = self.instance.project.label_set.all() if self.instance.project_id else self.instance.label_set.all()

@ -311,6 +311,9 @@ def _create_thread(tid, data, isImport=False):
validate_dimension.set_path(upload_dir)
validate_dimension.validate()
if db_task.project is not None and db_task.project.tasks.count() > 1 and db_task.project.tasks.first().dimension != validate_dimension.dimension:
raise Exception(f'Dimension ({validate_dimension.dimension}) of the task must be the same as that of the other tasks in the project ({db_task.project.tasks.first().dimension})')
if validate_dimension.dimension == models.DimensionType.DIM_3D:
db_task.dimension = models.DimensionType.DIM_3D

@ -298,6 +298,76 @@ class ProjectViewSet(auth.ProjectGetQuerySetMixin, viewsets.ModelViewSet):
return Response(serializer.data)
@swagger_auto_schema(method='get', operation_summary='Export project as a dataset in a specific format',
manual_parameters=[
openapi.Parameter('format', openapi.IN_QUERY,
description="Desired output format name\nYou can get the list of supported formats at:\n/server/annotation/formats",
type=openapi.TYPE_STRING, required=True),
openapi.Parameter('filename', openapi.IN_QUERY,
description="Desired output file name",
type=openapi.TYPE_STRING, required=False),
openapi.Parameter('action', in_=openapi.IN_QUERY,
description='Used to start the download process after the annotation file has been created',
type=openapi.TYPE_STRING, required=False, enum=['download'])
],
responses={'202': openapi.Response(description='Exporting has been started'),
'201': openapi.Response(description='Output file is ready for downloading'),
'200': openapi.Response(description='Download of file started'),
'405': openapi.Response(description='Format is not available'),
}
)
@action(detail=True, methods=['GET'], serializer_class=None,
url_path='dataset')
def dataset_export(self, request, pk):
db_project = self.get_object() # force to call check_object_permissions
format_name = request.query_params.get("format", "")
return _export_annotations(db_instance=db_project,
rq_id="/api/v1/project/{}/dataset/{}".format(pk, format_name),
request=request,
action=request.query_params.get("action", "").lower(),
callback=dm.views.export_project_as_dataset,
format_name=format_name,
filename=request.query_params.get("filename", "").lower(),
)
@swagger_auto_schema(method='get', operation_summary='Method allows downloading project annotations',
manual_parameters=[
openapi.Parameter('format', openapi.IN_QUERY,
description="Desired output format name\nYou can get the list of supported formats at:\n/server/annotation/formats",
type=openapi.TYPE_STRING, required=True),
openapi.Parameter('filename', openapi.IN_QUERY,
description="Desired output file name",
type=openapi.TYPE_STRING, required=False),
openapi.Parameter('action', in_=openapi.IN_QUERY,
description='Used to start the download process after the annotation file has been created',
type=openapi.TYPE_STRING, required=False, enum=['download'])
],
responses={
'202': openapi.Response(description='Dump of annotations has been started'),
'201': openapi.Response(description='Annotations file is ready to download'),
'200': openapi.Response(description='Download of file started'),
'405': openapi.Response(description='Format is not available'),
'400': openapi.Response(description='Format is not specified'),
}
)
@action(detail=True, methods=['GET'],
serializer_class=LabeledDataSerializer)
def annotations(self, request, pk):
db_project = self.get_object() # force to call check_object_permissions
format_name = request.query_params.get('format')
if format_name:
return _export_annotations(db_instance=db_project,
rq_id="/api/v1/projects/{}/annotations/{}".format(pk, format_name),
request=request,
action=request.query_params.get("action", "").lower(),
callback=dm.views.export_project_annotations,
format_name=format_name,
filename=request.query_params.get("filename", "").lower(),
)
else:
return Response("Format is not specified",status=status.HTTP_400_BAD_REQUEST)
class TaskFilter(filters.FilterSet):
project = filters.CharFilter(field_name="project__name", lookup_expr="icontains")
name = filters.CharFilter(field_name="name", lookup_expr="icontains")
@ -475,7 +545,7 @@ class TaskViewSet(auth.TaskGetQuerySetMixin, viewsets.ModelViewSet):
else:
return Response(status=status.HTTP_202_ACCEPTED)
ttl = dm.views.CACHE_TTL.total_seconds()
ttl = dm.views.TASK_CACHE_TTL.total_seconds()
queue.enqueue_call(
func=dm.views.backup_task,
args=(pk, 'task_dump.zip'),
@ -679,7 +749,7 @@ class TaskViewSet(auth.TaskGetQuerySetMixin, viewsets.ModelViewSet):
if request.method == 'GET':
format_name = request.query_params.get('format')
if format_name:
return _export_annotations(db_task=db_task,
return _export_annotations(db_instance=db_task,
rq_id="/api/v1/tasks/{}/annotations/{}".format(pk, format_name),
request=request,
action=request.query_params.get("action", "").lower(),
@ -806,7 +876,7 @@ class TaskViewSet(auth.TaskGetQuerySetMixin, viewsets.ModelViewSet):
db_task = self.get_object() # force to call check_object_permissions
format_name = request.query_params.get("format", "")
return _export_annotations(db_task=db_task,
return _export_annotations(db_instance=db_task,
rq_id="/api/v1/tasks/{}/dataset/{}".format(pk, format_name),
request=request,
action=request.query_params.get("action", "").lower(),
@ -1373,7 +1443,7 @@ def _import_annotations(request, rq_id, rq_func, pk, format_name):
return Response(status=status.HTTP_202_ACCEPTED)
def _export_annotations(db_task, rq_id, request, format_name, action, callback, filename):
def _export_annotations(db_instance, rq_id, request, format_name, action, callback, filename):
if action not in {"", "download"}:
raise serializers.ValidationError(
"Unexpected action specified for the request")
@ -1390,9 +1460,12 @@ def _export_annotations(db_task, rq_id, request, format_name, action, callback,
rq_job = queue.fetch_job(rq_id)
if rq_job:
last_task_update_time = timezone.localtime(db_task.updated_date)
last_instance_update_time = timezone.localtime(db_instance.updated_date)
if isinstance(db_instance, Project):
tasks_update = list(map(lambda db_task: timezone.localtime(db_task.updated_date), db_instance.tasks.all()))
last_instance_update_time = max(tasks_update + [last_instance_update_time])
request_time = rq_job.meta.get('request_time', None)
if request_time is None or request_time < last_task_update_time:
if request_time is None or request_time < last_instance_update_time:
rq_job.cancel()
rq_job.delete()
else:
@ -1401,12 +1474,14 @@ def _export_annotations(db_task, rq_id, request, format_name, action, callback,
if action == "download" and osp.exists(file_path):
rq_job.delete()
timestamp = datetime.strftime(last_task_update_time,
timestamp = datetime.strftime(last_instance_update_time,
"%Y_%m_%d_%H_%M_%S")
filename = filename or \
"task_{}-{}-{}{}".format(
db_task.name, timestamp,
format_name, osp.splitext(file_path)[1])
"{}_{}-{}-{}{}".format(
"project" if isinstance(db_instance, models.Project) else "task",
db_instance.name, timestamp,
format_name, osp.splitext(file_path)[1]
)
return sendfile(request, file_path, attachment=True,
attachment_filename=filename.lower())
else:
@ -1427,9 +1502,9 @@ def _export_annotations(db_task, rq_id, request, format_name, action, callback,
except Exception:
server_address = None
ttl = dm.views.CACHE_TTL.total_seconds()
ttl = (dm.views.PROJECT_CACHE_TTL if isinstance(db_instance, Project) else dm.views.TASK_CACHE_TTL).total_seconds()
queue.enqueue_call(func=callback,
args=(db_task.id, format_name, server_address), job_id=rq_id,
args=(db_instance.id, format_name, server_address), job_id=rq_id,
meta={ 'request_time': timezone.localtime() },
result_ttl=ttl, failure_ttl=ttl)
return Response(status=status.HTTP_202_ACCEPTED)
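
The documented status codes define a simple polling contract for clients: repeat the GET until 201 (the file is ready), then request it again with action=download to stream the archive with 200. A sketch of that flow with the requests library, assuming a local server and test credentials:

import time
import requests

session = requests.Session()
# Assumed local instance and credentials; adjust for a real deployment.
session.post('http://localhost:8080/api/v1/auth/login',
    json={'username': 'admin', 'password': 'admin'})

url = 'http://localhost:8080/api/v1/projects/1/dataset'
params = {'format': 'CVAT for images 1.1'}

# 202 means exporting has started, 201 means the archive is ready.
while session.get(url, params=params).status_code != 201:
    time.sleep(1)

# action=download answers 200 and streams the prepared archive.
response = session.get(url, params={**params, 'action': 'download'})
with open('project_1_dataset.zip', 'wb') as f:
    f.write(response.content)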

@ -13,16 +13,19 @@ description: 'Instructions on how to create and configure an annotation task.'
## Basic configuration
### Name
The name of the task to be created.
![](/images/image005.jpg)
### Projects
The project to which this task will be related.
![](/images/image193.jpg)
### Labels
There are two ways of working with labels (available only if the task is not related to a project):
- The `Constructor` is a simple way to add and adjust labels. To add a new label click the `Add label` button.
@ -67,6 +70,7 @@ description: 'Instructions on how to create and configure an annotation task.'
In `Raw` and `Constructor` mode, you can press the `Copy` button to copy the list of labels.
### Select files
Press tab `My computer` to choose some files for annotation from your PC.
If you select tab `Connected file share`, you can choose files for annotation from your network.
If you select `Remote source`, you'll see a field where you can enter a list of URLs (one URL per line).
@ -78,63 +82,67 @@ description: 'Instructions on how to create and configure an annotation task.'
### Data formats for a 3D task
To create a 3D task, you need to use the following directory structures:
{{< tabpane >}}
{{< tab header="Velodyne" >}}
VELODYNE FORMAT
Structure:
velodyne_points/
data/
image_01.bin
IMAGE_00 # unknown dirname, Generally image_01.png can be under IMAGE_00, IMAGE_01, IMAGE_02, IMAGE_03, etc
data/
image_01.png
{{< /tab >}}
{{< tab header="3D pointcloud" >}}
3D POINTCLOUD DATA FORMAT
Structure:
pointcloud/
00001.pcd
related_images/
00001_pcd/
image_01.png # or any other image
{{< /tab >}}
{{< tab header="3D Option 1" >}}
3D, DEFAULT DATAFORMAT Option 1
Structure:
data/
image.pcd
image.png
{{< /tab >}}
{{< tab header="3D Option 2" >}}
3D, DEFAULT DATAFORMAT Option 2
Structure:
data/
image_1/
image_1.pcd
context_1.png # or any other name
context_2.jpg
{{< /tab >}}
{{< /tabpane >}}
{{< tabpane >}}
{{< tab header="Velodyne" >}}
VELODYNE FORMAT
Structure:
velodyne_points/
data/
image_01.bin
IMAGE_00 # unknown dirname, Generally image_01.png can be under IMAGE_00, IMAGE_01, IMAGE_02, IMAGE_03, etc
data/
image_01.png
{{< /tab >}}
{{< tab header="3D pointcloud" >}}
3D POINTCLOUD DATA FORMAT
Structure:
pointcloud/
00001.pcd
related_images/
00001_pcd/
image_01.png # or any other image
{{< /tab >}}
{{< tab header="3D Option 1" >}}
3D, DEFAULT DATAFORMAT Option 1
Structure:
data/
image.pcd
image.png
{{< /tab >}}
{{< tab header="3D Option 2" >}}
3D, DEFAULT DATAFORMAT Option 2
Structure:
data/
image_1/
image_1.pcd
context_1.png # or any other name
context_2.jpg
{{< /tab >}}
{{< /tabpane >}}
## Advanced configuration
![](/images/image128_use_cache.jpg)
### Use zip chunks
Forces the use of zip chunks as compressed data. Relevant for videos only.
### Use cache
Defines how to work with data. Select the checkbox to switch to "on-the-fly data processing",
which reduces the task creation time (by preparing chunks when requests are received)
and stores data in a cache of limited size with a policy of evicting less popular items.
See more [here](/docs/manual/advanced/data_on_fly/).
### Image Quality
Use this option to specify the quality of the uploaded images.
The option helps to load high resolution datasets faster.
Use a value from `5` (almost completely compressed images) to `100` (uncompressed images).
### Overlap Size
Use this option to make overlapped segments.
The option makes tracks continuous from one segment into another.
Use it for interpolation mode. There are several options for using the parameter:
@ -158,22 +166,27 @@ description: 'Instructions on how to create and configure an annotation task.'
even the overlap parameter isn't zero and match between corresponding shapes on adjacent segments is perfect.
### Segment size
Use this option to divide a huge dataset into a few smaller segments.
For example, one job cannot be annotated by several labelers (this isn't supported).
Thus, using "segment size" you can create several jobs for the same annotation task,
which helps you parallelize the data annotation process.
### Start frame
The frame from which the video in the task begins.
### Stop frame
The frame at which the video in the task ends.
### Frame Step
Use this option to filter video frames.
For example, enter `25` to leave every twenty-fifth frame in the video or every twenty-fifth image.
### Chunk size
Defines the number of frames to be packed in a chunk when sent from client to server.
The server defines it automatically if left empty.
@ -185,6 +198,7 @@ description: 'Instructions on how to create and configure an annotation task.'
- More: 1 - 4
### Dataset Repository
The URL of the repository; it can optionally include the path inside the repository used for storage
(default: `annotation/<dump_file_name>.zip`).
The `.zip` and `.xml` annotation file extensions are supported.
@ -199,10 +213,12 @@ description: 'Instructions on how to create and configure an annotation task.'
The task will be highlighted in red after creation if annotation isn't synchronized with the repository.
### Use LFS
If the annotation file is large, you can create a repository with
[LFS](https://git-lfs.github.com/) support.
### Issue tracker
Specify the full URL of the issue tracker if necessary.
Press the `Submit` button and the task will be added to the list of annotation tasks.
@ -248,4 +264,4 @@ description: 'Instructions on how to create and configure an annotation task.'
---
Push `Open` button to go to [task details](/docs/manual/basics/task-details/).
Push `Open` button to go to [task details](/docs/manual/basics/task-details/).

@ -11,16 +11,16 @@ context('Move a task between projects.', () => {
label: 'car',
attrName: 'color',
attrVaue: 'red',
multiAttrParams: false
}
multiAttrParams: false,
};
const secondProject = {
name: `Second project case ${caseID}`,
label: 'bicycle',
attrName: 'color',
attrVaue: 'yellow',
multiAttrParams: false
}
multiAttrParams: false,
};
const taskName = `Task case ${caseID}`;
const imagesCount = 1;
@ -39,14 +39,24 @@ context('Move a task between projects.', () => {
const attachToProject = false;
const multiAttrParams = false;
function checkTask (project, expectedResult) {
function checkTask(project, expectedResult) {
cy.goToProjectsList();
cy.openProject(project);
cy.get('.cvat-tasks-list-item').should(expectedResult);
}
before(() => {
cy.imageGenerator(imagesFolder, imageFileName, width, height, color, posX, posY, firtsProject.label, imagesCount);
cy.imageGenerator(
imagesFolder,
imageFileName,
width,
height,
color,
posX,
posY,
firtsProject.label,
imagesCount,
);
cy.createZipArchive(directoryToArchive, archivePath);
cy.visit('/');
cy.login();
@ -54,8 +64,20 @@ context('Move a task between projects.', () => {
beforeEach(() => {
cy.goToProjectsList();
cy.createProjects(firtsProject.name, firtsProject.label, firtsProject.attrName, firtsProject.attrVaue, firtsProject.multiAttrParams);
cy.createProjects(secondProject.name, secondProject.label, secondProject.attrName, secondProject.attrVaue, secondProject.multiAttrParams);
cy.createProjects(
firtsProject.name,
firtsProject.label,
firtsProject.attrName,
firtsProject.attrVaue,
firtsProject.multiAttrParams,
);
cy.createProjects(
secondProject.name,
secondProject.label,
secondProject.attrName,
secondProject.attrVaue,
secondProject.multiAttrParams,
);
cy.openProject(firtsProject.name);
cy.createAnnotationTask(
taskName,

@ -11,14 +11,14 @@ context('Move a task to a project.', () => {
label: 'Tree',
attrName: 'Kind',
attrValue: 'Oak',
}
};
const project = {
name: `Case ${caseID}`,
label: 'Tree',
attrName: 'Kind',
attrVaue: 'Oak'
}
attrVaue: 'Oak',
};
const imagesCount = 1;
const imageFileName = `image_${task.name.replace(' ', '_').toLowerCase()}`;

@ -68,9 +68,10 @@ context('Dump/Upload annotation.', { browser: '!firefox' }, () => {
it('Save job. Dump annotation. Remove annotation. Save job.', () => {
cy.saveJob('PATCH', 200, 'saveJobDump');
cy.intercept('GET', '/api/v1/tasks/**/annotations**').as('dumpAnnotations');
cy.interactMenu('Dump annotations');
cy.get('.cvat-menu-dump-submenu-item').within(() => {
cy.contains(dumpType).click();
cy.interactMenu('Export task dataset');
cy.get('.cvat-modal-export-task').within(() => {
cy.get('.cvat-modal-export-select').should('contain.text', dumpType);
cy.contains('button', 'OK').click();
});
cy.wait('@dumpAnnotations', { timeout: 5000 }).its('response.statusCode').should('equal', 202);
cy.wait('@dumpAnnotations').its('response.statusCode').should('equal', 201);

@ -66,10 +66,15 @@ context('Import annotations for frames with dots in name.', { browser: '!firefox
it('Save job. Dump annotation to YOLO format. Remove annotation. Save job.', () => {
cy.saveJob('PATCH', 200, 'saveJobDump');
cy.intercept('GET', '/api/v1/tasks/**/annotations**').as('dumpAnnotations');
cy.interactMenu('Dump annotations');
cy.get('.cvat-menu-dump-submenu-item').within(() => {
cy.contains(dumpType).click();
});
cy.interactMenu('Export task dataset');
cy.get('.cvat-modal-export-task').find('.cvat-modal-export-select').click();
cy.get('.ant-select-dropdown')
.not('.ant-select-dropdown-hidden')
.trigger('wheel', { deltaY: 700 })
.contains('.cvat-modal-export-option-item', dumpType)
.click();
cy.get('.cvat-modal-export-select').should('contain.text', dumpType);
cy.get('.cvat-modal-export-task').contains('button', 'OK').click();
cy.wait('@dumpAnnotations', { timeout: 5000 }).its('response.statusCode').should('equal', 202);
cy.wait('@dumpAnnotations').its('response.statusCode').should('equal', 201);
cy.removeAnnotations();

@ -67,9 +67,11 @@ context('Export, import an annotation task.', { browser: '!firefox' }, () => {
.find('.cvat-item-open-task-actions > .cvat-menu-icon')
.trigger('mouseover');
cy.intercept('GET', '/api/v1/tasks/**?action=export').as('exportTask');
cy.get('.ant-dropdown').not('.ant-dropdown-hidden').within(() => {
cy.contains('[role="menuitem"]', 'Export Task').click().trigger('mouseout');
});
cy.get('.ant-dropdown')
.not('.ant-dropdown-hidden')
.within(() => {
cy.contains('[role="menuitem"]', new RegExp('^Export task$')).click().trigger('mouseout');
});
cy.wait('@exportTask', { timeout: 5000 }).its('response.statusCode').should('equal', 202);
cy.wait('@exportTask').its('response.statusCode').should('equal', 201);
cy.deleteTask(taskName);
@ -82,10 +84,7 @@ context('Export, import an annotation task.', { browser: '!firefox' }, () => {
it('Import the task. Check id, labels, shape.', () => {
cy.intercept('POST', '/api/v1/tasks?action=import').as('importTask');
cy.get('.cvat-import-task')
.click()
.find('input[type=file]')
.attachFile(taskBackupArchiveFullName);
cy.get('.cvat-import-task').click().find('input[type=file]').attachFile(taskBackupArchiveFullName);
cy.wait('@importTask', { timeout: 5000 }).its('response.statusCode').should('equal', 202);
cy.wait('@importTask').its('response.statusCode').should('equal', 201);
cy.contains('Task has been imported succesfully').should('exist').and('be.visible');

@ -6,8 +6,9 @@
import { taskName, labelName } from '../../support/const';
context('Export as a dataset.', () => {
context('Export task dataset.', () => {
const caseId = '47';
const exportFormat = 'CVAT for images';
const rectangleShape2Points = {
points: 'By 2 Points',
type: 'Shape',
@ -21,16 +22,20 @@ context('Export as a dataset.', () => {
before(() => {
cy.openTaskJob(taskName);
cy.createRectangle(rectangleShape2Points);
cy.saveJob();
cy.saveJob('PATCH', 200, 'saveJobExportDataset');
});
describe(`Testing case "${caseId}"`, () => {
it('Go to Menu. Press "Export as a dataset" -> "CVAT for images".', () => {
it(`Go to Menu. Press "Export task dataset" with the "${exportFormat}" format.`, () => {
cy.intercept('GET', '/api/v1/tasks/**/dataset**').as('exportDataset');
cy.interactMenu('Export as a dataset');
cy.get('.cvat-menu-export-submenu-item').within(() => {
cy.contains('CVAT for images').click();
cy.interactMenu('Export task dataset');
cy.get('.cvat-modal-export-task').within(() => {
cy.get('.cvat-modal-export-select').should('contain.text', exportFormat);
cy.get('[type="checkbox"]').should('not.be.checked').check();
cy.contains('button', 'OK').click();
});
cy.get('.cvat-notification-notice-export-task-start').should('exist');
cy.closeNotification('.cvat-notification-notice-export-task-start');
cy.wait('@exportDataset', { timeout: 5000 }).its('response.statusCode').should('equal', 202);
cy.wait('@exportDataset').its('response.statusCode').should('equal', 201);
});

@ -21,7 +21,7 @@ context('Context images for 2D tasks.', () => {
secondY: 450,
};
function previewRotate (directionRotation, expectedDeg) {
function previewRotate(directionRotation, expectedDeg) {
if (directionRotation === 'right') {
cy.get('[data-icon="rotate-right"]').click();
} else {
@ -30,30 +30,22 @@ context('Context images for 2D tasks.', () => {
cy.get('.ant-image-preview-img').should('have.attr', 'style').and('contain', `rotate(${expectedDeg}deg)`);
}
function previewScaleWheel (zoom, expectedScaleValue) {
function previewScaleWheel(zoom, expectedScaleValue) {
cy.get('.ant-image-preview-img')
.trigger('wheel', {deltaY: zoom})
.trigger('wheel', { deltaY: zoom })
.should('have.attr', 'style')
.and('contain', `scale3d(${expectedScaleValue})`);
}
function previewScaleButton (zoom, expectedScaleValue) {
function previewScaleButton(zoom, expectedScaleValue) {
cy.get(`[data-icon="zoom-${zoom}"]`).click();
cy.get('.ant-image-preview-img')
.should('have.attr', 'style')
.and('contain', `scale3d(${expectedScaleValue})`);
cy.get('.ant-image-preview-img').should('have.attr', 'style').and('contain', `scale3d(${expectedScaleValue})`);
}
before(() => {
cy.visit('auth/login');
cy.login();
cy.createAnnotationTask(
taskName,
labelName,
attrName,
textDefaultValue,
pathToArchive,
);
cy.createAnnotationTask(taskName, labelName, attrName, textDefaultValue, pathToArchive);
cy.openTaskJob(taskName);
});
@ -97,20 +89,22 @@ context('Context images for 2D tasks.', () => {
});
it('Preview a context image. Move.', () => {
cy.get('.ant-image-preview-img-wrapper').should('have.attr', 'style').then((translate3d) => {
cy.get('.ant-image-preview-img').trigger('mousedown', {button: 0});
cy.get('.ant-image-preview-moving').should('exist');
cy.get('.ant-image-preview-wrap').trigger('mousemove', 300, 300);
cy.get('.ant-image-preview-img-wrapper').should('have.attr', 'style').and('not.equal', translate3d)
cy.get('.ant-image-preview-img').trigger('mouseup');
cy.get('.ant-image-preview-moving').should('not.exist');
cy.get('.ant-image-preview-img-wrapper').should('have.attr', 'style').and('equal', translate3d)
});
cy.get('.ant-image-preview-img-wrapper')
.should('have.attr', 'style')
.then((translate3d) => {
cy.get('.ant-image-preview-img').trigger('mousedown', { button: 0 });
cy.get('.ant-image-preview-moving').should('exist');
cy.get('.ant-image-preview-wrap').trigger('mousemove', 300, 300);
cy.get('.ant-image-preview-img-wrapper').should('have.attr', 'style').and('not.equal', translate3d);
cy.get('.ant-image-preview-img').trigger('mouseup');
cy.get('.ant-image-preview-moving').should('not.exist');
cy.get('.ant-image-preview-img-wrapper').should('have.attr', 'style').and('equal', translate3d);
});
});
it('Preview a context image. Cancel preview.', () => {
cy.get('.ant-image-preview-wrap').type('{Esc}');
cy.get('.ant-image-preview-wrap').should('have.attr', 'style').and('contain', 'display: none')
cy.get('.ant-image-preview-wrap').should('have.attr', 'style').and('contain', 'display: none');
});
it('Checking issue "Context image disappears after undo/redo".', () => {

@ -51,13 +51,14 @@ context('Canvas 3D functionality. Dump/upload annotation. "Point Cloud" format',
it('Save a job. Dump with "Point Cloud" format.', () => {
cy.saveJob('PATCH', 200, 'saveJob');
cy.intercept('GET', '/api/v1/tasks/**/annotations**').as('dumpAnnotations');
cy.interactMenu('Dump annotations');
cy.get('.cvat-menu-dump-submenu-item').then((subMenu) => {
expect(subMenu.length).to.be.equal(2);
});
cy.get('.cvat-menu-dump-submenu-item').within(() => {
cy.contains(dumpTypePC).click();
});
cy.interactMenu('Export task dataset');
cy.get('.cvat-modal-export-task').find('.cvat-modal-export-select').click();
cy.get('.ant-select-dropdown')
.not('.ant-select-dropdown-hidden')
.contains('.cvat-modal-export-option-item', dumpTypePC)
.click();
cy.get('.cvat-modal-export-select').should('contain.text', dumpTypePC);
cy.get('.cvat-modal-export-task').contains('button', 'OK').click();
cy.wait('@dumpAnnotations', { timeout: 5000 }).its('response.statusCode').should('equal', 202);
cy.wait('@dumpAnnotations').its('response.statusCode').should('equal', 201);
cy.removeAnnotations();

@ -51,10 +51,14 @@ context('Canvas 3D functionality. Dump/upload annotation. "Velodyne Points" form
it('Save a job. Dump with "Velodyne Points" format.', () => {
cy.saveJob('PATCH', 200, 'saveJob');
cy.intercept('GET', '/api/v1/tasks/**/annotations**').as('dumpAnnotations');
cy.interactMenu('Dump annotations');
cy.get('.cvat-menu-dump-submenu-item').within(() => {
cy.contains(dumpTypeVC).click();
});
cy.interactMenu('Export task dataset');
cy.get('.cvat-modal-export-task').find('.cvat-modal-export-select').click();
cy.get('.ant-select-dropdown')
.not('.ant-select-dropdown-hidden')
.contains('.cvat-modal-export-option-item', dumpTypeVC)
.click();
cy.get('.cvat-modal-export-select').should('contain.text', dumpTypeVC);
cy.get('.cvat-modal-export-task').contains('button', 'OK').click();
cy.wait('@dumpAnnotations', { timeout: 5000 }).its('response.statusCode').should('equal', 202);
cy.wait('@dumpAnnotations').its('response.statusCode').should('equal', 201);
cy.removeAnnotations();

@ -15,8 +15,23 @@ context('Canvas 3D functionality. Export as a dataset.', () => {
const dumpTypePC = 'Sly Point Cloud Format';
const dumpTypeVC = 'Kitti Raw Format';
function exportDataset(format, as) {
cy.intercept('GET', '/api/v1/tasks/**/dataset**').as(as);
cy.interactMenu('Export task dataset');
cy.get('.cvat-modal-export-task').find('.cvat-modal-export-select').click();
cy.get('.ant-select-dropdown')
.not('.ant-select-dropdown-hidden')
.contains('.cvat-modal-export-option-item', format)
.click();
cy.get('.cvat-modal-export-select').should('contain.text', format);
cy.get('.cvat-modal-export-task').find('[type="checkbox"]').should('not.be.checked').check();
cy.get('.cvat-modal-export-task').contains('button', 'OK').click();
cy.wait(`@${as}`, { timeout: 5000 }).its('response.statusCode').should('equal', 202);
cy.wait(`@${as}`).its('response.statusCode').should('equal', 201);
}
before(() => {
cy.openTask(taskName)
cy.openTask(taskName);
cy.openJob();
cy.wait(1000); // Waiting for the point cloud to display
cy.create3DCuboid(cuboidCreationParams);
@ -25,23 +40,11 @@ context('Canvas 3D functionality. Export as a dataset.', () => {
describe(`Testing case "${caseId}"`, () => {
it('Export as a dataset with "Point Cloud" format.', () => {
cy.intercept('GET', '/api/v1/tasks/**/dataset**').as('exportDatasetPC');
cy.interactMenu('Export as a dataset');
cy.get('.cvat-menu-export-submenu-item').within(() => {
cy.contains(dumpTypePC).click();
});
cy.wait('@exportDatasetPC', { timeout: 5000 }).its('response.statusCode').should('equal', 202);
cy.wait('@exportDatasetPC').its('response.statusCode').should('equal', 201);
exportDataset(dumpTypePC, 'exportDatasetPC');
});
it('Export as a dataset with "Velodyne Points" format.', () => {
cy.intercept('GET', '/api/v1/tasks/**/dataset**').as('exportDatasetVC');
cy.interactMenu('Export as a dataset');
cy.get('.cvat-menu-export-submenu-item').within(() => {
cy.contains(dumpTypeVC).click();
});
cy.wait('@exportDatasetVC', { timeout: 5000 }).its('response.statusCode').should('equal', 202);
cy.wait('@exportDatasetVC').its('response.statusCode').should('equal', 201);
exportDataset(dumpTypeVC, 'exportDatasetVC');
cy.removeAnnotations();
cy.saveJob('PUT');
});

@ -6,7 +6,7 @@
import { taskName, labelName } from '../../support/const';
context('Dump annotation if cuboid created', () => {
context('Dump annotation if cuboid created.', () => {
const issueId = '1568';
const createCuboidShape2Points = {
points: 'From rectangle',
@ -17,30 +17,33 @@ context('Dump annotation if cuboid created', () => {
secondX: 350,
secondY: 450,
};
const dumpType = 'Datumaro';
before(() => {
cy.openTaskJob(taskName);
});
describe(`Testing issue "${issueId}"`, () => {
it('Create a cuboid', () => {
it('Create a cuboid.', () => {
cy.createCuboid(createCuboidShape2Points);
cy.get('#cvat-objects-sidebar-state-item-1').should('contain', '1').and('contain', 'CUBOID SHAPE');
});
it('Dump an annotation', () => {
cy.get('.cvat-annotation-header-left-group').within(() => {
cy.saveJob();
cy.get('button').contains('Menu').trigger('mouseover', { force: true });
});
cy.get('.cvat-annotation-menu').within(() => {
cy.get('[title="Dump annotations"]').trigger('mouseover');
});
cy.get('.cvat-menu-dump-submenu-item').within(() => {
cy.contains('Datumaro').click();
});
it('Dump an annotation.', () => {
cy.saveJob('PATCH', 200, `dump${dumpType}Format`);
cy.intercept('GET', '/api/v1/tasks/**/annotations**').as('dumpAnnotations');
cy.interactMenu('Export task dataset');
cy.get('.cvat-modal-export-task').find('.cvat-modal-export-select').click();
cy.get('.ant-select-dropdown')
.not('.ant-select-dropdown-hidden')
.contains('.cvat-modal-export-option-item', dumpType)
.click();
cy.get('.cvat-modal-export-select').should('contain.text', dumpType);
cy.get('.cvat-modal-export-task').contains('button', 'OK').click();
cy.wait('@dumpAnnotations', { timeout: 5000 }).its('response.statusCode').should('equal', 202);
cy.wait('@dumpAnnotations').its('response.statusCode').should('equal', 201);
});
it('Error notification is not exists', () => {
cy.wait(5000);
it('Error notification does not exist.', () => {
cy.get('.ant-notification-notice').should('not.exist');
});
});

@ -101,9 +101,9 @@ Cypress.Commands.add('changeUserActiveStatus', (authKey, accountsToChangeActiveS
headers: {
Authorization: `Token ${authKey}`,
},
body: {
is_active: isActive,
},
body: {
is_active: isActive,
},
});
}
});
@ -124,7 +124,6 @@ Cypress.Commands.add('checkUserStatuses', (authKey, userName, staffStatus, super
expect(superuserStatus).to.be.equal(user['is_superuser']);
expect(activeStatus).to.be.equal(user['is_active']);
}
});
});
});
@ -181,9 +180,7 @@ Cypress.Commands.add(
}
cy.contains('button', 'Submit').click();
if (expectedResult === 'success') {
cy.get('.cvat-notification-create-task-success')
.should('exist')
.find('[data-icon="close"]').click();
cy.get('.cvat-notification-create-task-success').should('exist').find('[data-icon="close"]').click();
}
if (!forProject) {
cy.goToTaskList();
