Az/import export tasks (#3056)

* initial version of task export/import feature

* fixed tests

* CLI

* fix comments

* updated license headers

* fix eslint issues

* fix comments

* fixed comments

* reverted changes in *.md files

* fixed comments

* fix pylint issues

* fix import for share case

* improved unit tests

* updated changelog

* fixed Maria's comments

* fixed comments

* Fixed position of create new task button

* Fixed span position

* fixed comments

Co-authored-by: Nikita Manovich <nikita.manovich@intel.com>
Co-authored-by: Boris Sekachev <boris.sekachev@intel.com>
Branch: main
Author: Andrey Zhavoronkov, committed by GitHub
Parent: 6665fe1dc7
Commit: 72fdef4335

@ -11,6 +11,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Support of context images for 2D image tasks (<https://github.com/openvinotoolkit/cvat/pull/3122>)
- Filter `is_active` for user list (<https://github.com/openvinotoolkit/cvat/pull/3235>)
- Ability to export/import tasks (<https://github.com/openvinotoolkit/cvat/pull/3056>)
### Changed

@ -490,6 +490,59 @@
});
}
async function exportTask(id) {
const { backendAPI } = config;
const url = `${backendAPI}/tasks/${id}`;
return new Promise((resolve, reject) => {
async function request() {
try {
const response = await Axios.get(`${url}?action=export`, {
proxy: config.proxy,
});
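// a 202 response means the server is still preparing the backup; poll again after a delay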
if (response.status === 202) {
setTimeout(request, 3000);
} else {
resolve(`${url}?action=download`);
}
} catch (errorData) {
reject(generateError(errorData));
}
}
setTimeout(request);
});
}
async function importTask(file) {
const { backendAPI } = config;
let taskData = new FormData();
taskData.append('task_file', file);
return new Promise((resolve, reject) => {
async function request() {
try {
const response = await Axios.post(`${backendAPI}/tasks?action=import`, taskData, {
proxy: config.proxy,
});
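// while the server answers 202, keep polling with the rq_id it returned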
if (response.status === 202) {
taskData = new FormData();
taskData.append('rq_id', response.data.rq_id);
setTimeout(request, 3000);
} else {
const importedTask = await getTasks(`?id=${response.data.id}`);
resolve(importedTask[0]);
}
} catch (errorData) {
reject(generateError(errorData));
}
}
setTimeout(request);
});
}
async function createTask(taskSpec, taskDataSpec, onUpdate) {
const { backendAPI } = config;
@ -1157,6 +1210,8 @@
createTask,
deleteTask,
exportDataset,
exportTask,
importTask,
}),
writable: false,
},

@ -1664,6 +1664,36 @@
const result = await PluginRegistry.apiWrapper.call(this, Task.prototype.delete);
return result;
}
/**
* Method makes a backup of a task
* @method export
* @memberof module:API.cvat.classes.Task
* @readonly
* @instance
* @async
* @throws {module:API.cvat.exceptions.ServerError}
* @throws {module:API.cvat.exceptions.PluginError}
*/
async export() {
const result = await PluginRegistry.apiWrapper.call(this, Task.prototype.export);
return result;
}
/**
* Method imports a task from a backup
* @method import
* @memberof module:API.cvat.classes.Task
* @readonly
* @instance
* @async
* @throws {module:API.cvat.exceptions.ServerError}
* @throws {module:API.cvat.exceptions.PluginError}
*/
static async import(file) {
const result = await PluginRegistry.apiWrapper.call(this, Task.import, file);
return result;
}
}
module.exports = {
@ -2073,6 +2103,16 @@
return result;
};
Task.prototype.export.implementation = async function () {
const result = await serverProxy.tasks.exportTask(this.id);
return result;
};
Task.import.implementation = async function (file) {
const result = await serverProxy.tasks.importTask(file);
return result;
};
Task.prototype.frames.get.implementation = async function (frame, isPlaying, step) {
if (!Number.isInteger(frame) || frame < 0) {
throw new ArgumentError(`Frame must be a positive integer. Got: "${frame}"`);

@ -53735,9 +53735,9 @@
}
},
"rc-menu": {
"version": "8.10.7",
"resolved": "https://registry.npmjs.org/rc-menu/-/rc-menu-8.10.7.tgz",
"integrity": "sha512-m/ypV7OjkkUsMdutzMUxEI8tWyi0Y1TQ5YkSDk7k2uv2aCKkHYEoDKsDAfcPeejo3HMo2z5unWE+jD+dCphraw==",
"requires": {
"@babel/runtime": "^7.10.1",
"classnames": "2.x",
@ -53758,9 +53758,9 @@
}
},
"rc-util": {
"version": "5.9.8",
"resolved": "https://registry.npmjs.org/rc-util/-/rc-util-5.9.8.tgz",
"integrity": "sha512-typLSHYGf5irvGLYQshs0Ra3aze086h0FhzsAkyirMunYZ7b3Te8gKa5PVaanoHaZa9sS6qx98BxgysoRP+6Tw==",
"requires": {
"@babel/runtime": "^7.12.5",
"react-is": "^16.12.0",

@ -73,6 +73,7 @@
"mousetrap": "^1.6.5",
"platform": "^1.3.6",
"prop-types": "^15.7.2",
"rc-menu": "^8.10.7",
"react": "^16.14.0",
"react-awesome-query-builder": "^3.0.0",
"react-color": "^2.19.3",

@ -35,6 +35,12 @@ export enum TasksActionTypes {
UPDATE_TASK_SUCCESS = 'UPDATE_TASK_SUCCESS',
UPDATE_TASK_FAILED = 'UPDATE_TASK_FAILED',
HIDE_EMPTY_TASKS = 'HIDE_EMPTY_TASKS',
EXPORT_TASK = 'EXPORT_TASK',
EXPORT_TASK_SUCCESS = 'EXPORT_TASK_SUCCESS',
EXPORT_TASK_FAILED = 'EXPORT_TASK_FAILED',
IMPORT_TASK = 'IMPORT_TASK',
IMPORT_TASK_SUCCESS = 'IMPORT_TASK_SUCCESS',
IMPORT_TASK_FAILED = 'IMPORT_TASK_FAILED',
SWITCH_MOVE_TASK_MODAL_VISIBLE = 'SWITCH_MOVE_TASK_MODAL_VISIBLE',
}
@ -214,6 +220,49 @@ export function loadAnnotationsAsync(
};
}
function importTask(): AnyAction {
const action = {
type: TasksActionTypes.IMPORT_TASK,
payload: {},
};
return action;
}
function importTaskSuccess(task: any): AnyAction {
const action = {
type: TasksActionTypes.IMPORT_TASK_SUCCESS,
payload: {
task,
},
};
return action;
}
function importTaskFailed(error: any): AnyAction {
const action = {
type: TasksActionTypes.IMPORT_TASK_FAILED,
payload: {
error,
},
};
return action;
}
export function importTaskAsync(file: File): ThunkAction<Promise<void>, {}, {}, AnyAction> {
return async (dispatch: ActionCreator<Dispatch>): Promise<void> => {
try {
dispatch(importTask());
const taskInstance = await cvat.classes.Task.import(file);
dispatch(importTaskSuccess(taskInstance));
} catch (error) {
dispatch(importTaskFailed(error));
}
};
}
function exportDataset(task: any, exporter: any): AnyAction {
const action = {
type: TasksActionTypes.EXPORT_DATASET,
@ -268,6 +317,56 @@ export function exportDatasetAsync(task: any, exporter: any): ThunkAction<Promis
};
}
function exportTask(taskID: number): AnyAction {
const action = {
type: TasksActionTypes.EXPORT_TASK,
payload: {
taskID,
},
};
return action;
}
function exportTaskSuccess(taskID: number): AnyAction {
const action = {
type: TasksActionTypes.EXPORT_TASK_SUCCESS,
payload: {
taskID,
},
};
return action;
}
function exportTaskFailed(taskID: number, error: Error): AnyAction {
const action = {
type: TasksActionTypes.EXPORT_TASK_FAILED,
payload: {
taskID,
error,
},
};
return action;
}
export function exportTaskAsync(taskInstance: any): ThunkAction<Promise<void>, {}, {}, AnyAction> {
return async (dispatch: ActionCreator<Dispatch>): Promise<void> => {
dispatch(exportTask(taskInstance.id));
try {
const url = await taskInstance.export();
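// reuse the page's shared 'downloadAnchor' element to trigger the browser download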
const downloadAnchor = window.document.getElementById('downloadAnchor') as HTMLAnchorElement;
downloadAnchor.href = url;
downloadAnchor.click();
dispatch(exportTaskSuccess(taskInstance.id));
} catch (error) {
dispatch(exportTaskFailed(taskInstance.id, error));
}
};
}
function deleteTask(taskID: number): AnyAction {
const action = {
type: TasksActionTypes.DELETE_TASK,

@ -6,6 +6,7 @@ import './styles.scss';
import React from 'react';
import Menu from 'antd/lib/menu';
import Modal from 'antd/lib/modal';
import { LoadingOutlined } from '@ant-design/icons';
// eslint-disable-next-line import/no-extraneous-dependencies
import { MenuInfo } from 'rc-menu/lib/interface';
import DumpSubmenu from './dump-submenu';
@ -25,6 +26,7 @@ interface Props {
inferenceIsActive: boolean;
taskDimension: DimensionType;
onClickMenu: (params: MenuInfo, file?: File) => void;
exportIsActive: boolean;
}
export enum Actions {
@ -35,6 +37,7 @@ export enum Actions {
RUN_AUTO_ANNOTATION = 'run_auto_annotation',
MOVE_TASK_TO_PROJECT = 'move_task_to_project',
OPEN_BUG_TRACKER = 'open_bug_tracker',
EXPORT_TASK = 'export_task',
}
export default function ActionsMenuComponent(props: Props): JSX.Element {
@ -50,6 +53,7 @@ export default function ActionsMenuComponent(props: Props): JSX.Element {
exportActivities,
loadActivity,
taskDimension,
exportIsActive,
} = props;
let latestParams: MenuInfo | null = null;
@ -128,6 +132,10 @@ export default function ActionsMenuComponent(props: Props): JSX.Element {
<Menu.Item disabled={inferenceIsActive} key={Actions.RUN_AUTO_ANNOTATION}>
Automatic annotation
</Menu.Item>
<Menu.Item key={Actions.EXPORT_TASK} disabled={exportIsActive}>
{exportIsActive && <LoadingOutlined id='cvat-export-task-loading' />}
Export Task
</Menu.Item>
<hr />
<Menu.Item key={Actions.MOVE_TASK_TO_PROJECT}>Move to project</Menu.Item>
<Menu.Item key={Actions.DELETE_TASK}>Delete</Menu.Item>

@ -48,3 +48,7 @@
.cvat-menu-icon {
transform: scale(0.5);
}
#cvat-export-task-loading {
margin-left: 10px;
}

@ -95,7 +95,12 @@ function ShortcutsDialog(props: StateToProps & DispatchToProps): JSX.Element | n
zIndex={1001} /* default antd is 1000 */
className='cvat-shortcuts-modal-window'
>
<Table
dataSource={dataSource}
columns={columns}
size='small'
className='cvat-shortcuts-modal-window-table'
/>
</Modal>
);
}

@ -11,6 +11,23 @@
height: 100%;
width: 100%;
.cvat-tasks-page-top-bar {
> div:nth-child(1) {
> div:nth-child(1) {
width: 100%;
> div:nth-child(1) {
display: flex;
> span:nth-child(2) {
width: 200px;
margin-left: 10px;
}
}
}
}
}
> div:nth-child(2) {
height: 83%;
padding-top: 10px;
@ -19,22 +36,6 @@
> div:nth-child(3) {
padding-top: 10px;
}
> div:nth-child(1) {
> div:nth-child(1) {
display: flex;
> span:nth-child(2) {
width: 200px;
margin-left: 10px;
}
}
> div:nth-child(2) {
display: flex;
justify-content: flex-end;
}
}
}
/* empty-tasks icon */
@ -157,3 +158,11 @@
#cvat-create-task-button {
padding: 0 30px;
}
#cvat-import-task-button {
padding: 0 30px;
}
#cvat-import-task-button-loading {
margin-left: 10px;
}

@ -1,4 +1,4 @@
// Copyright (C) 2020-2021 Intel Corporation
//
// SPDX-License-Identifier: MIT
@ -25,6 +25,8 @@ interface TasksPageProps {
numberOfHiddenTasks: number;
onGetTasks: (gettingQuery: TasksQuery) => void;
hideEmptyTasks: (hideEmpty: boolean) => void;
onImportTask: (file: File) => void;
taskImporting: boolean;
}
function getSearchField(gettingQuery: TasksQuery): string {
@ -81,9 +83,20 @@ class TasksPageComponent extends React.PureComponent<TasksPageProps & RouteCompo
}
public componentDidUpdate(prevProps: TasksPageProps & RouteComponentProps): void {
const {
location,
gettingQuery,
tasksFetching,
numberOfHiddenTasks,
onGetTasks,
hideEmptyTasks,
taskImporting,
} = this.props;
if (
prevProps.location.search !== location.search ||
(prevProps.taskImporting === true && taskImporting === false)
) {
// get new tasks if any query changes
const query = updateQuery(gettingQuery, location.search);
message.destroy();
@ -186,7 +199,9 @@ class TasksPageComponent extends React.PureComponent<TasksPageProps & RouteCompo
}
public render(): JSX.Element {
const {
tasksFetching, gettingQuery, numberOfVisibleTasks, onImportTask, taskImporting,
} = this.props;
if (tasksFetching) {
return <Spin size='large' className='cvat-spinner' />;
@ -194,7 +209,12 @@ class TasksPageComponent extends React.PureComponent<TasksPageProps & RouteCompo
return (
<div className='cvat-tasks-page'>
<TopBar
onSearch={this.handleSearch}
searchValue={getSearchField(gettingQuery)}
onFileUpload={onImportTask}
taskImporting={taskImporting}
/>
{numberOfVisibleTasks ? (
<TaskListContainer onSwitchPage={this.handlePagination} />
) : (

@ -5,50 +5,83 @@
import React from 'react';
import { useHistory } from 'react-router';
import { Row, Col } from 'antd/lib/grid';
import { PlusOutlined, UploadOutlined, LoadingOutlined } from '@ant-design/icons';
import Button from 'antd/lib/button';
import Input from 'antd/lib/input';
import Text from 'antd/lib/typography/Text';
import Upload from 'antd/lib/upload';
import SearchTooltip from 'components/search-tooltip/search-tooltip';
interface VisibleTopBarProps {
onSearch: (value: string) => void;
onFileUpload(file: File): void;
searchValue: string;
taskImporting: boolean;
}
export default function TopBarComponent(props: VisibleTopBarProps): JSX.Element {
const {
searchValue, onSearch, onFileUpload, taskImporting,
} = props;
const history = useHistory();
return (
<Row className='cvat-tasks-page-top-bar' justify='center' align='middle'>
<Col md={22} lg={18} xl={16} xxl={14}>
<Row justify='space-between' align='bottom'>
<Col>
<Text className='cvat-title'>Tasks</Text>
<SearchTooltip instance='task'>
<Input.Search
className='cvat-task-page-search-task'
defaultValue={searchValue}
onSearch={onSearch}
size='large'
placeholder='Search'
/>
</SearchTooltip>
</Col>
<Col>
<Row gutter={8}>
<Col>
<Upload
accept='.zip'
multiple={false}
showUploadList={false}
beforeUpload={(file: File): boolean => {
onFileUpload(file);
return false;
}}
>
<Button
size='large'
id='cvat-import-task-button'
type='primary'
disabled={taskImporting}
icon={<UploadOutlined />}
>
Import Task
{taskImporting && <LoadingOutlined id='cvat-import-task-button-loading' />}
</Button>
</Upload>
</Col>
<Col>
<Button
size='large'
id='cvat-create-task-button'
type='primary'
onClick={(): void => history.push('/tasks/create')}
icon={<PlusOutlined />}
>
Create new task
</Button>
</Col>
</Row>
</Col>
</Row>
</Col>
</Row>
);
}

@ -16,6 +16,7 @@ import {
loadAnnotationsAsync,
exportDatasetAsync,
deleteTaskAsync,
exportTaskAsync,
switchMoveTaskModalVisible,
} from 'actions/tasks-actions';
@ -29,6 +30,7 @@ interface StateToProps {
dumpActivities: string[] | null;
exportActivities: string[] | null;
inferenceIsActive: boolean;
exportIsActive: boolean;
}
interface DispatchToProps {
@ -37,6 +39,7 @@ interface DispatchToProps {
exportDataset: (taskInstance: any, exporter: any) => void;
deleteTask: (taskInstance: any) => void;
openRunModelWindow: (taskInstance: any) => void;
exportTask: (taskInstance: any) => void;
openMoveTaskToProjectWindow: (taskInstance: any) => void;
}
@ -48,7 +51,9 @@ function mapStateToProps(state: CombinedState, own: OwnProps): StateToProps {
const {
formats: { annotationFormats },
tasks: {
activities: {
dumps, loads, exports: activeExports, backups,
},
},
} = state;
@ -58,6 +63,7 @@ function mapStateToProps(state: CombinedState, own: OwnProps): StateToProps {
loadActivity: tid in loads ? loads[tid] : null,
annotationFormats,
inferenceIsActive: tid in state.models.inferences,
exportIsActive: tid in backups,
};
}
@ -78,6 +84,9 @@ function mapDispatchToProps(dispatch: any): DispatchToProps {
openRunModelWindow: (taskInstance: any): void => {
dispatch(modelsActions.showRunModelDialog(taskInstance));
},
exportTask: (taskInstance: any): void => {
dispatch(exportTaskAsync(taskInstance));
},
openMoveTaskToProjectWindow: (taskId: number): void => {
dispatch(switchMoveTaskModalVisible(true, taskId));
},
@ -92,12 +101,14 @@ function ActionsMenuContainer(props: OwnProps & StateToProps & DispatchToProps):
dumpActivities,
exportActivities,
inferenceIsActive,
exportIsActive,
loadAnnotations,
dumpAnnotations,
exportDataset,
deleteTask,
openRunModelWindow,
exportTask,
openMoveTaskToProjectWindow,
} = props;
@ -131,6 +142,8 @@ function ActionsMenuContainer(props: OwnProps & StateToProps & DispatchToProps):
window.open(`${taskInstance.bugTracker}`, '_blank');
} else if (action === Actions.RUN_AUTO_ANNOTATION) {
openRunModelWindow(taskInstance);
} else if (action === Actions.EXPORT_TASK) {
exportTask(taskInstance);
} else if (action === Actions.MOVE_TASK_TO_PROJECT) {
openMoveTaskToProjectWindow(taskInstance.id);
}
@ -150,6 +163,7 @@ function ActionsMenuContainer(props: OwnProps & StateToProps & DispatchToProps):
inferenceIsActive={inferenceIsActive}
onClickMenu={onClickMenu}
taskDimension={taskInstance.dimension}
exportIsActive={exportIsActive}
/>
);
}

@ -8,7 +8,7 @@ import { Task, TasksQuery, CombinedState } from 'reducers/interfaces';
import TasksPageComponent from 'components/tasks-page/tasks-page';
import { getTasksAsync, hideEmptyTasks, importTaskAsync } from 'actions/tasks-actions';
interface StateToProps {
tasksFetching: boolean;
@ -16,11 +16,13 @@ interface StateToProps {
numberOfTasks: number;
numberOfVisibleTasks: number;
numberOfHiddenTasks: number;
taskImporting: boolean;
}
interface DispatchToProps {
onGetTasks: (gettingQuery: TasksQuery) => void;
hideEmptyTasks: (hideEmpty: boolean) => void;
onImportTask: (file: File) => void;
}
function mapStateToProps(state: CombinedState): StateToProps {
@ -34,6 +36,7 @@ function mapStateToProps(state: CombinedState): StateToProps {
numberOfHiddenTasks: tasks.hideEmpty ?
tasks.current.filter((task: Task): boolean => !task.instance.jobs.length).length :
0,
taskImporting: state.tasks.importing,
};
}
@ -45,6 +48,9 @@ function mapDispatchToProps(dispatch: any): DispatchToProps {
hideEmptyTasks: (hideEmpty: boolean): void => {
dispatch(hideEmptyTasks(hideEmpty));
},
onImportTask: (file: File): void => {
dispatch(importTaskAsync(file));
},
};
}

@ -73,6 +73,7 @@ export interface Task {
}
export interface TasksState {
importing: boolean;
initialized: boolean;
fetching: boolean;
updating: boolean;
@ -105,6 +106,9 @@ export interface TasksState {
status: string;
error: string;
};
backups: {
[tid: number]: boolean;
};
};
}
@ -249,9 +253,11 @@ export interface NotificationsState {
updating: null | ErrorState;
dumping: null | ErrorState;
loading: null | ErrorState;
exportingAsDataset: null | ErrorState;
deleting: null | ErrorState;
creating: null | ErrorState;
exporting: null | ErrorState;
importing: null | ErrorState;
moving: null | ErrorState;
};
formats: {
@ -318,6 +324,7 @@ export interface NotificationsState {
messages: {
tasks: {
loadingDone: string;
importingDone: string;
movingDone: string;
};
models: {

@ -42,9 +42,11 @@ const defaultState: NotificationsState = {
updating: null,
dumping: null,
loading: null,
exportingAsDataset: null,
deleting: null,
creating: null,
exporting: null,
importing: null,
moving: null,
},
formats: {
@ -111,6 +113,7 @@ const defaultState: NotificationsState = {
messages: {
tasks: {
loadingDone: '',
importingDone: '',
movingDone: '',
},
models: {
@ -313,7 +316,7 @@ export default function (state = defaultState, action: AnyAction): Notifications
...state.errors,
tasks: {
...state.errors.tasks,
exportingAsDataset: {
message:
'Could not export dataset for the ' +
`<a href="/tasks/${taskID}" target="_blank">task ${taskID}</a>`,
@ -389,24 +392,6 @@ export default function (state = defaultState, action: AnyAction): Notifications
},
};
}
case TasksActionTypes.MOVE_TASK_TO_PROJECT_FAILED: {
const taskID = action.payload.task.id;
return {
...state,
errors: {
...state.errors,
tasks: {
...state.errors.tasks,
moving: {
message:
'Could not move the' +
`<a href="/tasks/${taskID}" target="_blank">task ${taskID}</a> to a project`,
reason: action.payload.error.toString(),
},
},
},
};
}
case TasksActionTypes.DUMP_ANNOTATIONS_FAILED: {
const taskID = action.payload.task.id;
return {
@ -460,16 +445,45 @@ export default function (state = defaultState, action: AnyAction): Notifications
},
};
}
case TasksActionTypes.EXPORT_TASK_FAILED: {
return {
...state,
errors: {
...state.errors,
tasks: {
...state.errors.tasks,
exporting: {
message: 'Could not export the task',
reason: action.payload.error.toString(),
},
},
},
};
}
case TasksActionTypes.IMPORT_TASK_FAILED: {
return {
...state,
errors: {
...state.errors,
tasks: {
...state.errors.tasks,
importing: {
message: 'Could not import the task',
reason: action.payload.error.toString(),
},
},
},
};
}
case TasksActionTypes.IMPORT_TASK_SUCCESS: {
const taskID = action.payload.task.id;
return {
...state,
messages: {
...state.messages,
tasks: {
...state.messages.tasks,
importingDone: `Task has been imported successfully <a href="/tasks/${taskID}">Open task</a>`,
},
},
};

@ -3,6 +3,7 @@
// SPDX-License-Identifier: MIT
import { AnyAction } from 'redux';
import { omit } from 'lodash';
import { BoundariesActionTypes } from 'actions/boundaries-actions';
import { TasksActionTypes } from 'actions/tasks-actions';
import { AuthActionTypes } from 'actions/auth-actions';
@ -40,7 +41,9 @@ const defaultState: TasksState = {
status: '',
error: '',
},
backups: {},
},
importing: false,
};
export default (state: TasksState = defaultState, action: AnyAction): TasksState => {
@ -242,6 +245,49 @@ export default (state: TasksState = defaultState, action: AnyAction): TasksState
},
};
}
case TasksActionTypes.EXPORT_TASK: {
const { taskID } = action.payload;
const { backups } = state.activities;
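// 'backups' keeps a taskID -> true flag while a backup request is in progress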
return {
...state,
activities: {
...state.activities,
backups: {
...backups,
[taskID]: true,
},
},
};
}
case TasksActionTypes.EXPORT_TASK_FAILED:
case TasksActionTypes.EXPORT_TASK_SUCCESS: {
const { taskID } = action.payload;
const { backups } = state.activities;
return {
...state,
activities: {
...state.activities,
backups: omit(backups, [taskID]),
},
};
}
case TasksActionTypes.IMPORT_TASK: {
return {
...state,
importing: true,
};
}
case TasksActionTypes.IMPORT_TASK_FAILED:
case TasksActionTypes.IMPORT_TASK_SUCCESS: {
return {
...state,
importing: false,
};
}
case TasksActionTypes.CREATE_TASK: {
return {
...state,

@ -8,18 +8,18 @@ import tempfile
from datetime import timedelta
import django_rq
from datumaro.cli.util import make_file_name
from datumaro.util import to_snake_case
from django.utils import timezone
import cvat.apps.dataset_manager.task as task
from cvat.apps.engine.backup import TaskExporter
from cvat.apps.engine.log import slogger
from cvat.apps.engine.models import Task
from datumaro.cli.util import make_file_name
from datumaro.util import to_snake_case
from .formats.registry import EXPORT_FORMATS, IMPORT_FORMATS
from .util import current_function_name
_MODULE_NAME = __package__ + '.' + osp.splitext(osp.basename(__file__))[0]
def log_exception(logger=None, exc_info=True):
if logger is None:
@ -97,6 +97,40 @@ def clear_export_cache(task_id, file_path, file_ctime):
log_exception(slogger.task[task_id])
raise
def backup_task(task_id, output_path):
try:
db_task = Task.objects.get(pk=task_id)
cache_dir = get_export_cache_dir(db_task)
output_path = osp.join(cache_dir, output_path)
task_time = timezone.localtime(db_task.updated_date).timestamp()
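# reuse a cached archive only while it is newer than the task's last update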
if not (osp.exists(output_path) and \
task_time <= osp.getmtime(output_path)):
os.makedirs(cache_dir, exist_ok=True)
with tempfile.TemporaryDirectory(dir=cache_dir) as temp_dir:
temp_file = osp.join(temp_dir, 'dump')
task_exporter = TaskExporter(task_id)
task_exporter.export_to(temp_file)
os.replace(temp_file, output_path)
archive_ctime = osp.getctime(output_path)
scheduler = django_rq.get_scheduler()
cleaning_job = scheduler.enqueue_in(time_delta=CACHE_TTL,
func=clear_export_cache,
task_id=task_id,
file_path=output_path, file_ctime=archive_ctime)
slogger.task[task_id].info(
"The task '{}' is backuped at '{}' "
"and available for downloading for the next {}. "
"Export cache cleaning job is enqueued, id '{}'".format(
db_task.name, output_path, CACHE_TTL,
cleaning_job.id))
return output_path
except Exception:
log_exception(slogger.task[task_id])
raise
def get_export_formats():
return list(EXPORT_FORMATS.values())
@ -108,4 +142,4 @@
return {
'importers': get_import_formats(),
'exporters': get_export_formats(),
}

@ -0,0 +1,54 @@
## Task and Project Import/Export functionality
This document describes the high-level design of the import/export functionality for tasks and projects.
API endpoints:
- Import task
- endpoint: `/api/v1/tasks?action=import`
- method: `POST`
- Content-Type: `multipart/form-data`
- returns: json
- Export task
- endpoint: `/api/v1/tasks/{id}?action=export`
- method: `GET`
- returns: zip archive
The zip archive has the following structure:
```
.
├── data
│   ├── {user uploaded data}
│   ├── manifest.jsonl
├── task.json
└── annotations.json
```
- Import project
- endpoint: `/api/v1/projects?action=import`
- method: `POST`
- Content-Type: `multipart/form-data`
- returns: json
- Export project
- endpoint: `/api/v1/projects/<id>?action=export`
- method: `GET`
- returns: zip archive
The zip archive has the following structure:
```
.
├── tasks
│   ├── task_1
│   ├── task_2
│   ├── ...
│   └── task_N
└── project.json
```

@ -0,0 +1,550 @@
# Copyright (C) 2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
import io
import os
from enum import Enum
import shutil
from zipfile import ZipFile
from django.conf import settings
from django.db import transaction
from rest_framework.parsers import JSONParser
from rest_framework.renderers import JSONRenderer
import cvat.apps.dataset_manager as dm
from cvat.apps.engine import models
from cvat.apps.engine.log import slogger
from cvat.apps.engine.serializers import (AttributeSerializer, DataSerializer,
LabeledDataSerializer, SegmentSerializer, SimpleJobSerializer, TaskSerializer,
ReviewSerializer, IssueSerializer, CommentSerializer)
from cvat.apps.engine.utils import av_scan_paths
from cvat.apps.engine.models import StorageChoice, StorageMethodChoice, DataChoice
from cvat.apps.engine.task import _create_thread
class Version(Enum):
V1 = '1.0'
class _TaskBackupBase():
MANIFEST_FILENAME = 'task.json'
ANNOTATIONS_FILENAME = 'annotations.json'
DATA_DIRNAME = 'data'
TASK_DIRNAME = 'task'
def _prepare_meta(self, allowed_keys, meta):
keys_to_drop = set(meta.keys()) - allowed_keys
if keys_to_drop:
logger = slogger.task[self._db_task.id] if hasattr(self, '_db_task') else slogger.glob
logger.warning('the following keys are dropped: {}'.format(keys_to_drop))
for key in keys_to_drop:
del meta[key]
return meta
def _prepare_task_meta(self, task):
allowed_fields = {
'name',
'bug_tracker',
'status',
'subset',
'labels',
}
return self._prepare_meta(allowed_fields, task)
def _prepare_data_meta(self, data):
allowed_fields = {
'chunk_size',
'image_quality',
'start_frame',
'stop_frame',
'frame_filter',
'chunk_type',
'storage_method',
'storage',
}
self._prepare_meta(allowed_fields, data)
if 'frame_filter' in data and not data['frame_filter']:
data.pop('frame_filter')
return data
def _prepare_job_meta(self, job):
allowed_fields = {
'status',
}
return self._prepare_meta(allowed_fields, job)
def _prepare_attribute_meta(self, attribute):
allowed_fields = {
'name',
'mutable',
'input_type',
'default_value',
'values',
}
return self._prepare_meta(allowed_fields, attribute)
def _prepare_label_meta(self, labels):
allowed_fields = {
'name',
'color',
'attributes',
}
return self._prepare_meta(allowed_fields, labels)
def _prepare_annotations(self, annotations, label_mapping):
allowed_fields = {
'label',
'label_id',
'type',
'occluded',
'outside',
'z_order',
'points',
'frame',
'group',
'source',
'attributes',
'shapes',
}
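# The same helpers serve both directions: export replaces database ids with
# names, while import replaces names with the newly created database ids.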
def _update_attribute(attribute, label):
if 'name' in attribute:
source, dest = attribute.pop('name'), 'spec_id'
else:
source, dest = attribute.pop('spec_id'), 'name'
attribute[dest] = label_mapping[label]['attributes'][source]
def _update_label(shape):
if 'label_id' in shape:
source, dest = shape.pop('label_id'), 'label'
elif 'label' in shape:
source, dest = shape.pop('label'), 'label_id'
shape[dest] = label_mapping[source]['value']
return source
for tag in annotations['tags']:
label = _update_label(tag)
for attr in tag['attributes']:
_update_attribute(attr, label)
self._prepare_meta(allowed_fields, tag)
for shape in annotations['shapes']:
label = _update_label(shape)
for attr in shape['attributes']:
_update_attribute(attr, label)
self._prepare_meta(allowed_fields, shape)
for track in annotations['tracks']:
label = _update_label(track)
for shape in track['shapes']:
for attr in shape['attributes']:
_update_attribute(attr, label)
self._prepare_meta(allowed_fields, shape)
for attr in track['attributes']:
_update_attribute(attr, label)
self._prepare_meta(allowed_fields, track)
return annotations
def _prepare_review_meta(self, review):
allowed_fields = {
'estimated_quality',
'status',
'issues',
}
return self._prepare_meta(allowed_fields, review)
def _prepare_issue_meta(self, issue):
allowed_fields = {
'frame',
'position',
'created_date',
'resolved_date',
'comments',
}
return self._prepare_meta(allowed_fields, issue)
def _prepare_comment_meta(self, comment):
allowed_fields = {
'message',
'created_date',
'updated_date',
}
return self._prepare_meta(allowed_fields, comment)
def _get_db_jobs(self):
if self._db_task:
db_segments = list(self._db_task.segment_set.all().prefetch_related('job_set'))
db_segments.sort(key=lambda i: i.job_set.first().id)
db_jobs = (s.job_set.first() for s in db_segments)
return db_jobs
return ()
class TaskExporter(_TaskBackupBase):
def __init__(self, pk, version=Version.V1):
self._db_task = models.Task.objects.prefetch_related('data__images').select_related('data__video').get(pk=pk)
self._db_data = self._db_task.data
self._version = version
db_labels = (self._db_task.project if self._db_task.project_id else self._db_task).label_set.all().prefetch_related(
'attributespec_set')
self._label_mapping = {}
self._attribute_mapping = {}
for db_label in db_labels:
self._label_mapping[db_label.id] = {
'value': db_label.name,
'attributes': {},
}
for db_attribute in db_label.attributespec_set.all():
self._label_mapping[db_label.id]['attributes'][db_attribute.id] = db_attribute.name
def _write_files(self, source_dir, zip_object, files, target_dir):
for filename in files:
arcname = os.path.normpath(
os.path.join(
target_dir,
os.path.relpath(filename, source_dir),
)
)
zip_object.write(filename=filename, arcname=arcname)
def _write_directory(self, source_dir, zip_object, target_dir, recursive=True, exclude_files=None):
for root, dirs, files in os.walk(source_dir, topdown=True):
if not recursive:
dirs.clear()
if files:
self._write_files(
source_dir=source_dir,
zip_object=zip_object,
files=(os.path.join(root, f) for f in files if not exclude_files or f not in exclude_files),
target_dir=target_dir,
)
def _write_data(self, zip_object):
if self._db_data.storage == StorageChoice.LOCAL:
self._write_directory(
source_dir=self._db_data.get_upload_dirname(),
zip_object=zip_object,
target_dir=self.DATA_DIRNAME,
)
elif self._db_data.storage == StorageChoice.SHARE:
data_dir = settings.SHARE_ROOT
if hasattr(self._db_data, 'video'):
media_files = (os.path.join(data_dir, self._db_data.video.path), )
else:
media_files = (os.path.join(data_dir, im.path) for im in self._db_data.images.all().order_by('frame'))
self._write_files(
source_dir=data_dir,
zip_object=zip_object,
files=media_files,
target_dir=self.DATA_DIRNAME
)
upload_dir = self._db_data.get_upload_dirname()
self._write_files(
source_dir=upload_dir,
zip_object=zip_object,
files=(os.path.join(upload_dir, f) for f in ('manifest.jsonl',)),
target_dir=self.DATA_DIRNAME
)
else:
raise NotImplementedError()
def _write_task(self, zip_object):
task_dir = self._db_task.get_task_dirname()
self._write_directory(
source_dir=task_dir,
zip_object=zip_object,
target_dir=self.TASK_DIRNAME,
recursive=False,
)
def _write_manifest(self, zip_object):
def serialize_task():
task_serializer = TaskSerializer(self._db_task)
task_serializer.fields.pop('url')
task_serializer.fields.pop('owner')
task_serializer.fields.pop('assignee')
task_serializer.fields.pop('segments')
task = self._prepare_task_meta(task_serializer.data)
task['labels'] = [self._prepare_label_meta(l) for l in task['labels']]
for label in task['labels']:
label['attributes'] = [self._prepare_attribute_meta(a) for a in label['attributes']]
return task
def serialize_comment(db_comment):
comment_serializer = CommentSerializer(db_comment)
comment_serializer.fields.pop('author')
return self._prepare_comment_meta(comment_serializer.data)
def serialize_issue(db_issue):
issue_serializer = IssueSerializer(db_issue)
issue_serializer.fields.pop('owner')
issue_serializer.fields.pop('resolver')
issue = issue_serializer.data
issue['comments'] = (serialize_comment(c) for c in db_issue.comment_set.order_by('id'))
return self._prepare_issue_meta(issue)
def serialize_review(db_review):
review_serializer = ReviewSerializer(db_review)
review_serializer.fields.pop('reviewer')
review_serializer.fields.pop('assignee')
review = review_serializer.data
review['issues'] = (serialize_issue(i) for i in db_review.issue_set.order_by('id'))
return self._prepare_review_meta(review)
def serialize_segment(db_segment):
db_job = db_segment.job_set.first()
job_serializer = SimpleJobSerializer(db_job)
job_serializer.fields.pop('url')
job_serializer.fields.pop('assignee')
job_serializer.fields.pop('reviewer')
job_data = self._prepare_job_meta(job_serializer.data)
segment_serializer = SegmentSerializer(db_segment)
segment_serializer.fields.pop('jobs')
segment = segment_serializer.data
segment.update(job_data)
db_reviews = db_job.review_set.order_by('id')
segment['reviews'] = (serialize_review(r) for r in db_reviews)
return segment
def serialize_jobs():
db_segments = list(self._db_task.segment_set.all())
db_segments.sort(key=lambda i: i.job_set.first().id)
return (serialize_segment(s) for s in db_segments)
def serialize_data():
data_serializer = DataSerializer(self._db_data)
data = data_serializer.data
data['chunk_type'] = data.pop('compressed_chunk_type')
return self._prepare_data_meta(data)
task = serialize_task()
task['version'] = self._version.value
task['data'] = serialize_data()
task['jobs'] = serialize_jobs()
zip_object.writestr(self.MANIFEST_FILENAME, data=JSONRenderer().render(task))
def _write_annotations(self, zip_object):
def serialize_annotations():
job_annotations = []
db_jobs = self._get_db_jobs()
db_job_ids = (j.id for j in db_jobs)
for db_job_id in db_job_ids:
annotations = dm.task.get_job_data(db_job_id)
annotations_serializer = LabeledDataSerializer(data=annotations)
annotations_serializer.is_valid(raise_exception=True)
job_annotations.append(self._prepare_annotations(annotations_serializer.data, self._label_mapping))
return job_annotations
annotations = serialize_annotations()
zip_object.writestr(self.ANNOTATIONS_FILENAME, data=JSONRenderer().render(annotations))
def export_to(self, filename):
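# media read directly from a network share is not copied into the task,
# so there is no raw data to pack into the archive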
if self._db_task.data.storage_method == StorageMethodChoice.FILE_SYSTEM and \
self._db_task.data.storage == StorageChoice.SHARE:
raise Exception('The task cannot be exported because it does not contain any raw data')
with ZipFile(filename, 'w') as output_file:
self._write_data(output_file)
self._write_task(output_file)
self._write_manifest(output_file)
self._write_annotations(output_file)
class TaskImporter(_TaskBackupBase):
def __init__(self, filename, user_id):
self._filename = filename
self._user_id = user_id
self._manifest, self._annotations = self._read_meta()
self._version = self._read_version()
self._labels_mapping = {}
self._db_task = None
def _read_meta(self):
with ZipFile(self._filename, 'r') as input_file:
manifest = JSONParser().parse(io.BytesIO(input_file.read(self.MANIFEST_FILENAME)))
annotations = JSONParser().parse(io.BytesIO(input_file.read(self.ANNOTATIONS_FILENAME)))
return manifest, annotations
def _read_version(self):
version = self._manifest.pop('version')
try:
return Version(version)
except ValueError:
raise ValueError('{} version is not supported'.format(version))
@staticmethod
def _prepare_dirs(filepath):
target_dir = os.path.dirname(filepath)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
def _create_labels(self, db_task, labels):
label_mapping = {}
for label in labels:
label_name = label['name']
attributes = label.pop('attributes', [])
db_label = models.Label.objects.create(task=db_task, **label)
label_mapping[label_name] = {
'value': db_label.id,
'attributes': {},
}
for attribute in attributes:
attribute_name = attribute['name']
attribute_serializer = AttributeSerializer(data=attribute)
attribute_serializer.is_valid(raise_exception=True)
db_attribute = attribute_serializer.save(label=db_label)
label_mapping[label_name]['attributes'][attribute_name] = db_attribute.id
return label_mapping
def _create_annotations(self, db_job, annotations):
self._prepare_annotations(annotations, self._labels_mapping)
serializer = LabeledDataSerializer(data=annotations)
serializer.is_valid(raise_exception=True)
dm.task.put_job_data(db_job.id, serializer.data)
@staticmethod
def _calculate_segment_size(jobs):
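# Illustrative example: jobs spanning frames [0, 24] and [20, 44] give
# segment_size = 24 - 0 + 1 = 25 and overlap = 24 - 20 + 1 = 5.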
segment_size = jobs[0]['stop_frame'] - jobs[0]['start_frame'] + 1
overlap = 0 if len(jobs) == 1 else jobs[0]['stop_frame'] - jobs[1]['start_frame'] + 1
return segment_size, overlap
def _import_task(self):
def _create_comment(comment, db_issue):
comment['issue'] = db_issue.id
comment_serializer = CommentSerializer(data=comment)
comment_serializer.is_valid(raise_exception=True)
db_comment = comment_serializer.save()
return db_comment
def _create_issue(issue, db_review, db_job):
issue['review'] = db_review.id
issue['job'] = db_job.id
comments = issue.pop('comments')
issue_serializer = IssueSerializer(data=issue)
issue_serializer.is_valid(raise_exception=True)
db_issue = issue_serializer.save()
for comment in comments:
_create_comment(comment, db_issue)
return db_issue
def _create_review(review, db_job):
review['job'] = db_job.id
issues = review.pop('issues')
review_serializer = ReviewSerializer(data=review)
review_serializer.is_valid(raise_exception=True)
db_review = review_serializer.save()
for issue in issues:
_create_issue(issue, db_review, db_job)
return db_review
data = self._manifest.pop('data')
labels = self._manifest.pop('labels')
jobs = self._manifest.pop('jobs')
self._prepare_task_meta(self._manifest)
self._manifest['segment_size'], self._manifest['overlap'] = self._calculate_segment_size(jobs)
self._manifest["owner_id"] = self._user_id
self._db_task = models.Task.objects.create(**self._manifest)
task_path = self._db_task.get_task_dirname()
if os.path.isdir(task_path):
shutil.rmtree(task_path)
os.makedirs(self._db_task.get_task_logs_dirname())
os.makedirs(self._db_task.get_task_artifacts_dirname())
self._labels_mapping = self._create_labels(self._db_task, labels)
self._prepare_data_meta(data)
data_serializer = DataSerializer(data=data)
data_serializer.is_valid(raise_exception=True)
db_data = data_serializer.save()
self._db_task.data = db_data
self._db_task.save()
data_path = self._db_task.data.get_upload_dirname()
uploaded_files = []
with ZipFile(self._filename, 'r') as input_file:
for f in input_file.namelist():
if f.startswith(self.DATA_DIRNAME + os.path.sep):
target_file = os.path.join(data_path, os.path.relpath(f, self.DATA_DIRNAME))
self._prepare_dirs(target_file)
with open(target_file, "wb") as out:
out.write(input_file.read(f))
uploaded_files.append(os.path.relpath(f, self.DATA_DIRNAME))
elif f.startswith(self.TASK_DIRNAME + os.path.sep):
target_file = os.path.join(task_path, os.path.relpath(f, self.TASK_DIRNAME))
self._prepare_dirs(target_file)
with open(target_file, "wb") as out:
out.write(input_file.read(f))
data['use_zip_chunks'] = data.pop('chunk_type') == DataChoice.IMAGESET
data = data_serializer.data
data['client_files'] = uploaded_files
_create_thread(self._db_task.pk, data.copy(), True)
db_data.start_frame = data['start_frame']
db_data.stop_frame = data['stop_frame']
db_data.frame_filter = data['frame_filter']
db_data.storage = StorageChoice.LOCAL
db_data.save(update_fields=['start_frame', 'stop_frame', 'frame_filter', 'storage'])
for db_job, job in zip(self._get_db_jobs(), jobs):
db_job.status = job['status']
db_job.save()
for review in job['reviews']:
_create_review(review, db_job)
def _import_annotations(self):
db_jobs = self._get_db_jobs()
for db_job, annotations in zip(db_jobs, self._annotations):
self._create_annotations(db_job, annotations)
def import_task(self):
self._import_task()
self._import_annotations()
return self._db_task
@transaction.atomic
def import_task(filename, user):
av_scan_paths(filename)
task_importer = TaskImporter(filename, user)
db_task = task_importer.import_task()
return db_task.id

@ -48,11 +48,12 @@ def files_to_ignore(directory):
return False
class IMediaReader(ABC):
def __init__(self, source_path, step, start, stop, dimension):
self._source_path = sorted(source_path)
self._step = step
self._start = start
self._stop = stop
self._dimension = dimension
@abstractmethod
def __iter__(self):
@ -89,7 +90,7 @@ class IMediaReader(ABC):
return range(self._start, self._stop, self._step)
class ImageListReader(IMediaReader):
def __init__(self, source_path, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
if not source_path:
raise Exception('No image found')
@ -105,6 +106,7 @@ class ImageListReader(IMediaReader):
step=step,
start=start,
stop=stop,
dimension=dimension
)
def __iter__(self):
@ -113,7 +115,14 @@ class ImageListReader(IMediaReader):
def filter(self, callback):
source_path = list(filter(callback, self._source_path))
ImageListReader.__init__(
self,
source_path,
step=self._step,
start=self._start,
stop=self._stop,
dimension=self._dimension
)
def get_path(self, i):
return self._source_path[i]
@ -125,19 +134,36 @@ class ImageListReader(IMediaReader):
return (pos - self._start + 1) / (self._stop - self._start)
def get_preview(self):
if self._dimension == DimensionType.DIM_3D:
fp = open(os.path.join(os.path.dirname(__file__), 'assets/3d_preview.jpeg'), "rb")
else:
fp = open(self._source_path[0], "rb")
return self._get_preview(fp)
def get_image_size(self, i):
if self._dimension == DimensionType.DIM_3D:
with open(self.get_path(i), 'rb') as f:
properties = ValidateDimension.get_pcd_properties(f)
return int(properties["WIDTH"]), int(properties["HEIGHT"])
img = Image.open(self._source_path[i])
return img.width, img.height
def reconcile(self, source_files, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
# FIXME
ImageListReader.__init__(self,
source_path=source_files,
step=step,
start=start,
stop=stop
)
self._dimension = dimension
@property
def absolute_source_paths(self):
return [self.get_path(idx) for idx, _ in enumerate(self._source_path)]
class DirectoryReader(ImageListReader):
def __init__(self, source_path, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
image_paths = []
for source in source_path:
for root, _, files in os.walk(source):
@ -149,10 +175,11 @@ class DirectoryReader(ImageListReader):
step=step,
start=start,
stop=stop,
dimension=dimension,
)
class ArchiveReader(DirectoryReader):
def __init__(self, source_path, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
self._archive_source = source_path[0]
extract_dir = source_path[1] if len(source_path) > 1 else os.path.dirname(source_path[0])
Archive(self._archive_source).extractall(extract_dir)
@ -163,10 +190,11 @@ class ArchiveReader(DirectoryReader):
step=step,
start=start,
stop=stop,
dimension=dimension
)
class PdfReader(ImageListReader):
def __init__(self, source_path, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
if not source_path:
raise Exception('No PDF found')
@ -194,21 +222,22 @@ class PdfReader(ImageListReader):
step=step,
start=start,
stop=stop,
dimension=dimension,
)
class ZipReader(ImageListReader):
def __init__(self, source_path, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
self._zip_source = zipfile.ZipFile(source_path[0], mode='r')
self.extract_dir = source_path[1] if len(source_path) > 1 else None
file_list = [f for f in self._zip_source.namelist() if files_to_ignore(f) and get_mime(f) == 'image']
super().__init__(file_list, step=step, start=start, stop=stop, dimension=dimension)
def __del__(self):
self._zip_source.close()
def get_preview(self):
if self._dimension == DimensionType.DIM_3D:
# TODO
fp = open(os.path.join(os.path.dirname(__file__), 'assets/3d_preview.jpeg'), "rb")
return self._get_preview(fp)
io_image = io.BytesIO(self._zip_source.read(self._source_path[0]))
@ -216,32 +245,20 @@ class ZipReader(ImageListReader):
def get_image_size(self, i):
if self._dimension == DimensionType.DIM_3D:
with open(self.get_path(i), 'rb') as f:
properties = ValidateDimension.get_pcd_properties(f)
return int(properties["WIDTH"]), int(properties["HEIGHT"])
img = Image.open(io.BytesIO(self._zip_source.read(self._source_path[i])))
return img.width, img.height
def get_image(self, i):
if self._dimension == DimensionType.DIM_3D:
return self.get_path(i)
return io.BytesIO(self._zip_source.read(self._source_path[i]))
def add_files(self, source_path):
root_path = os.path.split(self._zip_source.filename)[0]
for path in source_path:
self._zip_source.write(path, path.replace(root_path, ""))
def get_zip_filename(self): def get_zip_filename(self):
return self._zip_source.filename return self._zip_source.filename
def reconcile(self, source_files, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
self._dimension = dimension
super().__init__(
source_path=source_files,
step=step,
start=start,
stop=stop
)
def get_path(self, i): def get_path(self, i):
if self._zip_source.filename: if self._zip_source.filename:
return os.path.join(os.path.dirname(self._zip_source.filename), self._source_path[i]) \ return os.path.join(os.path.dirname(self._zip_source.filename), self._source_path[i]) \
@ -249,18 +266,28 @@ class ZipReader(ImageListReader):
else: # necessary for mime_type definition else: # necessary for mime_type definition
return self._source_path[i] return self._source_path[i]
def reconcile(self, source_files, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
super().reconcile(
source_files=source_files,
step=step,
start=start,
stop=stop,
dimension=dimension,
)
def extract(self): def extract(self):
self._zip_source.extractall(self.extract_dir if self.extract_dir else os.path.dirname(self._zip_source.filename)) self._zip_source.extractall(self.extract_dir if self.extract_dir else os.path.dirname(self._zip_source.filename))
if not self.extract_dir: if not self.extract_dir:
os.remove(self._zip_source.filename) os.remove(self._zip_source.filename)
class VideoReader(IMediaReader): class VideoReader(IMediaReader):
def __init__(self, source_path, step=1, start=0, stop=None): def __init__(self, source_path, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
super().__init__( super().__init__(
source_path=source_path, source_path=source_path,
step=step, step=step,
start=start, start=start,
stop=stop + 1 if stop is not None else stop, stop=stop + 1 if stop is not None else stop,
dimension=dimension,
) )
def _has_frame(self, i): def _has_frame(self, i):
@ -743,15 +770,15 @@ class ValidateDimension:
pcd_files = {} pcd_files = {}
for file in files: for file in files:
file_name, file_extension = file.rsplit('.', maxsplit=1) file_name, file_extension = os.path.splitext(file)
file_path = os.path.abspath(os.path.join(root, file)) file_path = os.path.abspath(os.path.join(root, file))
if file_extension == "bin": if file_extension == ".bin":
path = self.bin_operation(file_path, actual_path) path = self.bin_operation(file_path, actual_path)
pcd_files[file_name] = path pcd_files[file_name] = path
self.related_files[path] = [] self.related_files[path] = []
elif file_extension == "pcd": elif file_extension == ".pcd":
path = ValidateDimension.pcd_operation(file_path, actual_path) path = ValidateDimension.pcd_operation(file_path, actual_path)
if path == file_path: if path == file_path:
self.image_files[file_name] = file_path self.image_files[file_name] = file_path
@ -759,7 +786,8 @@ class ValidateDimension:
pcd_files[file_name] = path pcd_files[file_name] = path
self.related_files[path] = [] self.related_files[path] = []
else: else:
self.image_files[file_name] = file_path if _is_image(file_path):
self.image_files[file_name] = file_path
return pcd_files return pcd_files
def validate(self): def validate(self):

@ -1,4 +1,4 @@
# Copyright (C) 2019 Intel Corporation # Copyright (C) 2019-2021 Intel Corporation
# #
# SPDX-License-Identifier: MIT # SPDX-License-Identifier: MIT
@ -278,7 +278,7 @@ class DataSerializer(serializers.ModelSerializer):
model = models.Data model = models.Data
fields = ('chunk_size', 'size', 'image_quality', 'start_frame', 'stop_frame', 'frame_filter', fields = ('chunk_size', 'size', 'image_quality', 'start_frame', 'stop_frame', 'frame_filter',
'compressed_chunk_type', 'original_chunk_type', 'client_files', 'server_files', 'remote_files', 'use_zip_chunks', 'compressed_chunk_type', 'original_chunk_type', 'client_files', 'server_files', 'remote_files', 'use_zip_chunks',
'use_cache', 'copy_data') 'use_cache', 'copy_data', 'storage_method', 'storage')
# pylint: disable=no-self-use # pylint: disable=no-self-use
def validate_frame_filter(self, value): def validate_frame_filter(self, value):
@ -707,6 +707,9 @@ class LogEventSerializer(serializers.Serializer):
class AnnotationFileSerializer(serializers.Serializer): class AnnotationFileSerializer(serializers.Serializer):
annotation_file = serializers.FileField() annotation_file = serializers.FileField()
class TaskFileSerializer(serializers.Serializer):
task_file = serializers.FileField()
class ReviewSerializer(serializers.ModelSerializer): class ReviewSerializer(serializers.ModelSerializer):
assignee = BasicUserSerializer(allow_null=True, required=False) assignee = BasicUserSerializer(allow_null=True, required=False)
assignee_id = serializers.IntegerField(write_only=True, allow_null=True, required=False) assignee_id = serializers.IntegerField(write_only=True, allow_null=True, required=False)
@ -767,3 +770,10 @@ class CombinedReviewSerializer(ReviewSerializer):
models.Comment.objects.create(**comment) models.Comment.objects.create(**comment)
return db_review return db_review
class RelatedFileSerializer(serializers.ModelSerializer):
class Meta:
model = models.RelatedFile
fields = '__all__'
read_only_fields = ('path',)

@ -9,27 +9,25 @@ import sys
import rq import rq
import re import re
import shutil import shutil
from distutils.dir_util import copy_tree
from traceback import print_exception from traceback import print_exception
from urllib import parse as urlparse from urllib import parse as urlparse
from urllib import request as urlrequest from urllib import request as urlrequest
import requests import requests
import django_rq
from django.conf import settings
from django.db import transaction
from cvat.apps.engine.media_extractors import get_mime, MEDIA_TYPES, Mpeg4ChunkWriter, ZipChunkWriter, Mpeg4CompressedChunkWriter, ZipCompressedChunkWriter, ValidateDimension from cvat.apps.engine import models
from cvat.apps.engine.models import DataChoice, StorageMethodChoice, StorageChoice, RelatedFile from cvat.apps.engine.log import slogger
from cvat.apps.engine.media_extractors import (MEDIA_TYPES, Mpeg4ChunkWriter, Mpeg4CompressedChunkWriter,
ValidateDimension, ZipChunkWriter, ZipCompressedChunkWriter, get_mime)
from cvat.apps.engine.utils import av_scan_paths from cvat.apps.engine.utils import av_scan_paths
from cvat.apps.engine.models import DimensionType
from utils.dataset_manifest import ImageManifestManager, VideoManifestManager from utils.dataset_manifest import ImageManifestManager, VideoManifestManager
from utils.dataset_manifest.core import VideoManifestValidator from utils.dataset_manifest.core import VideoManifestValidator
from utils.dataset_manifest.utils import detect_related_images from utils.dataset_manifest.utils import detect_related_images
import django_rq
from django.conf import settings
from django.db import transaction
from distutils.dir_util import copy_tree
from . import models
from .log import slogger
############################# Low Level server API ############################# Low Level server API
def create(tid, data): def create(tid, data):
@ -41,12 +39,13 @@ def create(tid, data):
@transaction.atomic @transaction.atomic
def rq_handler(job, exc_type, exc_value, traceback): def rq_handler(job, exc_type, exc_value, traceback):
split = job.id.split('/') split = job.id.split('/')
tid = int(split[split.index('tasks') + 1]) tid = split[split.index('tasks') + 1]
try: try:
tid = int(tid)
db_task = models.Task.objects.select_for_update().get(pk=tid) db_task = models.Task.objects.select_for_update().get(pk=tid)
with open(db_task.get_log_path(), "wt") as log_file: with open(db_task.get_log_path(), "wt") as log_file:
print_exception(exc_type, exc_value, traceback, file=log_file) print_exception(exc_type, exc_value, traceback, file=log_file)
except models.Task.DoesNotExist: except (models.Task.DoesNotExist, ValueError):
pass # skip exceptions in the code pass # skip exceptions in the code
return False return False
@ -76,8 +75,9 @@ def _save_task_to_db(db_task):
segment_size = db_task.segment_size segment_size = db_task.segment_size
segment_step = segment_size segment_step = segment_size
if segment_size == 0: if segment_size == 0 or segment_size > db_task.data.size:
segment_size = db_task.data.size segment_size = db_task.data.size
db_task.segment_size = segment_size
# Segment step must be more than segment_size + overlap in single-segment tasks # Segment step must be more than segment_size + overlap in single-segment tasks
# Otherwise a task contains an extra segment # Otherwise a task contains an extra segment
@ -209,15 +209,15 @@ def _download_data(urls, upload_dir):
return list(local_files.keys()) return list(local_files.keys())
def _get_manifest_frame_indexer(start_frame=0, frame_step=1):
return lambda frame_id: start_frame + frame_id * frame_step
@transaction.atomic @transaction.atomic
def _create_thread(tid, data): def _create_thread(tid, data, isImport=False):
slogger.glob.info("create task #{}".format(tid)) slogger.glob.info("create task #{}".format(tid))
db_task = models.Task.objects.select_for_update().get(pk=tid) db_task = models.Task.objects.select_for_update().get(pk=tid)
db_data = db_task.data db_data = db_task.data
if db_task.data.size != 0:
raise NotImplementedError("Adding more data is not implemented")
upload_dir = db_data.get_upload_dirname() upload_dir = db_data.get_upload_dirname()
if data['remote_files']: if data['remote_files']:
@ -227,11 +227,11 @@ def _create_thread(tid, data):
media = _count_files(data, manifest_file) media = _count_files(data, manifest_file)
media, task_mode = _validate_data(media, manifest_file) media, task_mode = _validate_data(media, manifest_file)
if manifest_file: if manifest_file:
assert settings.USE_CACHE and db_data.storage_method == StorageMethodChoice.CACHE, \ assert settings.USE_CACHE and db_data.storage_method == models.StorageMethodChoice.CACHE, \
"File with meta information can be uploaded if 'Use cache' option is also selected" "File with meta information can be uploaded if 'Use cache' option is also selected"
if data['server_files']: if data['server_files']:
if db_data.storage == StorageChoice.LOCAL: if db_data.storage == models.StorageChoice.LOCAL:
_copy_data_from_share(data['server_files'], upload_dir) _copy_data_from_share(data['server_files'], upload_dir)
else: else:
upload_dir = settings.SHARE_ROOT upload_dir = settings.SHARE_ROOT
@ -244,16 +244,23 @@ def _create_thread(tid, data):
db_images = [] db_images = []
extractor = None extractor = None
manifest_index = _get_manifest_frame_indexer()
for media_type, media_files in media.items(): for media_type, media_files in media.items():
if media_files: if media_files:
if extractor is not None: if extractor is not None:
raise Exception('Combined data types are not supported') raise Exception('Combined data types are not supported')
source_paths=[os.path.join(upload_dir, f) for f in media_files] source_paths=[os.path.join(upload_dir, f) for f in media_files]
if media_type in {'archive', 'zip'} and db_data.storage == StorageChoice.SHARE: if media_type in {'archive', 'zip'} and db_data.storage == models.StorageChoice.SHARE:
source_paths.append(db_data.get_upload_dirname()) source_paths.append(db_data.get_upload_dirname())
upload_dir = db_data.get_upload_dirname() upload_dir = db_data.get_upload_dirname()
db_data.storage = StorageChoice.LOCAL db_data.storage = models.StorageChoice.LOCAL
if isImport and media_type == 'image' and db_data.storage == models.StorageChoice.SHARE:
manifest_index = _get_manifest_frame_indexer(db_data.start_frame, db_data.get_frame_step())
db_data.start_frame = 0
data['stop_frame'] = None
db_data.frame_filter = ''
extractor = MEDIA_TYPES[media_type]['extractor']( extractor = MEDIA_TYPES[media_type]['extractor'](
source_path=source_paths, source_path=source_paths,
step=db_data.get_frame_step(), step=db_data.get_frame_step(),
@ -261,22 +268,27 @@ def _create_thread(tid, data):
stop=data['stop_frame'], stop=data['stop_frame'],
) )
validate_dimension = ValidateDimension() validate_dimension = ValidateDimension()
if extractor.__class__ == MEDIA_TYPES['zip']['extractor']: if isinstance(extractor, MEDIA_TYPES['zip']['extractor']):
extractor.extract() extractor.extract()
validate_dimension.set_path(os.path.split(extractor.get_zip_filename())[0])
if db_data.storage == models.StorageChoice.LOCAL or \
(db_data.storage == models.StorageChoice.SHARE and \
isinstance(extractor, MEDIA_TYPES['zip']['extractor'])):
validate_dimension.set_path(upload_dir)
validate_dimension.validate() validate_dimension.validate()
if validate_dimension.dimension == DimensionType.DIM_3D: if validate_dimension.dimension == models.DimensionType.DIM_3D:
db_task.dimension = DimensionType.DIM_3D db_task.dimension = models.DimensionType.DIM_3D
extractor.reconcile( extractor.reconcile(
source_files=list(validate_dimension.related_files.keys()), source_files=[os.path.join(upload_dir, f) for f in validate_dimension.related_files.keys()],
step=db_data.get_frame_step(), step=db_data.get_frame_step(),
start=db_data.start_frame, start=db_data.start_frame,
stop=data['stop_frame'], stop=data['stop_frame'],
dimension=DimensionType.DIM_3D, dimension=models.DimensionType.DIM_3D,
) )
extractor.add_files(validate_dimension.converted_files)
related_images = {} related_images = {}
if isinstance(extractor, MEDIA_TYPES['image']['extractor']): if isinstance(extractor, MEDIA_TYPES['image']['extractor']):
@ -301,8 +313,8 @@ def _create_thread(tid, data):
job.save_meta() job.save_meta()
update_progress.call_counter = (update_progress.call_counter + 1) % len(progress_animation) update_progress.call_counter = (update_progress.call_counter + 1) % len(progress_animation)
compressed_chunk_writer_class = Mpeg4CompressedChunkWriter if db_data.compressed_chunk_type == DataChoice.VIDEO else ZipCompressedChunkWriter compressed_chunk_writer_class = Mpeg4CompressedChunkWriter if db_data.compressed_chunk_type == models.DataChoice.VIDEO else ZipCompressedChunkWriter
if db_data.original_chunk_type == DataChoice.VIDEO: if db_data.original_chunk_type == models.DataChoice.VIDEO:
original_chunk_writer_class = Mpeg4ChunkWriter original_chunk_writer_class = Mpeg4ChunkWriter
# Let's use QP=17 (that is 67 for 0-100 range) for the original chunks, which should be visually lossless or nearly so. # Let's use QP=17 (that is 67 for 0-100 range) for the original chunks, which should be visually lossless or nearly so.
# A lower value will significantly increase the chunk size with a slight increase of quality. # A lower value will significantly increase the chunk size with a slight increase of quality.
@ -312,7 +324,7 @@ def _create_thread(tid, data):
original_quality = 100 original_quality = 100
kwargs = {} kwargs = {}
if validate_dimension.dimension == DimensionType.DIM_3D: if validate_dimension.dimension == models.DimensionType.DIM_3D:
kwargs["dimension"] = validate_dimension.dimension kwargs["dimension"] = validate_dimension.dimension
compressed_chunk_writer = compressed_chunk_writer_class(db_data.image_quality, **kwargs) compressed_chunk_writer = compressed_chunk_writer_class(db_data.image_quality, **kwargs)
original_chunk_writer = original_chunk_writer_class(original_quality) original_chunk_writer = original_chunk_writer_class(original_quality)
@ -326,7 +338,6 @@ def _create_thread(tid, data):
else: else:
db_data.chunk_size = 36 db_data.chunk_size = 36
video_path = "" video_path = ""
video_size = (0, 0) video_size = (0, 0)
@ -334,7 +345,7 @@ def _create_thread(tid, data):
job.meta['status'] = msg job.meta['status'] = msg
job.save_meta() job.save_meta()
if settings.USE_CACHE and db_data.storage_method == StorageMethodChoice.CACHE: if settings.USE_CACHE and db_data.storage_method == models.StorageMethodChoice.CACHE:
for media_type, media_files in media.items(): for media_type, media_files in media.items():
if not media_files: if not media_files:
@ -392,7 +403,7 @@ def _create_thread(tid, data):
if data['stop_frame'] else all_frames, all_frames), db_data.get_frame_step())) if data['stop_frame'] else all_frames, all_frames), db_data.get_frame_step()))
video_path = os.path.join(upload_dir, media_files[0]) video_path = os.path.join(upload_dir, media_files[0])
except Exception as ex: except Exception as ex:
db_data.storage_method = StorageMethodChoice.FILE_SYSTEM db_data.storage_method = models.StorageMethodChoice.FILE_SYSTEM
if os.path.exists(db_data.get_manifest_path()): if os.path.exists(db_data.get_manifest_path()):
os.remove(db_data.get_manifest_path()) os.remove(db_data.get_manifest_path())
if os.path.exists(db_data.get_index_path()): if os.path.exists(db_data.get_index_path()):
@ -404,7 +415,7 @@ def _create_thread(tid, data):
db_data.size = len(extractor) db_data.size = len(extractor)
manifest = ImageManifestManager(db_data.get_manifest_path()) manifest = ImageManifestManager(db_data.get_manifest_path())
if not manifest_file: if not manifest_file:
if db_task.dimension == DimensionType.DIM_2D: if db_task.dimension == models.DimensionType.DIM_2D:
meta_info = manifest.prepare_meta( meta_info = manifest.prepare_meta(
sources=extractor.absolute_source_paths, sources=extractor.absolute_source_paths,
meta={ k: {'related_images': related_images[k] } for k in related_images }, meta={ k: {'related_images': related_images[k] } for k in related_images },
@ -428,8 +439,8 @@ def _create_thread(tid, data):
img_sizes = [] img_sizes = []
for _, frame_id in chunk_paths: for _, frame_id in chunk_paths:
properties = manifest[frame_id] properties = manifest[manifest_index(frame_id)]
if db_task.dimension == DimensionType.DIM_2D: if db_task.dimension == models.DimensionType.DIM_2D:
resolution = (properties['width'], properties['height']) resolution = (properties['width'], properties['height'])
else: else:
resolution = extractor.get_image_size(frame_id) resolution = extractor.get_image_size(frame_id)
@ -442,7 +453,7 @@ def _create_thread(tid, data):
for (path, frame), (w, h) in zip(chunk_paths, img_sizes) for (path, frame), (w, h) in zip(chunk_paths, img_sizes)
]) ])
if db_data.storage_method == StorageMethodChoice.FILE_SYSTEM or not settings.USE_CACHE: if db_data.storage_method == models.StorageMethodChoice.FILE_SYSTEM or not settings.USE_CACHE:
counter = itertools.count() counter = itertools.count()
generator = itertools.groupby(extractor, lambda x: next(counter) // db_data.chunk_size) generator = itertools.groupby(extractor, lambda x: next(counter) // db_data.chunk_size)
for chunk_idx, chunk_data in generator: for chunk_idx, chunk_data in generator:
@ -477,11 +488,11 @@ def _create_thread(tid, data):
created_images = models.Image.objects.filter(data_id=db_data.id) created_images = models.Image.objects.filter(data_id=db_data.id)
db_related_files = [ db_related_files = [
RelatedFile(data=image.data, primary_image=image, path=os.path.join(upload_dir, related_file_path)) models.RelatedFile(data=image.data, primary_image=image, path=os.path.join(upload_dir, related_file_path))
for image in created_images for image in created_images
for related_file_path in related_images.get(image.path, []) for related_file_path in related_images.get(image.path, [])
] ]
RelatedFile.objects.bulk_create(db_related_files) models.RelatedFile.objects.bulk_create(db_related_files)
db_images = [] db_images = []
else: else:
models.Video.objects.create( models.Video.objects.create(

@ -1,4 +1,4 @@
# Copyright (C) 2020 Intel Corporation # Copyright (C) 2020-2021 Intel Corporation
# #
# SPDX-License-Identifier: MIT # SPDX-License-Identifier: MIT
@ -2071,6 +2071,348 @@ class TaskCreateAPITestCase(APITestCase):
} }
self._check_api_v1_tasks(None, data) self._check_api_v1_tasks(None, data)
class TaskImportExportAPITestCase(APITestCase):
def setUp(self):
self.client = APIClient()
self.tasks = []
@classmethod
def setUpTestData(cls):
create_db_users(cls)
cls.media_data = []
image_count = 10
imagename_pattern = "test_{}.jpg"
for i in range(image_count):
filename = imagename_pattern.format(i)
path = os.path.join(settings.SHARE_ROOT, filename)
_, data = generate_image_file(filename)
with open(path, "wb") as image:
image.write(data.read())
cls.media_data.append(
{
**{"image_quality": 75,
"copy_data": True,
"start_frame": 2,
"stop_frame": 9,
"frame_filter": "step=2",
},
**{"server_files[{}]".format(i): imagename_pattern.format(i) for i in range(image_count)},
}
)
filename = "test_video_1.mp4"
path = os.path.join(settings.SHARE_ROOT, filename)
_, data = generate_video_file(filename, width=1280, height=720)
with open(path, "wb") as video:
video.write(data.read())
cls.media_data.append(
{
"image_quality": 75,
"copy_data": True,
"start_frame": 2,
"stop_frame": 24,
"frame_filter": "step=2",
"server_files[0]": filename,
}
)
filename = os.path.join("test_archive_1.zip")
path = os.path.join(settings.SHARE_ROOT, filename)
_, data = generate_zip_archive_file(filename, count=5)
with open(path, "wb") as zip_archive:
zip_archive.write(data.read())
cls.media_data.append(
{
"image_quality": 75,
"server_files[0]": filename,
}
)
filename = "test_pointcloud_pcd.zip"
source_path = os.path.join(os.path.dirname(__file__), 'assets', filename)
path = os.path.join(settings.SHARE_ROOT, filename)
shutil.copyfile(source_path, path)
cls.media_data.append(
{
"image_quality": 75,
"server_files[0]": filename,
}
)
filename = "test_velodyne_points.zip"
source_path = os.path.join(os.path.dirname(__file__), 'assets', filename)
path = os.path.join(settings.SHARE_ROOT, filename)
shutil.copyfile(source_path, path)
cls.media_data.append(
{
"image_quality": 75,
"server_files[0]": filename,
}
)
filename = os.path.join("videos", "test_video_1.mp4")
path = os.path.join(settings.SHARE_ROOT, filename)
os.makedirs(os.path.dirname(path))
_, data = generate_video_file(filename, width=1280, height=720)
with open(path, "wb") as video:
video.write(data.read())
generate_manifest_file(data_type='video', manifest_path=os.path.join(settings.SHARE_ROOT, 'videos', 'manifest.jsonl'),
sources=[path])
cls.media_data.append(
{
"image_quality": 70,
"copy_data": True,
"server_files[0]": filename,
"server_files[1]": os.path.join("videos", "manifest.jsonl"),
"use_cache": True,
}
)
generate_manifest_file(data_type='images', manifest_path=os.path.join(settings.SHARE_ROOT, 'manifest.jsonl'),
sources=[os.path.join(settings.SHARE_ROOT, imagename_pattern.format(i)) for i in range(1, 8)])
cls.media_data.append(
{
**{"image_quality": 70,
"copy_data": True,
"use_cache": True,
"frame_filter": "step=2",
"server_files[0]": "manifest.jsonl",
},
**{
**{"server_files[{}]".format(i): imagename_pattern.format(i) for i in range(1, 8)},
}
}
)
cls.media_data.extend([
# image list local
{
"client_files[0]": generate_image_file("test_1.jpg")[1],
"client_files[1]": generate_image_file("test_2.jpg")[1],
"client_files[2]": generate_image_file("test_3.jpg")[1],
"image_quality": 75,
},
# video local
{
"client_files[0]": generate_video_file("test_video.mp4")[1],
"image_quality": 75,
},
# zip archive local
{
"client_files[0]": generate_zip_archive_file("test_archive_1.zip", 10)[1],
"image_quality": 50,
},
# pdf local
{
"client_files[0]": generate_pdf_file("test_pdf_1.pdf", 7)[1],
"image_quality": 54,
},
])
def tearDown(self):
for task in self.tasks:
shutil.rmtree(os.path.join(settings.TASKS_ROOT, str(task["id"])))
shutil.rmtree(os.path.join(settings.MEDIA_DATA_ROOT, str(task["data_id"])))
@classmethod
def tearDownClass(cls):
super().tearDownClass()
path = os.path.join(settings.SHARE_ROOT, "test_1.jpg")
os.remove(path)
path = os.path.join(settings.SHARE_ROOT, "test_2.jpg")
os.remove(path)
path = os.path.join(settings.SHARE_ROOT, "test_3.jpg")
os.remove(path)
path = os.path.join(settings.SHARE_ROOT, "test_video_1.mp4")
os.remove(path)
path = os.path.join(settings.SHARE_ROOT, "videos", "test_video_1.mp4")
os.remove(path)
path = os.path.join(settings.SHARE_ROOT, "videos", "manifest.jsonl")
os.remove(path)
os.rmdir(os.path.dirname(path))
path = os.path.join(settings.SHARE_ROOT, "test_pointcloud_pcd.zip")
os.remove(path)
path = os.path.join(settings.SHARE_ROOT, "test_velodyne_points.zip")
os.remove(path)
path = os.path.join(settings.SHARE_ROOT, "manifest.jsonl")
os.remove(path)
def _create_tasks(self):
self.tasks = []
def _create_task(task_data, media_data):
response = self.client.post('/api/v1/tasks', data=task_data, format="json")
assert response.status_code == status.HTTP_201_CREATED
tid = response.data["id"]
for media in media_data.values():
if isinstance(media, io.BytesIO):
media.seek(0)
response = self.client.post("/api/v1/tasks/{}/data".format(tid), data=media_data)
assert response.status_code == status.HTTP_202_ACCEPTED
response = self.client.get("/api/v1/tasks/{}".format(tid))
data_id = response.data["data"]
self.tasks.append({
"id": tid,
"data_id": data_id,
})
task_data = [
{
"name": "my task #1",
"owner_id": self.owner.id,
"assignee_id": self.assignee.id,
"overlap": 0,
"segment_size": 100,
"labels": [{
"name": "car",
"color": "#ff00ff",
"attributes": [{
"name": "bool_attribute",
"mutable": True,
"input_type": AttributeType.CHECKBOX,
"default_value": "true"
}],
}, {
"name": "person",
},
]
},
{
"name": "my task #2",
"owner_id": self.owner.id,
"assignee_id": self.assignee.id,
"overlap": 1,
"segment_size": 3,
"labels": [{
"name": "car",
"color": "#ff00ff",
"attributes": [{
"name": "bool_attribute",
"mutable": True,
"input_type": AttributeType.CHECKBOX,
"default_value": "true"
}],
}, {
"name": "person",
},
]
},
]
with ForceLogin(self.owner, self.client):
for data in task_data:
for media in self.media_data:
_create_task(data, media)
def _run_api_v1_tasks_id_export(self, tid, user, query_params=""):
with ForceLogin(user, self.client):
response = self.client.get('/api/v1/tasks/{}?{}'.format(tid, query_params), format="json")
return response
def _run_api_v1_tasks_id_import(self, user, data):
with ForceLogin(user, self.client):
response = self.client.post('/api/v1/tasks?action=import', data=data, format="multipart")
return response
def _run_api_v1_tasks_id(self, tid, user):
with ForceLogin(user, self.client):
response = self.client.get('/api/v1/tasks/{}'.format(tid), format="json")
return response.data
def _run_api_v1_tasks_id_export_import(self, user):
if user:
if user is self.user or user is self.annotator:
HTTP_200_OK = status.HTTP_403_FORBIDDEN
HTTP_202_ACCEPTED = status.HTTP_403_FORBIDDEN
HTTP_201_CREATED = status.HTTP_403_FORBIDDEN
else:
HTTP_200_OK = status.HTTP_200_OK
HTTP_202_ACCEPTED = status.HTTP_202_ACCEPTED
HTTP_201_CREATED = status.HTTP_201_CREATED
else:
HTTP_200_OK = status.HTTP_401_UNAUTHORIZED
HTTP_202_ACCEPTED = status.HTTP_401_UNAUTHORIZED
HTTP_201_CREATED = status.HTTP_401_UNAUTHORIZED
self._create_tasks()
for task in self.tasks:
tid = task["id"]
response = self._run_api_v1_tasks_id_export(tid, user, "action=export")
self.assertEqual(response.status_code, HTTP_202_ACCEPTED)
response = self._run_api_v1_tasks_id_export(tid, user, "action=export")
self.assertEqual(response.status_code, HTTP_201_CREATED)
response = self._run_api_v1_tasks_id_export(tid, user, "action=download")
self.assertEqual(response.status_code, HTTP_200_OK)
if user and user is not self.observer and user is not self.user and user is not self.annotator:
self.assertTrue(response.streaming)
content = io.BytesIO(b"".join(response.streaming_content))
content.seek(0)
uploaded_data = {
"task_file": content,
}
response = self._run_api_v1_tasks_id_import(user, uploaded_data)
self.assertEqual(response.status_code, HTTP_202_ACCEPTED)
if user is not self.observer and user is not self.user and user is not self.annotator:
rq_id = response.data["rq_id"]
response = self._run_api_v1_tasks_id_import(user, {"rq_id": rq_id})
self.assertEqual(response.status_code, HTTP_201_CREATED)
original_task = self._run_api_v1_tasks_id(tid, user)
imported_task = self._run_api_v1_tasks_id(response.data["id"], user)
compare_objects(
self=self,
obj1=original_task,
obj2=imported_task,
ignore_keys=(
"id",
"url",
"owner",
"project_id",
"assignee",
"created_date",
"updated_date",
"data",
),
)
def test_api_v1_tasks_id_export_admin(self):
self._run_api_v1_tasks_id_export_import(self.admin)
def test_api_v1_tasks_id_export_user(self):
self._run_api_v1_tasks_id_export_import(self.user)
def test_api_v1_tasks_id_export_annotator(self):
self._run_api_v1_tasks_id_export_import(self.annotator)
def test_api_v1_tasks_id_export_observer(self):
self._run_api_v1_tasks_id_export_import(self.observer)
def test_api_v1_tasks_id_export_no_auth(self):
self._run_api_v1_tasks_id_export_import(None)
def generate_image_file(filename): def generate_image_file(filename):
f = BytesIO() f = BytesIO()
gen = random.SystemRandom() gen = random.SystemRandom()
@ -2326,6 +2668,7 @@ class TaskDataAPITestCase(APITestCase):
path = os.path.join(settings.SHARE_ROOT, "videos", "manifest.jsonl") path = os.path.join(settings.SHARE_ROOT, "videos", "manifest.jsonl")
os.remove(path) os.remove(path)
os.rmdir(os.path.dirname(path))
path = os.path.join(settings.SHARE_ROOT, "manifest.jsonl") path = os.path.join(settings.SHARE_ROOT, "manifest.jsonl")
os.remove(path) os.remove(path)
@ -2995,7 +3338,7 @@ def compare_objects(self, obj1, obj2, ignore_keys, fp_tolerance=.001):
continue continue
v2 = obj2[k] v2 = obj2[k]
if k == 'attributes': if k == 'attributes':
key = lambda a: a['spec_id'] key = lambda a: a['spec_id'] if 'spec_id' in a else a['id']
v1.sort(key=key) v1.sort(key=key)
v2.sort(key=key) v2.sort(key=key)
compare_objects(self, v1, v2, ignore_keys) compare_objects(self, v1, v2, ignore_keys)

@ -1,4 +1,4 @@
# Copyright (C) 2018-2020 Intel Corporation # Copyright (C) 2018-2021 Intel Corporation
# #
# SPDX-License-Identifier: MIT # SPDX-License-Identifier: MIT
@ -7,6 +7,7 @@ import os
import os.path as osp import os.path as osp
import shutil import shutil
import traceback import traceback
import uuid
from datetime import datetime from datetime import datetime
from distutils.util import strtobool from distutils.util import strtobool
from tempfile import mkstemp from tempfile import mkstemp
@ -50,9 +51,11 @@ from cvat.apps.engine.serializers import (
FileInfoSerializer, JobSerializer, LabeledDataSerializer, FileInfoSerializer, JobSerializer, LabeledDataSerializer,
LogEventSerializer, ProjectSerializer, ProjectSearchSerializer, ProjectWithoutTaskSerializer, LogEventSerializer, ProjectSerializer, ProjectSearchSerializer, ProjectWithoutTaskSerializer,
RqStatusSerializer, TaskSerializer, UserSerializer, PluginsSerializer, ReviewSerializer, RqStatusSerializer, TaskSerializer, UserSerializer, PluginsSerializer, ReviewSerializer,
CombinedReviewSerializer, IssueSerializer, CombinedIssueSerializer, CommentSerializer CombinedReviewSerializer, IssueSerializer, CombinedIssueSerializer, CommentSerializer,
TaskFileSerializer,
) )
from cvat.apps.engine.utils import av_scan_paths from cvat.apps.engine.utils import av_scan_paths
from cvat.apps.engine.backup import import_task
from . import models, task from . import models, task
from .log import clogger, slogger from .log import clogger, slogger
@ -360,20 +363,134 @@ class TaskViewSet(auth.TaskGetQuerySetMixin, viewsets.ModelViewSet):
return [perm() for perm in permissions] return [perm() for perm in permissions]
def perform_create(self, serializer): def _validate_task_limit(self, owner):
def validate_task_limit(owner): admin_perm = auth.AdminRolePermission()
admin_perm = auth.AdminRolePermission() is_admin = admin_perm.has_permission(self.request, self)
is_admin = admin_perm.has_permission(self.request, self) if not is_admin and settings.RESTRICTIONS['task_limit'] is not None and \
if not is_admin and settings.RESTRICTIONS['task_limit'] is not None and \ Task.objects.filter(owner=owner).count() >= settings.RESTRICTIONS['task_limit']:
Task.objects.filter(owner=owner).count() >= settings.RESTRICTIONS['task_limit']: raise serializers.ValidationError('The user has the maximum number of tasks')
raise serializers.ValidationError('The user has the maximum number of tasks')
def create(self, request):
action = self.request.query_params.get('action', None)
if action is None:
return super().create(request)
elif action == 'import':
self._validate_task_limit(owner=self.request.user)
if 'rq_id' in request.data:
rq_id = request.data['rq_id']
else:
rq_id = "{}@/api/v1/tasks/{}/import".format(request.user, uuid.uuid4())
queue = django_rq.get_queue("default")
rq_job = queue.fetch_job(rq_id)
if not rq_job:
serializer = TaskFileSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
task_file = serializer.validated_data['task_file']
fd, filename = mkstemp(prefix='cvat_')
with open(filename, 'wb+') as f:
for chunk in task_file.chunks():
f.write(chunk)
rq_job = queue.enqueue_call(
func=import_task,
args=(filename, request.user.id),
job_id=rq_id,
meta={
'tmp_file': filename,
'tmp_file_descriptor': fd,
},
)
else:
if rq_job.is_finished:
task_id = rq_job.return_value
os.close(rq_job.meta['tmp_file_descriptor'])
os.remove(rq_job.meta['tmp_file'])
rq_job.delete()
return Response({'id': task_id}, status=status.HTTP_201_CREATED)
elif rq_job.is_failed:
os.close(rq_job.meta['tmp_file_descriptor'])
os.remove(rq_job.meta['tmp_file'])
exc_info = str(rq_job.exc_info)
rq_job.delete()
# RQ adds a prefix with exception class name
import_error_prefix = '{}.{}'.format(
CvatImportError.__module__, CvatImportError.__name__)
if exc_info.startswith(import_error_prefix):
exc_info = exc_info.replace(import_error_prefix + ': ', '')
return Response(data=exc_info,
status=status.HTTP_400_BAD_REQUEST)
else:
return Response(data=exc_info,
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
return Response({'rq_id': rq_id}, status=status.HTTP_202_ACCEPTED)
else:
raise serializers.ValidationError(
"Unexpected action specified for the request")
def retrieve(self, request, pk=None):
db_task = self.get_object() # force to call check_object_permissions
action = self.request.query_params.get('action', None)
if action is None:
return super().retrieve(request, pk)
elif action in ('export', 'download'):
queue = django_rq.get_queue("default")
rq_id = "/api/v1/tasks/{}/export".format(pk)
rq_job = queue.fetch_job(rq_id)
if rq_job:
last_task_update_time = timezone.localtime(db_task.updated_date)
request_time = rq_job.meta.get('request_time', None)
if request_time is None or request_time < last_task_update_time:
rq_job.cancel()
rq_job.delete()
else:
if rq_job.is_finished:
file_path = rq_job.return_value
if action == "download" and osp.exists(file_path):
rq_job.delete()
timestamp = datetime.strftime(last_task_update_time,
"%Y_%m_%d_%H_%M_%S")
filename = "task_{}_backup_{}{}".format(
db_task.name, timestamp,
osp.splitext(file_path)[1])
return sendfile(request, file_path, attachment=True,
attachment_filename=filename.lower())
else:
if osp.exists(file_path):
return Response(status=status.HTTP_201_CREATED)
elif rq_job.is_failed:
exc_info = str(rq_job.exc_info)
rq_job.delete()
return Response(exc_info,
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
else:
return Response(status=status.HTTP_202_ACCEPTED)
ttl = dm.views.CACHE_TTL.total_seconds()
queue.enqueue_call(
func=dm.views.backup_task,
args=(pk, 'task_dump.zip'),
job_id=rq_id,
meta={ 'request_time': timezone.localtime() },
result_ttl=ttl, failure_ttl=ttl)
return Response(status=status.HTTP_202_ACCEPTED)
else:
raise serializers.ValidationError(
"Unexpected action specified for the request")
def perform_create(self, serializer):
owner = self.request.data.get('owner', None) owner = self.request.data.get('owner', None)
if owner: if owner:
validate_task_limit(owner) self._validate_task_limit(owner)
serializer.save() serializer.save()
else: else:
validate_task_limit(self.request.user) self._validate_task_limit(self.request.user)
serializer.save(owner=self.request.user) serializer.save(owner=self.request.user)
def perform_destroy(self, instance): def perform_destroy(self, instance):
@ -414,6 +531,9 @@ class TaskViewSet(auth.TaskGetQuerySetMixin, viewsets.ModelViewSet):
def data(self, request, pk): def data(self, request, pk):
if request.method == 'POST': if request.method == 'POST':
db_task = self.get_object() # call check_object_permissions as well db_task = self.get_object() # call check_object_permissions as well
if db_task.data:
return Response(data='Adding more data is not supported',
status=status.HTTP_400_BAD_REQUEST)
serializer = DataSerializer(data=request.data) serializer = DataSerializer(data=request.data)
serializer.is_valid(raise_exception=True) serializer.is_valid(raise_exception=True)
db_data = serializer.save() db_data = serializer.save()

@ -86,7 +86,7 @@ context('Actions on Cuboid', () => {
it('Draw a Cuboid shape in two ways (From rectangle, by 4 points)', () => { it('Draw a Cuboid shape in two ways (From rectangle, by 4 points)', () => {
cy.createCuboid(createCuboidShape2Points); cy.createCuboid(createCuboidShape2Points);
cy.get('.cvat-canvas-container').trigger('mousemove', 300, 400); cy.get('.cvat-canvas-container').trigger('mousemove', 300, 400);
cy.get('#cvat_canvas_shape_1').should('have.class', 'cvat_canvas_shape_activated') cy.get('#cvat_canvas_shape_1').should('have.class', 'cvat_canvas_shape_activated');
// Increase code coverage for cvat-canvas/src/typescript/svg.patch.ts. Block start // Increase code coverage for cvat-canvas/src/typescript/svg.patch.ts. Block start
// Checking for changes in the size and orientation of the shape is based on // Checking for changes in the size and orientation of the shape is based on
@ -95,7 +95,7 @@ context('Actions on Cuboid', () => {
.trigger('mouseenter', 360, 340); .trigger('mouseenter', 360, 340);
cy.get('.cvat_canvas_selected_point').should('exist'); cy.get('.cvat_canvas_selected_point').should('exist');
cy.get('.cvat-canvas-container') cy.get('.cvat-canvas-container')
.trigger('mousedown', 360, 340, {button: 0}) .trigger('mousedown', 360, 340, { button: 0 })
.trigger('mousemove', 360, 240) .trigger('mousemove', 360, 240)
.trigger('mouseup', 360, 240); .trigger('mouseup', 360, 240);
@ -103,7 +103,7 @@ context('Actions on Cuboid', () => {
.trigger('mouseenter', 360, 340); .trigger('mouseenter', 360, 340);
cy.get('.cvat_canvas_selected_point').should('exist'); cy.get('.cvat_canvas_selected_point').should('exist');
cy.get('.cvat-canvas-container') cy.get('.cvat-canvas-container')
.trigger('mousedown', 360, 340, {which: 1}) .trigger('mousedown', 360, 340, { which: 1 })
.trigger('mousemove', 430, 340) .trigger('mousemove', 430, 340)
.trigger('mouseup', 430, 340); .trigger('mouseup', 430, 340);
@ -111,7 +111,7 @@ context('Actions on Cuboid', () => {
.trigger('mouseenter', 250, 250); .trigger('mouseenter', 250, 250);
cy.get('.cvat_canvas_selected_point').should('exist'); cy.get('.cvat_canvas_selected_point').should('exist');
cy.get('.cvat-canvas-container') cy.get('.cvat-canvas-container')
.trigger('mousedown', 250, 250, {button: 0}) .trigger('mousedown', 250, 250, { button: 0 })
.trigger('mousemove', 200, 250) .trigger('mousemove', 200, 250)
.trigger('mouseup', 200, 250); .trigger('mouseup', 200, 250);
@ -119,7 +119,7 @@ context('Actions on Cuboid', () => {
.trigger('mouseenter', 350, 250); .trigger('mouseenter', 350, 250);
cy.get('.cvat_canvas_selected_point').should('exist'); cy.get('.cvat_canvas_selected_point').should('exist');
cy.get('.cvat-canvas-container') cy.get('.cvat-canvas-container')
.trigger('mousedown', 350, 250, {button: 0}) .trigger('mousedown', 350, 250, { button: 0 })
.trigger('mousemove', 300, 250) .trigger('mousemove', 300, 250)
.trigger('mouseup', 300, 250); .trigger('mouseup', 300, 250);
@ -127,13 +127,13 @@ context('Actions on Cuboid', () => {
.trigger('mouseenter', 200, 350); .trigger('mouseenter', 200, 350);
cy.get('.cvat_canvas_selected_point').should('exist'); cy.get('.cvat_canvas_selected_point').should('exist');
cy.get('.cvat-canvas-container') cy.get('.cvat-canvas-container')
.trigger('mousedown', 200, 350, {which: 1}) .trigger('mousedown', 200, 350, { which: 1 })
.trigger('mousemove', 150, 350) .trigger('mousemove', 150, 350)
.trigger('mouseup', 150, 350); .trigger('mouseup', 150, 350);
cy.get('.cvat-canvas-container') // Orientation to right. drCenter.hide() cy.get('.cvat-canvas-container') // Orientation to right. drCenter.hide()
.trigger('mouseenter', 300, 200) .trigger('mouseenter', 300, 200)
.trigger('mousedown', 300, 200, {which: 1}) .trigger('mousedown', 300, 200, { which: 1 })
.trigger('mousemove', 150, 200) .trigger('mousemove', 150, 200)
.trigger('mouseup', 150, 200); .trigger('mouseup', 150, 200);
@ -141,7 +141,7 @@ context('Actions on Cuboid', () => {
.trigger('mouseenter', 85, 270); .trigger('mouseenter', 85, 270);
cy.get('.cvat_canvas_selected_point').should('exist'); cy.get('.cvat_canvas_selected_point').should('exist');
cy.get('.cvat-canvas-container') cy.get('.cvat-canvas-container')
.trigger('mousedown', 85, 270, {which: 1}) .trigger('mousedown', 85, 270, { which: 1 })
.trigger('mousemove', 120, 270) .trigger('mousemove', 120, 270)
.trigger('mouseup', 120, 270); .trigger('mouseup', 120, 270);
@ -149,19 +149,19 @@ context('Actions on Cuboid', () => {
.trigger('mouseenter', 120, 410); .trigger('mouseenter', 120, 410);
cy.get('.cvat_canvas_selected_point').should('exist'); cy.get('.cvat_canvas_selected_point').should('exist');
cy.get('.cvat-canvas-container') cy.get('.cvat-canvas-container')
.trigger('mousedown', 120, 410, {button: 0}) .trigger('mousedown', 120, 410, { button: 0 })
.trigger('mousemove', 120, 350) .trigger('mousemove', 120, 350)
.trigger('mouseup', 120, 350); .trigger('mouseup', 120, 350);
cy.get('.cvat-canvas-container') // this.face cy.get('.cvat-canvas-container') // this.face
.trigger('mouseenter', 230, 300) .trigger('mouseenter', 230, 300)
.trigger('mousedown', 230, 300, {which: 1}) .trigger('mousedown', 230, 300, { which: 1 })
.trigger('mousemove', 200, 300) .trigger('mousemove', 200, 300)
.trigger('mouseup', 200, 300); .trigger('mouseup', 200, 300);
cy.get('.cvat-canvas-container') // this.right cy.get('.cvat-canvas-container') // this.right
.trigger('mouseenter', 250, 240) .trigger('mouseenter', 250, 240)
.trigger('mousedown', 250, 240, {which: 1}) .trigger('mousedown', 250, 240, { which: 1 })
.trigger('mousemove', 280, 200) .trigger('mousemove', 280, 200)
.trigger('mouseup', 280, 200); .trigger('mouseup', 280, 200);
@ -169,8 +169,8 @@ context('Actions on Cuboid', () => {
.trigger('mouseenter', 90, 215); .trigger('mouseenter', 90, 215);
cy.get('.cvat_canvas_selected_point').should('exist'); cy.get('.cvat_canvas_selected_point').should('exist');
cy.get('.cvat-canvas-container') cy.get('.cvat-canvas-container')
.trigger('mousedown', 90, 215, {button: 0, shiftKey: true}) .trigger('mousedown', 90, 215, { button: 0, shiftKey: true })
.trigger('mousemove', 90, 270, {shiftKey: true}) .trigger('mousemove', 90, 270, { shiftKey: true })
.trigger('mouseup', 90, 270); .trigger('mouseup', 90, 270);
cy.get('.cvat-appearance-cuboid-projections-checkbox').click(); // if (v === true) cy.get('.cvat-appearance-cuboid-projections-checkbox').click(); // if (v === true)
@ -182,58 +182,57 @@ context('Actions on Cuboid', () => {
cy.get('.cvat-canvas-container') // Moving the shape for further testing convenience cy.get('.cvat-canvas-container') // Moving the shape for further testing convenience
.trigger('mouseenter', 150, 305) .trigger('mouseenter', 150, 305)
.trigger('mousedown', 230, 300, {which: 1}) .trigger('mousedown', 230, 300, { which: 1 })
.trigger('mousemove', 400, 200) .trigger('mousemove', 400, 200)
.trigger('mouseup', 400, 200); .trigger('mouseup', 400, 200);
cy.get('.cvat-canvas-container') // if (this.cuboidModel.orientation === Orientation.LEFT) else {} cy.get('.cvat-canvas-container') // if (this.cuboidModel.orientation === Orientation.LEFT) else {}
.trigger('mouseenter', 260, 250); .trigger('mouseenter', 260, 250);
cy.get('.cvat_canvas_selected_point').should('exist'); cy.get('.cvat_canvas_selected_point').should('exist');
cy.get('.cvat-canvas-container') cy.get('.cvat-canvas-container').dblclick(260, 250, { shiftKey: true });
.dblclick(260, 250, {shiftKey: true})
cy.get('.cvat-canvas-container') // Change orientation to left cy.get('.cvat-canvas-container') // Change orientation to left
.trigger('mouseenter', 300, 130) .trigger('mouseenter', 300, 130)
.trigger('mousedown', 300, 130, {which: 1}) .trigger('mousedown', 300, 130, { which: 1 })
.trigger('mousemove', 500, 100) .trigger('mousemove', 500, 100)
.trigger('mouseup', 500, 100); .trigger('mouseup', 500, 100);
cy.get('.cvat-canvas-container') // frCenter cy.get('.cvat-canvas-container') // frCenter
.trigger('mouseenter', 465, 180) .trigger('mouseenter', 465, 180);
cy.get('.cvat_canvas_selected_point').should('exist'); cy.get('.cvat_canvas_selected_point').should('exist');
cy.get('.cvat-canvas-container') cy.get('.cvat-canvas-container')
.trigger('mousedown', 465, 180, {which: 1}) .trigger('mousedown', 465, 180, { which: 1 })
.trigger('mousemove', 500, 180) .trigger('mousemove', 500, 180)
.trigger('mouseup', 500, 180); .trigger('mouseup', 500, 180);
cy.get('.cvat-canvas-container') // ftCenter cy.get('.cvat-canvas-container') // ftCenter
.trigger('mouseenter', 395, 125) .trigger('mouseenter', 395, 125);
cy.get('.cvat_canvas_selected_point').should('exist'); cy.get('.cvat_canvas_selected_point').should('exist');
cy.get('.cvat-canvas-container') cy.get('.cvat-canvas-container')
.trigger('mousedown', 395, 125, {which: 1}) .trigger('mousedown', 395, 125, { which: 1 })
.trigger('mousemove', 395, 150) .trigger('mousemove', 395, 150)
.trigger('mouseup', 395, 150); .trigger('mouseup', 395, 150);
cy.get('.cvat-canvas-container') // fbCenter cy.get('.cvat-canvas-container') // fbCenter
.trigger('mouseenter', 400, 265) .trigger('mouseenter', 400, 265);
cy.get('.cvat_canvas_selected_point').should('exist'); cy.get('.cvat_canvas_selected_point').should('exist');
cy.get('.cvat-canvas-container') cy.get('.cvat-canvas-container')
.trigger('mousedown', 400, 265, {which: 1}) .trigger('mousedown', 400, 265, { which: 1 })
.trigger('mousemove', 400, 250) .trigger('mousemove', 400, 250)
.trigger('mouseup', 400, 250); .trigger('mouseup', 400, 250);
cy.get('.cvat-canvas-container') // if (this.cuboidModel.orientation === Orientation.LEFT) cy.get('.cvat-canvas-container') // if (this.cuboidModel.orientation === Orientation.LEFT)
.trigger('mouseenter', 600, 180) .trigger('mouseenter', 600, 180);
cy.get('.cvat_canvas_selected_point').should('exist'); cy.get('.cvat_canvas_selected_point').should('exist');
cy.get('.cvat-canvas-container') cy.get('.cvat-canvas-container')
.trigger('mousedown', 600, 180, {button: 0, shiftKey: true}) .trigger('mousedown', 600, 180, { button: 0, shiftKey: true })
.trigger('mousemove', 600, 150, {shiftKey: true}) .trigger('mousemove', 600, 150, { shiftKey: true })
.trigger('mouseup', 600, 150) .trigger('mouseup', 600, 150)
.dblclick(600, 150, {shiftKey: true}); .dblclick(600, 150, { shiftKey: true });
cy.get('.cvat-canvas-container') // this.left cy.get('.cvat-canvas-container') // this.left
.trigger('mouseenter', 400, 130) .trigger('mouseenter', 400, 130)
.trigger('mousedown', 400, 130, {which: 1}) .trigger('mousedown', 400, 130, { which: 1 })
.trigger('mousemove', 400, 100) .trigger('mousemove', 400, 100)
.trigger('mouseup', 400, 100) .trigger('mouseup', 400, 100)
.trigger('mouseout', 400, 100); .trigger('mouseout', 400, 100);

@ -18,27 +18,37 @@ context('Shortcuts window.', () => {
describe(`Testing case "${caseId}"`, () => { describe(`Testing case "${caseId}"`, () => {
it('Press "F1" from a task. Shortcuts window be visible. Closing the modal window by button "OK".', () => { it('Press "F1" from a task. Shortcuts window be visible. Closing the modal window by button "OK".', () => {
cy.get('body').trigger('keydown', { keyCode: keyCodeF1 }); cy.get('body').trigger('keydown', { keyCode: keyCodeF1 });
cy.get('.cvat-shortcuts-modal-window').should('exist').and('be.visible').within(() => { cy.get('.cvat-shortcuts-modal-window')
cy.get('.cvat-shortcuts-modal-window-table').within(() => { .should('exist')
cy.get('tr').should('exist').then(($shortcutsTableTrCount) => { .and('be.visible')
shortcutsTableTrCount = $shortcutsTableTrCount.length; .within(() => {
cy.get('.cvat-shortcuts-modal-window-table').within(() => {
cy.get('tr')
.should('exist')
.then(($shortcutsTableTrCount) => {
shortcutsTableTrCount = $shortcutsTableTrCount.length;
});
}); });
cy.contains('button', 'OK').click();
}); });
cy.contains('button', 'OK').click();
});
cy.get('.cvat-shortcuts-modal-window').should('not.be.visible'); cy.get('.cvat-shortcuts-modal-window').should('not.be.visible');
}); });
it('Open a job. Press "F1". Shortcuts window be visible. Closing the modal window by F1.', () => { it('Open a job. Press "F1". Shortcuts window be visible. Closing the modal window by F1.', () => {
cy.openJob(); cy.openJob();
cy.get('body').trigger('keydown', { keyCode: keyCodeF1 }); cy.get('body').trigger('keydown', { keyCode: keyCodeF1 });
cy.get('.cvat-shortcuts-modal-window').should('exist').and('be.visible').within(() => { cy.get('.cvat-shortcuts-modal-window')
cy.get('.cvat-shortcuts-modal-window-table').within(() => { .should('exist')
cy.get('tr').should('exist').then(($shortcutsTableTrCount) => { .and('be.visible')
expect($shortcutsTableTrCount.length).to.be.gt(shortcutsTableTrCount); .within(() => {
cy.get('.cvat-shortcuts-modal-window-table').within(() => {
cy.get('tr')
.should('exist')
.then(($shortcutsTableTrCount) => {
expect($shortcutsTableTrCount.length).to.be.gt(shortcutsTableTrCount);
});
}); });
}); });
});
cy.get('body').trigger('keydown', { keyCode: keyCodeF1 }); cy.get('body').trigger('keydown', { keyCode: keyCodeF1 });
cy.get('.cvat-shortcuts-modal-window').should('not.be.visible'); cy.get('.cvat-shortcuts-modal-window').should('not.be.visible');
}); });

@ -33,7 +33,15 @@ context('Overlap size.', () => {
cy.login(); cy.login();
cy.imageGenerator(imagesFolder, imageFileName, width, height, color, posX, posY, labelName, imagesCount); cy.imageGenerator(imagesFolder, imageFileName, width, height, color, posX, posY, labelName, imagesCount);
cy.createZipArchive(directoryToArchive, archivePath); cy.createZipArchive(directoryToArchive, archivePath);
cy.createAnnotationTask(taskName, labelName, attrName, textDefaultValue, archiveName, false, advancedConfigurationParams); cy.createAnnotationTask(
taskName,
labelName,
attrName,
textDefaultValue,
archiveName,
false,
advancedConfigurationParams,
);
cy.openTask(taskName); cy.openTask(taskName);
}); });
@ -45,31 +53,43 @@ context('Overlap size.', () => {
describe(`Testing case "${caseId}"`, () => { describe(`Testing case "${caseId}"`, () => {
it('The task parameters are correct.', () => { it('The task parameters are correct.', () => {
cy.get('.cvat-task-parameters').within(() => { cy.get('.cvat-task-parameters').within(() => {
cy.get('table').find('tr').last().find('td').then(($taskParameters) => { cy.get('table')
expect(Number($taskParameters[0].innerText)).equal(calculatedOverlapSize); .find('tr')
expect(Number($taskParameters[1].innerText)).equal(advancedConfigurationParams.segmentSize); .last()
}); .find('td')
.then(($taskParameters) => {
expect(Number($taskParameters[0].innerText)).equal(calculatedOverlapSize);
expect(Number($taskParameters[1].innerText)).equal(advancedConfigurationParams.segmentSize);
});
}); });
}); });
it('The range of frame values corresponds to the parameters.', () => { it('The range of frame values corresponds to the parameters.', () => {
cy.getJobNum(0).then(($job) => { cy.getJobNum(0).then(($job) => {
cy.contains('a', `Job #${$job}`).parents('tr').find('.cvat-job-item-frames').then(($frameRange) => { cy.contains('a', `Job #${$job}`)
expect(Number($frameRange.text().split('-')[1])).equal(advancedConfigurationParams.segmentSize - 1); // expected 4 to equal 4 .parents('tr')
}); .find('.cvat-job-item-frames')
.then(($frameRange) => {
expect(Number($frameRange.text().split('-')[1])).equal(
advancedConfigurationParams.segmentSize - 1,
); // expected 4 to equal 4
});
}); });
cy.getJobNum(1).then(($job) => { cy.getJobNum(1).then(($job) => {
cy.contains('a', `Job #${$job}`).parents('tr').find('.cvat-job-item-frames').then(($frameRange) => { cy.contains('a', `Job #${$job}`)
expect(Number($frameRange.text().split('-')[0])).equal(advancedConfigurationParams.segmentSize - 2); // expected 3 to equal 3 .parents('tr')
}); .find('.cvat-job-item-frames')
.then(($frameRange) => {
expect(Number($frameRange.text().split('-')[0])).equal(
advancedConfigurationParams.segmentSize - 2,
); // expected 3 to equal 3
});
}); });
}); });
it('The range of frame values in a job corresponds to the parameters.', () => { it('The range of frame values in a job corresponds to the parameters.', () => {
cy.openJob(0); cy.openJob(0);
cy.get('.cvat-player-frame-selector') cy.get('.cvat-player-frame-selector').find('input[role="spinbutton"]').should('have.value', '0');
.find('input[role="spinbutton"]')
.should('have.value', '0');
cy.get('.cvat-player-last-button').click(); cy.get('.cvat-player-last-button').click();
cy.get('.cvat-player-frame-selector') cy.get('.cvat-player-frame-selector')
.find('input[role="spinbutton"]') .find('input[role="spinbutton"]')

@ -170,7 +170,8 @@ context('Label constructor. Color label. Label name editing', () => {
}); });
cy.get('.cvat-change-task-label-color-badge') cy.get('.cvat-change-task-label-color-badge')
.children() .children()
.should('have.attr', 'style').and('contain', 'rgb(179, 179, 179)'); .should('have.attr', 'style')
.and('contain', 'rgb(179, 179, 179)');
cy.get('.cvat-label-constructor-updater').contains('button', 'Done').click(); cy.get('.cvat-label-constructor-updater').contains('button', 'Done').click();
cy.contains('.cvat-constructor-viewer-item', `Case ${caseId}`) cy.contains('.cvat-constructor-viewer-item', `Case ${caseId}`)
.should('have.attr', 'style') .should('have.attr', 'style')

@ -17,27 +17,33 @@ context('Drag canvas.', () => {
describe(`Testing case "${caseId}"`, () => { describe(`Testing case "${caseId}"`, () => {
it('Drag canvas', () => { it('Drag canvas', () => {
cy.get('#cvat_canvas_background').invoke('attr', 'style').then(($style) => { cy.get('#cvat_canvas_background')
topBefore = Number($style.split(';')[0].split(' ')[1].replace('px', '')); .invoke('attr', 'style')
leftBefore = Number($style.split(';')[1].split(' ')[2].replace('px', '')); .then(($style) => {
}); topBefore = Number($style.split(';')[0].split(' ')[1].replace('px', ''));
leftBefore = Number($style.split(';')[1].split(' ')[2].replace('px', ''));
});
cy.get('.cvat-move-control').click(); // Without this action, the function is not covered cy.get('.cvat-move-control').click(); // Without this action, the function is not covered
cy.get('.cvat-canvas-container').trigger('mousedown', {button: 0}).trigger('mousemove', 500, 500); cy.get('.cvat-canvas-container').trigger('mousedown', { button: 0 }).trigger('mousemove', 500, 500);
}); });
it('Top and left style parameters are changed.', () => { it('Top and left style parameters are changed.', () => {
cy.get('#cvat_canvas_background').invoke('attr', 'style').then(($style) => { cy.get('#cvat_canvas_background')
expect(topBefore).not.equal(Number($style.split(';')[0].split(' ')[1].replace('px', ''))); // expected 20 to not equal 95 .invoke('attr', 'style')
expect(leftBefore).not.equal(Number($style.split(';')[1].split(' ')[2].replace('px', ''))); // expected 73 to not equal 95 .then(($style) => {
}); expect(topBefore).not.equal(Number($style.split(';')[0].split(' ')[1].replace('px', ''))); // expected 20 to not equal 95
expect(leftBefore).not.equal(Number($style.split(';')[1].split(' ')[2].replace('px', ''))); // expected 73 to not equal 95
});
}); });
it('Double click on canvas. Parameters return to their original values', () => { it('Double click on canvas. Parameters return to their original values', () => {
cy.get('.cvat-canvas-container').dblclick(); cy.get('.cvat-canvas-container').dblclick();
cy.get('#cvat_canvas_background').invoke('attr', 'style').then(($style) => { cy.get('#cvat_canvas_background')
expect(topBefore).equal(Number($style.split(';')[0].split(' ')[1].replace('px', ''))); // expected 20 to equal 20 .invoke('attr', 'style')
expect(leftBefore).equal(Number($style.split(';')[1].split(' ')[2].replace('px', ''))); // expected 73 to equal 73 .then(($style) => {
}); expect(topBefore).equal(Number($style.split(';')[0].split(' ')[1].replace('px', ''))); // expected 20 to equal 20
expect(leftBefore).equal(Number($style.split(';')[1].split(' ')[2].replace('px', ''))); // expected 73 to equal 73
});
}); });
}); });
}); });

@ -6,7 +6,7 @@
context('Reset password notification.', () => {
    const caseId = '73';
    const dummyEmail = 'admin@local.local';
    before(() => {
        cy.visit('auth/login');

@ -424,7 +424,10 @@ Cypress.Commands.add('updateAttributes', (multiAttrParams) => {
    }
    if (multiAttrParams.mutable) {
        cy.get('.cvat-attribute-mutable-checkbox')
            .find('[type="checkbox"]')
            .should('not.be.checked')
            .check()
            .should('be.checked');
    }
});
});

@ -66,7 +66,7 @@ Cypress.Commands.add('setGroupCondition', (groupIndex, condition) => {
Cypress.Commands.add(
    'setFilter',
    ({ groupIndex, ruleIndex, field, operator, valueSource, value, label, labelAttr, submit }) => {
        cy.сheckFiltersModalOpened();
        cy.collectGroupID().then((groupIdIndex) => {
            cy.collectRuleID().then((ruleIdIndex) => {

@ -19,12 +19,16 @@ def config_log(level):
def main():
    actions = {
        'create': CLI.tasks_create,
        'delete': CLI.tasks_delete,
        'ls': CLI.tasks_list,
        'frames': CLI.tasks_frame,
        'dump': CLI.tasks_dump,
        'upload': CLI.tasks_upload,
        'export': CLI.tasks_export,
        'import': CLI.tasks_import,
    }
    args = parser.parse_args()
    config_log(args.loglevel)
    with requests.Session() as session:
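For context, the dispatch table above pairs each subcommand name with an unbound CLI method. A minimal sketch of how the parsed arguments would reach it, assuming the rest of main() follows the pattern already used by this CLI (the CVAT_API_V1 constructor and attribute names here are assumptions):

        # Inside the `with requests.Session() as session:` block (a sketch):
        api = CVAT_API_V1('{}:{}'.format(args.server_host, args.server_port))
        cli = CLI(session, api, args.auth)
        # args.action holds the chosen subcommand ('export', 'import', ...);
        # the remaining parsed arguments are forwarded as keyword arguments,
        # which is why the new methods accept **kwargs.
        actions[args.action](cli, **args.__dict__)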

@ -213,6 +213,53 @@ class CLI():
"with annotation file {} finished".format(filename) "with annotation file {} finished".format(filename)
log.info(logger_string) log.info(logger_string)
def tasks_export(self, task_id, filename, export_verification_period=3, **kwargs):
""" Export and download a whole task """
url = self.api.tasks_id(task_id)
export_url = url + '?action=export'
while True:
response = self.session.get(export_url)
response.raise_for_status()
log.info('STATUS {}'.format(response.status_code))
if response.status_code == 201:
break
sleep(export_verification_period)
response = self.session.get(url + '?action=download')
response.raise_for_status()
with open(filename, 'wb') as fp:
fp.write(response.content)
logger_string = "Task {} has been exported sucessfully. ".format(task_id) +\
"to {}".format(os.path.abspath(filename))
log.info(logger_string)
def tasks_import(self, filename, import_verification_period=3, **kwargs):
""" Import a task"""
url = self.api.tasks + '?action=import'
with open(filename, 'rb') as input_file:
response = self.session.post(
url,
files={'task_file': input_file}
)
response.raise_for_status()
response_json = response.json()
rq_id = response_json['rq_id']
while True:
sleep(import_verification_period)
response = self.session.post(
url,
data={'rq_id': rq_id}
)
response.raise_for_status()
if response.status_code == 201:
break
task_id = response.json()['id']
logger_string = "Task has been imported sucessfully. Task ID: {}".format(task_id)
log.info(logger_string)
def login(self, credentials): def login(self, credentials):
url = self.api.login url = self.api.login
auth = {'username': credentials[0], 'password': credentials[1]} auth = {'username': credentials[0], 'password': credentials[1]}
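Both helpers rely on the same polling contract: the server keeps answering 202 Accepted while the background job is still running and switches to 201 Created once the backup archive (or the imported task) is ready. A hypothetical round trip using the new methods directly, with placeholder host and credentials and assumed import path and constructor signatures:

    import requests
    from core.core import CLI, CVAT_API_V1  # assumed module layout

    with requests.Session() as session:
        api = CVAT_API_V1('localhost:8080')            # assumed constructor signature
        cli = CLI(session, api, ('user', 'password'))  # assumed to log in on construction
        cli.tasks_export(42, 'task_42_backup.zip')     # polls ?action=export, then downloads
        cli.tasks_import('task_42_backup.zip')         # uploads the file, then polls with rq_id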

@ -310,3 +310,36 @@ upload_parser.add_argument(
    default='CVAT 1.1',
    help='annotation format (default: %(default)s)'
)
#######################################################################
# Export task
#######################################################################
export_task_parser = task_subparser.add_parser(
    'export',
    description='Export a CVAT task.'
)
export_task_parser.add_argument(
    'task_id',
    type=int,
    help='task ID'
)
export_task_parser.add_argument(
    'filename',
    type=str,
    help='output file'
)
#######################################################################
# Import task
#######################################################################
import_task_parser = task_subparser.add_parser(
    'import',
    description='Import a CVAT task.'
)
import_task_parser.add_argument(
    'filename',
    type=str,
    help='upload file'
)
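With these parsers registered, the new subcommands plug into the existing entry point; assuming the CLI's usual global flags, an invocation would look roughly like this (a sketch, not verified against the full option set):

    # python cli.py --auth user:password export 42 task_backup.zip
    # python cli.py --auth user:password import task_backup.zip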
