Get preview images on the fly and keep them in cache (#5478)

Improved image preview loading for the **Tasks**, **Jobs**, and **Projects** views.

Backend behaviour change: image previews are now created on request and stored
in the cache.

Added the corresponding endpoints (exercised in the sketch below):
- `tasks/{id}/preview`
- `projects/{id}/preview`
- `jobs/{id}/preview`
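
A minimal sketch of calling one of the new endpoints over plain HTTP (the host, token, and task id below are placeholders; token auth is one of the schemes the CVAT REST API accepts):

    import requests

    # Hypothetical deployment and credentials -- replace with your own.
    BASE_URL = 'https://cvat.example.com/api'
    HEADERS = {'Authorization': 'Token <api-token>'}

    # The first request renders the preview and stores it in the server cache;
    # subsequent requests for the same task are served from the cache.
    response = requests.get(f'{BASE_URL}/tasks/42/preview', headers=HEADERS)
    response.raise_for_status()

    with open('task_42_preview.jpeg', 'wb') as f:
        f.write(response.content)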

Demonstration (a random 0-1 s delay was added for demo purposes):
https://user-images.githubusercontent.com/41117609/208106321-951b8647-6e6b-452e-910c-31c4d0b8682d.mp4
https://user-images.githubusercontent.com/41117609/208106339-2d3a5a7b-d422-4b27-9e76-08729022e1ca.mp4
Committed to main by Andrey Zhavoronkov via GitHub, 3 years ago
parent 37b685f47a
commit 1ecc607286

@@ -1,6 +1,6 @@
 {
   "name": "cvat-core",
-  "version": "7.3.0",
+  "version": "7.4.0",
   "description": "Part of Computer Vision Tool which presents an interface for client-side integration",
   "main": "src/api.ts",
   "scripts": {

@@ -1356,11 +1356,8 @@ async function getPreview(tid, jid) {
     let response = null;
     try {
-        const url = `${backendAPI}/${jid !== null ? 'jobs' : 'tasks'}/${jid || tid}/data`;
+        const url = `${backendAPI}/${jid !== null ? 'jobs' : 'tasks'}/${jid || tid}/preview`;
         response = await Axios.get(url, {
-            params: {
-                type: 'preview',
-            },
             proxy: config.proxy,
             responseType: 'blob',
         });

@@ -112,7 +112,7 @@ class Job(
     def get_preview(
         self,
     ) -> io.RawIOBase:
-        (_, response) = self.api.retrieve_data(self.id, type="preview")
+        (_, response) = self.api.retrieve_preview(self.id)
         return io.BytesIO(response.data)

     def download_frames(

@@ -159,7 +159,7 @@ class Task(
     def get_preview(
         self,
     ) -> io.RawIOBase:
-        (_, response) = self.api.retrieve_data(self.id, type="preview")
+        (_, response) = self.api.retrieve_preview(self.id)
         return io.BytesIO(response.data)

     def download_chunk(
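
For SDK users the call shape is unchanged: `get_preview()` still returns a raw byte stream, it just hits the dedicated endpoint now. A usage sketch, assuming the cvat-sdk high-level client (host, credentials, and task id are placeholders):

    from cvat_sdk import make_client

    # Hypothetical server and account -- replace with your own.
    with make_client(host='https://cvat.example.com', credentials=('user', 'password')) as client:
        task = client.tasks.retrieve(42)  # hypothetical task id
        with open('preview.jpeg', 'wb') as f:
            f.write(task.get_preview().read())  # io.RawIOBase, as in the signatures above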

@@ -1,6 +1,6 @@
 {
   "name": "cvat-ui",
-  "version": "1.45.0",
+  "version": "1.46.0",
   "description": "CVAT single-page application",
   "main": "src/index.tsx",
   "scripts": {

@@ -996,8 +996,7 @@ export function getJobAsync(
         // Check if the task was already downloaded to the state
         let job: any | null = null;
         const [task] = state.tasks.current
-            .filter((_task: Task) => _task.instance.id === tid)
-            .map((_task: Task) => _task.instance);
+            .filter((_task: Task) => _task.id === tid);
         if (task) {
             [job] = task.jobs.filter((_job: any) => _job.id === jid);
             if (!job) {

@@ -4,7 +4,7 @@
 import { ActionUnion, createAction, ThunkAction } from 'utils/redux';
 import { getCore } from 'cvat-core-wrapper';
-import { Indexable, JobsQuery } from 'reducers';
+import { Indexable, JobsQuery, Job } from 'reducers';

 const cvat = getCore();
@@ -12,6 +12,9 @@ export enum JobsActionTypes {
     GET_JOBS = 'GET_JOBS',
     GET_JOBS_SUCCESS = 'GET_JOBS_SUCCESS',
     GET_JOBS_FAILED = 'GET_JOBS_FAILED',
+    GET_JOB_PREVIEW = 'GET_JOB_PREVIEW',
+    GET_JOB_PREVIEW_SUCCESS = 'GET_JOB_PREVIEW_SUCCESS',
+    GET_JOB_PREVIEW_FAILED = 'GET_JOB_PREVIEW_FAILED',
 }

 interface JobsList extends Array<any> {
@@ -20,10 +23,19 @@ interface JobsList extends Array<any> {
 const jobsActions = {
     getJobs: (query: Partial<JobsQuery>) => createAction(JobsActionTypes.GET_JOBS, { query }),
-    getJobsSuccess: (jobs: JobsList, previews: string[]) => (
-        createAction(JobsActionTypes.GET_JOBS_SUCCESS, { jobs, previews })
+    getJobsSuccess: (jobs: JobsList) => (
+        createAction(JobsActionTypes.GET_JOBS_SUCCESS, { jobs })
     ),
     getJobsFailed: (error: any) => createAction(JobsActionTypes.GET_JOBS_FAILED, { error }),
+    getJobPreiew: (jobID: number) => (
+        createAction(JobsActionTypes.GET_JOB_PREVIEW, { jobID })
+    ),
+    getJobPreiewSuccess: (jobID: number, preview: string) => (
+        createAction(JobsActionTypes.GET_JOB_PREVIEW_SUCCESS, { jobID, preview })
+    ),
+    getJobPreiewFailed: (jobID: number, error: any) => (
+        createAction(JobsActionTypes.GET_JOB_PREVIEW_FAILED, { jobID, error })
+    ),
 };

 export type JobsActions = ActionUnion<typeof jobsActions>;
@@ -40,9 +52,18 @@ export const getJobsAsync = (query: JobsQuery): ThunkAction => async (dispatch)
         dispatch(jobsActions.getJobs(filteredQuery));
         const jobs = await cvat.jobs.get(filteredQuery);
-        const previewPromises = jobs.map((job: any) => (job as any).frames.preview().catch(() => ''));
-        dispatch(jobsActions.getJobsSuccess(jobs, await Promise.all(previewPromises)));
+        dispatch(jobsActions.getJobsSuccess(jobs));
     } catch (error) {
         dispatch(jobsActions.getJobsFailed(error));
     }
 };
+
+export const getJobPreviewAsync = (job: Job): ThunkAction => async (dispatch) => {
+    dispatch(jobsActions.getJobPreiew(job.id));
+    try {
+        const result = await job.frames.preview();
+        dispatch(jobsActions.getJobPreiewSuccess(job.id, result));
+    } catch (error) {
+        dispatch(jobsActions.getJobPreiewFailed(job.id, error));
+    }
+};

@@ -29,13 +29,16 @@ export enum ProjectsActionTypes {
     DELETE_PROJECT = 'DELETE_PROJECT',
     DELETE_PROJECT_SUCCESS = 'DELETE_PROJECT_SUCCESS',
     DELETE_PROJECT_FAILED = 'DELETE_PROJECT_FAILED',
+    GET_PROJECT_PREVIEW = 'GET_PROJECT_PREVIEW',
+    GET_PROJECT_PREVIEW_SUCCESS = 'GET_PROJECT_PREVIEW_SUCCESS',
+    GET_PROJECT_PREVIEW_FAILED = 'GET_PROJECT_PREVIEW_FAILED',
 }

 // prettier-ignore
 const projectActions = {
     getProjects: () => createAction(ProjectsActionTypes.GET_PROJECTS),
-    getProjectsSuccess: (array: any[], previews: string[], count: number) => (
-        createAction(ProjectsActionTypes.GET_PROJECTS_SUCCESS, { array, previews, count })
+    getProjectsSuccess: (array: any[], count: number) => (
+        createAction(ProjectsActionTypes.GET_PROJECTS_SUCCESS, { array, count })
     ),
     getProjectsFailed: (error: any) => createAction(ProjectsActionTypes.GET_PROJECTS_FAILED, { error }),
     updateProjectsGettingQuery: (query: Partial<ProjectsQuery>, tasksQuery: Partial<TasksQuery> = {}) => (
@@ -58,6 +61,15 @@ const projectActions = {
     deleteProjectFailed: (projectId: number, error: any) => (
         createAction(ProjectsActionTypes.DELETE_PROJECT_FAILED, { projectId, error })
     ),
+    getProjectPreiew: (projectID: number) => (
+        createAction(ProjectsActionTypes.GET_PROJECT_PREVIEW, { projectID })
+    ),
+    getProjectPreiewSuccess: (projectID: number, preview: string) => (
+        createAction(ProjectsActionTypes.GET_PROJECT_PREVIEW_SUCCESS, { projectID, preview })
+    ),
+    getProjectPreiewFailed: (projectID: number, error: any) => (
+        createAction(ProjectsActionTypes.GET_PROJECT_PREVIEW_FAILED, { projectID, error })
+    ),
 };

 export type ProjectActions = ActionUnion<typeof projectActions>;
@@ -109,8 +121,7 @@ export function getProjectsAsync(
             const array = Array.from(result);
-            const previewPromises = array.map((project): string => (project as any).preview().catch(() => ''));
-            dispatch(projectActions.getProjectsSuccess(array, await Promise.all(previewPromises), result.count));
+            dispatch(projectActions.getProjectsSuccess(array, result.count));

             // Appropriate tasks fetching proccess needs with retrieving only a single project
             if (Object.keys(filteredQuery).includes('id') && typeof filteredQuery.id === 'number') {
@@ -171,3 +182,13 @@ export function deleteProjectAsync(projectInstance: any): ThunkAction {
         }
     };
 }
+
+export const getProjectsPreviewAsync = (project: any): ThunkAction => async (dispatch) => {
+    dispatch(projectActions.getProjectPreiew(project.id));
+    try {
+        const result = await project.preview();
+        dispatch(projectActions.getProjectPreiewSuccess(project.id, result));
+    } catch (error) {
+        dispatch(projectActions.getProjectPreiewFailed(project.id, error));
+    }
+};

@@ -29,6 +29,9 @@ export enum TasksActionTypes {
     UPDATE_JOB_FAILED = 'UPDATE_JOB_FAILED',
     HIDE_EMPTY_TASKS = 'HIDE_EMPTY_TASKS',
     SWITCH_MOVE_TASK_MODAL_VISIBLE = 'SWITCH_MOVE_TASK_MODAL_VISIBLE',
+    GET_TASK_PREVIEW = 'GET_TASK_PREVIEW',
+    GET_TASK_PREVIEW_SUCCESS = 'GET_TASK_PREVIEW_SUCCESS',
+    GET_TASK_PREVIEW_FAILED = 'GET_TASK_PREVIEW_FAILED',
 }

 function getTasks(query: Partial<TasksQuery>, updateQuery: boolean): AnyAction {
@@ -43,11 +46,10 @@ function getTasks(query: Partial<TasksQuery>, updateQuery: boolean): AnyAction {
     return action;
 }

-export function getTasksSuccess(array: any[], previews: string[], count: number): AnyAction {
+export function getTasksSuccess(array: any[], count: number): AnyAction {
     const action = {
         type: TasksActionTypes.GET_TASKS_SUCCESS,
         payload: {
-            previews,
             array,
             count,
         },
@@ -89,10 +91,9 @@ export function getTasksAsync(
         }

         const array = Array.from(result);
-        const promises = array.map((task): string => (task as any).frames.preview().catch(() => ''));

         dispatch(getInferenceStatusAsync());
-        dispatch(getTasksSuccess(array, await Promise.all(promises), result.count));
+        dispatch(getTasksSuccess(array, result.count));
     };
 }
@@ -379,3 +380,50 @@ export function moveTaskToProjectAsync(
         }
     };
 }
+
+function getTaskPreview(taskID: number): AnyAction {
+    const action = {
+        type: TasksActionTypes.GET_TASK_PREVIEW,
+        payload: {
+            taskID,
+        },
+    };
+
+    return action;
+}
+
+function getTaskPreviewSuccess(taskID: number, preview: string): AnyAction {
+    const action = {
+        type: TasksActionTypes.GET_TASK_PREVIEW_SUCCESS,
+        payload: {
+            taskID,
+            preview,
+        },
+    };
+
+    return action;
+}
+
+function getTaskPreviewFailed(taskID: number, error: any): AnyAction {
+    const action = {
+        type: TasksActionTypes.GET_TASK_PREVIEW_FAILED,
+        payload: {
+            taskID,
+            error,
+        },
+    };
+
+    return action;
+}
+
+export function getTaskPreviewAsync(taskInstance: any): ThunkAction<Promise<void>, {}, {}, AnyAction> {
+    return async (dispatch: ActionCreator<Dispatch>): Promise<void> => {
+        try {
+            dispatch(getTaskPreview(taskInstance.id));
+            const result = await taskInstance.frames.preview();
+            dispatch(getTaskPreviewSuccess(taskInstance.id, result));
+        } catch (error) {
+            dispatch(getTaskPreviewFailed(taskInstance.id, error));
+        }
+    };
+}

@@ -1,4 +1,5 @@
 // Copyright (C) 2021-2022 Intel Corporation
+// Copyright (C) 2022 CVAT.ai Corporation
 //
 // SPDX-License-Identifier: MIT
@@ -19,8 +20,8 @@ import moment from 'moment';
 import { CloudStorage, CombinedState } from 'reducers';
 import { deleteCloudStorageAsync } from 'actions/cloud-storage-actions';
 import CVATTooltip from 'components/common/cvat-tooltip';
+import Preview from 'components/common/preview';
 import Status from './cloud-storage-status';
-import Preview from './cloud-storage-preview';

 interface Props {
     cloudStorage: CloudStorage;
@@ -74,7 +75,12 @@ export default function CloudStorageItemComponent(props: Props): JSX.Element {
         <Card
             cover={(
                 <>
-                    <Preview cloudStorage={cloudStorage} />
+                    <Preview
+                        cloudStorage={cloudStorage}
+                        loadingClassName='cvat-cloud-storage-item-loading-preview'
+                        emptyPreviewClassName='cvat-cloud-storage-item-empty-preview'
+                        previewClassName='cvat-cloud-storage-item-preview'
+                    />
                     {description ? (
                         <CVATTooltip overlay={description}>
                             <QuestionCircleOutlined className='cvat-cloud-storage-description-icon' />

@@ -1,51 +0,0 @@
-// Copyright (C) 2021-2022 Intel Corporation
-//
-// SPDX-License-Identifier: MIT
-
-import React, { useEffect } from 'react';
-import { useDispatch, useSelector } from 'react-redux';
-import { PictureOutlined } from '@ant-design/icons';
-import Spin from 'antd/lib/spin';
-
-import { getCloudStoragePreviewAsync } from 'actions/cloud-storage-actions';
-import { CombinedState, CloudStorage } from 'reducers';
-
-interface Props {
-    cloudStorage: CloudStorage;
-}
-
-export default function Preview({ cloudStorage }: Props): JSX.Element {
-    const dispatch = useDispatch();
-    const preview = useSelector((state: CombinedState) => state.cloudStorages.previews[cloudStorage.id]);
-
-    useEffect(() => {
-        if (preview === undefined) {
-            dispatch(getCloudStoragePreviewAsync(cloudStorage));
-        }
-    }, [preview]);
-
-    if (!preview || (preview && preview.fetching)) {
-        return (
-            <div className='cvat-cloud-storage-item-loading-preview' aria-hidden>
-                <Spin size='default' />
-            </div>
-        );
-    }
-
-    if (preview.initialized && !preview.preview) {
-        return (
-            <div className='cvat-cloud-storage-item-empty-preview' aria-hidden>
-                <PictureOutlined />
-            </div>
-        );
-    }
-
-    return (
-        <img
-            className='cvat-cloud-storage-item-preview'
-            src={preview.preview}
-            alt='Preview image'
-            aria-hidden
-        />
-    );
-}

@@ -1,4 +1,5 @@
 // Copyright (C) 2021-2022 Intel Corporation
+// Copyright (C) 2022 CVAT.ai Corporation
 //
 // SPDX-License-Identifier: MIT
@@ -78,10 +79,11 @@
         height: $grid-unit-size * 24;
     }

-    img {
+    .cvat-cloud-storage-item-preview {
         height: $grid-unit-size * 24;
         object-fit: cover;
         margin: auto;
+        width: 100%;
     }

     .cvat-cloud-storage-item-menu-button {

@@ -0,0 +1,99 @@
+// Copyright (C) 2022 CVAT.ai Corporation
+//
+// SPDX-License-Identifier: MIT
+
+import React, { useEffect } from 'react';
+import { useDispatch, useSelector } from 'react-redux';
+import { PictureOutlined } from '@ant-design/icons';
+import Spin from 'antd/lib/spin';
+
+import { getJobPreviewAsync } from 'actions/jobs-actions';
+import { getTaskPreviewAsync } from 'actions/tasks-actions';
+import { getProjectsPreviewAsync } from 'actions/projects-actions';
+import { getCloudStoragePreviewAsync } from 'actions/cloud-storage-actions';
+import {
+    CombinedState, Job, Task, Project, CloudStorage,
+} from 'reducers';
+
+interface Props {
+    job?: Job | undefined;
+    task?: Task | undefined;
+    project?: Project | undefined;
+    cloudStorage?: CloudStorage | undefined;
+    onClick?: (event: React.MouseEvent) => void;
+    loadingClassName?: string;
+    emptyPreviewClassName?: string;
+    previewWrapperClassName?: string;
+    previewClassName?: string;
+}
+
+export default function Preview(props: Props): JSX.Element {
+    const dispatch = useDispatch();
+    const {
+        job,
+        task,
+        project,
+        cloudStorage,
+        onClick,
+        loadingClassName,
+        emptyPreviewClassName,
+        previewWrapperClassName,
+        previewClassName,
+    } = props;
+
+    const preview = useSelector((state: CombinedState) => {
+        if (job !== undefined) {
+            return state.jobs.previews[job.id];
+        } if (project !== undefined) {
+            return state.projects.previews[project.id];
+        } if (task !== undefined) {
+            return state.tasks.previews[task.id];
+        } if (cloudStorage !== undefined) {
+            return state.cloudStorages.previews[cloudStorage.id];
+        }
+        return '';
+    });
+
+    useEffect(() => {
+        if (preview === undefined) {
+            if (job !== undefined) {
+                dispatch(getJobPreviewAsync(job));
+            } else if (project !== undefined) {
+                dispatch(getProjectsPreviewAsync(project));
+            } else if (task !== undefined) {
+                dispatch(getTaskPreviewAsync(task));
+            } else if (cloudStorage !== undefined) {
+                dispatch(getCloudStoragePreviewAsync(cloudStorage));
+            }
+        }
+    }, [preview]);
+
+    if (!preview || (preview && preview.fetching)) {
+        return (
+            <div className={loadingClassName || ''} aria-hidden>
+                <Spin size='default' />
+            </div>
+        );
+    }
+
+    if (preview.initialized && !preview.preview) {
+        return (
+            <div className={emptyPreviewClassName || ''} aria-hidden>
+                <PictureOutlined />
+            </div>
+        );
+    }
+
+    return (
+        <div className={previewWrapperClassName || ''} aria-hidden>
+            <img
+                className={previewClassName || ''}
+                src={preview.preview}
+                onClick={onClick}
+                alt='Preview image'
+                aria-hidden
+            />
+        </div>
+    );
+}

@@ -7,7 +7,6 @@ import React, { useState } from 'react';
 import { useDispatch } from 'react-redux';
 import { useHistory } from 'react-router';
 import Card from 'antd/lib/card';
-import Empty from 'antd/lib/empty';
 import Descriptions from 'antd/lib/descriptions';
 import { MoreOutlined } from '@ant-design/icons';
 import Dropdown from 'antd/lib/dropdown';
@@ -16,6 +15,7 @@ import Menu from 'antd/lib/menu';
 import { MenuInfo } from 'rc-menu/lib/interface';
 import { useCardHeightHOC } from 'utils/hooks';
 import { exportActions } from 'actions/export-actions';
+import Preview from 'components/common/preview';

 const useCardHeight = useCardHeightHOC({
     containerClassName: 'cvat-jobs-page',
@@ -26,12 +26,11 @@ const useCardHeight = useCardHeightHOC({
 interface Props {
     job: any;
-    preview: string;
 }

 function JobCardComponent(props: Props): JSX.Element {
     const dispatch = useDispatch();
-    const { job, preview } = props;
+    const { job } = props;
     const [expanded, setExpanded] = useState<boolean>(false);
     const history = useHistory();
     const height = useCardHeight();
@@ -53,19 +52,14 @@ function JobCardComponent(props: Props): JSX.Element {
             className='cvat-job-page-list-item'
             cover={(
                 <>
-                    {preview ? (
-                        <img
-                            className='cvat-jobs-page-job-item-card-preview'
-                            src={preview}
-                            alt='Preview'
-                            onClick={onClick}
-                            aria-hidden
-                        />
-                    ) : (
-                        <div className='cvat-jobs-page-job-item-card-preview' onClick={onClick} aria-hidden>
-                            <Empty description='Preview not found' />
-                        </div>
-                    )}
+                    <Preview
+                        job={job}
+                        onClick={onClick}
+                        loadingClassName='cvat-job-item-loading-preview'
+                        emptyPreviewClassName='cvat-job-item-empty-preview'
+                        previewWrapperClassName='cvat-jobs-page-job-item-card-preview-wrapper'
+                        previewClassName='cvat-jobs-page-job-item-card-preview'
+                    />
                     <div className='cvat-job-page-list-item-id'>
                         ID:
                         {` ${job.id}`}

@@ -1,4 +1,5 @@
 // Copyright (C) 2022 Intel Corporation
+// Copyright (C) 2022 CVAT.ai Corporation
 //
 // SPDX-License-Identifier: MIT
@@ -59,17 +60,34 @@
     }
 }

-.cvat-jobs-page-job-item-card-preview {
-    .ant-empty-image {
-        height: $grid-unit-size * 10;
-    }
-
-    height: 100%;
-    display: flex;
-    align-items: center;
-    justify-content: space-around;
-    object-fit: cover;
-    cursor: pointer;
-}
+.cvat-jobs-page-job-item-card-preview-wrapper {
+    height: 100%;
+    width: 100%;
+
+    > .cvat-jobs-page-job-item-card-preview {
+        .ant-empty-image {
+            height: $grid-unit-size * 10;
+        }
+
+        height: 100%;
+        width: 100%;
+        display: flex;
+        align-items: center;
+        justify-content: space-around;
+        object-fit: cover;
+        cursor: pointer;
+    }
+}
+
+.cvat-job-item-loading-preview,
+.cvat-job-item-empty-preview {
+    .ant-spin {
+        position: inherit;
+    }
+
+    font-size: $grid-unit-size * 15;
+    text-align: center;
+    height: $grid-unit-size * 24;
+}

 .cvat-job-page-list-item-dimension {

@@ -1,4 +1,5 @@
 // Copyright (C) 2021-2022 Intel Corporation
+// Copyright (C) 2022 CVAT.ai Corporation
 //
 // SPDX-License-Identifier: MIT
@@ -23,8 +24,8 @@ const core = getCore();
 export default function MoveTaskModal(): JSX.Element {
     const visible = useSelector((state: CombinedState) => state.tasks.moveTask.modalVisible);
     const task = useSelector((state: CombinedState) => {
-        const [taskInstance] = state.tasks.current.filter((_task) => _task.instance.id === state.tasks.moveTask.taskId);
-        return taskInstance?.instance;
+        const [taskInstance] = state.tasks.current.filter((_task) => _task.id === state.tasks.moveTask.taskId);
+        return taskInstance;
     });
     const taskUpdating = useSelector((state: CombinedState) => state.tasks.updating);
     const dispatch = useDispatch();

@@ -48,7 +48,7 @@ export default function ProjectPageComponent(): JSX.Element {
     const id = +useParams<ParamType>().id;
     const dispatch = useDispatch();
     const history = useHistory();
-    const projects = useSelector((state: CombinedState) => state.projects.current).map((project) => project.instance);
+    const projects = useSelector((state: CombinedState) => state.projects.current);
     const projectsFetching = useSelector((state: CombinedState) => state.projects.fetching);
     const deletes = useSelector((state: CombinedState) => state.projects.activities.deletes);
     const taskDeletes = useSelector((state: CombinedState) => state.tasks.activities.deletes);
@@ -77,7 +77,7 @@ export default function ProjectPageComponent(): JSX.Element {
     const [project] = projects.filter((_project) => _project.id === id);
     const projectSubsets: Array<string> = [];
     for (const task of tasks) {
-        if (!projectSubsets.includes(task.instance.subset)) projectSubsets.push(task.instance.subset);
+        if (!projectSubsets.includes(task.subset)) projectSubsets.push(task.subset);
     }

     useEffect(() => {
@@ -121,18 +121,17 @@ export default function ProjectPageComponent(): JSX.Element {
                 <React.Fragment key={subset}>
                     {subset && <Title level={4}>{subset}</Title>}
                     {tasks
-                        .filter((task) => task.instance.projectId === project.id && task.instance.subset === subset)
+                        .filter((task) => task.projectId === project.id && task.subset === subset)
                         .map((task: Task) => (
                             <TaskItem
-                                key={task.instance.id}
-                                deleted={task.instance.id in taskDeletes ? taskDeletes[task.instance.id] : false}
+                                key={task.id}
+                                deleted={task.id in taskDeletes ? taskDeletes[task.id] : false}
                                 hidden={false}
-                                activeInference={tasksActiveInferences[task.instance.id] || null}
+                                activeInference={tasksActiveInferences[task.id] || null}
                                 cancelAutoAnnotation={() => {
-                                    dispatch(cancelInferenceAsync(task.instance.id));
+                                    dispatch(cancelInferenceAsync(task.id));
                                 }}
-                                previewImage={task.preview}
-                                taskInstance={task.instance}
+                                taskInstance={task}
                             />
                         ))}
                 </React.Fragment>

@@ -1,4 +1,5 @@
 // Copyright (C) 2020-2022 Intel Corporation
+// Copyright (C) 2022 CVAT.ai Corporation
 //
 // SPDX-License-Identifier: MIT
@@ -7,7 +8,6 @@ import moment from 'moment';
 import { useSelector } from 'react-redux';
 import { useHistory } from 'react-router';
 import Text from 'antd/lib/typography/Text';
-import Empty from 'antd/lib/empty';
 import Card from 'antd/lib/card';
 import Meta from 'antd/lib/card/Meta';
 import Dropdown from 'antd/lib/dropdown';
@@ -16,6 +16,7 @@ import { MoreOutlined } from '@ant-design/icons';
 import { CombinedState, Project } from 'reducers';
 import { useCardHeightHOC } from 'utils/hooks';
+import Preview from 'components/common/preview';
 import ProjectActionsMenuComponent from './actions-menu';

 interface Props {
@@ -31,7 +32,7 @@ const useCardHeight = useCardHeightHOC({
 export default function ProjectItemComponent(props: Props): JSX.Element {
     const {
-        projectInstance: { instance, preview },
+        projectInstance: instance,
     } = props;

     const history = useHistory();
@@ -53,21 +54,16 @@ export default function ProjectItemComponent(props: Props): JSX.Element {
     return (
         <Card
-            cover={
-                preview ? (
-                    <img
-                        className='cvat-projects-project-item-card-preview'
-                        src={preview}
-                        alt='Preview'
-                        onClick={onOpenProject}
-                        aria-hidden
-                    />
-                ) : (
-                    <div className='cvat-projects-project-item-card-preview' onClick={onOpenProject} aria-hidden>
-                        <Empty description='No tasks' />
-                    </div>
-                )
-            }
+            cover={(
+                <Preview
+                    project={instance}
+                    loadingClassName='cvat-project-item-loading-preview'
+                    emptyPreviewClassName='cvat-project-item-empty-preview'
+                    previewWrapperClassName='cvat-projects-project-item-card-preview-wrapper'
+                    previewClassName='cvat-projects-project-item-card-preview'
+                    onClick={onOpenProject}
+                />
+            )}
             size='small'
             style={style}
             className='cvat-projects-project-item-card'

@@ -1,4 +1,5 @@
 // Copyright (C) 2020-2022 Intel Corporation
+// Copyright (C) 2022 CVAT.ai Corporation
 //
 // SPDX-License-Identifier: MIT
@@ -41,7 +42,7 @@ export default function ProjectListComponent(): JSX.Element {
             <Col className='cvat-projects-list' {...dimensions}>
                 {projects.map(
                     (project: Project): JSX.Element => (
-                        <ProjectItem key={project.instance.id} projectInstance={project} />
+                        <ProjectItem key={project.id} projectInstance={project} />
                     ),
                 )}
             </Col>

@@ -1,4 +1,5 @@
 // Copyright (C) 2020-2022 Intel Corporation
+// Copyright (C) 2022 CVAT.ai Corporation
 //
 // SPDX-License-Identifier: MIT
@@ -118,27 +119,29 @@
 }

 .cvat-projects-project-item-card {
-    .cvat-projects-project-item-card-preview {
-        .ant-empty {
-            margin: $grid-unit-size;
-            height: inherit;
-            display: grid;
-
-            .ant-empty-image {
-                height: $grid-unit-size * 10;
-            }
-
-            > div:first-child {
-                margin: auto;
-            }
-        }
-
-        height: 100%;
-        display: flex;
-        align-items: center;
-        justify-content: space-around;
-        object-fit: cover;
-        cursor: pointer;
-    }
+    .cvat-projects-project-item-card-preview-wrapper {
+        height: 100%;
+
+        .cvat-projects-project-item-card-preview {
+            height: 100%;
+            width: 100%;
+            display: flex;
+            align-items: center;
+            justify-content: space-around;
+            object-fit: cover;
+            cursor: pointer;
+        }
+    }
+
+    .cvat-project-item-loading-preview,
+    .cvat-project-item-empty-preview {
+        .ant-spin {
+            position: inherit;
+        }
+
+        font-size: $grid-unit-size * 15;
+        text-align: center;
+        height: $grid-unit-size * 24;
+    }

     .cvat-projects-project-item-title {

@@ -1,4 +1,5 @@
 // Copyright (C) 2019-2022 Intel Corporation
+// Copyright (C) 2022 CVAT.ai Corporation
 //
 // SPDX-License-Identifier: MIT
@@ -18,6 +19,7 @@ import { getCore } from 'cvat-core-wrapper';
 import { getReposData, syncRepos, changeRepo } from 'utils/git-utils';
 import { ActiveInference } from 'reducers';
 import AutomaticAnnotationProgress from 'components/tasks-page/automatic-annotation-progress';
+import Preview from 'components/common/preview';
 import Descriptions from 'antd/lib/descriptions';
 import Space from 'antd/lib/space';
 import UserSelector, { User } from './user-selector';
@@ -30,7 +32,6 @@ const { Option } = Select;
 const core = getCore();

 interface Props {
-    previewImage: string;
     taskInstance: any;
     installedGit: boolean; // change to git repos url
     activeInference: ActiveInference | null;
@@ -53,8 +54,6 @@ interface State {
 export default class DetailsComponent extends React.PureComponent<Props, State> {
     private mounted: boolean;
-    private previewImageElement: HTMLImageElement;
-    private previewWrapperRef: React.RefObject<HTMLDivElement>;

     constructor(props: Props) {
         super(props);
@@ -62,8 +61,6 @@ export default class DetailsComponent extends React.PureComponent<Props, State>
         const { taskInstance } = props;

         this.mounted = false;
-        this.previewImageElement = new Image();
-        this.previewWrapperRef = React.createRef<HTMLDivElement>();
         this.state = {
             name: taskInstance.name,
             subset: taskInstance.subset,
@@ -76,25 +73,9 @@ export default class DetailsComponent extends React.PureComponent<Props, State>
     }

     public componentDidMount(): void {
-        const { taskInstance, previewImage } = this.props;
-        const { previewImageElement, previewWrapperRef } = this;
+        const { taskInstance } = this.props;
         this.mounted = true;
-
-        previewImageElement.onload = () => {
-            const { height, width } = previewImageElement;
-            if (width > height) {
-                previewImageElement.style.width = '100%';
-            } else {
-                previewImageElement.style.height = '100%';
-            }
-        };
-
-        previewImageElement.src = previewImage;
-        previewImageElement.alt = 'Preview';
-        if (previewWrapperRef.current) {
-            previewWrapperRef.current.appendChild(previewImageElement);
-        }

         getReposData(taskInstance.id)
             .then((data): void => {
                 if (data !== null && this.mounted) {
@@ -212,13 +193,6 @@ export default class DetailsComponent extends React.PureComponent<Props, State>
         );
     }

-    private renderPreview(): JSX.Element {
-        const { previewWrapperRef } = this;
-
-        // Add image on mount after get its width and height to fit it into wrapper
-        return <div ref={previewWrapperRef} className='cvat-task-preview-wrapper' />;
-    }
-
     private renderParameters(): JSX.Element {
         const { taskInstance } = this.props;
         const { overlap, segmentSize, imageQuality } = taskInstance;
@@ -414,7 +388,14 @@ export default class DetailsComponent extends React.PureComponent<Props, State>
                 <Row justify='space-between' align='top'>
                     <Col md={8} lg={7} xl={7} xxl={6}>
                         <Row justify='start' align='middle'>
-                            <Col span={24}>{this.renderPreview()}</Col>
+                            <Col span={24}>
+                                <Preview
+                                    task={taskInstance}
+                                    loadingClassName='cvat-task-item-loading-preview'
+                                    emptyPreviewClassName='cvat-task-item-empty-preview'
+                                    previewClassName='cvat-task-item-preview'
+                                />
+                            </Col>
                         </Row>
                         <Row>
                             <Col span={24}>{this.renderParameters()}</Col>

@@ -1,4 +1,5 @@
 // Copyright (C) 2020-2022 Intel Corporation
+// Copyright (C) 2022 CVAT.ai Corporation
 //
 // SPDX-License-Identifier: MIT
@@ -36,6 +37,25 @@
     .cvat-project-search-field {
         width: $grid-unit-size * 20;
     }
+
+    .cvat-task-item-loading-preview,
+    .cvat-task-item-empty-preview {
+        .ant-spin {
+            position: inherit;
+        }
+
+        height: $grid-unit-size * 18;
+        font-size: $grid-unit-size * 10;
+        text-align: center;
+        margin-bottom: $grid-unit-size * 3;
+    }
+
+    .cvat-task-item-preview {
+        width: 100%;
+        object-fit: cover;
+        margin-bottom: $grid-unit-size * 3;
+        height: $grid-unit-size * 18;
+    }
 }

 .cvat-task-page-actions-button {

@@ -1,4 +1,5 @@
 // Copyright (C) 2020-2022 Intel Corporation
+// Copyright (C) 2022 CVAT.ai Corporation
 //
 // SPDX-License-Identifier: MIT
@@ -81,7 +82,7 @@ class TaskPageComponent extends React.PureComponent<Props> {
                 className='cvat-task-details-wrapper'
             >
                 <Col md={22} lg={18} xl={16} xxl={14}>
-                    <TopBarComponent taskInstance={(task as Task).instance} />
+                    <TopBarComponent taskInstance={task as Task} />
                     <DetailsContainer task={task as Task} />
                     <JobListContainer task={task as Task} />
                 </Col>

@@ -1,4 +1,5 @@
 // Copyright (C) 2020-2022 Intel Corporation
+// Copyright (C) 2022 CVAT.ai Corporation
 //
 // SPDX-License-Identifier: MIT
@@ -91,18 +92,28 @@
         &:hover {
             border: 1px solid $border-color-hover;
         }
+    }

-    .cvat-task-item-preview-wrapper {
-        display: flex;
-        justify-content: center;
-        overflow: hidden;
-        margin: 20px;
-        margin-top: 0;
+    .cvat-task-item-loading-preview,
+    .cvat-task-item-empty-preview {
+        .ant-spin {
+            position: inherit;
+        }

-        > .cvat-task-item-preview {
-            max-width: 140px;
-            max-height: 80px;
+        font-size: $grid-unit-size * 6;
+        text-align: center;
+    }
+
+    .cvat-task-item-preview-wrapper {
+        display: flex;
+        justify-content: center;
+        overflow: hidden;
+        margin: $grid-unit-size * 3;
+        margin-top: 0;
+
+        > .cvat-task-item-preview {
+            max-width: 140px;
+            max-height: 80px;
+        }
     }
 }

@@ -1,4 +1,5 @@
 // Copyright (C) 2020-2022 Intel Corporation
+// Copyright (C) 2022 CVAT.ai Corporation
 //
 // SPDX-License-Identifier: MIT
@@ -14,12 +15,12 @@ import Progress from 'antd/lib/progress';
 import moment from 'moment';

 import ActionsMenuContainer from 'containers/actions-menu/actions-menu';
+import Preview from 'components/common/preview';
 import { ActiveInference } from 'reducers';
 import AutomaticAnnotationProgress from './automatic-annotation-progress';

 export interface TaskItemProps {
     taskInstance: any;
-    previewImage: string;
     deleted: boolean;
     hidden: boolean;
     activeInference: ActiveInference | null;
@@ -28,12 +29,16 @@ export interface TaskItemProps {
 class TaskItemComponent extends React.PureComponent<TaskItemProps & RouteComponentProps> {
     private renderPreview(): JSX.Element {
-        const { previewImage } = this.props;
+        const { taskInstance } = this.props;
         return (
             <Col span={4}>
-                <div className='cvat-task-item-preview-wrapper'>
-                    <img alt='Preview' className='cvat-task-item-preview' src={previewImage} />
-                </div>
+                <Preview
+                    task={taskInstance}
+                    loadingClassName='cvat-task-item-loading-preview'
+                    emptyPreviewClassName='cvat-task-item-empty-preview'
+                    previewWrapperClassName='cvat-task-item-preview-wrapper'
+                    previewClassName='cvat-task-item-preview'
+                />
             </Col>
         );
     }

@@ -1,4 +1,5 @@
 // Copyright (C) 2019-2022 Intel Corporation
+// Copyright (C) 2022 CVAT.ai Corporation
 //
 // SPDX-License-Identifier: MIT
@@ -29,16 +30,16 @@ interface DispatchToProps {
 function mapStateToProps(state: CombinedState, own: OwnProps): StateToProps {
     const { list } = state.plugins;
-    const [taskProject] = state.projects.current.filter((project) => project.id === own.task.instance.projectId);
+    const [taskProject] = state.projects.current.filter((project) => project.id === own.task.projectId);

     return {
         dumpers: state.formats.annotationFormats.dumpers,
         user: state.auth.user,
         installedGit: list.GIT_INTEGRATION,
-        activeInference: state.models.inferences[own.task.instance.id] || null,
+        activeInference: state.models.inferences[own.task.id] || null,
         projectSubsets: taskProject ?
             ([
-                ...new Set(taskProject.tasks.map((task: any) => task.subset).filter((subset: string) => subset)),
+                ...new Set(taskProject.subsets),
             ] as string[]) :
             [],
     };
@@ -50,7 +51,7 @@ function mapDispatchToProps(dispatch: any, own: OwnProps): DispatchToProps {
             dispatch(updateTaskAsync(taskInstance));
         },
         cancelAutoAnnotation(): void {
-            dispatch(cancelInferenceAsync(own.task.instance.id));
+            dispatch(cancelInferenceAsync(own.task.id));
         },
     };
 }
@@ -64,8 +65,7 @@ function TaskPageContainer(props: StateToProps & DispatchToProps & OwnProps): JS
         <DetailsComponent
             dumpers={dumpers}
             user={user}
-            previewImage={task.preview}
-            taskInstance={task.instance}
+            taskInstance={task}
             installedGit={installedGit}
             activeInference={activeInference}
             projectSubsets={projectSubsets}

@@ -1,4 +1,5 @@
 // Copyright (C) 2020-2022 Intel Corporation
+// Copyright (C) 2022 CVAT.ai Corporation
 //
 // SPDX-License-Identifier: MIT
@@ -26,7 +27,7 @@ function mapDispatchToProps(dispatch: any): DispatchToProps {
 function TaskPageContainer(props: DispatchToProps & OwnProps): JSX.Element {
     const { task, onJobUpdate } = props;

-    return <JobListComponent taskInstance={task.instance} onJobUpdate={onJobUpdate} />;
+    return <JobListComponent taskInstance={task} onJobUpdate={onJobUpdate} />;
 }

 export default connect(null, mapDispatchToProps)(TaskPageContainer);

@@ -37,7 +37,7 @@ function mapStateToProps(state: CombinedState, own: Props): StateToProps {
     const id = +own.match.params.id;

-    const filteredTasks = state.tasks.current.filter((task) => task.instance.id === id);
+    const filteredTasks = state.tasks.current.filter((task) => task.id === id);
     const task = filteredTasks[0] || (gettingQuery.id === id || Number.isNaN(id) ? undefined : null);
@@ -46,7 +46,7 @@ function mapStateToProps(state: CombinedState, own: Props): StateToProps {
         deleteActivity = deletes[id];
     }

-    const jobIDs = task ? Object.fromEntries(task.instance.jobs.map((job:any) => [job.id])) : {};
+    const jobIDs = task ? Object.fromEntries(task.jobs.map((job:any) => [job.id])) : {};
     const updatingJobs = Object.keys(jobUpdates);
     const jobUpdating = updatingJobs.some((jobID) => jobID in jobIDs);

@@ -1,4 +1,5 @@
 // Copyright (C) 2020-2022 Intel Corporation
+// Copyright (C) 2022 CVAT.ai Corporation
 //
 // SPDX-License-Identifier: MIT
@@ -14,7 +15,6 @@ import { cancelInferenceAsync } from 'actions/models-actions';
 interface StateToProps {
     deleted: boolean;
     hidden: boolean;
-    previewImage: string;
     taskInstance: any;
     activeInference: ActiveInference | null;
 }
@@ -35,10 +35,9 @@ function mapStateToProps(state: CombinedState, own: OwnProps): StateToProps {
     const id = own.taskID;

     return {
-        hidden: state.tasks.hideEmpty && task.instance.jobs.length === 0,
+        hidden: state.tasks.hideEmpty && task.jobs.length === 0,
         deleted: id in deletes ? deletes[id] === true : false,
-        previewImage: task.preview,
-        taskInstance: task.instance,
+        taskInstance: task,
         activeInference: state.models.inferences[id] || null,
     };
 }

@@ -1,4 +1,5 @@
 // Copyright (C) 2020-2022 Intel Corporation
+// Copyright (C) 2022 CVAT.ai Corporation
 //
 // SPDX-License-Identifier: MIT
@@ -37,7 +38,7 @@ function TasksListContainer(props: TasksListContainerProps): JSX.Element {
     return (
         <TasksListComponent
-            currentTasksIndexes={tasks.current.map((task): number => task.instance.id)}
+            currentTasksIndexes={tasks.current.map((task): number => task.id)}
         />
     );
 }

@@ -23,7 +23,7 @@ function mapStateToProps(state: CombinedState): StateToProps {
         query: tasks.gettingQuery,
         count: state.tasks.count,
         countInvisible: tasks.hideEmpty ?
-            tasks.current.filter((task: Task): boolean => !task.instance.jobs.length).length :
+            tasks.current.filter((task: Task): boolean => !task.jobs.length).length :
             0,
         importing: state.import.tasks.backup.importing,
     };

@@ -1,4 +1,5 @@
 // Copyright (C) 2021-2022 Intel Corporation
+// Copyright (C) 2022 CVAT.ai Corporation
 //
 // SPDX-License-Identifier: MIT
@@ -306,41 +307,46 @@ export default (
         case CloudStorageActionTypes.GET_CLOUD_STORAGE_PREVIEW: {
             const { cloudStorageID } = action.payload;
             const { previews } = state;
-            previews[cloudStorageID] = {
-                preview: '',
-                fetching: true,
-                initialized: false,
-            };
             return {
                 ...state,
-                previews,
+                previews: {
+                    ...previews,
+                    [cloudStorageID]: {
+                        preview: '',
+                        fetching: true,
+                        initialized: false,
+                    },
+                },
             };
         }
         case CloudStorageActionTypes.GET_CLOUD_STORAGE_PREVIEW_SUCCESS: {
             const { cloudStorageID, preview } = action.payload;
             const { previews } = state;
-            previews[cloudStorageID] = {
-                ...previews[cloudStorageID],
-                preview,
-                initialized: true,
-                fetching: false,
-            };
             return {
                 ...state,
-                previews,
+                previews: {
+                    ...previews,
+                    [cloudStorageID]: {
+                        preview,
+                        fetching: false,
+                        initialized: true,
+                    },
+                },
             };
         }
         case CloudStorageActionTypes.GET_CLOUD_STORAGE_PREVIEW_FAILED: {
            const { cloudStorageID } = action.payload;
            const { previews } = state;
-            previews[cloudStorageID] = {
-                ...previews[cloudStorageID],
-                initialized: true,
-                fetching: false,
-            };
            return {
                ...state,
-                previews,
+                previews: {
+                    ...previews,
+                    [cloudStorageID]: {
+                        ...previews[cloudStorageID],
+                        fetching: false,
+                        initialized: true,
+                    },
+                },
            };
        }
        case AuthActionTypes.LOGOUT_SUCCESS: {

@@ -46,16 +46,22 @@ export interface ProjectsQuery {
     sort: string | null;
 }

-export interface Project {
-    instance: any;
+interface Preview {
+    fetching: boolean;
+    initialized: boolean;
     preview: string;
 }

+export type Project = any;
+
 export interface ProjectsState {
     initialized: boolean;
     fetching: boolean;
     count: number;
     current: Project[];
+    previews: {
+        [index: number]: Preview;
+    };
     gettingQuery: ProjectsQuery;
     tasksGettingQuery: TasksQuery & { ordering: string };
     activities: {
@@ -78,10 +84,7 @@ export interface TasksQuery {
     projectId: number | null;
 }

-export interface Task {
-    instance: any; // cvat-core instance
-    preview: string;
-}
+export type Task = any; // cvat-core instance

 export interface JobsQuery {
     page: number;
@@ -90,12 +93,16 @@ export interface JobsQuery {
     filter: string | null;
 }

+export type Job = any;
+
 export interface JobsState {
     query: JobsQuery;
     fetching: boolean;
     count: number;
-    current: any[];
-    previews: string[];
+    current: Job[];
+    previews: {
+        [index: number]: Preview;
+    };
 }

 export interface TasksState {
@@ -110,6 +117,9 @@ export interface TasksState {
     gettingQuery: TasksQuery;
     count: number;
     current: Task[];
+    previews: {
+        [index: number]: Preview;
+    };
     activities: {
         deletes: {
             [tid: number]: boolean; // deleted (deleting if in dictionary)
@@ -214,14 +224,11 @@ export interface CloudStoragesQuery {
     filter: string | null;
 }

-interface CloudStorageAdditional {
+interface CloudStorageStatus {
     fetching: boolean;
     initialized: boolean;
     status: string | null;
-    preview: string;
 }

-type CloudStorageStatus = Pick<CloudStorageAdditional, 'fetching' | 'initialized' | 'status'>;
-type CloudStoragePreview = Pick<CloudStorageAdditional, 'fetching' | 'initialized' | 'preview'>;
-
 export type CloudStorage = any;

@@ -234,7 +241,7 @@ export interface CloudStoragesState {
         [index: number]: CloudStorageStatus;
     };
     previews: {
-        [index: number]: CloudStoragePreview;
+        [index: number]: Preview;
     };
     gettingQuery: CloudStoragesQuery;
     activities: {

@@ -15,7 +15,7 @@ const defaultState: JobsState = {
         search: null,
     },
     current: [],
-    previews: [],
+    previews: {},
 };

 export default (state: JobsState = defaultState, action: JobsActions): JobsState => {
@@ -36,7 +36,6 @@ export default (state: JobsState = defaultState, action: JobsActions): JobsState
                 fetching: false,
                 count: action.payload.jobs.count,
                 current: action.payload.jobs,
-                previews: action.payload.previews,
             };
         }
         case JobsActionTypes.GET_JOBS_FAILED: {
@@ -45,6 +44,51 @@ export default (state: JobsState = defaultState, action: JobsActions): JobsState
                 fetching: false,
             };
         }
+        case JobsActionTypes.GET_JOB_PREVIEW: {
+            const { jobID } = action.payload;
+            const { previews } = state;
+            return {
+                ...state,
+                previews: {
+                    ...previews,
+                    [jobID]: {
+                        preview: '',
+                        fetching: true,
+                        initialized: false,
+                    },
+                },
+            };
+        }
+        case JobsActionTypes.GET_JOB_PREVIEW_SUCCESS: {
+            const { jobID, preview } = action.payload;
+            const { previews } = state;
+            return {
+                ...state,
+                previews: {
+                    ...previews,
+                    [jobID]: {
+                        preview,
+                        fetching: false,
+                        initialized: true,
+                    },
+                },
+            };
+        }
+        case JobsActionTypes.GET_JOB_PREVIEW_FAILED: {
+            const { jobID } = action.payload;
+            const { previews } = state;
+            return {
+                ...state,
+                previews: {
+                    ...previews,
+                    [jobID]: {
+                        ...previews[jobID],
+                        fetching: false,
+                        initialized: true,
+                    },
+                },
+            };
+        }
         default: {
             return state;
         }

@@ -15,6 +15,7 @@ const defaultState: ProjectsState = {
     fetching: false,
     count: 0,
     current: [],
+    previews: {},
     gettingQuery: {
         page: 1,
         id: null,
@@ -63,19 +64,12 @@ export default (state: ProjectsState = defaultState, action: AnyAction): Project
                 current: [],
             };
         case ProjectsActionTypes.GET_PROJECTS_SUCCESS: {
-            const combinedWithPreviews = action.payload.array.map(
-                (project: any, index: number): Project => ({
-                    instance: project,
-                    preview: action.payload.previews[index],
-                }),
-            );
-
             return {
                 ...state,
                 initialized: true,
                 fetching: false,
                 count: action.payload.count,
-                current: combinedWithPreviews,
+                current: action.payload.array,
             };
         }
         case ProjectsActionTypes.GET_PROJECTS_FAILED: {
@@ -130,13 +124,11 @@ export default (state: ProjectsState = defaultState, action: AnyAction): Project
             return {
                 ...state,
                 current: state.current.map(
-                    (project): Project => ({
-                        ...project,
-                        instance:
-                            project.instance.id === action.payload.project.id ?
-                                action.payload.project :
-                                project.instance,
-                    }),
+                    (project): Project => (
+                        project.id === action.payload.project.id ?
+                            action.payload.project :
+                            project
+                    ),
                 ),
             };
         }
@@ -144,13 +136,9 @@ export default (state: ProjectsState = defaultState, action: AnyAction): Project
             return {
                 ...state,
                 current: state.current.map(
-                    (project): Project => ({
-                        ...project,
-                        instance:
-                            project.instance.id === action.payload.project.id ?
-                                action.payload.project :
-                                project.instance,
-                    }),
+                    (project): Project => (project.id === action.payload.project.id ?
+                        action.payload.project :
+                        project),
                 ),
             };
         }
@@ -206,6 +194,51 @@ export default (state: ProjectsState = defaultState, action: AnyAction): Project
         case AuthActionTypes.LOGOUT_SUCCESS: {
             return { ...defaultState };
         }
+        case ProjectsActionTypes.GET_PROJECT_PREVIEW: {
+            const { projectID } = action.payload;
+            const { previews } = state;
+            return {
+                ...state,
+                previews: {
+                    ...previews,
+                    [projectID]: {
+                        preview: '',
+                        fetching: true,
+                        initialized: false,
+                    },
+                },
+            };
+        }
+        case ProjectsActionTypes.GET_PROJECT_PREVIEW_SUCCESS: {
+            const { projectID, preview } = action.payload;
+            const { previews } = state;
+            return {
+                ...state,
+                previews: {
+                    ...previews,
+                    [projectID]: {
+                        preview,
+                        fetching: false,
+                        initialized: true,
+                    },
+                },
+            };
+        }
+        case ProjectsActionTypes.GET_PROJECT_PREVIEW_FAILED: {
+            const { projectID } = action.payload;
+            const { previews } = state;
+            return {
+                ...state,
+                previews: {
+                    ...previews,
+                    [projectID]: {
+                        ...previews[projectID],
+                        fetching: false,
+                        initialized: true,
+                    },
+                },
+            };
+        }
         default:
             return state;
     }

@ -23,6 +23,7 @@ const defaultState: TasksState = {
}, },
count: 0, count: 0,
current: [], current: [],
previews: {},
gettingQuery: { gettingQuery: {
page: 1, page: 1,
id: null, id: null,
@ -56,20 +57,13 @@ export default (state: TasksState = defaultState, action: AnyAction): TasksState
} : state.gettingQuery, } : state.gettingQuery,
}; };
case TasksActionTypes.GET_TASKS_SUCCESS: { case TasksActionTypes.GET_TASKS_SUCCESS: {
const combinedWithPreviews = action.payload.array.map(
(task: any, index: number): Task => ({
instance: task,
preview: action.payload.previews[index],
}),
);
return { return {
...state, ...state,
initialized: true, initialized: true,
fetching: false, fetching: false,
updating: false, updating: false,
count: action.payload.count, count: action.payload.count,
current: combinedWithPreviews, current: action.payload.array,
}; };
} }
case TasksActionTypes.GET_TASKS_FAILED: case TasksActionTypes.GET_TASKS_FAILED:
@ -140,7 +134,7 @@ export default (state: TasksState = defaultState, action: AnyAction): TasksState
return { return {
...state, ...state,
updating: false, updating: false,
current: state.current.filter((_task: Task): boolean => _task.instance.id !== taskID), current: state.current.filter((_task: Task): boolean => _task.id !== taskID),
}; };
} }
@ -149,11 +143,8 @@ export default (state: TasksState = defaultState, action: AnyAction): TasksState
updating: false, updating: false,
current: state.current.map( current: state.current.map(
(_task): Task => { (_task): Task => {
if (_task.instance.id === task.id) { if (_task.id === task.id) {
return { return task;
..._task,
instance: task,
};
} }
return _task; return _task;
@ -167,11 +158,8 @@ export default (state: TasksState = defaultState, action: AnyAction): TasksState
updating: false, updating: false,
current: state.current.map( current: state.current.map(
(task): Task => { (task): Task => {
if (task.instance.id === action.payload.task.id) { if (task.id === action.payload.task.id) {
return { return action.payload.task;
...task,
instance: action.payload.task,
};
} }
return task; return task;
@ -236,6 +224,51 @@ export default (state: TasksState = defaultState, action: AnyAction): TasksState
case AuthActionTypes.LOGOUT_SUCCESS: { case AuthActionTypes.LOGOUT_SUCCESS: {
return { ...defaultState }; return { ...defaultState };
} }
case TasksActionTypes.GET_TASK_PREVIEW: {
const { taskID } = action.payload;
const { previews } = state;
return {
...state,
previews: {
...previews,
[taskID]: {
preview: '',
fetching: true,
initialized: false,
},
},
};
}
case TasksActionTypes.GET_TASK_PREVIEW_SUCCESS: {
const { taskID, preview } = action.payload;
const { previews } = state;
return {
...state,
previews: {
...previews,
[taskID]: {
preview,
fetching: false,
initialized: true,
},
},
};
}
case TasksActionTypes.GET_TASK_PREVIEW_FAILED: {
const { taskID } = action.payload;
const { previews } = state;
return {
...state,
previews: {
...previews,
[taskID]: {
...previews[taskID],
fetching: false,
initialized: true,
},
},
};
}
default: default:
return state; return state;
} }
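The task and project preview cases above implement the same small per-ID state machine: an entry is created with fetching=true, then resolved to initialized=true with or without an image. A minimal sketch of that bookkeeping, written in Python purely for illustration (function names are hypothetical):

previews = {}

def on_preview_requested(obj_id):
    # GET_*_PREVIEW: a fetch is in flight for this task/project
    previews[obj_id] = {'preview': '', 'fetching': True, 'initialized': False}

def on_preview_success(obj_id, preview):
    # GET_*_PREVIEW_SUCCESS: store the image and mark the entry ready
    previews[obj_id] = {'preview': preview, 'fetching': False, 'initialized': True}

def on_preview_failed(obj_id):
    # GET_*_PREVIEW_FAILED: keep any existing data, stop fetching
    entry = previews.get(obj_id, {'preview': ''})
    previews[obj_id] = {**entry, 'fetching': False, 'initialized': True}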

@ -4,10 +4,13 @@
import os import os
from io import BytesIO from io import BytesIO
from datetime import datetime
from tempfile import NamedTemporaryFile
import pytz
from diskcache import Cache from diskcache import Cache
from django.conf import settings from django.conf import settings
from tempfile import NamedTemporaryFile from rest_framework.exceptions import ValidationError, NotFound
from cvat.apps.engine.log import slogger from cvat.apps.engine.log import slogger
from cvat.apps.engine.media_extractors import (Mpeg4ChunkWriter, from cvat.apps.engine.media_extractors import (Mpeg4ChunkWriter,
@ -17,6 +20,11 @@ from cvat.apps.engine.models import DataChoice, StorageChoice
from cvat.apps.engine.models import DimensionType from cvat.apps.engine.models import DimensionType
from cvat.apps.engine.cloud_provider import get_cloud_storage_instance, Credentials from cvat.apps.engine.cloud_provider import get_cloud_storage_instance, Credentials
from cvat.apps.engine.utils import md5_hash from cvat.apps.engine.utils import md5_hash
from cvat.apps.engine.cloud_provider import db_storage_to_storage_instance
from cvat.apps.engine.mime_types import mimetypes
from utils.dataset_manifest import ImageManifestManager
class CacheInteraction: class CacheInteraction:
def __init__(self, dimension=DimensionType.DIM_2D): def __init__(self, dimension=DimensionType.DIM_2D):
self._cache = Cache(settings.CACHE_ROOT) self._cache = Cache(settings.CACHE_ROOT)
@ -25,16 +33,44 @@ class CacheInteraction:
def __del__(self): def __del__(self):
self._cache.close() self._cache.close()
-    def get_buff_mime(self, chunk_number, quality, db_data):
-        chunk, tag = self._cache.get('{}_{}_{}'.format(db_data.id, chunk_number, quality), tag=True)
+    def get_buf_chunk_with_mime(self, chunk_number, quality, db_data):
+        cache_key = f'{db_data.id}_{chunk_number}_{quality}'
+        chunk, tag = self._cache.get(cache_key, tag=True)
         if not chunk:
-            chunk, tag = self.prepare_chunk_buff(db_data, quality, chunk_number)
-            self.save_chunk(db_data.id, chunk_number, quality, chunk, tag)
+            chunk, tag = self._prepare_chunk_buff(db_data, quality, chunk_number)
+            self._cache.set(cache_key, chunk, tag=tag)
         return chunk, tag

-    def prepare_chunk_buff(self, db_data, quality, chunk_number):
-        from cvat.apps.engine.frame_provider import FrameProvider # TODO: remove circular dependency
+    def get_local_preview_with_mime(self, frame_number, db_data):
+        key = f'data_{db_data.id}_{frame_number}_preview'
+        buf, mime = self._cache.get(key, tag=True)
+        if not buf:
+            buf, mime = self._prepare_local_preview(frame_number, db_data)
+            self._cache.set(key, buf, tag=mime)
+        return buf, mime
+
+    def get_cloud_preview_with_mime(self, db_storage):
+        key = f'cloudstorage_{db_storage.id}_preview'
+        preview, mime = self._cache.get(key, tag=True)
+        if not preview:
+            preview, mime = self._prepare_cloud_preview(db_storage)
+            self._cache.set(key, preview, tag=mime)
+        return preview, mime
+
+    @staticmethod
+    def _get_frame_provider():
+        from cvat.apps.engine.frame_provider import FrameProvider # TODO: remove circular dependency
+        return FrameProvider
+
+    def _prepare_chunk_buff(self, db_data, quality, chunk_number):
+        FrameProvider = self._get_frame_provider()
         writer_classes = {
             FrameProvider.Quality.COMPRESSED : Mpeg4CompressedChunkWriter if db_data.compressed_chunk_type == DataChoice.VIDEO else ZipCompressedChunkWriter,
             FrameProvider.Quality.ORIGINAL : Mpeg4ChunkWriter if db_data.original_chunk_type == DataChoice.VIDEO else ZipChunkWriter,
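All three getters above share one cache-aside shape: look up a diskcache entry whose tag carries the MIME type, build the value on a miss, and write it back under the same key. A self-contained sketch of that pattern, assuming only the diskcache package (the path and key are illustrative):

from diskcache import Cache

cache = Cache('/tmp/cvat_cache_demo')  # illustrative cache directory

def get_with_mime(key, prepare):
    # with tag=True, diskcache returns (value, tag); the tag stores the MIME type
    value, mime = cache.get(key, tag=True)
    if not value:
        value, mime = prepare()          # build the value on a cache miss
        cache.set(key, value, tag=mime)  # write back under the same key
    return value, mime

# usage: buf, mime = get_with_mime('data_1_0_preview', build_preview)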
@ -108,5 +144,42 @@ class CacheInteraction:
os.remove(image_path) os.remove(image_path)
return buff, mime_type return buff, mime_type
-    def save_chunk(self, db_data_id, chunk_number, quality, buff, mime_type):
-        self._cache.set('{}_{}_{}'.format(db_data_id, chunk_number, quality), buff, tag=mime_type)
+    def _prepare_local_preview(self, frame_number, db_data):
+        FrameProvider = self._get_frame_provider()
+        frame_provider = FrameProvider(db_data, self._dimension)
+        buf, mime = frame_provider.get_preview(frame_number)
+        return buf, mime
def _prepare_cloud_preview(self, db_storage):
storage = db_storage_to_storage_instance(db_storage)
if not db_storage.manifests.count():
raise ValidationError('Cannot get the cloud storage preview. There is no manifest file')
preview_path = None
for manifest_model in db_storage.manifests.all():
manifest_prefix = os.path.dirname(manifest_model.filename)
full_manifest_path = os.path.join(db_storage.get_storage_dirname(), manifest_model.filename)
if not os.path.exists(full_manifest_path) or \
datetime.utcfromtimestamp(os.path.getmtime(full_manifest_path)).replace(tzinfo=pytz.UTC) < storage.get_file_last_modified(manifest_model.filename):
storage.download_file(manifest_model.filename, full_manifest_path)
manifest = ImageManifestManager(
os.path.join(db_storage.get_storage_dirname(), manifest_model.filename),
db_storage.get_storage_dirname()
)
# need to update index
manifest.set_index()
if not len(manifest):
continue
preview_info = manifest[0]
preview_filename = ''.join([preview_info['name'], preview_info['extension']])
preview_path = os.path.join(manifest_prefix, preview_filename)
break
if not preview_path:
msg = 'Cloud storage {} does not contain any images'.format(db_storage.pk)
slogger.cloud_storage[db_storage.pk].info(msg)
raise NotFound(msg)
preview = storage.download_fileobj(preview_path)
mime = mimetypes.guess_type(preview_path)[0]
return preview, mime
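_prepare_cloud_preview only re-downloads a manifest when the local copy is missing or older than the object in cloud storage; that freshness check, condensed into a helper (names are taken from the surrounding code, but the helper itself is hypothetical):

import os
from datetime import datetime
import pytz

def manifest_is_stale(local_path, remote_filename, storage):
    # a missing local copy always counts as stale
    if not os.path.exists(local_path):
        return True
    local_mtime = datetime.utcfromtimestamp(os.path.getmtime(local_path)).replace(tzinfo=pytz.UTC)
    # compare against the provider-side last-modified timestamp
    return local_mtime < storage.get_file_last_modified(remote_filename)

# if manifest_is_stale(full_manifest_path, manifest_model.filename, storage):
#     storage.download_file(manifest_model.filename, full_manifest_path)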

@ -6,6 +6,7 @@
import math import math
from enum import Enum from enum import Enum
from io import BytesIO from io import BytesIO
import os
import cv2 import cv2
import numpy as np import numpy as np
@ -15,6 +16,7 @@ from cvat.apps.engine.cache import CacheInteraction
from cvat.apps.engine.media_extractors import VideoReader, ZipReader from cvat.apps.engine.media_extractors import VideoReader, ZipReader
from cvat.apps.engine.mime_types import mimetypes from cvat.apps.engine.mime_types import mimetypes
from cvat.apps.engine.models import DataChoice, StorageMethodChoice, DimensionType from cvat.apps.engine.models import DataChoice, StorageMethodChoice, DimensionType
from cvat.apps.engine.media_extractors import rotate_within_exif
from rest_framework.exceptions import ValidationError from rest_framework.exceptions import ValidationError
class RandomAccessIterator: class RandomAccessIterator:
@ -86,6 +88,7 @@ class FrameProvider:
def __init__(self, db_data, dimension=DimensionType.DIM_2D): def __init__(self, db_data, dimension=DimensionType.DIM_2D):
self._db_data = db_data self._db_data = db_data
self._dimension = dimension
self._loaders = {} self._loaders = {}
reader_class = { reader_class = {
@ -98,12 +101,12 @@ class FrameProvider:
self._loaders[self.Quality.COMPRESSED] = self.BuffChunkLoader( self._loaders[self.Quality.COMPRESSED] = self.BuffChunkLoader(
reader_class[db_data.compressed_chunk_type], reader_class[db_data.compressed_chunk_type],
cache.get_buff_mime, cache.get_buf_chunk_with_mime,
self.Quality.COMPRESSED, self.Quality.COMPRESSED,
self._db_data) self._db_data)
self._loaders[self.Quality.ORIGINAL] = self.BuffChunkLoader( self._loaders[self.Quality.ORIGINAL] = self.BuffChunkLoader(
reader_class[db_data.original_chunk_type], reader_class[db_data.original_chunk_type],
cache.get_buff_mime, cache.get_buf_chunk_with_mime,
self.Quality.ORIGINAL, self.Quality.ORIGINAL,
self._db_data) self._db_data)
else: else:
@ -162,8 +165,23 @@ class FrameProvider:
else: else:
raise RuntimeError('unsupported output type') raise RuntimeError('unsupported output type')
-    def get_preview(self):
-        return self._db_data.get_preview_path()
+    def get_preview(self, frame_number):
+        PREVIEW_SIZE = (256, 256)
+        PREVIEW_MIME = 'image/jpeg'
+
+        if self._dimension == DimensionType.DIM_3D:
+            # TODO
+            preview = Image.open(os.path.join(os.path.dirname(__file__), 'assets/3d_preview.jpeg'))
+        else:
+            preview, _ = self.get_frame(frame_number, self.Quality.COMPRESSED, self.Type.PIL)
+            preview = rotate_within_exif(preview)
+
+        preview.thumbnail(PREVIEW_SIZE)
+        output_buf = BytesIO()
+        preview.convert('RGB').save(output_buf, format="JPEG")
+
+        return output_buf, PREVIEW_MIME
def get_chunk(self, chunk_number, quality=Quality.ORIGINAL): def get_chunk(self, chunk_number, quality=Quality.ORIGINAL):
chunk_number = self._validate_chunk_number(chunk_number) chunk_number = self._validate_chunk_number(chunk_number)
@ -186,3 +204,7 @@ class FrameProvider:
def get_frames(self, start_frame, stop_frame, quality=Quality.ORIGINAL, out_type=Type.BUFFER): def get_frames(self, start_frame, stop_frame, quality=Quality.ORIGINAL, out_type=Type.BUFFER):
for idx in range(start_frame, stop_frame): for idx in range(start_frame, stop_frame):
yield self.get_frame(idx, quality=quality, out_type=out_type) yield self.get_frame(idx, quality=quality, out_type=out_type)
@property
def data_id(self):
return self._db_data.id
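get_preview now renders a thumbnail on demand instead of reading a pre-saved preview.jpeg. The essential PIL steps, sketched standalone with ImageOps.exif_transpose standing in for CVAT's rotate_within_exif:

from io import BytesIO
from PIL import Image, ImageOps

def make_preview(frame_bytes, size=(256, 256)):
    image = Image.open(BytesIO(frame_bytes))
    image = ImageOps.exif_transpose(image)  # honour EXIF orientation, like rotate_within_exif
    image.thumbnail(size)                   # in-place resize, preserves aspect ratio
    buf = BytesIO()
    image.convert('RGB').save(buf, format='JPEG')
    buf.seek(0)
    return buf, 'image/jpeg'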

@ -4,9 +4,13 @@
import logging import logging
import sys import sys
import os.path as osp
from typing import Dict from typing import Dict
from contextlib import contextmanager
from attr import define, field from attr import define, field
from django.conf import settings
from cvat.settings.base import LOGGING from cvat.settings.base import LOGGING
from .models import Job, Task, Project, CloudStorage from .models import Job, Task, Project, CloudStorage
@ -175,3 +179,25 @@ def close_all():
for logger in _opened_loggers.values(): for logger in _opened_loggers.values():
_close_logger(logger) _close_logger(logger)
@contextmanager
def get_migration_logger(migration_name):
migration_log_file = '{}.log'.format(migration_name)
stdout = sys.stdout
stderr = sys.stderr
# redirect stdout and stderr to the log file
log_file_object = open(osp.join(settings.MIGRATIONS_LOGS_ROOT, migration_log_file), 'w')
sys.stdout = log_file_object
sys.stderr = log_file_object
log = logging.getLogger(migration_name)
log.addHandler(logging.StreamHandler(stdout))
log.addHandler(logging.StreamHandler(log_file_object))
log.setLevel(logging.INFO)
try:
yield log
finally:
log_file_object.close()
sys.stdout = stdout
sys.stderr = stderr
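get_migration_logger packages the redirection boilerplate that data migrations previously repeated inline; a hypothetical migration function would use it like this:

import os
from cvat.apps.engine.log import get_migration_logger

def forwards(apps, schema_editor):
    migration_name = os.path.splitext(os.path.basename(__file__))[0]
    with get_migration_logger(migration_name) as log:
        # inside the block, stdout/stderr go to the migration's log file
        log.info('Starting migration...')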

@ -4,7 +4,6 @@ import os
import re import re
import shutil import shutil
import glob import glob
import logging
import sys import sys
import traceback import traceback
import itertools import itertools
@ -19,17 +18,18 @@ from cvat.apps.engine.media_extractors import (VideoReader, ArchiveReader, ZipRe
PdfReader , ImageListReader, Mpeg4ChunkWriter, PdfReader , ImageListReader, Mpeg4ChunkWriter,
ZipChunkWriter, ZipCompressedChunkWriter, get_mime) ZipChunkWriter, ZipCompressedChunkWriter, get_mime)
from cvat.apps.engine.models import DataChoice from cvat.apps.engine.models import DataChoice
from cvat.apps.engine.log import get_migration_logger
MIGRATION_THREAD_COUNT = 2 MIGRATION_THREAD_COUNT = 2
def fix_path(path): def fix_path(path):
ind = path.find('.upload') ind = path.find('.upload')
if ind != -1: if ind != -1:
path = path[ind + len('.upload') + 1:] path = path[ind + len('.upload') + 1:]
return path return path
def get_frame_step(frame_filter): def get_frame_step(frame_filter):
match = re.search("step\s*=\s*([1-9]\d*)", frame_filter) match = re.search(r"step\s*=\s*([1-9]\d*)", frame_filter)
return int(match.group(1)) if match else 1 return int(match.group(1)) if match else 1
def get_task_on_disk(): def get_task_on_disk():
@ -235,126 +235,110 @@ def migrate_task_schema(db_task, Data, log):
 def create_data_objects(apps, schema_editor):
     migration_name = os.path.splitext(os.path.basename(__file__))[0]
-    migration_log_file = '{}.log'.format(migration_name)
-    stdout = sys.stdout
-    stderr = sys.stderr
-    # redirect all stdout to the file
-    log_file_object = open(os.path.join(settings.MIGRATIONS_LOGS_ROOT, migration_log_file), 'w')
-    sys.stdout = log_file_object
-    sys.stderr = log_file_object
-
-    log = logging.getLogger(migration_name)
-    log.addHandler(logging.StreamHandler(stdout))
-    log.addHandler(logging.StreamHandler(log_file_object))
-    log.setLevel(logging.INFO)
+    with get_migration_logger(migration_name) as log:
         disk_tasks = get_task_on_disk()

         Task = apps.get_model('engine', 'Task')
         Data = apps.get_model('engine', 'Data')

         db_tasks = Task.objects
         task_count = db_tasks.count()
         log.info('\nStart schema migration...')
         migrated_db_tasks = []
         for counter, db_task in enumerate(db_tasks.all().iterator()):
             res = migrate_task_schema(db_task, Data, log)
             log.info('Schema migration for the task {} completed. Progress {}/{}'.format(db_task.id, counter+1, task_count))
             if res:
                 migrated_db_tasks.append(res)

         log.info('\nSchema migration is finished...')
         log.info('\nStart data migration...')

         manager = multiprocessing.Manager()
         return_dict = manager.dict()

         def create_process(db_task_id, db_data_id):
             db_data = Data.objects.get(pk=db_data_id)
             db_data_dir = os.path.join(settings.MEDIA_DATA_ROOT, str(db_data_id))
             new_raw_dir = os.path.join(db_data_dir, 'raw')

             original_video = None
             original_images = None
             if hasattr(db_data, 'video'):
                 original_video = os.path.join(new_raw_dir, db_data.video.path)
             else:
                 original_images = [os.path.realpath(os.path.join(new_raw_dir, db_image.path)) for db_image in db_data.images.all()]

             args = (db_task_id, db_data_id, original_video, original_images, db_data.size,
                 db_data.start_frame, db_data.stop_frame, db_data.frame_filter, db_data.image_quality, db_data.chunk_size, return_dict)

             return multiprocessing.Process(target=migrate_task_data, args=args)

         results = {}
         task_idx = 0
         while True:
             for res_idx in list(results.keys()):
                 res = results[res_idx]
                 if not res.is_alive():
                     del results[res_idx]
                     if res.exitcode == 0:
                         ret_code, message = return_dict[res_idx]
                         if ret_code:
                             counter = (task_idx - len(results))
                             progress = (100 * counter) / task_count
                             log.info('Data migration for the task {} completed. Progress: {:.02f}% | {}/{}.'.format(res_idx, progress, counter, task_count))
                         else:
                             log.error('Cannot migrate data for the task: {}'.format(res_idx))
                             log.error(str(message))
                             if res_idx in disk_tasks:
                                 disk_tasks.remove(res_idx)
                     else:
                         log.error('#Cannot migrate data for the task: {}'.format(res_idx))

             while task_idx < len(migrated_db_tasks) and len(results) < MIGRATION_THREAD_COUNT:
                 log.info('Start data migration for the task {}, data ID {}'.format(migrated_db_tasks[task_idx][0], migrated_db_tasks[task_idx][1]))
                 results[migrated_db_tasks[task_idx][0]] = create_process(*migrated_db_tasks[task_idx])
                 results[migrated_db_tasks[task_idx][0]].start()
                 task_idx += 1

             if len(results) == 0:
                 break

             time.sleep(5)

         if disk_tasks:
             suspicious_tasks_dir = os.path.join(settings.DATA_ROOT, 'suspicious_tasks')
             os.makedirs(suspicious_tasks_dir, exist_ok=True)
             for tid in disk_tasks:
                 suspicious_task_path = os.path.join(settings.DATA_ROOT, str(tid))
                 try:
                     shutil.move(suspicious_task_path, suspicious_tasks_dir)
                 except Exception as e:
                     log.error('Cannot move data for the suspicious task {}, \
                         that is not represented in the database.'.format(suspicious_task_path))
                     log.error(str(e))

         # DL models migration
         if apps.is_installed('auto_annotation'):
             DLModel = apps.get_model('auto_annotation', 'AnnotationModel')

             for db_model in DLModel.objects.all():
                 try:
                     old_location = os.path.join(settings.BASE_DIR, 'models', str(db_model.id))
                     new_location = os.path.join(settings.BASE_DIR, 'data', 'models', str(db_model.id))

                     if os.path.isdir(old_location):
                         shutil.move(old_location, new_location)

                         db_model.model_file.name = db_model.model_file.name.replace(old_location, new_location)
                         db_model.weights_file.name = db_model.weights_file.name.replace(old_location, new_location)
                         db_model.labelmap_file.name = db_model.labelmap_file.name.replace(old_location, new_location)
                         db_model.interpretation_file.name = db_model.interpretation_file.name.replace(old_location, new_location)

                         db_model.save()
                 except Exception as e:
                     log.error('Cannot migrate data for the DL model: {}'.format(db_model.id))
                     log.error(str(e))
-    log_file_object.close()
-    sys.stdout = stdout
-    sys.stderr = stderr
class Migration(migrations.Migration): class Migration(migrations.Migration):

@ -0,0 +1,43 @@
import os
import sys
import traceback
from django.db import migrations
from django.conf import settings
from cvat.apps.engine.log import get_migration_logger
def delete_previews(apps, schema_editor):
migration_name = os.path.splitext(os.path.basename(__file__))[0]
with get_migration_logger(migration_name) as log:
def delete_object_previews(db_objects, root_path):
for db_obj in db_objects:
preview_path = os.path.join(root_path, str(db_obj.id), 'preview.jpeg')
try:
os.remove(preview_path)
except Exception as e:
log.error(f'Cannot delete path {preview_path}')
log.error(str(e))
traceback.print_exc(file=sys.stderr)
log.info('\nDeleting Data previews...')
Data = apps.get_model('engine', 'Data')
delete_object_previews(Data.objects.all(), settings.MEDIA_DATA_ROOT)
log.info('\nDeleting Job previews...')
Job = apps.get_model('engine', 'Job')
delete_object_previews(Job.objects.all(), settings.JOBS_ROOT)
log.info('\nDeleting CloudStorage previews...')
CloudStorage = apps.get_model('engine', 'CloudStorage')
delete_object_previews(CloudStorage.objects.all(), settings.CLOUD_STORAGE_ROOT)
class Migration(migrations.Migration):
dependencies = [
('engine', '0061_auto_20221130_0844'),
]
operations = [
migrations.RunPython(
code=delete_previews
),
]
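As written, the RunPython operation has no reverse_code, so this migration cannot be unapplied. Because previews are now rebuilt on demand, a no-op reverse would arguably be safe; a sketch of that variant (not part of this commit):

    operations = [
        migrations.RunPython(
            code=delete_previews,
            # nothing to restore: previews are recreated lazily on first request
            reverse_code=migrations.RunPython.noop,
        ),
    ]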

@ -249,9 +249,6 @@ class Data(models.Model):
return os.path.join(self.get_compressed_cache_dirname(), return os.path.join(self.get_compressed_cache_dirname(),
self._get_compressed_chunk_name(chunk_number)) self._get_compressed_chunk_name(chunk_number))
def get_preview_path(self):
return os.path.join(self.get_data_dirname(), 'preview.jpeg')
def get_manifest_path(self): def get_manifest_path(self):
return os.path.join(self.get_upload_dirname(), 'manifest.jsonl') return os.path.join(self.get_upload_dirname(), 'manifest.jsonl')
@ -501,9 +498,6 @@ class Job(models.Model):
}) })
db_commit.save() db_commit.save()
def get_preview_path(self):
return os.path.join(self.get_dirname(), "preview.jpeg")
class Meta: class Meta:
default_permissions = () default_permissions = ()
@ -810,9 +804,6 @@ class CloudStorage(models.Model):
def get_log_path(self): def get_log_path(self):
return os.path.join(self.get_storage_logs_dirname(), "storage.log") return os.path.join(self.get_storage_logs_dirname(), "storage.log")
def get_preview_path(self):
return os.path.join(self.get_storage_dirname(), 'preview.jpeg')
def get_specific_attributes(self): def get_specific_attributes(self):
return parse_specific_attributes(self.specific_attributes) return parse_specific_attributes(self.specific_attributes)

@ -124,9 +124,6 @@ def _save_task_to_db(db_task, extractor):
shutil.rmtree(job_path) shutil.rmtree(job_path)
os.makedirs(job_path) os.makedirs(job_path)
preview = extractor.get_preview(frame=start_frame)
preview.save(db_job.get_preview_path())
db_task.data.save() db_task.data.save()
db_task.save() db_task.save()
@ -688,8 +685,5 @@ def _create_thread(db_task, data, isBackupRestore=False, isDatasetImport=False):
db_data.stop_frame = min(db_data.stop_frame, \ db_data.stop_frame = min(db_data.stop_frame, \
db_data.start_frame + (db_data.size - 1) * db_data.get_frame_step()) db_data.start_frame + (db_data.size - 1) * db_data.get_frame_step())
task_preview = extractor.get_preview(frame=0)
task_preview.save(db_data.get_preview_path())
slogger.glob.info("Found frames {} for Data #{}".format(db_data.size, db_data.id)) slogger.glob.info("Found frames {} for Data #{}".format(db_data.size, db_data.id))
_save_task_to_db(db_task, extractor) _save_task_to_db(db_task, extractor)

@ -17,6 +17,7 @@ from glob import glob
from io import BytesIO from io import BytesIO
from unittest import mock from unittest import mock
import logging import logging
import copy
import av import av
import numpy as np import numpy as np
@ -3095,7 +3096,12 @@ def generate_manifest_file(data_type, manifest_path, sources):
manifest.create() manifest.create()
class TaskDataAPITestCase(APITestCase): class TaskDataAPITestCase(APITestCase):
_image_sizes = {} _share_image_sizes = {}
_client_images = {}
_client_mp4_video = {}
_client_archive = {}
_client_pdf = {}
_client_mxf_video = {}
class ChunkType(str, Enum): class ChunkType(str, Enum):
IMAGESET = 'imageset' IMAGESET = 'imageset'
@ -3119,28 +3125,28 @@ class TaskDataAPITestCase(APITestCase):
img_size, data = generate_image_file(filename) img_size, data = generate_image_file(filename)
with open(path, "wb") as image: with open(path, "wb") as image:
image.write(data.read()) image.write(data.read())
cls._image_sizes[filename] = img_size cls._share_image_sizes[filename] = img_size
filename = "test_2.jpg" filename = "test_2.jpg"
path = os.path.join(settings.SHARE_ROOT, filename) path = os.path.join(settings.SHARE_ROOT, filename)
img_size, data = generate_image_file(filename) img_size, data = generate_image_file(filename)
with open(path, "wb") as image: with open(path, "wb") as image:
image.write(data.read()) image.write(data.read())
cls._image_sizes[filename] = img_size cls._share_image_sizes[filename] = img_size
filename = "test_3.jpg" filename = "test_3.jpg"
path = os.path.join(settings.SHARE_ROOT, filename) path = os.path.join(settings.SHARE_ROOT, filename)
img_size, data = generate_image_file(filename) img_size, data = generate_image_file(filename)
with open(path, "wb") as image: with open(path, "wb") as image:
image.write(data.read()) image.write(data.read())
cls._image_sizes[filename] = img_size cls._share_image_sizes[filename] = img_size
filename = "test_10.jpg" filename = "test_10.jpg"
path = os.path.join(settings.SHARE_ROOT, filename) path = os.path.join(settings.SHARE_ROOT, filename)
img_size, data = generate_image_file(filename) img_size, data = generate_image_file(filename)
with open(path, "wb") as image: with open(path, "wb") as image:
image.write(data.read()) image.write(data.read())
cls._image_sizes[filename] = img_size cls._share_image_sizes[filename] = img_size
filename = os.path.join("data", "test_3.jpg") filename = os.path.join("data", "test_3.jpg")
path = os.path.join(settings.SHARE_ROOT, filename) path = os.path.join(settings.SHARE_ROOT, filename)
@ -3148,14 +3154,14 @@ class TaskDataAPITestCase(APITestCase):
img_size, data = generate_image_file(filename) img_size, data = generate_image_file(filename)
with open(path, "wb") as image: with open(path, "wb") as image:
image.write(data.read()) image.write(data.read())
cls._image_sizes[filename] = img_size cls._share_image_sizes[filename] = img_size
filename = "test_video_1.mp4" filename = "test_video_1.mp4"
path = os.path.join(settings.SHARE_ROOT, filename) path = os.path.join(settings.SHARE_ROOT, filename)
img_sizes, data = generate_video_file(filename, width=1280, height=720) img_sizes, data = generate_video_file(filename, width=1280, height=720)
with open(path, "wb") as video: with open(path, "wb") as video:
video.write(data.read()) video.write(data.read())
cls._image_sizes[filename] = img_sizes cls._share_image_sizes[filename] = img_sizes
filename = "test_rotated_90_video.mp4" filename = "test_rotated_90_video.mp4"
path = os.path.join(os.path.dirname(__file__), 'assets', 'test_rotated_90_video.mp4') path = os.path.join(os.path.dirname(__file__), 'assets', 'test_rotated_90_video.mp4')
@ -3165,7 +3171,7 @@ class TaskDataAPITestCase(APITestCase):
img_sizes = [(frame.height, frame.width)] * container.streams.video[0].frames img_sizes = [(frame.height, frame.width)] * container.streams.video[0].frames
break break
container.close() container.close()
cls._image_sizes[filename] = img_sizes cls._share_image_sizes[filename] = img_sizes
filename = os.path.join("videos", "test_video_1.mp4") filename = os.path.join("videos", "test_video_1.mp4")
path = os.path.join(settings.SHARE_ROOT, filename) path = os.path.join(settings.SHARE_ROOT, filename)
@ -3173,14 +3179,14 @@ class TaskDataAPITestCase(APITestCase):
img_sizes, data = generate_video_file(filename, width=1280, height=720) img_sizes, data = generate_video_file(filename, width=1280, height=720)
with open(path, "wb") as video: with open(path, "wb") as video:
video.write(data.read()) video.write(data.read())
cls._image_sizes[filename] = img_sizes cls._share_image_sizes[filename] = img_sizes
filename = os.path.join("test_archive_1.zip") filename = os.path.join("test_archive_1.zip")
path = os.path.join(settings.SHARE_ROOT, filename) path = os.path.join(settings.SHARE_ROOT, filename)
img_sizes, data = generate_zip_archive_file(filename, count=5) img_sizes, data = generate_zip_archive_file(filename, count=5)
with open(path, "wb") as zip_archive: with open(path, "wb") as zip_archive:
zip_archive.write(data.read()) zip_archive.write(data.read())
cls._image_sizes[filename] = img_sizes cls._share_image_sizes[filename] = img_sizes
filename = "test_pointcloud_pcd.zip" filename = "test_pointcloud_pcd.zip"
path = os.path.join(os.path.dirname(__file__), 'assets', filename) path = os.path.join(os.path.dirname(__file__), 'assets', filename)
@ -3192,7 +3198,7 @@ class TaskDataAPITestCase(APITestCase):
with zip_file.open(info, "r") as file: with zip_file.open(info, "r") as file:
data = ValidateDimension.get_pcd_properties(file) data = ValidateDimension.get_pcd_properties(file)
image_sizes.append((int(data["WIDTH"]), int(data["HEIGHT"]))) image_sizes.append((int(data["WIDTH"]), int(data["HEIGHT"])))
cls._image_sizes[filename] = image_sizes cls._share_image_sizes[filename] = image_sizes
filename = "test_velodyne_points.zip" filename = "test_velodyne_points.zip"
path = os.path.join(os.path.dirname(__file__), 'assets', filename) path = os.path.join(os.path.dirname(__file__), 'assets', filename)
@ -3221,14 +3227,14 @@ class TaskDataAPITestCase(APITestCase):
root_path = os.path.abspath(os.path.join(root_path, filename.split(".")[0])) root_path = os.path.abspath(os.path.join(root_path, filename.split(".")[0]))
shutil.rmtree(root_path) shutil.rmtree(root_path)
cls._image_sizes[filename] = image_sizes cls._share_image_sizes[filename] = image_sizes
file_name = 'test_1.pdf' file_name = 'test_1.pdf'
path = os.path.join(settings.SHARE_ROOT, file_name) path = os.path.join(settings.SHARE_ROOT, file_name)
img_sizes, data = generate_pdf_file(file_name, page_count=5) img_sizes, data = generate_pdf_file(file_name, page_count=5)
with open(path, "wb") as pdf_file: with open(path, "wb") as pdf_file:
pdf_file.write(data.read()) pdf_file.write(data.read())
cls._image_sizes[file_name] = img_sizes cls._share_image_sizes[file_name] = img_sizes
generate_manifest_file(data_type='video', manifest_path=os.path.join(settings.SHARE_ROOT, 'videos', 'manifest.jsonl'), generate_manifest_file(data_type='video', manifest_path=os.path.join(settings.SHARE_ROOT, 'videos', 'manifest.jsonl'),
sources=[os.path.join(settings.SHARE_ROOT, 'videos', 'test_video_1.mp4')]) sources=[os.path.join(settings.SHARE_ROOT, 'videos', 'test_video_1.mp4')])
@ -3236,6 +3242,36 @@ class TaskDataAPITestCase(APITestCase):
generate_manifest_file(data_type='images', manifest_path=os.path.join(settings.SHARE_ROOT, 'manifest.jsonl'), generate_manifest_file(data_type='images', manifest_path=os.path.join(settings.SHARE_ROOT, 'manifest.jsonl'),
sources=[os.path.join(settings.SHARE_ROOT, f'test_{i}.jpg') for i in range(1,4)]) sources=[os.path.join(settings.SHARE_ROOT, f'test_{i}.jpg') for i in range(1,4)])
image_sizes, images = generate_image_files("test_1.jpg", "test_2.jpg", "test_3.jpg")
cls._client_images = {
'images': images,
'image_sizes': image_sizes,
}
image_sizes, video = generate_video_file(filename="test_video_1.mp4", width=1280, height=720)
cls._client_mp4_video = {
'video': video,
'image_sizes': image_sizes,
}
image_sizes, archive = generate_zip_archive_file("test_archive_2.zip", 7)
cls._client_archive = {
'archive': archive,
'image_sizes': image_sizes
}
image_sizes, document = generate_pdf_file("test_pdf_1.pdf", 5)
cls._client_pdf = {
'pdf': document,
'image_sizes': image_sizes
}
image_sizes, video = generate_video_file(filename="test_video_1.mxf", width=1280, height=720, codec_name='mpeg2video')
cls._client_mxf_video = {
'video': video,
'image_sizes': image_sizes,
}
@classmethod @classmethod
def tearDownClass(cls): def tearDownClass(cls):
super().tearDownClass() super().tearDownClass()
@ -3296,7 +3332,9 @@ class TaskDataAPITestCase(APITestCase):
return self.client.get(url) return self.client.get(url)
     def _get_preview(self, tid, user):
-        return self._run_api_v2_task_id_data_get(tid, user, "preview")
+        url = '/api/tasks/{}/preview'.format(tid)
+        with ForceLogin(user, self.client):
+            return self.client.get(url)
def _get_compressed_chunk(self, tid, user, number): def _get_compressed_chunk(self, tid, user, number):
return self._run_api_v2_task_id_data_get(tid, user, "chunk", "compressed", number) return self._run_api_v2_task_id_data_get(tid, user, "chunk", "compressed", number)
@ -3364,7 +3402,7 @@ class TaskDataAPITestCase(APITestCase):
self.assertEqual(response.status_code, expected_status_code) self.assertEqual(response.status_code, expected_status_code)
if expected_status_code == status.HTTP_200_OK: if expected_status_code == status.HTTP_200_OK:
if dimension == DimensionType.DIM_2D: if dimension == DimensionType.DIM_2D:
preview = Image.open(io.BytesIO(b"".join(response.streaming_content))) preview = Image.open(io.BytesIO(response.content))
self.assertLessEqual(preview.size, image_sizes[0]) self.assertLessEqual(preview.size, image_sizes[0])
# check compressed chunk # check compressed chunk
@ -3458,7 +3496,8 @@ class TaskDataAPITestCase(APITestCase):
] ]
} }
-        image_sizes, images = generate_image_files("test_1.jpg", "test_2.jpg", "test_3.jpg")
+        images = copy.deepcopy(self._client_images['images'])
+        image_sizes = self._client_images['image_sizes']
task_data = { task_data = {
"client_files[0]": images[0], "client_files[0]": images[0],
"client_files[1]": images[1], "client_files[1]": images[1],
@ -3486,10 +3525,10 @@ class TaskDataAPITestCase(APITestCase):
"image_quality": 75, "image_quality": 75,
} }
image_sizes = [ image_sizes = [
self._image_sizes[task_data["server_files[3]"]], self._share_image_sizes[task_data["server_files[3]"]],
self._image_sizes[task_data["server_files[0]"]], self._share_image_sizes[task_data["server_files[0]"]],
self._image_sizes[task_data["server_files[1]"]], self._share_image_sizes[task_data["server_files[1]"]],
self._image_sizes[task_data["server_files[2]"]], self._share_image_sizes[task_data["server_files[2]"]],
] ]
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET, self.ChunkType.IMAGESET, image_sizes, self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET, self.ChunkType.IMAGESET, image_sizes,
@ -3509,7 +3548,8 @@ class TaskDataAPITestCase(APITestCase):
{"name": "person"}, {"name": "person"},
] ]
} }
image_sizes, video = generate_video_file(filename="test_video_1.mp4", width=1280, height=720) video = copy.deepcopy(self._client_mp4_video['video'])
image_sizes = self._client_mp4_video['image_sizes']
task_data = { task_data = {
"client_files[0]": video, "client_files[0]": video,
"image_quality": 43, "image_quality": 43,
@ -3531,7 +3571,7 @@ class TaskDataAPITestCase(APITestCase):
"server_files[0]": "test_video_1.mp4", "server_files[0]": "test_video_1.mp4",
"image_quality": 57, "image_quality": 57,
} }
image_sizes = self._image_sizes[task_data["server_files[0]"]] image_sizes = self._share_image_sizes[task_data["server_files[0]"]]
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.VIDEO, self.ChunkType.VIDEO, image_sizes, self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.VIDEO, self.ChunkType.VIDEO, image_sizes,
expected_uploaded_data_location=StorageChoice.SHARE) expected_uploaded_data_location=StorageChoice.SHARE)
@ -3554,7 +3594,7 @@ class TaskDataAPITestCase(APITestCase):
"server_files[0]": os.path.join("videos", "test_video_1.mp4"), "server_files[0]": os.path.join("videos", "test_video_1.mp4"),
"image_quality": 57, "image_quality": 57,
} }
image_sizes = self._image_sizes[task_data["server_files[0]"]] image_sizes = self._share_image_sizes[task_data["server_files[0]"]]
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.VIDEO, self.ChunkType.VIDEO, image_sizes, self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.VIDEO, self.ChunkType.VIDEO, image_sizes,
expected_uploaded_data_location=StorageChoice.SHARE) expected_uploaded_data_location=StorageChoice.SHARE)
@ -3579,7 +3619,7 @@ class TaskDataAPITestCase(APITestCase):
"image_quality": 12, "image_quality": 12,
"use_zip_chunks": True, "use_zip_chunks": True,
} }
image_sizes = self._image_sizes[task_data["server_files[0]"]] image_sizes = self._share_image_sizes[task_data["server_files[0]"]]
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET, self.ChunkType.VIDEO, image_sizes, self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET, self.ChunkType.VIDEO, image_sizes,
expected_uploaded_data_location=StorageChoice.SHARE) expected_uploaded_data_location=StorageChoice.SHARE)
@ -3602,7 +3642,7 @@ class TaskDataAPITestCase(APITestCase):
"server_files[0]": "test_archive_1.zip", "server_files[0]": "test_archive_1.zip",
"image_quality": 88, "image_quality": 88,
} }
image_sizes = self._image_sizes[task_data["server_files[0]"]] image_sizes = self._share_image_sizes[task_data["server_files[0]"]]
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET, self.ChunkType.IMAGESET, image_sizes, self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET, self.ChunkType.IMAGESET, image_sizes,
expected_uploaded_data_location=StorageChoice.LOCAL) expected_uploaded_data_location=StorageChoice.LOCAL)
@ -3621,7 +3661,8 @@ class TaskDataAPITestCase(APITestCase):
{"name": "person"}, {"name": "person"},
] ]
} }
-        image_sizes, archive = generate_zip_archive_file("test_archive_2.zip", 7)
+        archive = copy.deepcopy(self._client_archive['archive'])
+        image_sizes = self._client_archive['image_sizes']
task_data = { task_data = {
"client_files[0]": archive, "client_files[0]": archive,
"image_quality": 100, "image_quality": 100,
@ -3645,7 +3686,7 @@ class TaskDataAPITestCase(APITestCase):
"use_cache": True, "use_cache": True,
} }
image_sizes = self._image_sizes[task_data["server_files[0]"]] image_sizes = self._share_image_sizes[task_data["server_files[0]"]]
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.VIDEO, self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.VIDEO,
self.ChunkType.VIDEO, image_sizes, StorageMethodChoice.CACHE, StorageChoice.SHARE) self.ChunkType.VIDEO, image_sizes, StorageMethodChoice.CACHE, StorageChoice.SHARE)
@ -3673,9 +3714,9 @@ class TaskDataAPITestCase(APITestCase):
"use_cache": True, "use_cache": True,
} }
image_sizes = [ image_sizes = [
self._image_sizes[task_data["server_files[0]"]], self._share_image_sizes[task_data["server_files[0]"]],
self._image_sizes[task_data["server_files[2]"]], self._share_image_sizes[task_data["server_files[2]"]],
self._image_sizes[task_data["server_files[1]"]], self._share_image_sizes[task_data["server_files[1]"]],
] ]
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET, self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET,
@ -3702,7 +3743,7 @@ class TaskDataAPITestCase(APITestCase):
"use_cache": True "use_cache": True
} }
image_sizes = self._image_sizes[task_data["server_files[0]"]] image_sizes = self._share_image_sizes[task_data["server_files[0]"]]
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET, self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET,
self.ChunkType.IMAGESET, image_sizes, StorageMethodChoice.CACHE, StorageChoice.LOCAL) self.ChunkType.IMAGESET, image_sizes, StorageMethodChoice.CACHE, StorageChoice.LOCAL)
@ -3722,7 +3763,8 @@ class TaskDataAPITestCase(APITestCase):
] ]
} }
image_sizes, document = generate_pdf_file("test_pdf_1.pdf", 5) document = copy.deepcopy(self._client_pdf['pdf'])
image_sizes = self._client_pdf['image_sizes']
task_data = { task_data = {
"client_files[0]": document, "client_files[0]": document,
@ -3744,8 +3786,7 @@ class TaskDataAPITestCase(APITestCase):
] ]
} }
image_sizes, document = generate_pdf_file("test_pdf_2.pdf", 4) document = copy.deepcopy(self._client_pdf['pdf'])
task_data = { task_data = {
"client_files[0]": document, "client_files[0]": document,
"image_quality": 70, "image_quality": 70,
@ -3769,7 +3810,7 @@ class TaskDataAPITestCase(APITestCase):
"image_quality": 70, "image_quality": 70,
"use_cache": True "use_cache": True
} }
image_sizes = self._image_sizes[task_data['server_files[0]']] image_sizes = self._share_image_sizes[task_data['server_files[0]']]
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.VIDEO, self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.VIDEO,
self.ChunkType.VIDEO, image_sizes, StorageMethodChoice.CACHE, self.ChunkType.VIDEO, image_sizes, StorageMethodChoice.CACHE,
@ -3796,7 +3837,7 @@ class TaskDataAPITestCase(APITestCase):
"use_zip_chunks": True "use_zip_chunks": True
} }
image_sizes = self._image_sizes['test_rotated_90_video.mp4'] image_sizes = self._share_image_sizes['test_rotated_90_video.mp4']
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET, self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET,
self.ChunkType.VIDEO, image_sizes, StorageMethodChoice.FILE_SYSTEM) self.ChunkType.VIDEO, image_sizes, StorageMethodChoice.FILE_SYSTEM)
@ -3817,7 +3858,7 @@ class TaskDataAPITestCase(APITestCase):
"use_zip_chunks": True "use_zip_chunks": True
} }
image_sizes = self._image_sizes['test_rotated_90_video.mp4'] image_sizes = self._share_image_sizes['test_rotated_90_video.mp4']
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET, self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET,
self.ChunkType.VIDEO, image_sizes, StorageMethodChoice.CACHE) self.ChunkType.VIDEO, image_sizes, StorageMethodChoice.CACHE)
@ -3830,7 +3871,8 @@ class TaskDataAPITestCase(APITestCase):
], ],
} }
image_sizes, video = generate_video_file(filename="test_video_1.mxf", width=1280, height=720, codec_name='mpeg2video') video = copy.deepcopy(self._client_mxf_video['video'])
image_sizes = self._client_mxf_video['image_sizes']
task_data = { task_data = {
"client_files[0]": video, "client_files[0]": video,
"image_quality": 51, "image_quality": 51,
@ -3852,7 +3894,7 @@ class TaskDataAPITestCase(APITestCase):
"client_files[0]": open(os.path.join(os.path.dirname(__file__), 'assets', 'test_pointcloud_pcd.zip'), 'rb'), "client_files[0]": open(os.path.join(os.path.dirname(__file__), 'assets', 'test_pointcloud_pcd.zip'), 'rb'),
"image_quality": 100, "image_quality": 100,
} }
image_sizes = self._image_sizes["test_pointcloud_pcd.zip"] image_sizes = self._share_image_sizes["test_pointcloud_pcd.zip"]
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET, self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET,
self.ChunkType.IMAGESET, self.ChunkType.IMAGESET,
image_sizes, dimension=DimensionType.DIM_3D) image_sizes, dimension=DimensionType.DIM_3D)
@ -3872,7 +3914,7 @@ class TaskDataAPITestCase(APITestCase):
'rb'), 'rb'),
"image_quality": 100, "image_quality": 100,
} }
image_sizes = self._image_sizes["test_velodyne_points.zip"] image_sizes = self._share_image_sizes["test_velodyne_points.zip"]
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET, self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET,
self.ChunkType.IMAGESET, self.ChunkType.IMAGESET,
image_sizes, dimension=DimensionType.DIM_3D) image_sizes, dimension=DimensionType.DIM_3D)
@ -3896,9 +3938,9 @@ class TaskDataAPITestCase(APITestCase):
"use_cache": True "use_cache": True
} }
image_sizes = [ image_sizes = [
self._image_sizes[task_data["server_files[0]"]], self._share_image_sizes[task_data["server_files[0]"]],
self._image_sizes[task_data["server_files[1]"]], self._share_image_sizes[task_data["server_files[1]"]],
self._image_sizes[task_data["server_files[2]"]], self._share_image_sizes[task_data["server_files[2]"]],
] ]
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET, self.ChunkType.IMAGESET, self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET, self.ChunkType.IMAGESET,
@ -3920,9 +3962,9 @@ class TaskDataAPITestCase(APITestCase):
"sorting_method": SortingMethod.PREDEFINED "sorting_method": SortingMethod.PREDEFINED
} }
image_sizes = [ image_sizes = [
self._image_sizes[task_data["server_files[0]"]], self._share_image_sizes[task_data["server_files[0]"]],
self._image_sizes[task_data["server_files[1]"]], self._share_image_sizes[task_data["server_files[1]"]],
self._image_sizes[task_data["server_files[2]"]], self._share_image_sizes[task_data["server_files[2]"]],
] ]
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET, self.ChunkType.IMAGESET, self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET, self.ChunkType.IMAGESET,
@ -3939,9 +3981,9 @@ class TaskDataAPITestCase(APITestCase):
"sorting_method": SortingMethod.NATURAL "sorting_method": SortingMethod.NATURAL
} }
image_sizes = [ image_sizes = [
self._image_sizes[task_data["server_files[2]"]], self._share_image_sizes[task_data["server_files[2]"]],
self._image_sizes[task_data["server_files[1]"]], self._share_image_sizes[task_data["server_files[1]"]],
self._image_sizes[task_data["server_files[0]"]], self._share_image_sizes[task_data["server_files[0]"]],
] ]
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET, self.ChunkType.IMAGESET, self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET, self.ChunkType.IMAGESET,
@ -3954,7 +3996,7 @@ class TaskDataAPITestCase(APITestCase):
"copy_data": False, "copy_data": False,
"use_cache": True, "use_cache": True,
} }
image_sizes = self._image_sizes[task_data["server_files[0]"]] image_sizes = self._share_image_sizes[task_data["server_files[0]"]]
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET, self.ChunkType.IMAGESET, self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET, self.ChunkType.IMAGESET,
image_sizes, StorageMethodChoice.CACHE, StorageChoice.LOCAL) image_sizes, StorageMethodChoice.CACHE, StorageChoice.LOCAL)

@ -11,7 +11,7 @@ import shutil
import traceback import traceback
from datetime import datetime from datetime import datetime
from distutils.util import strtobool from distutils.util import strtobool
from tempfile import mkstemp, NamedTemporaryFile from tempfile import mkstemp
import cv2 import cv2
from django.db.models.query import Prefetch from django.db.models.query import Prefetch
@ -45,8 +45,6 @@ from cvat.apps.engine.cloud_provider import db_storage_to_storage_instance
from cvat.apps.dataset_manager.bindings import CvatImportError from cvat.apps.dataset_manager.bindings import CvatImportError
from cvat.apps.dataset_manager.serializers import DatasetFormatsSerializer from cvat.apps.dataset_manager.serializers import DatasetFormatsSerializer
from cvat.apps.engine.frame_provider import FrameProvider from cvat.apps.engine.frame_provider import FrameProvider
from cvat.apps.engine.media_extractors import ImageListReader
from cvat.apps.engine.mime_types import mimetypes
from cvat.apps.engine.media_extractors import get_mime from cvat.apps.engine.media_extractors import get_mime
from cvat.apps.engine.models import ( from cvat.apps.engine.models import (
Job, Task, Project, Issue, Data, Job, Task, Project, Issue, Data,
@ -77,6 +75,7 @@ from .log import clogger, slogger
from cvat.apps.iam.permissions import (CloudStoragePermission, from cvat.apps.iam.permissions import (CloudStoragePermission,
CommentPermission, IssuePermission, JobPermission, ProjectPermission, CommentPermission, IssuePermission, JobPermission, ProjectPermission,
TaskPermission, UserPermission) TaskPermission, UserPermission)
from cvat.apps.engine.cache import CacheInteraction
@extend_schema(tags=['server']) @extend_schema(tags=['server'])
@ -622,6 +621,30 @@ class ProjectViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
def append_backup_chunk(self, request, file_id): def append_backup_chunk(self, request, file_id):
return self.append_tus_chunk(request, file_id) return self.append_tus_chunk(request, file_id)
@extend_schema(summary='Method returns a preview image for the project',
responses={
'200': OpenApiResponse(description='Project image preview'),
'404': OpenApiResponse(description='Project image preview not found'),
})
@action(detail=True, methods=['GET'], url_path='preview')
def preview(self, request, pk):
self._object = self.get_object() # call check_object_permissions as well
first_task = self._object.tasks.order_by('-id').first()
if not first_task:
return HttpResponseNotFound('Project image preview not found')
data_getter = DataChunkGetter(
data_type='preview',
data_quality='compressed',
data_num=first_task.data.start_frame,
task_dim=first_task.dimension
)
return data_getter(request, first_task.data.start_frame,
first_task.data.stop_frame, first_task.data)
@staticmethod @staticmethod
def _get_rq_response(queue, job_id): def _get_rq_response(queue, job_id):
queue = django_rq.get_queue(queue) queue = django_rq.get_queue(queue)
@ -648,21 +671,20 @@ class DataChunkGetter:
if not data_type or data_type not in possible_data_type_values: if not data_type or data_type not in possible_data_type_values:
raise ValidationError('Data type not specified or has wrong value') raise ValidationError('Data type not specified or has wrong value')
elif data_type == 'chunk' or data_type == 'frame': elif data_type == 'chunk' or data_type == 'frame' or data_type == 'preview':
if not data_num: if data_num is None:
raise ValidationError('Number is not specified') raise ValidationError('Number is not specified')
elif data_quality not in possible_quality_values: elif data_quality not in possible_quality_values:
raise ValidationError('Wrong quality value') raise ValidationError('Wrong quality value')
self.type = data_type self.type = data_type
self.number = int(data_num) if data_num else None self.number = int(data_num) if data_num is not None else None
self.quality = FrameProvider.Quality.COMPRESSED \ self.quality = FrameProvider.Quality.COMPRESSED \
if data_quality == 'compressed' else FrameProvider.Quality.ORIGINAL if data_quality == 'compressed' else FrameProvider.Quality.ORIGINAL
self.dimension = task_dim self.dimension = task_dim
-    def __call__(self, request, start, stop, db_data, db_object):
+    def __call__(self, request, start, stop, db_data):
if not db_data: if not db_data:
raise NotFound(detail='Cannot find requested data') raise NotFound(detail='Cannot find requested data')
@ -687,16 +709,18 @@ class DataChunkGetter:
path = os.path.realpath(frame_provider.get_chunk(self.number, self.quality)) path = os.path.realpath(frame_provider.get_chunk(self.number, self.quality))
return sendfile(request, path) return sendfile(request, path)
-        elif self.type == 'frame':
+        elif self.type == 'frame' or self.type == 'preview':
             if not (start <= self.number <= stop):
                 raise ValidationError('The frame number should be in ' +
                     f'[{start}, {stop}] range')

-            buf, mime = frame_provider.get_frame(self.number, self.quality)
-            return HttpResponse(buf.getvalue(), content_type=mime)
-
-        elif self.type == 'preview':
-            return sendfile(request, db_object.get_preview_path())
+            if self.type == 'preview':
+                cache = CacheInteraction(self.dimension)
+                buf, mime = cache.get_local_preview_with_mime(self.number, db_data)
+            else:
+                buf, mime = frame_provider.get_frame(self.number, self.quality)
+
+            return HttpResponse(buf.getvalue(), content_type=mime)
elif self.type == 'context_image': elif self.type == 'context_image':
if not (start <= self.number <= stop): if not (start <= self.number <= stop):
@ -982,13 +1006,13 @@ class TaskViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
@extend_schema(methods=['GET'], summary='Method returns data for a specific task', @extend_schema(methods=['GET'], summary='Method returns data for a specific task',
parameters=[ parameters=[
OpenApiParameter('type', location=OpenApiParameter.QUERY, required=False, OpenApiParameter('type', location=OpenApiParameter.QUERY, required=False,
type=OpenApiTypes.STR, enum=['chunk', 'frame', 'preview', 'context_image'], type=OpenApiTypes.STR, enum=['chunk', 'frame', 'context_image'],
description='Specifies the type of the requested data'), description='Specifies the type of the requested data'),
OpenApiParameter('quality', location=OpenApiParameter.QUERY, required=False, OpenApiParameter('quality', location=OpenApiParameter.QUERY, required=False,
type=OpenApiTypes.STR, enum=['compressed', 'original'], type=OpenApiTypes.STR, enum=['compressed', 'original'],
description="Specifies the quality level of the requested data, doesn't matter for 'preview' type"), description="Specifies the quality level of the requested data"),
OpenApiParameter('number', location=OpenApiParameter.QUERY, required=False, type=OpenApiTypes.INT, OpenApiParameter('number', location=OpenApiParameter.QUERY, required=False, type=OpenApiTypes.INT,
description="A unique number value identifying chunk or frame, doesn't matter for 'preview' type"), description="A unique number value identifying chunk or frame"),
], ],
responses={ responses={
'200': OpenApiResponse(description='Data of a specific type'), '200': OpenApiResponse(description='Data of a specific type'),
@ -1017,7 +1041,7 @@ class TaskViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
self._object.dimension) self._object.dimension)
return data_getter(request, self._object.data.start_frame, return data_getter(request, self._object.data.start_frame,
self._object.data.stop_frame, self._object.data, self._object.data) self._object.data.stop_frame, self._object.data)
@extend_schema(methods=['PATCH'], @extend_schema(methods=['PATCH'],
operation_id='tasks_partial_update_data_file', operation_id='tasks_partial_update_data_file',
@ -1317,6 +1341,28 @@ class TaskViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
return Response(data="Exporting a dataset from a task without data is not allowed", return Response(data="Exporting a dataset from a task without data is not allowed",
status=status.HTTP_400_BAD_REQUEST) status=status.HTTP_400_BAD_REQUEST)
@extend_schema(summary='Method returns a preview image for the task',
responses={
'200': OpenApiResponse(description='Task image preview'),
'404': OpenApiResponse(description='Task image preview not found'),
})
@action(detail=True, methods=['GET'], url_path='preview')
def preview(self, request, pk):
self._object = self.get_object() # call check_object_permissions as well
if not self._object.data:
return HttpResponseNotFound('Task image preview not found')
data_getter = DataChunkGetter(
data_type='preview',
data_quality='compressed',
data_num=self._object.data.start_frame,
task_dim=self._object.dimension
)
return data_getter(request, self._object.data.start_frame,
self._object.data.stop_frame, self._object.data)
 @extend_schema(tags=['jobs'])
 @extend_schema_view(
     retrieve=extend_schema(
@@ -1625,12 +1671,12 @@ class JobViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
         parameters=[
             OpenApiParameter('type', description='Specifies the type of the requested data',
                 location=OpenApiParameter.QUERY, required=False, type=OpenApiTypes.STR,
-                enum=['chunk', 'frame', 'preview', 'context_image']),
+                enum=['chunk', 'frame', 'context_image']),
             OpenApiParameter('quality', location=OpenApiParameter.QUERY, required=False,
                 type=OpenApiTypes.STR, enum=['compressed', 'original'],
-                description="Specifies the quality level of the requested data, doesn't matter for 'preview' type"),
+                description="Specifies the quality level of the requested data"),
             OpenApiParameter('number', location=OpenApiParameter.QUERY, required=False, type=OpenApiTypes.INT,
-                description="A unique number value identifying chunk or frame, doesn't matter for 'preview' type"),
+                description="A unique number value identifying chunk or frame"),
         ],
         responses={
             '200': OpenApiResponse(OpenApiTypes.BINARY, description='Data of a specific type'),
@@ -1646,7 +1692,7 @@ class JobViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
             db_job.segment.task.dimension)
         return data_getter(request, db_job.segment.start_frame,
-            db_job.segment.stop_frame, db_job.segment.task.data, db_job)
+            db_job.segment.stop_frame, db_job.segment.task.data)
 
     @extend_schema(summary='Method provides a meta information about media files which are related with the job',
@@ -1737,6 +1783,24 @@ class JobViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
         serializer = JobCommitSerializer(queryset, context={'request': request}, many=True)
         return Response(serializer.data)
 
+    @extend_schema(summary='Method returns a preview image for the job',
+        responses={
+            '200': OpenApiResponse(description='Job image preview'),
+        })
+    @action(detail=True, methods=['GET'], url_path='preview')
+    def preview(self, request, pk):
+        self._object = self.get_object() # call check_object_permissions as well
+
+        data_getter = DataChunkGetter(
+            data_type='preview',
+            data_quality='compressed',
+            data_num=self._object.segment.start_frame,
+            task_dim=self._object.segment.task.dimension
+        )
+
+        return data_getter(request, self._object.segment.start_frame,
+            self._object.segment.stop_frame, self._object.segment.task.data)
+
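The job endpoint mirrors the task one, but the preview comes from the job segment's start frame rather than the task's. Fetching and viewing it client-side, with placeholder URL, token, and job id:

```python
import io

import requests
from PIL import Image

BASE = "http://localhost:8080/api"            # assumed CVAT instance URL
HEADERS = {"Authorization": "Token <token>"}  # placeholder credentials

resp = requests.get(f"{BASE}/jobs/42/preview", headers=HEADERS)  # job id illustrative
resp.raise_for_status()
Image.open(io.BytesIO(resp.content)).show()  # preview of the segment's first frame
```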
 @extend_schema(tags=['issues'])
 @extend_schema_view(
     retrieve=extend_schema(
@@ -2095,47 +2159,16 @@ class CloudStorageViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
     @extend_schema(summary='Method returns a preview image from a cloud storage',
         responses={
             '200': OpenApiResponse(description='Cloud Storage preview'),
+            '400': OpenApiResponse(description='Failed to get cloud storage preview'),
+            '404': OpenApiResponse(description='Cloud Storage preview not found'),
         })
     @action(detail=True, methods=['GET'], url_path='preview')
     def preview(self, request, pk):
-        storage = None
         try:
             db_storage = self.get_object()
-            if not os.path.exists(db_storage.get_preview_path()):
-                storage = db_storage_to_storage_instance(db_storage)
-                if not db_storage.manifests.count():
-                    raise ValidationError('Cannot get the cloud storage preview. There is no manifest file')
-                preview_path = None
-                for manifest_model in db_storage.manifests.all():
-                    manifest_prefix = os.path.dirname(manifest_model.filename)
-                    full_manifest_path = os.path.join(db_storage.get_storage_dirname(), manifest_model.filename)
-                    if not os.path.exists(full_manifest_path) or \
-                        datetime.utcfromtimestamp(os.path.getmtime(full_manifest_path)).replace(tzinfo=pytz.UTC) < storage.get_file_last_modified(manifest_model.filename):
-                        storage.download_file(manifest_model.filename, full_manifest_path)
-                    manifest = ImageManifestManager(
-                        os.path.join(db_storage.get_storage_dirname(), manifest_model.filename),
-                        db_storage.get_storage_dirname()
-                    )
-                    # need to update index
-                    manifest.set_index()
-                    if not len(manifest):
-                        continue
-                    preview_info = manifest[0]
-                    preview_filename = ''.join([preview_info['name'], preview_info['extension']])
-                    preview_path = os.path.join(manifest_prefix, preview_filename)
-                    break
-                if not preview_path:
-                    msg = 'Cloud storage {} does not contain any images'.format(pk)
-                    slogger.cloud_storage[pk].info(msg)
-                    return HttpResponseBadRequest(msg)
-                with NamedTemporaryFile() as temp_image:
-                    storage.download_file(preview_path, temp_image.name)
-                    reader = ImageListReader([temp_image.name])
-                    preview = reader.get_preview(frame=0)
-                    preview.save(db_storage.get_preview_path())
-            content_type = mimetypes.guess_type(db_storage.get_preview_path())[0]
-            return HttpResponse(open(db_storage.get_preview_path(), 'rb').read(), content_type)
+            cache = CacheInteraction()
+            preview, mime = cache.get_cloud_preview_with_mime(db_storage)
+            return HttpResponse(preview, mime)
         except CloudStorageModel.DoesNotExist:
             message = f"Storage {pk} does not exist"
             slogger.glob.error(message)
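`CacheInteraction.get_cloud_preview_with_mime` is defined elsewhere in this commit, so only its call site is visible here. A condensed, hypothetical sketch of the contract it appears to fulfil (the manifest walk deleted above runs only on a cache miss); every name beyond the visible ones is an assumption:

```python
import mimetypes
from typing import Callable, Tuple

from diskcache import Cache

class PreviewCacheSketch:
    """Illustrative stand-in for the commit's cache layer, not the real class."""

    def __init__(self, build: Callable[[int], Tuple[bytes, str]]):
        self._cache = Cache('/tmp/cvat_cache_demo')  # assumed backing store
        self._build = build  # expensive builder, e.g. the old manifest walk

    def get_cloud_preview_with_mime(self, storage_id: int) -> Tuple[bytes, str]:
        key = f'cloudstorage_{storage_id}_preview'  # assumed key scheme
        item = self._cache.get(key)
        if item is None:  # miss: build once, then serve from cache
            preview, filename = self._build(storage_id)
            item = (preview, mimetypes.guess_type(filename)[0])
            self._cache.set(key, item)
        return item
```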

@@ -507,6 +507,7 @@ class ProjectPermission(OpenPolicyAgentPermission):
             ('import_backup', 'POST'): 'import:backup',
             ('append_backup_chunk', 'PATCH'): 'import:backup',
             ('append_backup_chunk', 'HEAD'): 'import:backup',
+            ('preview', 'GET'): 'view',
         }.get((view.action, request.method))
 
         scopes = []
@@ -669,6 +670,7 @@ class TaskPermission(OpenPolicyAgentPermission):
             ('append_backup_chunk', 'PATCH'): 'import:backup',
             ('append_backup_chunk', 'HEAD'): 'import:backup',
             ('export_backup', 'GET'): 'export:backup',
+            ('preview', 'GET'): 'view',
         }.get((view.action, request.method))
 
         scopes = []
@@ -914,7 +916,8 @@ class JobPermission(OpenPolicyAgentPermission):
             ('metadata','GET'): 'view:metadata',
             ('metadata','PATCH'): 'update:metadata',
             ('issues', 'GET'): 'view',
-            ('commits', 'GET'): 'view:commits'
+            ('commits', 'GET'): 'view:commits',
+            ('preview', 'GET'): 'view',
         }.get((view.action, request.method))
 
         scopes = []
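Each preview action maps onto the existing `view` scope, so no new OPA rules are required: whoever may see a project, task, or job may fetch its preview. A toy reproduction of the lookup pattern these permission classes share:

```python
from typing import Optional

# Toy version of the (view.action, request.method) -> scope lookup above.
SCOPES = {
    ('retrieve', 'GET'): 'view',
    ('commits', 'GET'): 'view:commits',
    ('preview', 'GET'): 'view',  # the entry added by this commit
}

def scope_for(action: str, method: str) -> Optional[str]:
    # Unknown pairs yield None, which the caller treats as "no scope matched".
    return SCOPES.get((action, method))

assert scope_for('preview', 'GET') == 'view'
```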

@@ -41,7 +41,7 @@ tensorflow==2.9.3 # Optional requirement of Datumaro. Use tensorflow-macos==2.8.
 # The package is used by pyunpack as a command line tool to support multiple
 # archives. Don't use as a python module because it has GPL license.
 patool==1.12
-diskcache==5.0.2
+diskcache==5.4.0
 boto3==1.17.61
 azure-storage-blob==12.13.0
 google-cloud-storage==1.42.0
