Add simple filters for collections (#5575)

Added simple filter queries for collections in the server API. These
queries cover the simpler cases; more complex filters should still use the
JSON filter syntax (see the example sketch after the list below).

- Added simple filters for collection endpoints in API (e.g.
`/tasks?project_id=42&user=myusername`)
- Removed duplicate collection endpoints from the API in favor of their full
  versions:
  - `/projects/{id}/tasks` -> `/tasks?project_id={id}`
  - `/tasks/{id}/jobs` -> `/jobs?task_id={id}`
  - `/jobs/{id}/issues` -> `/issues?job_id={id}`
  - `/issues/{id}/comments` -> `/comments?issue_id={id}`
  - Corresponding owning objects now return a link to the collection:
    - `/projects/{id}`: `.tasks`
    - `/tasks/{id}`: `.jobs`
    - `/jobs/{id}`: `.issues`
    - `/issues/{id}`: `.comments`
- Fixed errors in the generated server API schema:
  - Input and output types in all the basic model methods (`Create`,
    `Update`, `Retrieve`, `Delete`)
  - Removed invalid separation for the project list operation response
    type
- Updated the file structure for projects in the dataset cache to store
  the new project representation; old caches will be invalidated.
- Added tests
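
For illustration, here is a minimal request sketch. The host, token, and ids
are hypothetical; the endpoints, the simple-filter form, and the JSON-logic
shape come from this change:

```python
import json
import requests  # assumption: any HTTP client works; requests is used for brevity

BASE = "http://localhost:8080/api"            # hypothetical CVAT instance
HEADERS = {"Authorization": "Token <token>"}  # placeholder credentials

# Simple filter: plain query parameters, equality checks only
tasks = requests.get(
    f"{BASE}/tasks",
    params={"project_id": 42, "user": "myusername"},
    headers=HEADERS,
).json()

# Complex filter: JSON-logic syntax in the 'filter' parameter
flt = json.dumps({"and": [{"==": [{"var": "task_id"}, 42]}]})
jobs = requests.get(f"{BASE}/jobs", params={"filter": flt}, headers=HEADERS).json()
```

The simple form maps each query parameter to an equality check; anything
richer (or, less, greater, not) still goes through the `filter` parameter.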
Branch: main
Author: Maxim Zhiltsov, committed via GitHub 3 years ago
Parent: ba49fc4029
Commit: 0280b5fb64

@ -22,6 +22,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- YOLO v7 serverless feature added using ONNX backend (<https://github.com/opencv/cvat/pull/5552>)
- Cypress test for social account authentication (<https://github.com/opencv/cvat/pull/5444>)
- Dummy github and google authentication servers (<https://github.com/opencv/cvat/pull/5444>)
- \[Server API\] Simple filters for object collection endpoints
(<https://github.com/opencv/cvat/pull/5575>)
### Changed
- The Docker Compose files now use the Compose Specification version
@ -31,8 +33,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
The corresponding arguments are keyword-only now.
(<https://github.com/opencv/cvat/pull/5502>)
- \[Server API\] Added missing pagination or pagination parameters in
`/project/{id}/tasks`, `/tasks/{id}/jobs`, `/jobs/{id}/issues`,
`/jobs/{id}/commits`, `/issues/{id}/comments`, `/organizations`
`/jobs/{id}/commits`, `/organizations`
(<https://github.com/opencv/cvat/pull/5557>)
- Windows Installation Instructions adjusted to work around <https://github.com/nuclio/nuclio/issues/1821>
- The contour detection function for semantic segmentation (<https://github.com/opencv/cvat/pull/4665>)
@ -40,9 +41,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- DL models UI (<https://github.com/opencv/cvat/pull/5635>)
### Deprecated
- TDB
- TBD
### Removed
- \[Server API\] Endpoints with collections are removed in favor of their full variants
`/project/{id}/tasks`, `/tasks/{id}/jobs`, `/jobs/{id}/issues`, `/issues/{id}/comments`.
Corresponding fields are added or changed to provide a link to the child collection
in `/projects/{id}`, `/tasks/{id}`, `/jobs/{id}`, `/issues/{id}`
(<https://github.com/opencv/cvat/pull/5575>)
- Limit on the maximum number of manifest files that can be added for cloud storage (<https://github.com/opencv/cvat/pull/5660>)
### Fixed
@ -53,6 +59,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Fix the type of the credentials parameter of make_client from the Python SDK
- Reduced number of noisy information on ortho views for 3D canvas (<https://github.com/opencv/cvat/pull/5608>)
- Clean up disk space after a project is removed (<https://github.com/opencv/cvat/pull/5632>)
- \[Server API\] Various errors in the generated schema (<https://github.com/opencv/cvat/pull/5575>)
- SiamMask and TransT serverless functions (<https://github.com/opencv/cvat/pull/5658>)
### Security

@ -20,7 +20,6 @@ import {
import User from './user';
import { AnnotationFormats } from './annotation-formats';
import { ArgumentError } from './exceptions';
import { Task, Job } from './session';
import Project from './project';
import CloudStorage from './cloud-storage';
@ -167,8 +166,8 @@ export default function implementAPI(cvat) {
return users;
};
cvat.jobs.get.implementation = async (filter) => {
checkFilter(filter, {
cvat.jobs.get.implementation = async (query) => {
checkFilter(query, {
page: isInteger,
filter: isString,
sort: isString,
@ -177,30 +176,24 @@ export default function implementAPI(cvat) {
jobID: isInteger,
});
if ('taskID' in filter && 'jobID' in filter) {
throw new ArgumentError('Filter fields "taskID" and "jobID" are not permitted to be used at the same time');
}
if ('taskID' in filter) {
const [task] = await serverProxy.tasks.get({ id: filter.taskID });
if (task) {
return new Task(task).jobs;
checkExclusiveFields(query, ['jobID', 'taskID', 'filter', 'search'], ['page', 'sort']);
if ('jobID' in query) {
const job = await serverProxy.jobs.get({ id: query.jobID });
if (job) {
return [new Job(job)];
}
return [];
}
if ('jobID' in filter) {
const job = await serverProxy.jobs.get({ id: filter.jobID });
if (job) {
return [new Job(job)];
}
if ('taskID' in query) {
query.filter = JSON.stringify({ and: [{ '==': [{ var: 'task_id' }, query.taskID] }] });
}
const searchParams = {};
for (const key of Object.keys(filter)) {
for (const key of Object.keys(query)) {
if (['page', 'sort', 'search', 'filter'].includes(key)) {
searchParams[key] = filter[key];
searchParams[key] = query[key];
}
}
@ -229,8 +222,7 @@ export default function implementAPI(cvat) {
}
}
let tasksData = null;
if (filter.projectId) {
if ('projectId' in filter) {
if (searchParams.filter) {
const parsed = JSON.parse(searchParams.filter);
searchParams.filter = JSON.stringify({ and: [parsed, { '==': [{ var: 'project_id' }, filter.projectId] }] });
@ -239,8 +231,19 @@ export default function implementAPI(cvat) {
}
}
tasksData = await serverProxy.tasks.get(searchParams);
const tasks = tasksData.map((task) => new Task(task));
const tasksData = await serverProxy.tasks.get(searchParams);
const tasks = await Promise.all(tasksData.map(async (taskItem) => {
// Temporary workaround for UI
// Fixme: too many requests on the tasks page
let jobs = { results: [] };
if ('id' in filter) {
jobs = await serverProxy.jobs.get({
filter: JSON.stringify({ and: [{ '==': [{ var: 'task_id' }, taskItem.id] }] }),
}, true);
}
return new Task({ ...taskItem, jobs: jobs.results });
}));
tasks.count = tasksData.count;
return tasks;
};
@ -263,11 +266,7 @@ export default function implementAPI(cvat) {
}
const projectsData = await serverProxy.projects.get(searchParams);
const projects = projectsData.map((project) => {
project.task_ids = project.tasks;
return project;
}).map((project) => new Project(project));
const projects = projectsData.map((project) => new Project(project));
projects.count = projectsData.count;
return projects;

@ -1,14 +1,14 @@
// Copyright (C) 2021-2022 Intel Corporation
// Copyright (C) 2022 CVAT.ai Corporation
// Copyright (C) 2022-2023 CVAT.ai Corporation
//
// SPDX-License-Identifier: MIT
import { isBrowser, isNode } from 'browser-or-node';
import PluginRegistry from './plugins';
import serverProxy from './server-proxy';
import { ArgumentError } from './exceptions';
import { CloudStorageCredentialsType, CloudStorageProviderType, CloudStorageStatus } from './enums';
import User from './user';
import { decodePreview } from './frames';
function validateNotEmptyString(value: string): void {
if (typeof value !== 'string') {
@ -362,17 +362,7 @@ Object.defineProperties(CloudStorage.prototype.getPreview, {
return new Promise((resolve, reject) => {
serverProxy.cloudStorages
.getPreview(this.id)
.then((result) => {
if (isNode) {
resolve(global.Buffer.from(result, 'binary').toString('base64'));
} else if (isBrowser) {
const reader = new FileReader();
reader.onload = () => {
resolve(reader.result);
};
reader.readAsDataURL(result);
}
})
.then((result) => decodePreview(result))
.catch((error) => {
reject(error);
});

@ -51,10 +51,10 @@ export function checkExclusiveFields(obj, exclusive, ignore): void {
exclusive: [],
other: [],
};
for (const field in Object.keys(obj)) {
if (!(field in ignore)) {
if (field in exclusive) {
if (fields.other.length) {
for (const field in obj) {
if (!(ignore.includes(field))) {
if (exclusive.includes(field)) {
if (fields.other.length || fields.exclusive.length) {
throw new ArgumentError(`Do not use the filter field "${field}" with others`);
}
fields.exclusive.push(field);

@ -618,26 +618,20 @@ export async function getContextImage(jobID, frame) {
return frameDataCache[jobID].frameBuffer.getContextImage(frame);
}
export async function getPreview(taskID = null, jobID = null) {
export function decodePreview(preview: Blob): Promise<string> {
return new Promise((resolve, reject) => {
// Just go to server and get preview (no any cache)
serverProxy.frames
.getPreview(taskID, jobID)
.then((result) => {
if (isNode) {
// eslint-disable-next-line no-undef
resolve(global.Buffer.from(result, 'binary').toString('base64'));
} else if (isBrowser) {
const reader = new FileReader();
reader.onload = () => {
resolve(reader.result);
};
reader.readAsDataURL(result);
}
})
.catch((error) => {
if (isNode) {
resolve(global.Buffer.from(preview, 'binary').toString('base64'));
} else if (isBrowser) {
const reader = new FileReader();
reader.onload = () => {
resolve(reader.result as string);
};
reader.onerror = (error) => {
reject(error);
});
};
reader.readAsDataURL(preview);
}
});
}

@ -1,11 +1,10 @@
// Copyright (C) 2020-2022 Intel Corporation
// Copyright (C) 2022 CVAT.ai Corporation
// Copyright (C) 2022-2023 CVAT.ai Corporation
//
// SPDX-License-Identifier: MIT
import quickhull from 'quickhull';
import { Job } from 'session';
import PluginRegistry from './plugins';
import Comment, { RawCommentData } from './comment';
import User from './user';
@ -13,36 +12,37 @@ import { ArgumentError } from './exceptions';
import serverProxy from './server-proxy';
interface RawIssueData {
job: number;
position: number[];
frame: number;
id?: number;
job?: any;
position?: number[];
comments?: any;
frame?: number;
comments?: RawCommentData[];
owner?: any;
resolved?: boolean;
created_date?: string;
}
export default class Issue {
public readonly id: number;
public readonly job: Job;
public readonly comments: Comment[];
public readonly id?: number;
public readonly job: number;
public readonly frame: number;
public readonly owner: User;
public readonly resolved: boolean;
public readonly createdDate: string;
public position: number[];
public readonly owner?: User;
public readonly comments: Comment[];
public readonly resolved?: boolean;
public readonly createdDate?: string;
public position?: number[];
private readonly __internal: RawIssueData & { comments: Comment[] };
constructor(initialData: RawIssueData) {
const data: RawIssueData = {
const data: RawIssueData & { comments: Comment[] } = {
id: undefined,
job: undefined,
position: undefined,
comments: [],
frame: undefined,
created_date: undefined,
owner: undefined,
resolved: undefined,
comments: undefined,
};
for (const property in data) {
@ -53,14 +53,16 @@ export default class Issue {
if (data.owner && !(data.owner instanceof User)) data.owner = new User(data.owner);
if (data.comments) {
data.comments = data.comments.map((comment) => new Comment(comment));
}
if (typeof data.created_date === 'undefined') {
data.created_date = new Date().toISOString();
}
if (Array.isArray(initialData.comments)) {
data.comments = initialData.comments.map((comment: RawCommentData): Comment => new Comment(comment));
} else {
data.comments = [];
}
Object.defineProperties(
this,
Object.freeze({
@ -80,7 +82,7 @@ export default class Issue {
get: () => data.job,
},
comments: {
get: () => [...data.comments],
get: () => data.comments,
},
frame: {
get: () => data.frame,
@ -144,19 +146,15 @@ export default class Issue {
}
public serialize(): RawIssueData {
const { comments } = this;
const data: RawIssueData = {
job: this.job,
position: this.position,
frame: this.frame,
comments: comments.map((comment) => comment.serialize()),
};
if (typeof this.id === 'number') {
data.id = this.id;
}
if (typeof this.job === 'number') {
data.job = this.job;
}
if (typeof this.createdDate === 'string') {
data.created_date = this.createdDate;
}
@ -175,7 +173,7 @@ Object.defineProperties(Issue.prototype.comment, {
implementation: {
writable: false,
enumerable: false,
value: async function implementation(data: RawCommentData) {
value: async function implementation(this: Issue, data: RawCommentData) {
if (typeof data !== 'object' || data === null) {
throw new ArgumentError(`The argument "data" must be an object. Got "${data}"`);
}
@ -183,15 +181,16 @@ Object.defineProperties(Issue.prototype.comment, {
throw new ArgumentError(`Comment message must be a not empty string. Got "${data.message}"`);
}
const internalData = Object.getOwnPropertyDescriptor(this, '__internal').get();
const comment = new Comment(data);
if (typeof this.id === 'number') {
const serialized = comment.serialize();
serialized.issue = this.id;
const response = await serverProxy.comments.create(serialized);
const savedComment = new Comment(response);
this.__internal.comments.push(savedComment);
internalData.comments.push(savedComment);
} else {
this.__internal.comments.push(comment);
internalData.comments.push(comment);
}
},
},

@ -6,7 +6,7 @@
import { Storage } from './storage';
import serverProxy from './server-proxy';
import { getPreview } from './frames';
import { decodePreview } from './frames';
import Project from './project';
import { exportDataset, importDataset } from './annotations';
@ -16,7 +16,6 @@ export default function implementProject(projectClass) {
if (typeof this.id !== 'undefined') {
const projectData = this._updateTrigger.getUpdated(this, {
bugTracker: 'bug_tracker',
trainingProject: 'training_project',
assignee: 'assignee_id',
});
if (projectData.assignee_id) {
@ -41,10 +40,6 @@ export default function implementProject(projectClass) {
projectSpec.bug_tracker = this.bugTracker;
}
if (this.trainingProject) {
projectSpec.training_project = this.trainingProject;
}
if (this.targetStorage) {
projectSpec.target_storage = this.targetStorage.toJSON();
}
@ -63,11 +58,9 @@ export default function implementProject(projectClass) {
};
projectClass.prototype.preview.implementation = async function () {
if (!this._internalData.task_ids.length) {
return '';
}
const frameData = await getPreview(this._internalData.task_ids[0]);
return frameData;
const preview = await serverProxy.projects.getPreview(this.id);
const decoded = await decodePreview(preview);
return decoded;
};
projectClass.prototype.annotations.exportDataset.implementation = async function (

@ -19,13 +19,12 @@ export default class Project {
name: undefined,
status: undefined,
assignee: undefined,
organization: undefined,
owner: undefined,
bug_tracker: undefined,
created_date: undefined,
updated_date: undefined,
task_subsets: undefined,
training_project: undefined,
task_ids: undefined,
dimension: undefined,
source_storage: undefined,
target_storage: undefined,
@ -47,10 +46,6 @@ export default class Project {
.map((labelData) => new Label(labelData)).filter((label) => !label.hasParent);
}
if (typeof initialData.training_project === 'object') {
data.training_project = { ...initialData.training_project };
}
Object.defineProperties(
this,
Object.freeze({
@ -83,6 +78,9 @@ export default class Project {
owner: {
get: () => data.owner,
},
organization: {
get: () => data.organization,
},
bugTracker: {
get: () => data.bug_tracker,
set: (tracker) => {
@ -125,22 +123,6 @@ export default class Project {
subsets: {
get: () => [...data.task_subsets],
},
trainingProject: {
get: () => {
if (typeof data.training_project === 'object') {
return { ...data.training_project };
}
return data.training_project;
},
set: (updatedProject) => {
if (typeof training === 'object') {
data.training_project = { ...updatedProject };
} else {
data.training_project = updatedProject;
}
updateTrigger.update('trainingProject');
},
},
sourceStorage: {
get: () => (
new Storage({

@ -1232,47 +1232,25 @@ async function createTask(taskSpec, taskDataSpec, onUpdate) {
return createdTask[0];
}
async function getJobs(filter = {}) {
const { backendAPI } = config;
const id = filter.id || null;
let response = null;
try {
if (id !== null) {
response = await Axios.get(`${backendAPI}/jobs/${id}`, {
proxy: config.proxy,
});
} else {
response = await Axios.get(`${backendAPI}/jobs`, {
proxy: config.proxy,
params: {
...filter,
page_size: 12,
},
});
}
} catch (errorData) {
throw generateError(errorData);
}
return response.data;
}
function fetchAll(url): Promise<any[]> {
function fetchAll(url, filter = {}): Promise<any> {
const pageSize = 500;
let collection = [];
const result = {
count: 0,
results: [],
};
return new Promise((resolve, reject) => {
Axios.get(url, {
params: {
...filter,
page_size: pageSize,
page: 1,
},
proxy: config.proxy,
}).then((initialData) => {
const { count, results } = initialData.data;
collection = collection.concat(results);
result.results = result.results.concat(results);
if (count <= pageSize) {
resolve(collection);
resolve(result);
return;
}
@ -1281,6 +1259,7 @@ function fetchAll(url): Promise<any[]> {
if (i) {
return Axios.get(url, {
params: {
...filter,
page_size: pageSize,
page: i + 1,
},
@ -1294,33 +1273,94 @@ function fetchAll(url): Promise<any[]> {
Promise.all(promises).then((responses: AxiosResponse<any, any>[]) => {
responses.forEach((resp) => {
if (resp) {
collection = collection.concat(resp.data.results);
result.results = result.results.concat(resp.data.results);
}
});
// removing possible duplicates
const obj = collection.reduce((acc: Record<string, any>, item: any) => {
const obj = result.results.reduce((acc: Record<string, any>, item: any) => {
acc[item.id] = item;
return acc;
}, {});
resolve(Object.values(obj));
result.results = Object.values(obj);
result.count = result.results.length;
resolve(result);
}).catch((error) => reject(error));
}).catch((error) => reject(error));
});
}
async function getJobIssues(jobID) {
async function getJobs(filter = {}, aggregate = false) {
const { backendAPI } = config;
const id = filter.id || null;
let response = null;
try {
response = await fetchAll(`${backendAPI}/jobs/${jobID}/issues`);
if (id !== null) {
response = await Axios.get(`${backendAPI}/jobs/${id}`, {
proxy: config.proxy,
});
} else {
if (aggregate) {
return await fetchAll(`${backendAPI}/jobs`, {
...filter,
...enableOrganization(),
});
}
response = await Axios.get(`${backendAPI}/jobs`, {
proxy: config.proxy,
params: {
...filter,
page_size: 12,
},
});
}
} catch (errorData) {
throw generateError(errorData);
}
return response;
return response.data;
}
async function getJobIssues(jobID: number) {
const { backendAPI } = config;
let response = null;
try {
const organization = enableOrganization();
response = await fetchAll(`${backendAPI}/issues`, {
job_id: jobID,
...organization,
});
const commentsResponse = await fetchAll(`${backendAPI}/comments`, {
job_id: jobID,
...organization,
});
const issuesById = response.results.reduce((acc, val: { id: number }) => {
acc[val.id] = val;
return acc;
}, {});
const commentsByIssue = commentsResponse.results.reduce((acc, val) => {
acc[val.issue] = acc[val.issue] || [];
acc[val.issue].push(val);
return acc;
}, {});
for (const issue of Object.keys(commentsByIssue)) {
commentsByIssue[issue].sort((a, b) => a.id - b.id);
issuesById[issue].comments = commentsByIssue[issue];
}
} catch (errorData) {
throw generateError(errorData);
}
return response.results;
}
async function createComment(data) {
@ -1346,12 +1386,21 @@ async function createIssue(data) {
let response = null;
try {
const organization = enableOrganization();
response = await Axios.post(`${backendAPI}/issues`, JSON.stringify(data), {
proxy: config.proxy,
params: { ...organization },
headers: {
'Content-Type': 'application/json',
},
});
const commentsResponse = await fetchAll(`${backendAPI}/comments`, {
issue_id: response.data.id,
...organization,
});
response.data.comments = commentsResponse.results;
} catch (errorData) {
throw generateError(errorData);
}
@ -1423,22 +1472,24 @@ async function getUsers(filter = { page_size: 'all' }) {
return response.data.results;
}
async function getPreview(tid, jid) {
const { backendAPI } = config;
function getPreview(instance) {
return async function (id: number) {
const { backendAPI } = config;
let response = null;
try {
const url = `${backendAPI}/${jid !== null ? 'jobs' : 'tasks'}/${jid || tid}/preview`;
response = await Axios.get(url, {
proxy: config.proxy,
responseType: 'blob',
});
} catch (errorData) {
const code = errorData.response ? errorData.response.status : errorData.code;
throw new ServerError(`Could not get preview frame for the task ${tid} from the server`, code);
}
let response = null;
try {
const url = `${backendAPI}/${instance}/${id}/preview`;
response = await Axios.get(url, {
proxy: config.proxy,
responseType: 'blob',
});
} catch (errorData) {
const code = errorData.response ? errorData.response.status : errorData.code;
throw new ServerError(`Could not get preview for "${instance}/${id}"`, code);
}
return response.data;
return response.data;
};
}
async function getImageContext(jid, frame) {
@ -2139,30 +2190,6 @@ async function getCloudStorageContent(id, manifestPath) {
return response.data;
}
async function getCloudStoragePreview(id) {
const { backendAPI } = config;
let response = null;
try {
const url = `${backendAPI}/cloudstorages/${id}/preview`;
response = await workerAxios.get(url, {
params: enableOrganization(),
proxy: config.proxy,
responseType: 'arraybuffer',
});
} catch (errorData) {
throw generateError({
message: '',
response: {
...errorData.response,
data: String.fromCharCode.apply(null, new Uint8Array(errorData.response.data)),
},
});
}
return new Blob([new Uint8Array(response)]);
}
async function getCloudStorageStatus(id) {
const { backendAPI } = config;
@ -2196,12 +2223,12 @@ async function getOrganizations() {
let response = null;
try {
response = await fetchAll(`${backendAPI}/organizations?page_size`);
response = await fetchAll(`${backendAPI}/organizations`);
} catch (errorData) {
throw generateError(errorData);
}
return response;
return response.results;
}
async function createOrganization(data) {
@ -2530,6 +2557,7 @@ export default Object.freeze({
create: createProject,
delete: deleteProject,
exportDataset: exportDataset('projects'),
getPreview: getPreview('projects'),
backup: backupProject,
restore: restoreProject,
importDataset,
@ -2541,12 +2569,14 @@ export default Object.freeze({
create: createTask,
delete: deleteTask,
exportDataset: exportDataset('tasks'),
getPreview: getPreview('tasks'),
backup: backupTask,
restore: restoreTask,
}),
jobs: Object.freeze({
get: getJobs,
getPreview: getPreview('jobs'),
save: saveJob,
exportDataset: exportDataset('jobs'),
}),
@ -2616,7 +2646,7 @@ export default Object.freeze({
cloudStorages: Object.freeze({
get: getCloudStorages,
getContent: getCloudStorageContent,
getPreview: getCloudStoragePreview,
getPreview: getPreview('cloudstorages'),
getStatus: getCloudStorageStatus,
create: createCloudStorage,
delete: deleteCloudStorage,

@ -7,12 +7,12 @@ import {
deleteFrame,
restoreFrame,
getRanges,
getPreview,
clear as clearFrames,
findNotDeletedFrame,
getContextImage,
patchMeta,
getDeletedFrames,
decodePreview,
} from './frames';
import Issue from './issue';
import { checkObjectType } from './common';
@ -146,8 +146,9 @@ export function implementJob(Job) {
return '';
}
const frameData = await getPreview(this.taskId, this.id);
return frameData;
const preview = await serverProxy.jobs.getPreview(this.id);
const decoded = await decodePreview(preview);
return decoded;
};
Job.prototype.frames.contextImage.implementation = async function (frameId) {
@ -418,8 +419,12 @@ export function implementTask(Task) {
}
const data = await serverProxy.tasks.save(this.id, taskData);
// Temporary workaround for UI
const jobs = await serverProxy.jobs.get({
filter: JSON.stringify({ and: [{ '==': [{ var: 'task_id' }, data.id] }] }),
}, true);
this._updateTrigger.reset();
return new Task(data);
return new Task({ ...data, jobs: jobs.results });
}
const taskSpec: any = {
@ -481,7 +486,11 @@ export function implementTask(Task) {
}
const task = await serverProxy.tasks.create(taskSpec, taskDataSpec, onUpdate);
return new Task(task);
// Temporary workaround for UI
const jobs = await serverProxy.jobs.get({
filter: JSON.stringify({ and: [{ '==': [{ var: 'task_id' }, task.id] }] }),
}, true);
return new Task({ ...task, jobs: jobs.results });
};
Task.prototype.delete.implementation = async function () {
@ -547,8 +556,9 @@ export function implementTask(Task) {
return '';
}
const frameData = await getPreview(this.id);
return frameData;
const preview = await serverProxy.tasks.getPreview(this.id);
const decoded = await decodePreview(preview);
return decoded;
};
Task.prototype.frames.delete.implementation = async function (frame) {

@ -573,6 +573,7 @@ export class Task extends Session {
sorting_method: undefined,
source_storage: undefined,
target_storage: undefined,
progress: undefined,
};
const updateTrigger = new FieldUpdateTrigger();
@ -588,6 +589,21 @@ export class Task extends Session {
data.labels = [];
data.jobs = [];
// FIXME: progress should come from the server, not from segments
const progress = {
completedJobs: 0,
totalJobs: 0,
};
if (Array.isArray(initialData.segments)) {
for (const segment of initialData.segments) {
for (const job of segment.jobs) {
progress.totalJobs += 1;
if (job.stage === 'acceptance') progress.completedJobs += 1;
}
}
}
data.progress = progress;
data.files = Object.freeze({
server_files: [],
client_files: [],
@ -599,33 +615,29 @@ export class Task extends Session {
.map((labelData) => new Label(labelData)).filter((label) => !label.hasParent);
}
if (Array.isArray(initialData.segments)) {
for (const segment of initialData.segments) {
if (Array.isArray(segment.jobs)) {
for (const job of segment.jobs) {
const jobInstance = new Job({
url: job.url,
id: job.id,
assignee: job.assignee,
state: job.state,
stage: job.stage,
start_frame: segment.start_frame,
stop_frame: segment.stop_frame,
// following fields also returned when doing API request /jobs/<id>
// here we know them from task and append to constructor
task_id: data.id,
project_id: data.project_id,
labels: data.labels,
bug_tracker: data.bug_tracker,
mode: data.mode,
dimension: data.dimension,
data_compressed_chunk_type: data.data_compressed_chunk_type,
data_chunk_size: data.data_chunk_size,
});
data.jobs.push(jobInstance);
}
}
if (Array.isArray(initialData.jobs)) {
for (const job of initialData.jobs) {
const jobInstance = new Job({
url: job.url,
id: job.id,
assignee: job.assignee,
state: job.state,
stage: job.stage,
start_frame: job.start_frame,
stop_frame: job.stop_frame,
// the following fields are also returned by the API request /jobs/<id>;
// here we know them from the task and append them to the constructor
task_id: data.id,
project_id: data.project_id,
labels: data.labels,
bug_tracker: data.bug_tracker,
mode: data.mode,
dimension: data.dimension,
data_compressed_chunk_type: data.data_compressed_chunk_type,
data_chunk_size: data.data_chunk_size,
});
data.jobs.push(jobInstance);
}
}
@ -922,6 +934,9 @@ export class Task extends Session {
})
),
},
progress: {
get: () => data.progress,
},
_internalData: {
get: () => data,
},

@ -51,7 +51,7 @@ describe('Feature: get a list of jobs', () => {
test('get jobs by an unknown job id', async () => {
const result = await window.cvat.jobs.get({
taskID: 50,
jobID: 50,
});
expect(Array.isArray(result)).toBeTruthy();
expect(result).toHaveLength(0);

@ -35,8 +35,6 @@ describe('Feature: get projects', () => {
expect(result).toHaveLength(1);
expect(result[0]).toBeInstanceOf(Project);
expect(result[0].id).toBe(2);
// eslint-disable-next-line no-underscore-dangle
expect(result[0]._internalData.task_ids).toHaveLength(1);
});
test('get a project by an unknown id', async () => {

@ -233,78 +233,7 @@ const projectsDummyData = {
],
},
],
segments: [
{
start_frame: 0,
stop_frame: 99,
jobs: [
{
url: 'http://192.168.0.139:7000/api/jobs/1',
id: 1,
assignee: null,
status: 'completed',
stage: 'acceptance',
state: 'completed',
},
],
},
{
start_frame: 95,
stop_frame: 194,
jobs: [
{
url: 'http://192.168.0.139:7000/api/jobs/2',
id: 2,
assignee: null,
status: 'completed',
stage: 'acceptance',
state: 'completed',
},
],
},
{
start_frame: 190,
stop_frame: 289,
jobs: [
{
url: 'http://192.168.0.139:7000/api/jobs/3',
id: 3,
assignee: null,
status: 'completed',
stage: 'acceptance',
state: 'completed',
},
],
},
{
start_frame: 285,
stop_frame: 384,
jobs: [
{
url: 'http://192.168.0.139:7000/api/jobs/4',
id: 4,
assignee: null,
status: 'completed',
stage: 'acceptance',
state: 'completed',
},
],
},
{
start_frame: 380,
stop_frame: 431,
jobs: [
{
url: 'http://192.168.0.139:7000/api/jobs/5',
id: 5,
assignee: null,
status: 'completed',
stage: 'acceptance',
state: 'completed',
},
],
},
],
jobs: "http://localhost:7000/api/jobs?task_id=2",
data_chunk_size: 36,
data_compressed_chunk_type: 'imageset',
data_original_chunk_type: 'video',
@ -360,22 +289,7 @@ const tasksDummyData = {
attributes: [],
},
],
segments: [
{
start_frame: 0,
stop_frame: 0,
jobs: [
{
url: 'http://localhost:7000/api/jobs/112',
id: 112,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
},
],
},
],
jobs: "http://localhost:7000/api/jobs?task_id=102",
image_quality: 50,
start_frame: 0,
stop_frame: 0,
@ -414,22 +328,7 @@ const tasksDummyData = {
attributes: [],
},
],
segments: [
{
start_frame: 0,
stop_frame: 8,
jobs: [
{
url: 'http://localhost:7000/api/jobs/100',
id: 100,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
},
],
},
],
jobs: "http://localhost:7000/api/jobs?task_id=100",
image_quality: 50,
start_frame: 0,
stop_frame: 0,
@ -622,162 +521,7 @@ const tasksDummyData = {
attributes: [],
},
],
segments: [
{
start_frame: 0,
stop_frame: 499,
jobs: [
{
url: 'http://localhost:7000/api/jobs/10',
id: 101,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
},
],
},
{
start_frame: 495,
stop_frame: 994,
jobs: [
{
url: 'http://localhost:7000/api/jobs/11',
id: 102,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
},
],
},
{
start_frame: 990,
stop_frame: 1489,
jobs: [
{
url: 'http://localhost:7000/api/jobs/12',
id: 103,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
},
],
},
{
start_frame: 1485,
stop_frame: 1984,
jobs: [
{
url: 'http://localhost:7000/api/jobs/13',
id: 104,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
},
],
},
{
start_frame: 1980,
stop_frame: 2479,
jobs: [
{
url: 'http://localhost:7000/api/jobs/14',
id: 105,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
},
],
},
{
start_frame: 2475,
stop_frame: 2974,
jobs: [
{
url: 'http://localhost:7000/api/jobs/15',
id: 106,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
},
],
},
{
start_frame: 2970,
stop_frame: 3469,
jobs: [
{
url: 'http://localhost:7000/api/jobs/16',
id: 107,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
},
],
},
{
start_frame: 3465,
stop_frame: 3964,
jobs: [
{
url: 'http://localhost:7000/api/jobs/17',
id: 108,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
},
],
},
{
start_frame: 3960,
stop_frame: 4459,
jobs: [
{
url: 'http://localhost:7000/api/jobs/18',
id: 109,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
},
],
},
{
start_frame: 4455,
stop_frame: 4954,
jobs: [
{
url: 'http://localhost:7000/api/jobs/19',
id: 110,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
},
],
},
{
start_frame: 4950,
stop_frame: 5001,
jobs: [
{
url: 'http://localhost:7000/api/jobs/20',
id: 111,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
},
],
},
],
jobs: "http://localhost:7000/api/jobs?task_id=101",
image_quality: 50,
start_frame: 0,
stop_frame: 5001,
@ -858,18 +602,7 @@ const tasksDummyData = {
<circle r="1.5" stroke="black" fill="#b3b3b3" cx="61.10367965698242" cy="40.00627136230469" stroke-width="0.1" data-type="element node" data-element-id="4" data-node-id="4" data-label-id="58"></circle>
<circle r="1.5" stroke="black" fill="#b3b3b3" cx="27.49163818359375" cy="39.504600524902344" stroke-width="0.1" data-type="element node" data-element-id="5" data-node-id="5" data-label-id="59"></circle>`
}],
segments: [{
start_frame: 0,
stop_frame: 3,
jobs: [{
url: 'http://localhost:7000/api/jobs/40',
id: 40,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
}]
}],
jobs: "http://localhost:7000/api/jobs?task_id=40",
data_chunk_size: 17,
data_compressed_chunk_type: 'imageset',
data_original_chunk_type: 'imageset',
@ -1069,36 +802,7 @@ const tasksDummyData = {
attributes: [],
},
],
segments: [
{
start_frame: 0,
stop_frame: 4999,
jobs: [
{
url: 'http://localhost:7000/api/jobs/3',
id: 3,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
},
],
},
{
start_frame: 4995,
stop_frame: 5001,
jobs: [
{
url: 'http://localhost:7000/api/jobs/4',
id: 4,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
},
],
},
],
jobs: "http://localhost:7000/api/jobs?task_id=3",
image_quality: 50,
},
{
@ -1289,22 +993,7 @@ const tasksDummyData = {
attributes: [],
},
],
segments: [
{
start_frame: 0,
stop_frame: 74,
jobs: [
{
url: 'http://localhost:7000/api/jobs/2',
id: 2,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
},
],
},
],
jobs: "http://localhost:7000/api/jobs?task_id=2",
image_quality: 50,
},
{
@ -1494,27 +1183,273 @@ const tasksDummyData = {
attributes: [],
},
],
segments: [
{
start_frame: 0,
stop_frame: 8,
jobs: [
{
url: 'http://localhost:7000/api/jobs/1',
id: 1,
assignee: null,
status: 'annotation',
stage: "annotation",
state: "new",
},
],
},
],
jobs: "http://localhost:7000/api/jobs?task_id=1",
image_quality: 95,
},
],
};
const jobsDummyData = {
count: 2,
next: null,
previous: null,
results: [
{
url: 'http://localhost:7000/api/jobs/112',
id: 112,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
start_frame: 0,
stop_frame: 0,
task_id: 102,
},
{
url: 'http://localhost:7000/api/jobs/100',
id: 100,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
start_frame: 0,
stop_frame: 8,
task_id: 100,
},
{
url: 'http://localhost:7000/api/jobs/40',
id: 40,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
start_frame: 0,
stop_frame: 3,
task_id: 40,
},
{
url: 'http://localhost:7000/api/jobs/20',
id: 111,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
start_frame: 4950,
stop_frame: 5001,
task_id: 101,
},
{
url: 'http://localhost:7000/api/jobs/19',
id: 110,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
start_frame: 4455,
stop_frame: 4954,
task_id: 101,
},
{
url: 'http://localhost:7000/api/jobs/18',
id: 109,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
start_frame: 3960,
stop_frame: 4459,
task_id: 101,
},
{
url: 'http://localhost:7000/api/jobs/17',
id: 108,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
start_frame: 3465,
stop_frame: 3964,
task_id: 101,
},
{
url: 'http://localhost:7000/api/jobs/16',
id: 107,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
start_frame: 2970,
stop_frame: 3469,
task_id: 101,
},
{
url: 'http://localhost:7000/api/jobs/15',
id: 106,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
start_frame: 2475,
stop_frame: 2974,
task_id: 101,
},
{
url: 'http://localhost:7000/api/jobs/14',
id: 105,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
start_frame: 1980,
stop_frame: 2479,
task_id: 101,
},
{
url: 'http://localhost:7000/api/jobs/13',
id: 104,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
start_frame: 1485,
stop_frame: 1984,
task_id: 101,
},
{
url: 'http://localhost:7000/api/jobs/12',
id: 103,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
start_frame: 990,
stop_frame: 1489,
task_id: 101,
},
{
url: 'http://localhost:7000/api/jobs/11',
id: 102,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
start_frame: 495,
stop_frame: 994,
task_id: 101,
},
{
url: 'http://localhost:7000/api/jobs/10',
id: 101,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
start_frame: 0,
stop_frame: 499,
task_id: 101,
},
{
url: 'http://192.168.0.139:7000/api/jobs/9',
id: 9,
assignee: null,
status: 'completed',
stage: 'acceptance',
state: 'completed',
start_frame: 0,
stop_frame: 99,
task_id: 2,
},
{
url: 'http://192.168.0.139:7000/api/jobs/8',
id: 8,
assignee: null,
status: 'completed',
stage: 'acceptance',
state: 'completed',
start_frame: 95,
stop_frame: 194,
task_id: 2,
},
{
url: 'http://192.168.0.139:7000/api/jobs/7',
id: 7,
assignee: null,
status: 'completed',
stage: 'acceptance',
state: 'completed',
start_frame: 190,
stop_frame: 289,
task_id: 2,
},
{
url: 'http://192.168.0.139:7000/api/jobs/6',
id: 6,
assignee: null,
status: 'completed',
stage: 'acceptance',
state: 'completed',
start_frame: 285,
stop_frame: 384,
task_id: 2,
},
{
url: 'http://192.168.0.139:7000/api/jobs/5',
id: 5,
assignee: null,
status: 'completed',
stage: 'acceptance',
state: 'completed',
start_frame: 380,
stop_frame: 431,
task_id: 2,
},
{
url: 'http://localhost:7000/api/jobs/4',
id: 4,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
start_frame: 4995,
stop_frame: 5001,
task_id: 3,
},
{
url: 'http://localhost:7000/api/jobs/3',
id: 3,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
start_frame: 0,
stop_frame: 4999,
task_id: 3,
},
{
url: 'http://localhost:7000/api/jobs/2',
id: 2,
assignee: null,
status: 'annotation',
stage: 'annotation',
state: 'new',
start_frame: 0,
stop_frame: 74,
task_id: 2,
},
{
url: 'http://localhost:7000/api/jobs/1',
id: 1,
assignee: null,
status: 'annotation',
stage: "annotation",
state: "new",
start_frame: 0,
stop_frame: 8,
task_id: 1,
},
]
}
const taskAnnotationsDummyData = {
112: {
version: 21,
@ -3454,4 +3389,5 @@ module.exports = {
cloudStoragesDummyData,
webhooksDummyData,
webhooksEventsDummyData,
jobsDummyData,
};

@ -1,4 +1,5 @@
// Copyright (C) 2020-2022 Intel Corporation
// Copyright (C) 2022-2023 CVAT.ai Corporation
//
// SPDX-License-Identifier: MIT
@ -15,6 +16,7 @@ const {
cloudStoragesDummyData,
webhooksDummyData,
webhooksEventsDummyData,
jobsDummyData,
} = require('./dummy-data.mock');
function QueryStringToJSON(query, ignoreList = []) {
@ -215,48 +217,54 @@ class ServerProxy {
}
async function getJobs(filter = {}) {
function makeJsonFilter(jsonExpr) {
if (!jsonExpr) {
return (job) => true;
}
// This function only covers test cases. Extend it if needed.
function escapeRegExp(string) {
return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
}
let pattern = JSON.stringify({
and: [{ '==': [{ var: 'task_id' }, '<id>'] }]
});
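// e.g. the query filter '{"and":[{"==":[{"var":"task_id"},42]}]}' matches this pattern, capturing 42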
pattern = escapeRegExp(pattern).replace('"<id>"', '(\\d+)');
const matches = jsonExpr.match(pattern);
const task_id = Number.parseInt(matches[1]);
return (job) => job.task_id === task_id;
};
const id = filter.id || null;
const jobs = tasksDummyData.results
.reduce((acc, task) => {
for (const segment of task.segments) {
for (const job of segment.jobs) {
const copy = JSON.parse(JSON.stringify(job));
copy.start_frame = segment.start_frame;
copy.stop_frame = segment.stop_frame;
copy.task_id = task.id;
copy.dimension = task.dimension;
copy.data_compressed_chunk_type = task.data_compressed_chunk_type;
copy.data_chunk_size = task.data_chunk_size;
copy.bug_tracker = task.bug_tracker;
copy.mode = task.mode;
copy.labels = task.labels;
acc.push(copy);
}
}
const jobs = jobsDummyData.results.filter(makeJsonFilter(filter.filter || null));
for (const job of jobs) {
const task = tasksDummyData.results.find((task) => task.id === job.task_id);
job.dimension = task.dimension;
job.data_compressed_chunk_type = task.data_compressed_chunk_type;
job.data_chunk_size = task.data_chunk_size;
job.bug_tracker = task.bug_tracker;
job.mode = task.mode;
job.labels = task.labels;
}
return acc;
}, [])
.filter((job) => job.id === id);
if (id !== null) {
// A specific object is requested
return jobs.filter((job) => job.id === id)[0] || null;
}
return (
jobs[0] || {
jobs ? {
results: jobs,
count: jobs.length,
} : {
detail: 'Not found.',
}
);
}
async function saveJob(id, jobData) {
const object = tasksDummyData.results
.reduce((acc, task) => {
for (const segment of task.segments) {
for (const job of segment.jobs) {
acc.push(job);
}
}
return acc;
}, [])
const object = jobsDummyData.results
.filter((job) => job.id === id)[0];
for (const prop in jobData) {
@ -510,6 +518,7 @@ class ServerProxy {
save: saveTask,
create: createTask,
delete: deleteTask,
getPreview: getPreview,
}),
writable: false,
},
@ -518,6 +527,7 @@ class ServerProxy {
value: Object.freeze({
get: getJobs,
save: saveJob,
getPreview: getPreview,
}),
writable: false,
},

@ -1,4 +1,4 @@
# Copyright (C) 2022 CVAT.ai Corporation
# Copyright (C) 2022-2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
@ -34,7 +34,12 @@ def get_paginated_collection(
else:
results.extend(page_contents.results)
if not page_contents.next:
if (
page_contents is not None
and not page_contents.next
or page_contents is None
and not json.loads(response.data).get("next")
):
break
page += 1

@ -56,7 +56,9 @@ class Issue(
def get_comments(self) -> List[Comment]:
return [
Comment(self._client, m)
for m in get_paginated_collection(self.api.list_comments_endpoint, id=self.id)
for m in get_paginated_collection(
self._client.api_client.comments_api.list_endpoint, issue_id=str(self.id)
)
]

@ -163,7 +163,9 @@ class Job(
def get_issues(self) -> List[Issue]:
return [
Issue(self._client, m)
for m in get_paginated_collection(self.api.list_issues_endpoint, id=self.id)
for m in get_paginated_collection(
self._client.api_client.issues_api.list_endpoint, job_id=str(self.id)
)
]
def get_commits(self) -> List[models.IJobCommit]:

@ -127,7 +127,9 @@ class Project(
def get_tasks(self) -> List[Task]:
return [
Task(self._client, m)
for m in get_paginated_collection(self.api.list_tasks_endpoint, id=self.id)
for m in get_paginated_collection(
self._client.api_client.tasks_api.list_endpoint, project_id=str(self.id)
)
]
def get_preview(

@ -305,7 +305,9 @@ class Task(
def get_jobs(self) -> List[Job]:
return [
Job(self._client, model=m)
for m in get_paginated_collection(self.api.list_jobs_endpoint, id=self.id)
for m in get_paginated_collection(
self._client.api_client.jobs_api.list_endpoint, task_id=str(self.id)
)
]
def get_meta(self) -> models.IDataMetaRead:

@ -8,7 +8,9 @@ import shutil
from abc import ABCMeta, abstractmethod
from enum import Enum, auto
from pathlib import Path
from typing import Callable, Mapping, Type, TypeVar
from typing import Any, Callable, Dict, List, Mapping, Type, TypeVar, Union, cast
from attrs import define
import cvat_sdk.models as models
from cvat_sdk.api_client.model_utils import OpenApiModel, to_json
@ -37,7 +39,21 @@ class UpdatePolicy(Enum):
"""
_ModelType = TypeVar("_ModelType", bound=OpenApiModel)
_CacheObject = Dict[str, Any]
class _CacheObjectModel(metaclass=ABCMeta):
@abstractmethod
def dump(self) -> _CacheObject:
...
@classmethod
@abstractmethod
def load(cls, obj: _CacheObject):
...
_ModelType = TypeVar("_ModelType", bound=Union[OpenApiModel, _CacheObjectModel])
class CacheManager(metaclass=ABCMeta):
@ -67,15 +83,37 @@ class CacheManager(metaclass=ABCMeta):
def project_json_path(self, project_id: int) -> Path:
return self.project_dir(project_id) / "project.json"
def load_model(self, path: Path, model_type: Type[_ModelType]) -> _ModelType:
def _load_object(self, path: Path) -> _CacheObject:
with open(path, "rb") as f:
return model_type._new_from_openapi_data(**json.load(f))
return json.load(f)
def save_model(self, path: Path, model: OpenApiModel) -> None:
def _save_object(self, path: Path, obj: _CacheObject) -> None:
with atomic_writer(path, "w", encoding="UTF-8") as f:
json.dump(to_json(model), f, indent=4)
json.dump(obj, f, indent=4)
print(file=f) # add final newline
def _deserialize_model(self, obj: _CacheObject, model_type: _ModelType) -> _ModelType:
if issubclass(model_type, OpenApiModel):
return cast(OpenApiModel, model_type)._new_from_openapi_data(**obj)
elif issubclass(model_type, _CacheObjectModel):
return cast(_CacheObjectModel, model_type).load(obj)
else:
raise NotImplementedError("Unexpected model type")
def _serialize_model(self, model: _ModelType) -> _CacheObject:
if isinstance(model, OpenApiModel):
return to_json(model)
elif isinstance(model, _CacheObjectModel):
return model.dump()
else:
raise NotImplementedError("Unexpected model type")
def load_model(self, path: Path, model_type: Type[_ModelType]) -> _ModelType:
return self._deserialize_model(self._load_object(path), model_type)
def save_model(self, path: Path, model: _ModelType) -> None:
return self._save_object(path, self._serialize_model(model))
@abstractmethod
def retrieve_task(self, task_id: int) -> Task:
...
@ -178,7 +216,7 @@ class _CacheManagerOnline(CacheManager):
# There are currently no files cached alongside project.json,
# so we don't need to check if we need to purge them.
self.save_model(project_json_path, project._model)
self.save_model(project_json_path, _OfflineProjectModel.from_entity(project))
return project
@ -207,10 +245,44 @@ class _CacheManagerOffline(CacheManager):
def retrieve_project(self, project_id: int) -> Project:
self._logger.info(f"Retrieving project {project_id} from cache...")
return Project(
self._client, self.load_model(self.project_json_path(project_id), models.ProjectRead)
cached_model = self.load_model(self.project_json_path(project_id), _OfflineProjectModel)
return _OfflineProjectProxy(self._client, cached_model, cache_manager=self)
@define
class _OfflineProjectModel(_CacheObjectModel):
api_model: models.IProjectRead
task_ids: List[int]
def dump(self) -> _CacheObject:
return {
"model": to_json(self.api_model),
"tasks": self.task_ids,
}
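# Illustrative contents of the resulting project.json (ids are examples):
# {
#     "model": { ...serialized ProjectRead fields... },
#     "tasks": [1, 2, 3]
# }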
@classmethod
def load(cls, obj: _CacheObject):
return cls(
api_model=obj["model"],
task_ids=obj["tasks"],
)
@classmethod
def from_entity(cls, entity: Project):
return cls(api_model=entity._model, task_ids=[t.id for t in entity.get_tasks()])
class _OfflineProjectProxy(Project):
def __init__(
self, client: Client, cached_model: _OfflineProjectModel, *, cache_manager: CacheManager
) -> None:
super().__init__(client, cached_model.api_model)
self._offline_model = cached_model
self._cache_manager = cache_manager
def get_tasks(self) -> List[Task]:
return [self._cache_manager.retrieve_task(t) for t in self._offline_model.task_ids]
_CACHE_MANAGER_CLASSES: Mapping[UpdatePolicy, Type[CacheManager]] = {
UpdatePolicy.IF_MISSING_OR_STALE: _CacheManagerOnline,

@ -79,7 +79,7 @@ class ProjectVisionDataset(torchvision.datasets.VisionDataset):
)
self._logger.info("Fetching project tasks...")
tasks = [cache_manager.retrieve_task(task_id) for task_id in project.tasks]
tasks = project.get_tasks()
if task_filter is not None:
tasks = list(filter(task_filter, tasks))

@ -1,17 +1,20 @@
// Copyright (C) 2020-2022 Intel Corporation
// Copyright (C) 2023 CVAT.ai Corporation
//
// SPDX-License-Identifier: MIT
import React, { ReactPortal, useEffect, useRef } from 'react';
import React, {
ReactPortal, useEffect, useRef,
} from 'react';
import ReactDOM from 'react-dom';
import Tag from 'antd/lib/tag';
import { CheckCircleOutlined, CloseCircleOutlined } from '@ant-design/icons';
import { CheckCircleOutlined, CloseCircleOutlined, WarningOutlined } from '@ant-design/icons';
import { Issue } from 'cvat-core-wrapper';
import CVATTooltip from 'components/common/cvat-tooltip';
interface Props {
id: number;
message: string;
issue: Issue;
top: number;
left: number;
angle: number;
@ -24,11 +27,11 @@ interface Props {
export default function HiddenIssueLabel(props: Props): ReactPortal {
const {
id, message, top, left, angle, scale, resolved, onClick, highlight, blur,
issue, top, left, angle, scale, resolved, onClick, highlight, blur,
} = props;
const { id, comments } = issue;
const ref = useRef<HTMLElement>(null);
useEffect(() => {
if (!resolved) {
setTimeout(highlight);
@ -39,7 +42,7 @@ export default function HiddenIssueLabel(props: Props): ReactPortal {
const elementID = `cvat-hidden-issue-label-${id}`;
return ReactDOM.createPortal(
<CVATTooltip title={message}>
<CVATTooltip title={comments[0]?.message || 'Messages not found'}>
<Tag
ref={ref}
id={elementID}
@ -64,7 +67,7 @@ export default function HiddenIssueLabel(props: Props): ReactPortal {
) : (
<CloseCircleOutlined className='cvat-hidden-issue-unsolved-indicator' />
)}
{message}
{comments[0]?.message || <WarningOutlined />}
</Tag>
</CVATTooltip>,
window.document.getElementById('cvat_canvas_attachment_board') as HTMLElement,

@ -1,12 +1,10 @@
// Copyright (C) 2020-2022 Intel Corporation
// Copyright (C) 2023 CVAT.ai Corporation
//
// SPDX-License-Identifier: MIT
import React, {
useState,
useEffect,
useRef,
useCallback,
useState, useEffect, useRef, useCallback,
} from 'react';
import ReactDOM from 'react-dom';
import { useDispatch } from 'react-redux';
@ -17,14 +15,15 @@ import Comment from 'antd/lib/comment';
import Text from 'antd/lib/typography/Text';
import Title from 'antd/lib/typography/Title';
import Button from 'antd/lib/button';
import Spin from 'antd/lib/spin';
import Input from 'antd/lib/input';
import moment from 'moment';
import CVATTooltip from 'components/common/cvat-tooltip';
import { Issue, Comment as CommentModel } from 'cvat-core-wrapper';
import { deleteIssueAsync } from 'actions/review-actions';
interface Props {
id: number;
comments: any[];
issue: Issue;
left: number;
top: number;
resolved: boolean;
@ -44,8 +43,7 @@ export default function IssueDialog(props: Props): JSX.Element {
const [currentText, setCurrentText] = useState<string>('');
const dispatch = useDispatch();
const {
comments,
id,
issue,
left,
top,
scale,
@ -60,6 +58,8 @@ export default function IssueDialog(props: Props): JSX.Element {
blur,
} = props;
const { id, comments } = issue;
useEffect(() => {
if (!resolved) {
setTimeout(highlight);
@ -85,7 +85,7 @@ export default function IssueDialog(props: Props): JSX.Element {
}, []);
const lines = comments.map(
(_comment: any): JSX.Element => {
(_comment: CommentModel): JSX.Element => {
const created = _comment.createdDate ? moment(_comment.createdDate) : moment(moment.now());
const diff = created.fromNow();
@ -128,7 +128,9 @@ export default function IssueDialog(props: Props): JSX.Element {
</Col>
</Row>
<Row className='cvat-issue-dialog-chat' justify='start'>
<Col style={{ display: 'block' }}>{lines}</Col>
{
lines.length > 0 ? <Col style={{ display: 'block' }}>{lines}</Col> : <Spin />
}
</Row>
<Row className='cvat-issue-dialog-input' justify='start'>
<Col span={24}>

@ -118,13 +118,12 @@ export default function IssueAggregatorComponent(): JSX.Element | null {
issueDialogs.push(
<IssueDialog
key={issue.id}
id={issue.id}
issue={issue}
top={minY}
left={minX}
angle={-geometry.angle}
scale={1 / geometry.scale}
isFetching={issueFetching !== null}
comments={issue.comments}
resolved={issueResolved}
highlight={highlight}
blur={blur}
@ -143,17 +142,16 @@ export default function IssueAggregatorComponent(): JSX.Element | null {
}}
/>,
);
} else if (issue.comments.length) {
} else {
issueLabels.push(
<HiddenIssueLabel
key={issue.id}
id={issue.id}
issue={issue}
top={minY}
left={minX}
angle={-geometry.angle}
scale={1 / geometry.scale}
resolved={issueResolved}
message={issue.comments[issue.comments.length - 1].message}
highlight={highlight}
blur={blur}
onClick={() => {

@ -33,9 +33,9 @@ type Props = TaskPageComponentProps & RouteComponentProps<{ id: string }>;
class TaskPageComponent extends React.PureComponent<Props> {
public componentDidMount(): void {
const { task, fetching, getTask } = this.props;
const { fetching, getTask } = this.props;
if (task === null && !fetching) {
if (!fetching) {
getTask();
}
}

@ -75,8 +75,8 @@ class TaskItemComponent extends React.PureComponent<TaskItemProps & RouteCompone
private renderProgress(): JSX.Element {
const { taskInstance, activeInference, cancelAutoAnnotation } = this.props;
// Count number of jobs and performed jobs
const numOfJobs = taskInstance.jobs.length;
const numOfCompleted = taskInstance.jobs.filter((job: any): boolean => job.stage === 'acceptance').length;
const numOfJobs = taskInstance.progress.totalJobs;
const numOfCompleted = taskInstance.progress.completedJobs;
// Progress appearance depends on number of jobs
let progressColor = null;

@ -35,7 +35,7 @@ function mapStateToProps(state: CombinedState, own: OwnProps): StateToProps {
const id = own.taskID;
return {
hidden: state.tasks.hideEmpty && task.jobs.length === 0,
hidden: state.tasks.hideEmpty && task.progress.totalJobs === 0,
deleted: id in deletes ? deletes[id] === true : false,
taskInstance: task,
activeInference: state.models.inferences[id] || null,

@ -15,6 +15,8 @@ import {
ShapeType, LabelType, ModelKind, ModelProviders, ModelReturnType,
} from 'cvat-core/src/enums';
import { Storage, StorageData } from 'cvat-core/src/storage';
import Issue from 'cvat-core/src/issue';
import Comment from 'cvat-core/src/comment';
import { SocialAuthMethods, SocialAuthMethod } from 'cvat-core/src/auth-methods';
const cvat: any = _cvat;
@ -38,6 +40,8 @@ export {
Storage,
Webhook,
SocialAuthMethod,
Issue,
Comment,
MLModel,
ModelKind,
ModelProviders,

@ -1,5 +1,5 @@
# Copyright (C) 2021-2022 Intel Corporation
# Copyright (C) 2022 CVAT.ai Corporation
# Copyright (C) 2022-2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
@ -176,7 +176,7 @@ class _DbTestBase(APITestCase):
def _get_jobs(self, task_id):
with ForceLogin(self.admin, self.client):
values = get_paginated_collection(lambda page:
self.client.get("/api/tasks/{}/jobs?page={}".format(task_id, page))
self.client.get("/api/jobs?task_id={}&page={}".format(task_id, page))
)
return values

@ -1,4 +1,5 @@
# Copyright (C) 2018-2022 Intel Corporation
# Copyright (C) 2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
@ -200,7 +201,7 @@ class GitDatasetRepoTest(APITestCase):
def _get_jobs(self, task_id):
with ForceLogin(self.admin, self.client):
values = get_paginated_collection(lambda page:
self.client.get("/api/tasks/{}/jobs?page={}".format(task_id, page))
self.client.get("/api/jobs?task_id={}&page={}".format(task_id, page))
)
return values

@ -1,28 +1,43 @@
# Copyright (C) 2022 Intel Corporation
# Copyright (C) 2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
from rest_framework import filters
from typing import Any, Dict, Iterator, Optional
from functools import reduce
import operator
import json
from django_filters import FilterSet
from django_filters.filterset import BaseFilterSet
from django_filters.rest_framework import DjangoFilterBackend
from django.db.models import Q
from rest_framework.compat import coreapi, coreschema
from django.db.models.query import QuerySet
from django.utils.translation import gettext_lazy as _
from django.utils.encoding import force_str
from rest_framework import filters
from rest_framework.compat import coreapi, coreschema
from rest_framework.exceptions import ValidationError
class SearchFilter(filters.SearchFilter):
DEFAULT_FILTER_FIELDS_ATTR = 'filter_fields'
DEFAULT_LOOKUP_MAP_ATTR = 'lookup_fields'
def get_lookup_fields(view, fields: Optional[Iterator[str]] = None) -> Dict[str, str]:
if fields is None:
fields = getattr(view, DEFAULT_FILTER_FIELDS_ATTR, None) or []
lookup_overrides = getattr(view, DEFAULT_LOOKUP_MAP_ATTR, None) or {}
lookup_fields = {
field: lookup_overrides.get(field, field)
for field in fields
}
return lookup_fields
class SearchFilter(filters.SearchFilter):
def get_search_fields(self, view, request):
search_fields = getattr(view, 'search_fields') or []
lookup_fields = {field:field for field in search_fields}
view_lookup_fields = getattr(view, 'lookup_fields', {})
keys_to_update = set(search_fields) & set(view_lookup_fields.keys())
for key in keys_to_update:
lookup_fields[key] = view_lookup_fields[key]
return lookup_fields.values()
return get_lookup_fields(view, search_fields).values()
def get_schema_fields(self, view):
assert coreapi is not None, 'coreapi must be installed to use `get_schema_fields()`'
@ -61,6 +76,7 @@ class SearchFilter(filters.SearchFilter):
class OrderingFilter(filters.OrderingFilter):
ordering_param = 'sort'
def get_ordering(self, request, queryset, view):
ordering = []
lookup_fields = self._get_lookup_fields(request, queryset, view)
@ -75,10 +91,8 @@ class OrderingFilter(filters.OrderingFilter):
def _get_lookup_fields(self, request, queryset, view):
ordering_fields = self.get_valid_fields(queryset, view, {'request': request})
lookup_fields = {field:field for field, _ in ordering_fields}
lookup_fields.update(getattr(view, 'lookup_fields', {}))
return lookup_fields
ordering_fields = [v[0] for v in ordering_fields]
return get_lookup_fields(view, ordering_fields)
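# e.g. (hypothetical): /api/tasks?sort=-updated_date,name orders by
# updated_date descending, then by name ascending.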
def get_schema_fields(self, view):
assert coreapi is not None, 'coreapi must be installed to use `get_schema_fields()`'
@ -116,6 +130,7 @@ class OrderingFilter(filters.OrderingFilter):
}]
class JsonLogicFilter(filters.BaseFilterBackend):
Rules = Dict[str, Any]
filter_param = 'filter'
filter_title = _('Filter')
filter_description = _('A filter term.')
@ -156,21 +171,32 @@ class JsonLogicFilter(filters.BaseFilterBackend):
else:
raise ValidationError(f'filter: {op} operation with {args} arguments is not implemented')
def _parse_query(self, json_rules: str) -> Rules:
try:
rules = json.loads(json_rules)
if not len(rules):
raise ValidationError(f"filter shouldn't be empty")
except json.decoder.JSONDecodeError:
raise ValidationError('filter: JSON syntax should be used')
return rules
def apply_filter(self,
queryset: QuerySet, parsed_rules: Rules, *, lookup_fields: Dict[str, Any]
) -> QuerySet:
try:
q_object = self._build_Q(parsed_rules, lookup_fields)
except KeyError as ex:
raise ValidationError(f'filter: {str(ex)} term is not supported')
return queryset.filter(q_object)
def filter_queryset(self, request, queryset, view):
json_rules = request.query_params.get(self.filter_param)
if json_rules:
try:
rules = json.loads(json_rules)
if not len(rules):
raise ValidationError(f"filter shouldn't be empty")
except json.decoder.JSONDecodeError:
raise ValidationError(f'filter: Json syntax should be used')
lookup_fields = self._get_lookup_fields(request, view)
try:
q_object = self._build_Q(rules, lookup_fields)
except KeyError as ex:
raise ValidationError(f'filter: {str(ex)} term is not supported')
return queryset.filter(q_object)
parsed_rules = self._parse_query(json_rules)
lookup_fields = self._get_lookup_fields(view)
queryset = self.apply_filter(queryset, parsed_rules, lookup_fields=lookup_fields)
return queryset
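# A sketch of the expected query format (hypothetical field values):
# /api/tasks?filter={"and":[{"==":[{"var":"owner"},"admin"]},{">=":[{"var":"id"},5]}]}
# _parse_query() decodes the JSON, then apply_filter() turns the rules into
# Q objects, roughly Q(owner__username='admin') & Q(id__gte=5) here.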
@ -210,9 +236,103 @@ class JsonLogicFilter(filters.BaseFilterBackend):
},
]
def _get_lookup_fields(self, request, view):
filter_fields = getattr(view, 'filter_fields', [])
lookup_fields = {field:field for field in filter_fields}
lookup_fields.update(getattr(view, 'lookup_fields', {}))
return lookup_fields
def _get_lookup_fields(self, view):
return get_lookup_fields(view)
class SimpleFilter(DjangoFilterBackend):
"""
A simple filter, useful for small search queries and manually-edited
requests.
Argument types are numbers and strings. The only available check is equality.
Operators (e.g. or, less, greater, not) are not supported.
Multiple filters are joined with '&' as separate query params.
"""
filter_desc = _('A simple equality filter for the {field_name} field')
reserved_names = (
JsonLogicFilter.filter_param,
OrderingFilter.ordering_param,
SearchFilter.search_param,
)
filter_fields_attr = 'simple_filters'
class MappingFiltersetBase(BaseFilterSet):
_filter_name_map_attr = 'filter_names'
@classmethod
def get_filter_name(cls, field_name, lookup_expr):
filter_names = getattr(cls, cls._filter_name_map_attr, {})
field_name = super().get_filter_name(field_name, lookup_expr)
if filter_names:
# Map names after a lookup suffix is applied to allow
# mapping specific filters with lookups
field_name = filter_names.get(field_name, field_name)
if field_name in SimpleFilter.reserved_names:
raise ValueError(f'Field name {field_name} is reserved')
return field_name
filterset_base = MappingFiltersetBase
def get_filterset_class(self, view, queryset=None):
lookup_fields = self.get_lookup_fields(view)
if not lookup_fields or not queryset:
return None
MetaBase = getattr(self.filterset_base, 'Meta', object)
class AutoFilterSet(self.filterset_base, metaclass=FilterSet.__class__):
filter_names = { v: k for k, v in lookup_fields.items() }
class Meta(MetaBase): # pylint: disable=useless-object-inheritance
model = queryset.model
fields = list(lookup_fields.values())
return AutoFilterSet
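# Example (hypothetical): with lookup_fields = {'name': 'display_name'},
# AutoFilterSet gets filter_names = {'display_name': 'name'}, so the query
# parameter keeps the public name 'name' while the filter itself runs
# against the model's 'display_name' column.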
def get_lookup_fields(self, view):
simple_filters = getattr(view, self.filter_fields_attr, None)
if simple_filters:
for k in self.reserved_names:
assert k not in simple_filters, \
f"Query parameter '{k}' is reserved, try to change the filter name."
return get_lookup_fields(view, fields=simple_filters)
def get_schema_fields(self, view):
assert coreapi is not None, 'coreapi must be installed to use `get_schema_fields()`'
assert coreschema is not None, 'coreschema must be installed to use `get_schema_fields()`'
lookup_fields = self.get_lookup_fields(view)
return [
coreapi.Field(
name=field_name,
location='query',
schema={
'type': 'string',
}
) for field_name in lookup_fields
]
def get_schema_operation_parameters(self, view):
lookup_fields = self.get_lookup_fields(view)
parameters = []
for field_name in lookup_fields:
parameters.append({
'name': field_name,
'in': 'query',
'description': force_str(self.filter_desc.format_map({'field_name': field_name})),
'schema': {
'type': 'string',
},
})
return parameters

@ -22,6 +22,41 @@ from cvat.apps.engine.utils import parse_specific_attributes
from drf_spectacular.utils import OpenApiExample, extend_schema_field, extend_schema_serializer
from cvat.apps.engine.view_utils import build_field_filter_params, get_list_view_name, reverse
@extend_schema_field(serializers.URLField)
class HyperlinkedModelViewSerializer(serializers.Serializer):
key_field = 'pk'
def __init__(self, view_name=None, *, filter_key=None, **kwargs):
if isinstance(view_name, type) and issubclass(view_name, models.models.Model):
view_name = get_list_view_name(view_name)
else:
assert isinstance(view_name, str)
kwargs['read_only'] = True
super().__init__(**kwargs)
self.view_name = view_name
self.filter_key = filter_key
def get_attribute(self, instance):
return instance
def to_representation(self, instance):
request = self.context.get('request')
if not request:
return None
return serializers.Hyperlink(
reverse(self.view_name, request=request,
query_params=build_field_filter_params(
self.filter_key, getattr(instance, self.key_field)
)),
instance
)
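# For example (hypothetical ids), declaring
# jobs = HyperlinkedModelViewSerializer(models.Job, filter_key='task_id')
# on a task serializer renders a link such as http://host/api/jobs?task_id=42
# for the task with pk=42, instead of embedding the job objects themselves.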
class BasicUserSerializer(serializers.ModelSerializer):
def validate(self, attrs):
if hasattr(self, 'initial_data'):
@ -193,13 +228,14 @@ class JobReadSerializer(serializers.ModelSerializer):
mode = serializers.ReadOnlyField(source='segment.task.mode')
bug_tracker = serializers.CharField(max_length=2000, source='get_bug_tracker',
allow_null=True, read_only=True)
issues = HyperlinkedModelViewSerializer(models.Issue, filter_key='job_id')
class Meta:
model = models.Job
fields = ('url', 'id', 'task_id', 'project_id', 'assignee',
'dimension', 'labels', 'bug_tracker', 'status', 'stage', 'state', 'mode',
'start_frame', 'stop_frame', 'data_chunk_size', 'data_compressed_chunk_type',
'updated_date',)
'updated_date', 'issues')
read_only_fields = fields
class JobWriteSerializer(serializers.ModelSerializer):
@ -526,6 +562,7 @@ class TaskReadSerializer(serializers.ModelSerializer):
dimension = serializers.CharField(allow_blank=True, required=False)
target_storage = StorageSerializer(required=False, allow_null=True)
source_storage = StorageSerializer(required=False, allow_null=True)
jobs = HyperlinkedModelViewSerializer(models.Job, filter_key='task_id')
class Meta:
model = models.Task
@ -533,7 +570,7 @@ class TaskReadSerializer(serializers.ModelSerializer):
'bug_tracker', 'created_date', 'updated_date', 'overlap', 'segment_size',
'status', 'labels', 'segments', 'data_chunk_size', 'data_compressed_chunk_type',
'data_original_chunk_type', 'size', 'image_quality', 'data', 'dimension',
'subset', 'organization', 'target_storage', 'source_storage',
'subset', 'organization', 'target_storage', 'source_storage', 'jobs',
)
read_only_fields = fields
extra_kwargs = {
@ -760,12 +797,6 @@ class TaskWriteSerializer(WriteOnceMixin, serializers.ModelSerializer):
return attrs
class ProjectSearchSerializer(serializers.ModelSerializer):
class Meta:
model = models.Project
fields = ('id', 'name')
read_only_fields = ('name',)
class ProjectReadSerializer(serializers.ModelSerializer):
labels = LabelSerializer(many=True, source='label_set', partial=True, default=[], read_only=True)
owner = BasicUserSerializer(required=False, read_only=True)
@ -774,17 +805,15 @@ class ProjectReadSerializer(serializers.ModelSerializer):
dimension = serializers.CharField(max_length=16, required=False, read_only=True, allow_null=True)
target_storage = StorageSerializer(required=False, allow_null=True, read_only=True)
source_storage = StorageSerializer(required=False, allow_null=True, read_only=True)
tasks = HyperlinkedModelViewSerializer(models.Task, filter_key='project_id')
class Meta:
model = models.Project
fields = ('url', 'id', 'name', 'labels', 'tasks', 'owner', 'assignee',
fields = ('url', 'id', 'name', 'labels', 'tasks', 'owner', 'assignee',
'bug_tracker', 'task_subsets', 'created_date', 'updated_date', 'status',
'dimension', 'organization', 'target_storage', 'source_storage',
)
read_only_fields = ('created_date', 'updated_date', 'status', 'owner',
'assignee', 'task_subsets', 'dimension', 'organization', 'tasks',
'target_storage', 'source_storage',
)
read_only_fields = fields
extra_kwargs = { 'organization': { 'allow_null': True } }
def to_representation(self, instance):
@ -1109,7 +1138,7 @@ class IssueReadSerializer(serializers.ModelSerializer):
position = serializers.ListField(
child=serializers.FloatField(), allow_empty=False
)
comments = CommentReadSerializer(many=True)
comments = HyperlinkedModelViewSerializer(models.Comment, filter_key='issue_id')
class Meta:
model = models.Issue

@ -1,4 +1,5 @@
# Copyright (C) 2020-2022 Intel Corporation
# Copyright (C) 2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
@ -1224,7 +1225,7 @@ class ProjectListOfTasksAPITestCase(APITestCase):
def _run_api_v2_projects_id_tasks(self, user, pid):
with ForceLogin(user, self.client):
response = self.client.get('/api/projects/{}/tasks'.format(pid))
response = self.client.get('/api/tasks?project_id={}'.format(pid))
return response
@ -1247,7 +1248,8 @@ class ProjectListOfTasksAPITestCase(APITestCase):
def test_api_v2_projects_id_tasks_somebody(self):
project = self.projects[1]
response = self._run_api_v2_projects_id_tasks(self.somebody, project.id)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual([], response.data['results'])
def test_api_v2_projects_id_tasks_no_auth(self):
project = self.projects[1]
@ -2974,6 +2976,7 @@ class TaskImportExportAPITestCase(APITestCase):
"data",
"source_storage",
"target_storage",
"jobs",
),
)
@ -4227,7 +4230,7 @@ class JobAnnotationAPITestCase(APITestCase):
task = response.data
jobs = get_paginated_collection(lambda page:
self.client.get("/api/tasks/{}/jobs?page={}".format(tid, page))
self.client.get("/api/jobs?task_id={}&page={}".format(tid, page))
)
return (task, jobs)

@ -1,4 +1,5 @@
# Copyright (C) 2020-2022 Intel Corporation
# Copyright (C) 2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
@ -143,7 +144,7 @@ class _DbTestBase(APITestCase):
def _get_jobs(self, task_id):
with ForceLogin(self.admin, self.client):
values = get_paginated_collection(lambda page:
self.client.get("/api/tasks/{}/jobs?page={}".format(task_id, page))
self.client.get("/api/jobs?task_id={}&page={}".format(task_id, page))
)
return values

@ -2,14 +2,20 @@
#
# SPDX-License-Identifier: MIT
# NOTE: importing in the header leads to circular importing
from typing import Optional, Type
# NOTE: importing in the utils.py header leads to circular importing
from typing import Any, Dict, Optional, Type
from django.db.models.query import QuerySet
from django.http.request import HttpRequest
from django.http.response import HttpResponse
from django.utils.http import urlencode
from rest_framework.response import Response
from rest_framework.reverse import reverse as _reverse
from rest_framework.serializers import Serializer
from rest_framework.viewsets import GenericViewSet
def make_paginated_response(
queryset: QuerySet,
*,
@ -24,7 +30,6 @@ def make_paginated_response(
serializer_params.setdefault('many', True)
if response_type is None:
from rest_framework.response import Response
response_type = Response
if request is None:
@ -45,3 +50,37 @@ def make_paginated_response(
serializer = serializer_type(queryset, **serializer_params)
return response_type(serializer.data)
def reverse(viewname, *, args=None, kwargs=None,
query_params: Optional[Dict[str, str]] = None,
request: Optional[HttpRequest] = None,
) -> str:
"""
The same as rest_framework's reverse(), but adds custom query params support.
The original request can be passed in the 'request' parameter to
return absolute URLs.
"""
url = _reverse(viewname, args, kwargs, request)
if query_params:
return f'{url}?{urlencode(query_params)}'
return url
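# Usage sketch (hypothetical view name and id):
# reverse('task-list', query_params={'project_id': 42}, request=request)
# -> 'http://host/api/tasks?project_id=42'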
def build_field_filter_params(field: str, value: Any) -> Dict[str, str]:
"""
Builds a collection filter query params for a single field and value.
"""
return { field: value }
def get_list_view_name(model):
# Implemented after
# rest_framework/utils/field_mapping.py.get_detail_view_name()
"""
Given a model class, return the view name to use for URL relationships
that refer to instances of the model.
"""
return '%(model_name)s-list' % {
'model_name': model._meta.object_name.lower()
}
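# e.g. get_list_view_name(Task) -> 'task-list', which matches the default
# router-generated list route name for the model.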

@ -54,7 +54,7 @@ from cvat.apps.engine.serializers import (
AboutSerializer, AnnotationFileSerializer, BasicUserSerializer,
DataMetaReadSerializer, DataMetaWriteSerializer, DataSerializer, ExceptionSerializer,
FileInfoSerializer, JobReadSerializer, JobWriteSerializer, LabeledDataSerializer,
LogEventSerializer, ProjectReadSerializer, ProjectWriteSerializer, ProjectSearchSerializer,
LogEventSerializer, ProjectReadSerializer, ProjectWriteSerializer,
RqStatusSerializer, TaskReadSerializer, TaskWriteSerializer, UserSerializer, PluginsSerializer, IssueReadSerializer,
IssueWriteSerializer, CommentReadSerializer, CommentWriteSerializer, CloudStorageWriteSerializer,
CloudStorageReadSerializer, DatasetFileSerializer, JobCommitSerializer,
@ -236,16 +236,13 @@ class ServerViewSet(viewsets.ViewSet):
@extend_schema(tags=['projects'])
@extend_schema_view(
list=extend_schema(
summary='Returns a paginated list of projects according to query parameters (12 projects per page)',
summary='Returns a paginated list of projects',
responses={
'200': PolymorphicProxySerializer(component_name='PolymorphicProject',
serializers=[
ProjectReadSerializer, ProjectSearchSerializer,
], resource_type_field_name=None, many=True),
'200': ProjectReadSerializer(many=True),
}),
create=extend_schema(
summary='Method creates a new project',
# request=ProjectWriteSerializer,
request=ProjectWriteSerializer,
responses={
'201': ProjectReadSerializer, # check ProjectWriteSerializer.to_representation
}),
@ -261,7 +258,7 @@ class ServerViewSet(viewsets.ViewSet):
}),
partial_update=extend_schema(
summary='Methods does a partial update of chosen fields in a project',
# request=ProjectWriteSerializer,
request=ProjectWriteSerializer(partial=True),
responses={
'200': ProjectReadSerializer, # check ProjectWriteSerializer.to_representation
})
@ -270,28 +267,28 @@ class ProjectViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
mixins.RetrieveModelMixin, CreateModelMixin, DestroyModelMixin,
PartialUpdateModelMixin, UploadMixin, AnnotationMixin, SerializeMixin
):
queryset = models.Project.objects.select_related('assignee', 'owner',
'target_storage', 'source_storage').prefetch_related(
queryset = models.Project.objects.select_related(
'assignee', 'owner', 'target_storage', 'source_storage'
).prefetch_related(
'tasks', 'label_set__sublabels__attributespec_set',
'label_set__attributespec_set')
'label_set__attributespec_set'
).all()
# NOTE: The search_fields attribute should be a list of names of text
# type fields on the model, such as CharField or TextField
search_fields = ('name', 'owner', 'assignee', 'status')
filter_fields = list(search_fields) + ['id', 'updated_date']
ordering_fields = filter_fields
simple_filters = list(search_fields)
ordering_fields = list(filter_fields)
ordering = "-id"
lookup_fields = {'owner': 'owner__username', 'assignee': 'assignee__username'}
iam_organization_field = 'organization'
def get_serializer_class(self):
if self.request.path.endswith('tasks'):
return TaskReadSerializer
if self.request.method in SAFE_METHODS:
return ProjectReadSerializer
else:
if self.request.method in SAFE_METHODS:
return ProjectReadSerializer
else:
return ProjectWriteSerializer
return ProjectWriteSerializer
def get_queryset(self):
queryset = super().get_queryset()
@ -307,21 +304,6 @@ class ProjectViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
organization=self.request.iam_context['organization']
)
@extend_schema(
summary='Method returns information of the tasks of the project with the selected id',
responses=TaskReadSerializer(many=True)) # Duplicate to still get 'list' op. name
@action(detail=True, methods=['GET'], serializer_class=TaskReadSerializer,
pagination_class=viewsets.GenericViewSet.pagination_class,
# Remove regular list() parameters from the swagger schema.
# Unset, they would be taken from the enclosing class, which is wrong.
# https://drf-spectacular.readthedocs.io/en/latest/faq.html#my-action-is-erroneously-paginated-or-has-filter-parameters-that-i-do-not-want
filter_fields=None, search_fields=None, ordering_fields=None)
def tasks(self, request, pk):
self.get_object() # force to call check_object_permissions
return make_paginated_response(Task.objects.filter(project_id=pk).order_by('-id'),
viewset=self, serializer_type=self.serializer_class) # from @action
@extend_schema(methods=['GET'], summary='Export project as a dataset in a specific format',
parameters=[
OpenApiParameter('format', description='Desired output format name\n'
@ -374,7 +356,7 @@ class ProjectViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
@action(detail=True, methods=['GET', 'POST', 'OPTIONS'], serializer_class=None,
url_path=r'dataset/?$')
def dataset(self, request, pk):
self._object = self.get_object() # force to call check_object_permissions
self._object = self.get_object() # force call of check_object_permissions()
rq_id = f"import:dataset-for-project.id{pk}-by-{request.user}"
if request.method in {'POST', 'OPTIONS'}:
@ -514,7 +496,7 @@ class ProjectViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
@action(detail=True, methods=['GET'],
serializer_class=LabeledDataSerializer)
def annotations(self, request, pk):
self._object = self.get_object() # force to call check_object_permissions
self._object = self.get_object() # force call of check_object_permissions()
return self.export_annotations(
request=request,
pk=pk,
@ -707,7 +689,7 @@ class DataChunkGetter:
@extend_schema(tags=['tasks'])
@extend_schema_view(
list=extend_schema(
summary='Returns a paginated list of tasks according to query parameters (10 tasks per page)',
summary='Returns a paginated list of tasks',
responses={
'200': TaskReadSerializer(many=True),
}),
@ -738,16 +720,29 @@ class TaskViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
mixins.RetrieveModelMixin, CreateModelMixin, DestroyModelMixin,
PartialUpdateModelMixin, UploadMixin, AnnotationMixin, SerializeMixin
):
queryset = Task.objects.all().select_related('data', 'assignee', 'owner',
'target_storage', 'source_storage').prefetch_related(
queryset = Task.objects.select_related(
'data', 'assignee', 'owner',
'target_storage', 'source_storage'
).prefetch_related(
'segment_set__job_set__assignee', 'label_set__attributespec_set',
'project__label_set__attributespec_set',
'label_set__sublabels__attributespec_set',
'project__label_set__sublabels__attributespec_set')
lookup_fields = {'project_name': 'project__name', 'owner': 'owner__username', 'assignee': 'assignee__username'}
search_fields = ('project_name', 'name', 'owner', 'status', 'assignee', 'subset', 'mode', 'dimension')
'project__label_set__sublabels__attributespec_set'
).all()
lookup_fields = {
'project_name': 'project__name',
'owner': 'owner__username',
'assignee': 'assignee__username',
'tracker_link': 'bug_tracker',
}
search_fields = (
'project_name', 'name', 'owner', 'status', 'assignee',
'subset', 'mode', 'dimension', 'tracker_link'
)
filter_fields = list(search_fields) + ['id', 'project_id', 'updated_date']
ordering_fields = filter_fields
simple_filters = list(search_fields) + ['project_id']
ordering_fields = list(filter_fields)
ordering = "-id"
iam_organization_field = 'organization'
@ -846,19 +841,6 @@ class TaskViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
db_project.save()
assert serializer.instance.organization == db_project.organization
@extend_schema(summary='Method returns a list of jobs for a specific task',
responses=JobReadSerializer(many=True)) # Duplicate to still get 'list' op. name
@action(detail=True, methods=['GET'], serializer_class=JobReadSerializer,
pagination_class=viewsets.GenericViewSet.pagination_class,
# Remove regular list() parameters from the swagger schema.
# Unset, they would be taken from the enclosing class, which is wrong.
# https://drf-spectacular.readthedocs.io/en/latest/faq.html#my-action-is-erroneously-paginated-or-has-filter-parameters-that-i-do-not-want
filter_fields=None, search_fields=None, ordering_fields=None)
def jobs(self, request, pk):
self.get_object() # force to call check_object_permissions
return make_paginated_response(Job.objects.filter(segment__task_id=pk).order_by('id'),
viewset=self, serializer_type=self.serializer_class) # from @action
# UploadMixin method
def get_upload_dir(self):
if 'annotations' in self.action:
@ -1096,7 +1078,7 @@ class TaskViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
@action(detail=True, methods=['GET', 'DELETE', 'PUT', 'PATCH', 'POST', 'OPTIONS'], url_path=r'annotations/?$',
serializer_class=None)
def annotations(self, request, pk):
self._object = self.get_object() # force to call check_object_permissions
self._object = self.get_object() # force call of check_object_permissions()
if request.method == 'GET':
if self._object.data:
return self.export_annotations(
@ -1182,7 +1164,7 @@ class TaskViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
})
@action(detail=True, methods=['GET'], serializer_class=RqStatusSerializer)
def status(self, request, pk):
self.get_object() # force to call check_object_permissions
self.get_object() # force call of check_object_permissions()
response = self._get_rq_response(
queue=settings.CVAT_QUEUES.IMPORT_DATA.value,
job_id=f"create:task.id{pk}-by-{request.user}"
@ -1286,7 +1268,7 @@ class TaskViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
@action(detail=True, methods=['GET'], serializer_class=None,
url_path='dataset')
def dataset_export(self, request, pk):
self._object = self.get_object() # force to call check_object_permissions
self._object = self.get_object() # force call of check_object_permissions()
if self._object.data:
return self.export_annotations(
@ -1329,13 +1311,13 @@ class TaskViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
'200': JobReadSerializer,
}),
list=extend_schema(
summary='Method returns a paginated list of jobs according to query parameters',
summary='Method returns a paginated list of jobs',
responses={
'200': JobReadSerializer(many=True),
}),
partial_update=extend_schema(
summary='Methods does a partial update of chosen fields in a job',
request=JobWriteSerializer,
request=JobWriteSerializer(partial=True),
responses={
'200': JobReadSerializer, # check JobWriteSerializer.to_representation
})
@ -1344,16 +1326,19 @@ class TaskViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
class JobViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
mixins.RetrieveModelMixin, PartialUpdateModelMixin, UploadMixin, AnnotationMixin
):
queryset = Job.objects.all().select_related('segment__task__data').prefetch_related(
queryset = Job.objects.select_related('segment__task__data').prefetch_related(
'segment__task__label_set', 'segment__task__project__label_set',
'segment__task__label_set__sublabels__attributespec_set',
'segment__task__project__label_set__sublabels__attributespec_set',
'segment__task__label_set__attributespec_set',
'segment__task__project__label_set__attributespec_set')
'segment__task__project__label_set__attributespec_set'
).all()
iam_organization_field = 'segment__task__organization'
search_fields = ('task_name', 'project_name', 'assignee', 'state', 'stage')
filter_fields = list(search_fields) + ['id', 'task_id', 'project_id', 'updated_date']
ordering_fields = filter_fields
filter_fields = list(search_fields) + ['id', 'task_id', 'project_id', 'updated_date', 'dimension']
simple_filters = list(set(filter_fields) - {'id', 'updated_date'})
ordering_fields = list(filter_fields)
ordering = "-id"
lookup_fields = {
'dimension': 'segment__task__dimension',
@ -1493,7 +1478,7 @@ class JobViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
@action(detail=True, methods=['GET', 'DELETE', 'PUT', 'PATCH', 'POST', 'OPTIONS'], url_path=r'annotations/?$',
serializer_class=LabeledDataSerializer)
def annotations(self, request, pk):
self._object = self.get_object() # force to call check_object_permissions
self._object = self.get_object() # force call of check_object_permissions()
if request.method == 'GET':
return self.export_annotations(
request=request,
@ -1604,7 +1589,7 @@ class JobViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
@action(detail=True, methods=['GET'], serializer_class=None,
url_path='dataset')
def dataset_export(self, request, pk):
self._object = self.get_object() # force to call check_object_permissions
self._object = self.get_object() # force call of check_object_permissions()
return self.export_annotations(
request=request,
@ -1614,19 +1599,6 @@ class JobViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
callback=dm.views.export_job_as_dataset
)
@extend_schema(summary='Method returns list of issues for the job',
responses=IssueReadSerializer(many=True)) # Duplicate to still get 'list' op. name
@action(detail=True, methods=['GET'], serializer_class=IssueReadSerializer,
pagination_class=viewsets.GenericViewSet.pagination_class,
# Remove regular list() parameters from the swagger schema.
# Unset, they would be taken from the enclosing class, which is wrong.
# https://drf-spectacular.readthedocs.io/en/latest/faq.html#my-action-is-erroneously-paginated-or-has-filter-parameters-that-i-do-not-want
filter_fields=None, search_fields=None, ordering_fields=None)
def issues(self, request, pk):
self.get_object() # force to call check_object_permissions
return make_paginated_response(Issue.objects.filter(job_id=pk).order_by('id'),
viewset=self, serializer_type=self.serializer_class) # from @action
@extend_schema(summary='Method returns data for a specific job',
parameters=[
OpenApiParameter('type', description='Specifies the type of the requested data',
@ -1667,7 +1639,7 @@ class JobViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
@action(detail=True, methods=['GET', 'PATCH'], serializer_class=DataMetaReadSerializer,
url_path='data/meta')
def metadata(self, request, pk):
self.get_object() # force to call check_object_permissions
self.get_object() # force call of check_object_permissions()
db_job = models.Job.objects.prefetch_related(
'segment',
'segment__task',
@ -1730,12 +1702,13 @@ class JobViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
responses=JobCommitSerializer(many=True)) # Duplicate to still get 'list' op. name
@action(detail=True, methods=['GET'], serializer_class=JobCommitSerializer,
pagination_class=viewsets.GenericViewSet.pagination_class,
# These non-root list endpoints are not supposed to offer extra options, just the basic output
# Remove regular list() parameters from the swagger schema.
# Unset, they would be taken from the enclosing class, which is wrong.
# https://drf-spectacular.readthedocs.io/en/latest/faq.html#my-action-is-erroneously-paginated-or-has-filter-parameters-that-i-do-not-want
filter_fields=None, search_fields=None, ordering_fields=None)
filter_fields=None, ordering_fields=None, search_fields=None, simple_filters=None)
def commits(self, request, pk):
self.get_object() # force to call check_object_permissions
self.get_object() # force call of check_object_permissions()
return make_paginated_response(JobCommit.objects.filter(job_id=pk).order_by('-id'),
viewset=self, serializer_type=self.serializer_class) # from @action
@ -1765,13 +1738,13 @@ class JobViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
'200': IssueReadSerializer,
}),
list=extend_schema(
summary='Method returns a paginated list of issues according to query parameters',
summary='Method returns a paginated list of issues',
responses={
'200': IssueReadSerializer(many=True),
}),
partial_update=extend_schema(
summary='Methods does a partial update of chosen fields in an issue',
request=IssueWriteSerializer,
request=IssueWriteSerializer(partial=True),
responses={
'200': IssueReadSerializer, # check IssueWriteSerializer.to_representation
}),
@ -1791,17 +1764,22 @@ class IssueViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
mixins.RetrieveModelMixin, CreateModelMixin, DestroyModelMixin,
PartialUpdateModelMixin
):
queryset = Issue.objects.all().order_by('-id')
queryset = Issue.objects.prefetch_related(
'job__segment__task', 'owner', 'assignee', 'job'
).all()
iam_organization_field = 'job__segment__task__organization'
search_fields = ('owner', 'assignee')
filter_fields = list(search_fields) + ['id', 'job_id', 'task_id', 'resolved']
filter_fields = list(search_fields) + ['id', 'job_id', 'task_id', 'resolved', 'frame_id']
simple_filters = list(search_fields) + ['job_id', 'task_id', 'resolved', 'frame_id']
ordering_fields = list(filter_fields)
lookup_fields = {
'owner': 'owner__username',
'assignee': 'assignee__username',
'job_id': 'job__id',
'job_id': 'job',
'task_id': 'job__segment__task__id',
'frame_id': 'frame',
}
ordering_fields = filter_fields
ordering = '-id'
def get_queryset(self):
@ -1821,19 +1799,6 @@ class IssueViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
def perform_create(self, serializer, **kwargs):
super().perform_create(serializer, owner=self.request.user)
@extend_schema(summary='The action returns all comments of a specific issue',
responses=CommentReadSerializer(many=True)) # Duplicate to still get 'list' op. name
@action(detail=True, methods=['GET'], serializer_class=CommentReadSerializer,
pagination_class=viewsets.GenericViewSet.pagination_class,
# Remove regular list() parameters from the swagger schema.
# Unset, they would be taken from the enclosing class, which is wrong.
# https://drf-spectacular.readthedocs.io/en/latest/faq.html#my-action-is-erroneously-paginated-or-has-filter-parameters-that-i-do-not-want
filter_fields=None, search_fields=None, ordering_fields=None)
def comments(self, request, pk):
self.get_object() # force to call check_object_permissions
return make_paginated_response(Comment.objects.filter(issue_id=pk).order_by('-id'),
viewset=self, serializer_type=self.serializer_class) # from @action
@extend_schema(tags=['comments'])
@extend_schema_view(
retrieve=extend_schema(
@ -1842,13 +1807,13 @@ class IssueViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
'200': CommentReadSerializer,
}),
list=extend_schema(
summary='Method returns a paginated list of comments according to query parameters',
summary='Method returns a paginated list of comments',
responses={
'200':CommentReadSerializer(many=True),
'200': CommentReadSerializer(many=True),
}),
partial_update=extend_schema(
summary='Methods does a partial update of chosen fields in a comment',
request=CommentWriteSerializer,
request=CommentWriteSerializer(partial=True),
responses={
'200': CommentReadSerializer, # check CommentWriteSerializer.to_representation
}),
@ -1868,13 +1833,22 @@ class CommentViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
mixins.RetrieveModelMixin, CreateModelMixin, DestroyModelMixin,
PartialUpdateModelMixin
):
queryset = Comment.objects.all().order_by('-id')
queryset = Comment.objects.prefetch_related(
'issue', 'issue__job', 'owner'
).all()
iam_organization_field = 'issue__job__segment__task__organization'
search_fields = ('owner',)
filter_fields = list(search_fields) + ['id', 'issue_id']
ordering_fields = filter_fields
filter_fields = list(search_fields) + ['id', 'issue_id', 'frame_id', 'job_id']
simple_filters = list(search_fields) + ['issue_id', 'frame_id', 'job_id']
ordering_fields = list(filter_fields)
ordering = '-id'
lookup_fields = {'owner': 'owner__username', 'issue_id': 'issue__id'}
lookup_fields = {
'owner': 'owner__username',
'issue_id': 'issue__id',
'job_id': 'issue__job__id',
'frame_id': 'issue__frame',
}
def get_queryset(self):
queryset = super().get_queryset()
@ -1896,7 +1870,7 @@ class CommentViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
@extend_schema(tags=['users'])
@extend_schema_view(
list=extend_schema(
summary='Method provides a paginated list of users registered on the server',
summary='Method returns a paginated list of users',
responses={
'200': PolymorphicProxySerializer(component_name='MetaUser',
serializers=[
@ -1916,7 +1890,7 @@ class CommentViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
responses={
'200': PolymorphicProxySerializer(component_name='MetaUser',
serializers=[
UserSerializer, BasicUserSerializer,
UserSerializer(partial=True), BasicUserSerializer(partial=True),
], resource_type_field_name=None),
}),
destroy=extend_schema(
@ -1928,11 +1902,12 @@ class CommentViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
class UserViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
mixins.RetrieveModelMixin, PartialUpdateModelMixin, mixins.DestroyModelMixin):
queryset = User.objects.prefetch_related('groups').all()
search_fields = ('username', 'first_name', 'last_name')
iam_organization_field = 'memberships__organization'
filter_fields = ('id', 'is_active', 'username')
ordering_fields = filter_fields
search_fields = ('username', 'first_name', 'last_name')
filter_fields = list(search_fields) + ['id', 'is_active']
simple_filters = list(search_fields) + ['is_active']
ordering_fields = list(filter_fields)
ordering = "-id"
def get_queryset(self):
@ -1983,7 +1958,7 @@ class UserViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
'200': CloudStorageReadSerializer,
}),
list=extend_schema(
summary='Returns a paginated list of storages according to query parameters',
summary='Returns a paginated list of storages',
responses={
'200': CloudStorageReadSerializer(many=True),
}),
@ -1994,7 +1969,7 @@ class UserViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
}),
partial_update=extend_schema(
summary='Methods does a partial update of chosen fields in a cloud storage instance',
request=CloudStorageWriteSerializer,
request=CloudStorageWriteSerializer(partial=True),
responses={
'200': CloudStorageReadSerializer, # check CloudStorageWriteSerializer.to_representation
}),
@ -2009,14 +1984,15 @@ class CloudStorageViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
mixins.RetrieveModelMixin, mixins.CreateModelMixin, mixins.DestroyModelMixin,
PartialUpdateModelMixin
):
queryset = CloudStorageModel.objects.all().prefetch_related('data')
queryset = CloudStorageModel.objects.prefetch_related('data').all()
search_fields = ('provider_type', 'display_name', 'resource',
search_fields = ('provider_type', 'name', 'resource',
'credentials_type', 'owner', 'description')
filter_fields = list(search_fields) + ['id']
ordering_fields = filter_fields
simple_filters = list(set(search_fields) - {'description'})
ordering_fields = list(filter_fields)
ordering = "-id"
lookup_fields = {'owner': 'owner__username'}
lookup_fields = {'owner': 'owner__username', 'name': 'display_name'}
iam_organization_field = 'organization'
def get_serializer_class(self):

@ -1,5 +1,5 @@
# Copyright (C) 2021-2022 Intel Corporation
# Copyright (C) 2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
@ -1048,7 +1048,7 @@ class Issue4996_Cases(_LambdaTestCaseBase):
jobs = get_paginated_collection(lambda page:
self._get_request(
f"/api/tasks/{self.task['id']}/jobs?page={page}",
f"/api/jobs?task_id={self.task['id']}&page={page}",
self.admin, org_id=self.org['id']
)
)

@ -1,5 +1,5 @@
# Copyright (C) 2021-2022 Intel Corporation
# Copyright (C) 2022 CVAT.ai Corporation
# Copyright (C) 2022-2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT

@ -1,4 +1,5 @@
# Copyright (C) 2021-2022 Intel Corporation
# Copyright (C) 2022-2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
@ -23,13 +24,12 @@ class OrganizationWriteSerializer(serializers.ModelSerializer):
class Meta:
model = Organization
fields = ['id', 'slug', 'name', 'description', 'created_date',
'updated_date', 'contact', 'owner']
fields = ['slug', 'name', 'description', 'contact', 'owner']
# TODO: at the moment it isn't possible to change the owner. It should
# be a separate feature. It needs to be changed together with the corresponding
# Membership. Such an operation should also be well protected.
read_only_fields = ['created_date', 'updated_date', 'owner']
read_only_fields = ['owner']
def create(self, validated_data):
organization = super().create(validated_data)

@ -1,5 +1,5 @@
# Copyright (C) 2021-2022 Intel Corporation
# Copyright (C) 2022 CVAT.ai Corporation
# Copyright (C) 2022-2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
@ -27,19 +27,21 @@ from .serializers import (
'200': OrganizationReadSerializer,
}),
list=extend_schema(
summary='Method returns a paginated list of organizations according to query parameters',
summary='Method returns a paginated list of organizations',
responses={
'200': OrganizationReadSerializer(many=True),
}),
partial_update=extend_schema(
summary='Methods does a partial update of chosen fields in an organization',
request=OrganizationWriteSerializer(partial=True),
responses={
'200': OrganizationWriteSerializer,
'200': OrganizationReadSerializer, # check OrganizationWriteSerializer.to_representation
}),
create=extend_schema(
summary='Method creates an organization',
request=OrganizationWriteSerializer,
responses={
'201': OrganizationWriteSerializer,
'201': OrganizationReadSerializer, # check OrganizationWriteSerializer.to_representation
}),
destroy=extend_schema(
summary='Method deletes an organization',
@ -57,8 +59,9 @@ class OrganizationViewSet(viewsets.GenericViewSet,
queryset = Organization.objects.all()
search_fields = ('name', 'owner')
filter_fields = list(search_fields) + ['id', 'slug']
simple_filters = list(search_fields) + ['slug']
lookup_fields = {'owner': 'owner__username'}
ordering_fields = filter_fields
ordering_fields = list(filter_fields)
ordering = '-id'
http_method_names = ['get', 'post', 'patch', 'delete', 'head', 'options']
iam_organization_field = None
@ -92,14 +95,15 @@ class OrganizationViewSet(viewsets.GenericViewSet,
'200': MembershipReadSerializer,
}),
list=extend_schema(
summary='Method returns a paginated list of memberships according to query parameters',
summary='Method returns a paginated list of memberships',
responses={
'200': MembershipReadSerializer(many=True),
}),
partial_update=extend_schema(
summary='Methods does a partial update of chosen fields in a membership',
request=MembershipWriteSerializer(partial=True),
responses={
'200': MembershipWriteSerializer,
'200': MembershipReadSerializer, # check MembershipWriteSerializer.to_representation
}),
destroy=extend_schema(
summary='Method deletes a membership',
@ -112,10 +116,11 @@ class MembershipViewSet(mixins.RetrieveModelMixin, DestroyModelMixin,
queryset = Membership.objects.all()
ordering = '-id'
http_method_names = ['get', 'patch', 'delete', 'head', 'options']
search_fields = ('user_name', 'role')
filter_fields = list(search_fields) + ['id', 'user']
ordering_fields = filter_fields
lookup_fields = {'user': 'user__id', 'user_name': 'user__username'}
search_fields = ('user', 'role')
filter_fields = list(search_fields) + ['id']
simple_filters = list(search_fields)
ordering_fields = list(filter_fields)
lookup_fields = {'user': 'user__username'}
iam_organization_field = 'organization'
def get_serializer_class(self):
@ -137,24 +142,21 @@ class MembershipViewSet(mixins.RetrieveModelMixin, DestroyModelMixin,
'200': InvitationReadSerializer,
}),
list=extend_schema(
summary='Method returns a paginated list of invitations according to query parameters',
summary='Method returns a paginated list of invitations',
responses={
'200': InvitationReadSerializer(many=True),
}),
update=extend_schema(
summary='Method updates an invitation by id',
responses={
'200': InvitationWriteSerializer,
}),
partial_update=extend_schema(
summary='Methods does a partial update of chosen fields in an invitation',
request=InvitationWriteSerializer(partial=True),
responses={
'200': InvitationWriteSerializer,
'200': InvitationReadSerializer, # check InvitationWriteSerializer.to_representation
}),
create=extend_schema(
summary='Method creates an invitation',
request=InvitationWriteSerializer,
responses={
'201': InvitationWriteSerializer,
'201': InvitationReadSerializer, # check InvitationWriteSerializer.to_representation
}),
destroy=extend_schema(
summary='Method deletes an invitation',
@ -165,7 +167,7 @@ class MembershipViewSet(mixins.RetrieveModelMixin, DestroyModelMixin,
class InvitationViewSet(viewsets.GenericViewSet,
mixins.RetrieveModelMixin,
mixins.ListModelMixin,
mixins.UpdateModelMixin,
PartialUpdateModelMixin,
CreateModelMixin,
DestroyModelMixin,
):
@ -174,7 +176,8 @@ class InvitationViewSet(viewsets.GenericViewSet,
iam_organization_field = 'membership__organization'
search_fields = ('owner',)
filter_fields = search_fields
filter_fields = list(search_fields)
simple_filters = list(search_fields)
ordering_fields = list(filter_fields) + ['created_date']
ordering = '-created_date'
lookup_fields = {'owner': 'owner__username'}

@ -1,4 +1,4 @@
# Copyright (C) 2022 CVAT.ai Corporation
# Copyright (C) 2022-2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
@ -93,6 +93,9 @@ class WebhookReadSerializer(serializers.ModelSerializer):
"last_delivery_date",
)
read_only_fields = fields
extra_kwargs = {
"organization": {"allow_null": True},
}
class WebhookWriteSerializer(WriteOnceMixin, serializers.ModelSerializer):

@ -1,4 +1,4 @@
# Copyright (C) 2022 CVAT.ai Corporation
# Copyright (C) 2022-2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
@ -14,6 +14,7 @@ from rest_framework.decorators import action
from rest_framework.permissions import SAFE_METHODS
from rest_framework.response import Response
from cvat.apps.engine.view_utils import make_paginated_response
from cvat.apps.iam.permissions import WebhookPermission
from .event_type import AllEvents, OrganizationEvents, ProjectEvents
@ -40,17 +41,23 @@ from .signals import signal_ping, signal_redelivery
update=extend_schema(
summary="Method updates a webhook by id",
request=WebhookWriteSerializer,
responses={"200": WebhookReadSerializer}, # check WebhookWriteSerializer.to_representation
responses={
"200": WebhookReadSerializer
}, # check WebhookWriteSerializer.to_representation
),
partial_update=extend_schema(
summary="Methods does a partial update of chosen fields in a webhook",
request=WebhookWriteSerializer,
responses={"200": WebhookReadSerializer}, # check WebhookWriteSerializer.to_representation
responses={
"200": WebhookReadSerializer
}, # check WebhookWriteSerializer.to_representation
),
create=extend_schema(
request=WebhookWriteSerializer,
summary="Method creates a webhook",
responses={"201": WebhookReadSerializer} # check WebhookWriteSerializer.to_representation
responses={
"201": WebhookReadSerializer
}, # check WebhookWriteSerializer.to_representation
),
destroy=extend_schema(
summary="Method deletes a webhook",
@ -58,22 +65,25 @@ from .signals import signal_ping, signal_redelivery
),
)
class WebhookViewSet(viewsets.ModelViewSet):
queryset = Webhook.objects.all()
queryset = Webhook.objects.prefetch_related("owner").all()
ordering = "-id"
http_method_names = ["get", "post", "delete", "patch", "put"]
search_fields = ("target_url", "owner", "type", "description")
filter_fields = list(search_fields) + ["id", "project_id", "updated_date"]
ordering_fields = filter_fields
simple_filters = list(set(search_fields) - {"description"} | {"project_id"})
ordering_fields = list(filter_fields)
lookup_fields = {"owner": "owner__username"}
iam_organization_field = "organization"
def get_serializer_class(self):
# Early exit for drf-spectacular compatibility
if getattr(self, 'swagger_fake_view', False):
if getattr(self, "swagger_fake_view", False):
return WebhookReadSerializer
if self.request.path.endswith("redelivery") or self.request.path.endswith("ping"):
if self.request.path.endswith("redelivery") or self.request.path.endswith(
"ping"
):
return None
else:
if self.request.method in SAFE_METHODS:
@ -128,29 +138,29 @@ class WebhookViewSet(viewsets.ModelViewSet):
@extend_schema(
summary="Method return a list of deliveries for a specific webhook",
responses={"200": WebhookDeliveryReadSerializer(many=True)},
responses=WebhookDeliveryReadSerializer(
many=True
), # Duplicate to still get 'list' op. name
)
@action(
detail=True, methods=["GET"], serializer_class=WebhookDeliveryReadSerializer
detail=True,
methods=["GET"],
serializer_class=WebhookDeliveryReadSerializer,
pagination_class=viewsets.GenericViewSet.pagination_class,
# These non-root list endpoints are not supposed to offer extra options, just the basic output
# Remove regular list() parameters from the swagger schema.
# Unset, they would be taken from the enclosing class, which is wrong.
# https://drf-spectacular.readthedocs.io/en/latest/faq.html#my-action-is-erroneously-paginated-or-has-filter-parameters-that-i-do-not-want
filter_fields=None, ordering_fields=None, search_fields=None, simple_filters=None,
)
def deliveries(self, request, pk):
self.get_object()
self.get_object() # force call of check_object_permissions()
queryset = WebhookDelivery.objects.filter(webhook_id=pk).order_by(
"-updated_date"
)
page = self.paginate_queryset(queryset)
if page is not None:
serializer = WebhookDeliveryReadSerializer(
page, many=True, context={"request": request}
)
return self.get_paginated_response(serializer.data)
serializer = WebhookDeliveryReadSerializer(
queryset, many=True, context={"request": request}
)
return Response(serializer.data)
return make_paginated_response(
queryset, viewset=self, serializer_type=self.serializer_class
) # from @action
@extend_schema(
summary="Method return a specific delivery for a specific webhook",
@ -163,22 +173,23 @@ class WebhookViewSet(viewsets.ModelViewSet):
serializer_class=WebhookDeliveryReadSerializer,
)
def retrieve_delivery(self, request, pk, delivery_id):
self.get_object()
self.get_object() # force call of check_object_permissions()
queryset = WebhookDelivery.objects.get(webhook_id=pk, id=delivery_id)
serializer = WebhookDeliveryReadSerializer(
queryset, context={"request": request}
)
return Response(serializer.data)
@extend_schema(summary="Method redeliver a specific webhook delivery",
@extend_schema(
summary="Method redeliver a specific webhook delivery",
request=None,
responses={200: None}
responses={200: None},
)
@action(
detail=True,
methods=["POST"],
url_path=r"deliveries/(?P<delivery_id>\d+)/redelivery",
serializer_class=None
serializer_class=None,
)
def redelivery(self, request, pk, delivery_id):
delivery = WebhookDelivery.objects.get(webhook_id=pk, id=delivery_id)
@ -196,7 +207,7 @@ class WebhookViewSet(viewsets.ModelViewSet):
detail=True, methods=["POST"], serializer_class=WebhookDeliveryReadSerializer
)
def ping(self, request, pk):
instance = self.get_object()
instance = self.get_object() # force call of check_object_permissions()
serializer = WebhookReadSerializer(instance, context={"request": request})
delivery = signal_ping.send(sender=self, serializer=serializer)[0][1]

@ -1,5 +1,5 @@
# Copyright (C) 2018-2022 Intel Corporation
# Copyright (C) 2022 CVAT.ai Corporation
# Copyright (C) 2022-2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
@ -114,6 +114,7 @@ INSTALLED_APPS = [
"dj_rest_auth",
'dj_rest_auth.registration',
'dj_pagination',
'django_filters',
'rest_framework',
'rest_framework.authtoken',
'drf_spectacular',
@ -174,10 +175,12 @@ REST_FRAMEWORK = {
'cvat.apps.engine.pagination.CustomPagination',
'PAGE_SIZE': 10,
'DEFAULT_FILTER_BACKENDS': (
'cvat.apps.engine.filters.SimpleFilter',
'cvat.apps.engine.filters.SearchFilter',
'cvat.apps.engine.filters.OrderingFilter',
'cvat.apps.engine.filters.JsonLogicFilter',
'cvat.apps.iam.filters.OrganizationFilterBackend'),
'cvat.apps.iam.filters.OrganizationFilterBackend',
),
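# The backends are applied independently, so one request can combine the
# query styles, e.g. (hypothetical): /api/tasks?project_id=42&search=car&sort=-id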
'SEARCH_PARAM': 'search',
# Disable default handling of the 'format' query parameter by REST framework

@ -1,4 +1,5 @@
// Copyright (C) 2022 Intel Corporation
// Copyright (C) 2023 CVAT.ai Corporation
//
// SPDX-License-Identifier: MIT
@ -81,6 +82,7 @@ context('Paste labels from one task to another.', { browser: '!firefox' }, () =>
});
cy.wait('@patchTaskLabels').its('response.statusCode').should('equal', 200);
cy.get('.cvat-modal-confirm-remove-existing-labels').should('not.exist');
cy.get('.cvat-spinner').should('not.exist');
cy.get('.cvat-raw-labels-viewer').then((raw) => {
expect(raw.text()).contain('"id":');
});

@ -257,6 +257,7 @@ Cypress.Commands.add('openTask', (taskName, projectSubsetFieldValue) => {
cy.contains('strong', new RegExp(`^${taskName}$`))
.parents('.cvat-tasks-list-item')
.contains('a', 'Open').click({ force: true });
cy.get('.cvat-spinner').should('not.exist');
cy.get('.cvat-task-details').should('exist');
if (projectSubsetFieldValue) {
cy.get('.cvat-project-subset-field').find('input').should('have.attr', 'value', projectSubsetFieldValue);
@ -282,6 +283,8 @@ Cypress.Commands.add('getJobNum', (jobID) => {
});
Cypress.Commands.add('openJob', (jobID = 0, removeAnnotations = true, expectedFail = false) => {
cy.get('.cvat-task-job-list').should('exist');
cy.get('.cvat-task-jobs-table-row').should('exist');
cy.getJobNum(jobID).then(($job) => {
cy.get('.cvat-task-jobs-table-row').contains('a', `Job #${$job}`).click();
});

@ -1,4 +1,5 @@
// Copyright (C) 2020-2022 Intel Corporation
// Copyright (C) 2023 CVAT.ai Corporation
//
// SPDX-License-Identifier: MIT
@ -152,6 +153,7 @@ Cypress.Commands.add('createIssueFromControlButton', (createIssueParams) => {
cy.get('[type="submit"]').click();
});
cy.wait('@issues').its('response.statusCode').should('equal', 201);
cy.get('.cvat-create-issue-dialog').should('not.exist');
cy.checkIssueRegion();
});

@ -1,17 +1,22 @@
# Copyright (C) 2022 Intel Corporation
# Copyright (C) 2022 CVAT.ai Corporation
# Copyright (C) 2022-2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
import io
from http import HTTPStatus
from typing import List
import pytest
from cvat_sdk.api_client import ApiClient, models
from cvat_sdk.api_client.api_client import Endpoint
from deepdiff import DeepDiff
from PIL import Image
from shared.utils.config import get_method, patch_method, post_method
from .utils import CollectionSimpleFilterTestBase
# https://docs.pytest.org/en/7.1.x/example/markers.html#marking-whole-classes-or-modules
pytestmark = [pytest.mark.with_external_services]
@ -97,6 +102,35 @@ class TestGetCloudStorage:
self._test_cannot_see(username, storage_id, org_id=org_id)
class TestCloudStoragesListFilters(CollectionSimpleFilterTestBase):
field_lookups = {
"owner": ["owner", "username"],
"name": ["display_name"],
}
@pytest.fixture(autouse=True)
def setup(self, restore_db_per_class, admin_user, cloud_storages):
self.user = admin_user
self.samples = cloud_storages
def _get_endpoint(self, api_client: ApiClient) -> Endpoint:
return api_client.cloudstorages_api.list_endpoint
def _retrieve_collection(self, **kwargs) -> List:
# TODO: fix invalid serializer schema for manifests
results = super()._retrieve_collection(_parse_response=False, return_json=True, **kwargs)
for r in results:
r["manifests"] = [{"filename": m} for m in r["manifests"]]
return [models.CloudStorageRead._from_openapi_data(**r) for r in results]
@pytest.mark.parametrize(
"field",
("provider_type", "name", "resource", "credentials_type", "owner"),
)
def test_can_use_simple_filter_for_object_list(self, field):
return super().test_can_use_simple_filter_for_object_list(field)
@pytest.mark.usefixtures("restore_db_per_function")
class TestPostCloudStorage:
_SPEC = {

@ -1,14 +1,17 @@
# Copyright (C) 2021-2022 Intel Corporation
# Copyright (C) 2022 CVAT.ai Corporation
# Copyright (C) 2022-2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
from http import HTTPStatus
import pytest
from cvat_sdk.api_client.api_client import ApiClient, Endpoint
from shared.utils.config import post_method
from .utils import CollectionSimpleFilterTestBase
@pytest.mark.usefixtures("restore_db_per_function")
class TestCreateInvitations:
@ -84,3 +87,28 @@ class TestCreateInvitations:
{"role": "owner", "email": non_member_users[4]["email"]},
org_id=org_id,
)
class TestInvitationsListFilters(CollectionSimpleFilterTestBase):
field_lookups = {
"owner": ["owner", "username"],
}
@pytest.fixture(autouse=True)
def setup(self, restore_db_per_class, admin_user, invitations):
self.user = admin_user
self.samples = invitations
def _get_endpoint(self, api_client: ApiClient) -> Endpoint:
return api_client.invitations_api.list_endpoint
@pytest.mark.parametrize(
"field",
("owner",),
)
def test_can_use_simple_filter_for_object_list(self, field):
value, gt_objects = self._get_field_samples(field)
received_items = self._retrieve_collection(**{field: str(value)})
assert set(p["key"] for p in gt_objects) == set(p.key for p in received_items)

@ -1,19 +1,23 @@
# Copyright (C) 2022 Intel Corporation
# Copyright (C) 2022 CVAT.ai Corporation
# Copyright (C) 2022-2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
import json
from copy import deepcopy
from http import HTTPStatus
from typing import Any, Dict, List, Optional, Tuple
import pytest
from cvat_sdk import models
from cvat_sdk.api_client import exceptions
from cvat_sdk.api_client.api_client import ApiClient, Endpoint
from deepdiff import DeepDiff
from shared.utils.config import make_api_client
from .utils import CollectionSimpleFilterTestBase
@pytest.mark.usefixtures("restore_db_per_function")
class TestPostIssues:
@ -30,7 +34,11 @@ class TestPostIssues:
assert response.status == HTTPStatus.CREATED
response_json = json.loads(response.data)
assert user == response_json["owner"]["username"]
assert data["message"] == response_json["comments"][0]["message"]
with make_api_client(user) as client:
(comments, _) = client.comments_api.list(issue_id=str(response_json["id"]))
assert data["message"] == comments.results[0].message
assert (
DeepDiff(
data,
@ -123,10 +131,11 @@ class TestPostIssues:
@pytest.mark.usefixtures("restore_db_per_function")
class TestPatchIssues:
def _test_check_response(self, user, issue_id, data, is_allow, **kwargs):
request_data, expected_response_data = data
with make_api_client(user) as client:
(_, response) = client.issues_api.partial_update(
issue_id,
patched_issue_write_request=models.PatchedIssueWriteRequest(**data),
patched_issue_write_request=models.PatchedIssueWriteRequest(**request_data),
**kwargs,
_parse_response=False,
_check_status=False,
@ -136,7 +145,7 @@ class TestPatchIssues:
assert response.status == HTTPStatus.OK
assert (
DeepDiff(
data,
expected_response_data,
json.loads(response.data),
exclude_regex_paths=r"root\['created_date|updated_date|comments|id|owner'\]",
)
@ -146,15 +155,28 @@ class TestPatchIssues:
assert response.status == HTTPStatus.FORBIDDEN
@pytest.fixture(scope="class")
def request_data(self, issues):
def get_data(issue_id):
data = deepcopy(issues[issue_id])
data["resolved"] = not data["resolved"]
data.pop("comments")
data.pop("updated_date")
data.pop("id")
data.pop("owner")
return data
def request_and_response_data(self, issues, users):
def get_data(issue_id, *, username: Optional[str] = None):
request_data = deepcopy(issues[issue_id])
request_data["resolved"] = not request_data["resolved"]
response_data = deepcopy(request_data)
request_data.pop("comments")
request_data.pop("updated_date")
request_data.pop("id")
request_data.pop("owner")
if username:
assignee = next(u for u in users if u["username"] == username)
request_data["assignee"] = assignee["id"]
response_data["assignee"] = {
k: assignee[k] for k in ["id", "username", "url", "first_name", "last_name"]
}
else:
request_data["assignee"] = None
return request_data, response_data
return get_data
@ -183,13 +205,13 @@ class TestPatchIssues:
find_issue_staff_user,
find_users,
issues_by_org,
request_data,
request_and_response_data,
):
users = find_users(privilege=privilege)
issues = issues_by_org[org]
username, issue_id = find_issue_staff_user(issues, users, issue_staff, issue_admin)
data = request_data(issue_id)
data = request_and_response_data(issue_id, username=username)
self._test_check_response(username, issue_id, data, is_allow)
@pytest.mark.parametrize("org", [2])
@ -217,13 +239,13 @@ class TestPatchIssues:
find_issue_staff_user,
find_users,
issues_by_org,
request_data,
request_and_response_data,
):
users = find_users(role=role, org=org)
issues = issues_by_org[org]
username, issue_id = find_issue_staff_user(issues, users, issue_staff, issue_admin)
data = request_data(issue_id)
data = request_and_response_data(issue_id, username=username)
self._test_check_response(username, issue_id, data, is_allow, org_id=org)
@pytest.mark.xfail(
@ -326,3 +348,65 @@ class TestDeleteIssues:
username, issue_id = find_issue_staff_user(issues, users, issue_staff, issue_admin)
self._test_check_response(username, issue_id, expect_success, org_id=org)
class TestIssuesListFilters(CollectionSimpleFilterTestBase):
field_lookups = {
"owner": ["owner", "username"],
"assignee": ["assignee", "username"],
"job_id": ["job"],
"frame_id": ["frame"],
}
@pytest.fixture(autouse=True)
def setup(self, restore_db_per_class, admin_user, issues):
self.user = admin_user
self.samples = issues
def _get_endpoint(self, api_client: ApiClient) -> Endpoint:
return api_client.issues_api.list_endpoint
@pytest.mark.parametrize(
"field",
("owner", "assignee", "job_id", "resolved", "frame_id"),
)
def test_can_use_simple_filter_for_object_list(self, field):
return super().test_can_use_simple_filter_for_object_list(field)
class TestCommentsListFilters(CollectionSimpleFilterTestBase):
field_lookups = {
"owner": ["owner", "username"],
"issue_id": ["issue"],
}
@pytest.fixture(autouse=True)
def setup(self, restore_db_per_class, admin_user, comments, issues):
self.user = admin_user
self.samples = comments
self.sample_issues = issues
def _get_endpoint(self, api_client: ApiClient) -> Endpoint:
return api_client.comments_api.list_endpoint
def _get_field_samples(self, field: str) -> Tuple[Any, List[Dict[str, Any]]]:
if field == "job_id":
issue_id, issue_comments = super()._get_field_samples("issue_id")
issue = next(s for s in self.sample_issues if s["id"] == issue_id)
return issue["job"], issue_comments
elif field == "frame_id":
frame_id = self._find_valid_field_value(self.sample_issues, ["frame"])
issues = [s["id"] for s in self.sample_issues if s["frame"] == frame_id]
comments = [
s for s in self.samples if self._get_field(s, self._map_field("issue_id")) in issues
]
return frame_id, comments
else:
return super()._get_field_samples(field)
@pytest.mark.parametrize(
"field",
("owner", "issue_id", "job_id", "frame_id"),
)
def test_can_use_simple_filter_for_object_list(self, field):
return super().test_can_use_simple_filter_for_object_list(field)

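For reference, the nested `/issues/{id}/comments` lookups replaced above map directly onto the new simple filter; a minimal sketch of the equivalent SDK call, with the user and issue id taken as placeholders from the test assets:

from shared.utils.config import make_api_client

# List the comments of a single issue via the simple filter,
# mirroring the call in TestPostIssues above.
with make_api_client("admin1") as client:
    (comments, _) = client.comments_api.list(issue_id="5")
    print([c.message for c in comments.results])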
@ -1,5 +1,5 @@
# Copyright (C) 2021-2022 Intel Corporation
# Copyright (C) 2022 CVAT.ai Corporation
# Copyright (C) 2022-2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
@ -12,13 +12,14 @@ from io import BytesIO
from typing import List
import pytest
from cvat_sdk.api_client.api_client import ApiClient, Endpoint
from cvat_sdk.core.helpers import get_paginated_collection
from deepdiff import DeepDiff
from PIL import Image
from shared.utils.config import make_api_client
from .utils import export_dataset
from .utils import CollectionSimpleFilterTestBase, export_dataset
def get_job_staff(job, tasks, projects):
@ -146,6 +147,33 @@ class TestListJobs:
self._test_list_jobs_403(user["username"], **kwargs)
class TestJobsListFilters(CollectionSimpleFilterTestBase):
field_lookups = {
"assignee": ["assignee", "username"],
}
@pytest.fixture(autouse=True)
def setup(self, restore_db_per_class, admin_user, jobs):
self.user = admin_user
self.samples = jobs
def _get_endpoint(self, api_client: ApiClient) -> Endpoint:
return api_client.jobs_api.list_endpoint
@pytest.mark.parametrize(
"field",
(
"assignee",
"state",
"stage",
"task_id",
"project_id",
),
)
def test_can_use_simple_filter_for_object_list(self, field):
return super().test_can_use_simple_filter_for_object_list(field)
@pytest.mark.usefixtures("restore_db_per_class")
class TestGetAnnotations:
def _test_get_job_annotations_200(self, user, jid, data, **kwargs):

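The jobs suite exercises the same mechanism, and several simple filters can be combined in one request. A minimal sketch, assuming the placeholder filter values occur in the test assets:

from shared.utils.config import make_api_client

# Combine simple filters in a single list request.
with make_api_client("admin1") as client:
    (jobs, _) = client.jobs_api.list(task_id="2", stage="annotation")
    print([j.id for j in jobs.results])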
@ -1,15 +1,18 @@
# Copyright (C) 2021-2022 Intel Corporation
# Copyright (C) 2022 CVAT.ai Corporation
# Copyright (C) 2022-2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
from http import HTTPStatus
import pytest
from cvat_sdk.api_client.api_client import ApiClient, Endpoint
from deepdiff import DeepDiff
from shared.utils.config import get_method, patch_method
from .utils import CollectionSimpleFilterTestBase
@pytest.mark.usefixtures("restore_db_per_class")
class TestGetMemberships:
@ -44,6 +47,27 @@ class TestGetMemberships:
self._test_cannot_see_memberships(user, org_id=1)
class TestMembershipsListFilters(CollectionSimpleFilterTestBase):
field_lookups = {
"user": ["user", "username"],
}
@pytest.fixture(autouse=True)
def setup(self, restore_db_per_class, admin_user, memberships):
self.user = admin_user
self.samples = memberships
def _get_endpoint(self, api_client: ApiClient) -> Endpoint:
return api_client.memberships_api.list_endpoint
@pytest.mark.parametrize(
"field",
("role", "user"),
)
def test_can_use_simple_filter_for_object_list(self, field):
return super().test_can_use_simple_filter_for_object_list(field)
@pytest.mark.usefixtures("restore_db_per_function")
class TestPatchMemberships:
_ORG = 2

@ -1,5 +1,5 @@
# Copyright (C) 2021-2022 Intel Corporation
# Copyright (C) 2022 CVAT.ai Corporation
# Copyright (C) 2022-2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
@ -7,10 +7,13 @@ from copy import deepcopy
from http import HTTPStatus
import pytest
from cvat_sdk.api_client.api_client import ApiClient, Endpoint
from deepdiff import DeepDiff
from shared.utils.config import delete_method, get_method, options_method, patch_method
from .utils import CollectionSimpleFilterTestBase
class TestMetadataOrganizations:
_ORG = 2
@ -76,6 +79,27 @@ class TestGetOrganizations:
assert response.status_code == HTTPStatus.NOT_FOUND
class TestOrganizationsListFilters(CollectionSimpleFilterTestBase):
field_lookups = {
"owner": ["owner", "username"],
}
@pytest.fixture(autouse=True)
def setup(self, restore_db_per_class, admin_user, organizations):
self.user = admin_user
self.samples = organizations
def _get_endpoint(self, api_client: ApiClient) -> Endpoint:
return api_client.organizations_api.list_endpoint
@pytest.mark.parametrize(
"field",
("name", "owner", "slug"),
)
def test_can_use_simple_filter_for_object_list(self, field):
return super().test_can_use_simple_filter_for_object_list(field)
@pytest.mark.usefixtures("restore_db_per_function")
class TestPatchOrganizations:
_ORG = 2

@ -1,5 +1,5 @@
# Copyright (C) 2022 Intel Corporation
# Copyright (C) 2022 CVAT.ai Corporation
# Copyright (C) 2022-2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
@ -16,12 +16,13 @@ from typing import Dict, Optional
import pytest
from cvat_sdk.api_client import ApiClient, Configuration, models
from cvat_sdk.api_client.api_client import Endpoint
from deepdiff import DeepDiff
from PIL import Image
from shared.utils.config import BASE_URL, USER_PASS, get_method, make_api_client, patch_method
from .utils import export_dataset
from .utils import CollectionSimpleFilterTestBase, export_dataset
@pytest.mark.usefixtures("restore_db_per_class")
@ -132,6 +133,33 @@ class TestGetProjects:
self._test_response_200(user["username"], pid, org_id=user["org"])
class TestProjectsListFilters(CollectionSimpleFilterTestBase):
field_lookups = {
"owner": ["owner", "username"],
"assignee": ["assignee", "username"],
}
@pytest.fixture(autouse=True)
def setup(self, restore_db_per_class, admin_user, projects):
self.user = admin_user
self.samples = projects
def _get_endpoint(self, api_client: ApiClient) -> Endpoint:
return api_client.projects_api.list_endpoint
@pytest.mark.parametrize(
"field",
(
"name",
"owner",
"assignee",
"status",
),
)
def test_can_use_simple_filter_for_object_list(self, field):
return super().test_can_use_simple_filter_for_object_list(field)
class TestGetProjectBackup:
def _test_can_get_project_backup(self, username, pid, **kwargs):
for _ in range(30):
@ -547,12 +575,13 @@ class TestImportExportDatasetProject:
"name": project["name"],
"tasks": [
{
"id": tid,
"name": (task := tasks[tid])["name"],
"id": task["id"],
"name": task["name"],
"size": str(task["size"]),
"mode": task["mode"],
}
for tid in project["tasks"]
for task in tasks
if task["project_id"] == project["id"]
],
}
@ -578,7 +607,7 @@ class TestImportExportDatasetProject:
self._test_import_project(username, project_id, "CVAT 1.1", import_data)
response = get_method(username, f"/projects/{project_id}/tasks")
response = get_method(username, f"/tasks", project_id=project_id)
assert response.status_code == HTTPStatus.OK
tasks = response.json()["results"]
@ -795,9 +824,9 @@ class TestGetProjectPreview:
project_with_assignee["assignee"]["username"], project_with_assignee["id"]
)
def test_project_preview_not_found(self, projects):
def test_project_preview_not_found(self, projects, tasks):
for p in projects:
if p["tasks"]:
if any(t["project_id"] == p["id"] for t in tasks):
continue
if p["owner"] is not None:
project_with_owner = p

@ -1,5 +1,5 @@
# Copyright (C) 2022 Intel Corporation
# Copyright (C) 2022 CVAT.ai Corporation
# Copyright (C) 2022-2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
@ -18,6 +18,7 @@ from time import sleep
import pytest
from cvat_sdk import Client, Config
from cvat_sdk.api_client import apis, models
from cvat_sdk.api_client.api_client import ApiClient, Endpoint
from cvat_sdk.core.helpers import get_paginated_collection
from cvat_sdk.core.proxies.tasks import ResourceType, Task
from deepdiff import DeepDiff
@ -28,7 +29,7 @@ from shared.fixtures.init import get_server_image_tag
from shared.utils.config import BASE_URL, USER_PASS, get_method, make_api_client, patch_method
from shared.utils.helpers import generate_image_files
from .utils import export_dataset
from .utils import CollectionSimpleFilterTestBase, export_dataset
def get_cloud_storage_content(username, cloud_storage_id, manifest):
@ -44,20 +45,13 @@ class TestGetTasks:
def _test_task_list_200(self, user, project_id, data, exclude_paths="", **kwargs):
with make_api_client(user) as api_client:
results = get_paginated_collection(
api_client.projects_api.list_tasks_endpoint,
api_client.tasks_api.list_endpoint,
return_json=True,
id=project_id,
project_id=str(project_id),
**kwargs,
)
assert DeepDiff(data, results, ignore_order=True, exclude_paths=exclude_paths) == {}
def _test_task_list_403(self, user, project_id, **kwargs):
with make_api_client(user) as api_client:
(_, response) = api_client.projects_api.list_tasks(
project_id, **kwargs, _parse_response=False, _check_status=False
)
assert response.status == HTTPStatus.FORBIDDEN
def _test_users_to_see_task_list(
self, project_id, tasks, users, is_staff, is_allow, is_project_staff, **kwargs
):
@ -68,10 +62,12 @@ class TestGetTasks:
assert len(users)
for user in users:
if is_allow:
self._test_task_list_200(user["username"], project_id, tasks, **kwargs)
else:
self._test_task_list_403(user["username"], project_id, **kwargs)
if not is_allow:
# Users outside the project or org must not be able to tell whether it exists,
# so the list request succeeds and simply returns an empty collection.
tasks = []
self._test_task_list_200(user["username"], project_id, tasks, **kwargs)
def _test_assigned_users_to_see_task_data(self, tasks, users, is_task_staff, **kwargs):
for task in tasks:
@ -154,6 +150,39 @@ class TestGetTasks:
self._test_assigned_users_to_see_task_data(tasks, users, is_task_staff, org=org["slug"])
class TestListTasksFilters(CollectionSimpleFilterTestBase):
field_lookups = {
"owner": ["owner", "username"],
"assignee": ["assignee", "username"],
"tracker_link": ["bug_tracker"],
}
@pytest.fixture(autouse=True)
def setup(self, restore_db_per_class, admin_user, tasks):
self.user = admin_user
self.samples = tasks
def _get_endpoint(self, api_client: ApiClient) -> Endpoint:
return api_client.tasks_api.list_endpoint
@pytest.mark.parametrize(
"field",
(
"name",
"owner",
"status",
"assignee",
"subset",
"mode",
"dimension",
"project_id",
"tracker_link",
),
)
def test_can_use_simple_filter_for_object_list(self, field):
return super().test_can_use_simple_filter_for_object_list(field)
@pytest.mark.usefixtures("restore_db_per_function")
class TestPostTasks:
def _test_create_task_201(self, user, spec, **kwargs):

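As the `_test_task_list_200` hunk above shows, listing a project's tasks moved from `projects_api.list_tasks` to a filtered `tasks_api.list`. A minimal sketch of paginating the filtered collection, with a placeholder project id:

from cvat_sdk.core.helpers import get_paginated_collection
from shared.utils.config import make_api_client

# Collect every task of one project across all result pages.
with make_api_client("admin1") as client:
    tasks = get_paginated_collection(
        client.tasks_api.list_endpoint,
        return_json=True,
        project_id="1",
    )
    print([t["id"] for t in tasks])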
@ -1,5 +1,5 @@
# Copyright (C) 2021-2022 Intel Corporation
# Copyright (C) 2022 CVAT.ai Corporation
# Copyright (C) 2022-2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
@ -8,11 +8,14 @@ import typing
from http import HTTPStatus
import pytest
from cvat_sdk.api_client.api_client import ApiClient, Endpoint
from cvat_sdk.core.helpers import get_paginated_collection
from deepdiff import DeepDiff
from shared.utils.config import make_api_client
from .utils import CollectionSimpleFilterTestBase
@pytest.mark.usefixtures("restore_db_per_class")
class TestGetUsers:
@ -93,3 +96,20 @@ class TestGetUsers:
for member in org_members:
self._test_can_see(member, data, org="org1")
class TestUsersListFilters(CollectionSimpleFilterTestBase):
@pytest.fixture(autouse=True)
def setup(self, restore_db_per_class, admin_user, users):
self.user = admin_user
self.samples = users
def _get_endpoint(self, api_client: ApiClient) -> Endpoint:
return api_client.users_api.list_endpoint
@pytest.mark.parametrize(
"field",
("is_active", "username"),
)
def test_can_use_simple_filter_for_object_list(self, field):
return super().test_can_use_simple_filter_for_object_list(field)

@ -1,4 +1,4 @@
# Copyright (C) 2022 CVAT.ai Corporation
# Copyright (C) 2022-2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
@ -7,10 +7,13 @@ from http import HTTPStatus
from itertools import product
import pytest
from cvat_sdk.api_client.api_client import ApiClient, Endpoint
from deepdiff import DeepDiff
from shared.utils.config import delete_method, get_method, patch_method, post_method
from .utils import CollectionSimpleFilterTestBase
@pytest.mark.usefixtures("restore_db_per_function")
class TestPostWebhooks:
@ -528,6 +531,27 @@ class TestGetWebhooks:
assert DeepDiff(webhook, response.json(), ignore_order=True) == {}
class TestWebhooksListFilters(CollectionSimpleFilterTestBase):
field_lookups = {
"owner": ["owner", "username"],
}
@pytest.fixture(autouse=True)
def setup(self, restore_db_per_class, admin_user, webhooks):
self.user = admin_user
self.samples = webhooks
def _get_endpoint(self, api_client: ApiClient) -> Endpoint:
return api_client.webhooks_api.list_endpoint
@pytest.mark.parametrize(
"field",
("target_url", "owner", "type", "project_id"),
)
def test_can_use_simple_filter_for_object_list(self, field):
return super().test_can_use_simple_filter_for_object_list(field)
@pytest.mark.usefixtures("restore_db_per_class")
class TestGetListWebhooks:
def test_can_get_webhooks_list(self, webhooks):

@ -1,13 +1,18 @@
# Copyright (C) 2022 CVAT.ai Corporation
# Copyright (C) 2022-2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
from abc import ABCMeta, abstractmethod
from http import HTTPStatus
from time import sleep
from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union
from cvat_sdk.api_client.api_client import Endpoint
from cvat_sdk.api_client.api_client import ApiClient, Endpoint
from cvat_sdk.core.helpers import get_paginated_collection
from urllib3 import HTTPResponse
from shared.utils.config import make_api_client
def export_dataset(
endpoint: Endpoint, *, max_retries: int = 20, interval: float = 0.1, **kwargs
@ -24,3 +29,63 @@ def export_dataset(
assert response.status == HTTPStatus.OK
return response
FieldPath = Sequence[str]
class CollectionSimpleFilterTestBase(metaclass=ABCMeta):
# These fields need to be defined in the subclass
user: str
samples: List[Dict[str, Any]]
field_lookups: Optional[Dict[str, FieldPath]] = None
@abstractmethod
def _get_endpoint(self, api_client: ApiClient) -> Endpoint:
...
def _retrieve_collection(self, **kwargs) -> List:
with make_api_client(self.user) as api_client:
return get_paginated_collection(self._get_endpoint(api_client), **kwargs)
@classmethod
def _get_field(cls, d: Dict[str, Any], path: Union[str, FieldPath]) -> Optional[Any]:
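# Walk the nested-key path, returning None if any segment is missing or not a dict.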
assert path
for key in path:
if isinstance(d, dict):
d = d.get(key)
else:
d = None
return d
def _map_field(self, name: str) -> FieldPath:
return (self.field_lookups or {}).get(name, [name])
@classmethod
def _find_valid_field_value(
cls, samples: Iterable[Dict[str, Any]], field_path: FieldPath
) -> Any:
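# Pick the first truthy value; falsy values (None, False, 0, "") are treated as missing.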
value = None
for sample in samples:
value = cls._get_field(sample, field_path)
if value:
break
assert value, f"Failed to find a sample for the '{'.'.join(field_path)}' field"
return value
def _get_field_samples(self, field: str) -> Tuple[Any, List[Dict[str, Any]]]:
field_path = self._map_field(field)
field_value = self._find_valid_field_value(self.samples, field_path)
gt_objects = filter(lambda p: field_value == self._get_field(p, field_path), self.samples)
return field_value, gt_objects
def test_can_use_simple_filter_for_object_list(self, field):
value, gt_objects = self._get_field_samples(field)
received_items = self._retrieve_collection(**{field: str(value)})
assert set(p["id"] for p in gt_objects) == set(p.id for p in received_items)

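To cover a new collection with these checks, a suite subclasses the base, points `_get_endpoint` at the list endpoint, and maps any filter whose lookup path differs from the sample layout. A minimal sketch for a hypothetical `widgets` resource (the fixture and `widgets_api` are placeholders, not real CVAT endpoints):

class TestWidgetsListFilters(CollectionSimpleFilterTestBase):
    # "owner" is filtered by the nested username, as in the suites above.
    field_lookups = {
        "owner": ["owner", "username"],
    }

    @pytest.fixture(autouse=True)
    def setup(self, restore_db_per_class, admin_user, widgets):
        self.user = admin_user
        self.samples = widgets

    def _get_endpoint(self, api_client: ApiClient) -> Endpoint:
        return api_client.widgets_api.list_endpoint

    @pytest.mark.parametrize("field", ("name", "owner"))
    def test_can_use_simple_filter_for_object_list(self, field):
        return super().test_can_use_simple_filter_for_object_list(field)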
@ -91,7 +91,7 @@ class TestIssuesUsecases:
comment = self.client.comments.create(models.CommentWriteRequest(issue.id, message="hi!"))
issue.fetch()
comment_ids = {c.id for c in issue.comments}
comment_ids = {c.id for c in issue.get_comments()}
assert len(comment_ids) == 2
assert comment.id in comment_ids
@ -123,13 +123,14 @@ class TestIssuesUsecases:
message="hello",
)
)
comments = issue.get_comments()
issue.remove()
with pytest.raises(exceptions.NotFoundException):
issue.fetch()
with pytest.raises(exceptions.NotFoundException):
self.client.comments.retrieve(issue.comments[0].id)
self.client.comments.retrieve(comments[0].id)
assert self.stdout.getvalue() == ""

@ -161,7 +161,7 @@ class TestProjectUsecases:
assert self.stdout.getvalue() == ""
def test_can_get_tasks(self, fxt_project_with_shapes: Project):
task_ids = set(fxt_project_with_shapes.tasks)
task_ids = set(t.id for t in fxt_project_with_shapes.get_tasks())
tasks = fxt_project_with_shapes.get_tasks()

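With the inline id lists gone, the high-level SDK proxies expose child collections through getters, as both hunks above exercise. A minimal sketch, assuming an authenticated `cvat_sdk` client and placeholder ids:

# Child collections are now fetched on demand instead of being embedded.
project = client.projects.retrieve(1)
for task in project.get_tasks():
    print(task.id, task.name)

issue = client.issues.retrieve(1)
for comment in issue.get_comments():
    print(comment.message)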
@ -659,6 +659,30 @@
"name": "example.com"
}
},
{
"model": "account.emailaddress",
"pk": 1,
"fields": {
"user": [
"admin1"
],
"email": "admin1@cvat.org",
"verified": true,
"primary": true
}
},
{
"model": "account.emailaddress",
"pk": 2,
"fields": {
"user": [
"admin2"
],
"email": "admin2@cvat.org",
"verified": true,
"primary": true
}
},
{
"model": "organizations.organization",
"pk": 1,
@ -2618,7 +2642,7 @@
"admin1"
],
"assignee": null,
"bug_tracker": "",
"bug_tracker": "https://bugtracker.localhost/task/12345",
"created_date": "2022-09-22T14:22:25.820Z",
"updated_date": "2022-09-23T11:57:02.300Z",
"overlap": 0,
@ -5860,7 +5884,7 @@
"assignee": null,
"created_date": "2022-03-16T11:04:39.444Z",
"updated_date": null,
"resolved": false
"resolved": true
}
},
{
@ -5905,7 +5929,9 @@
"owner": [
"user1"
],
"assignee": null,
"assignee": [
"user1"
],
"created_date": "2022-03-16T12:40:00.764Z",
"updated_date": null,
"resolved": false

@ -5,22 +5,7 @@
"results": [
{
"assignee": null,
"comments": [
{
"created_date": "2022-03-16T12:49:29.372000Z",
"id": 6,
"issue": 5,
"message": "Wrong position",
"owner": {
"first_name": "User",
"id": 20,
"last_name": "Sixth",
"url": "http://localhost:8080/api/users/20",
"username": "user6"
},
"updated_date": "2022-03-16T12:49:29.372000Z"
}
],
"comments": "http://localhost:8080/api/comments?issue_id=5",
"created_date": "2022-03-16T12:49:29.369000Z",
"frame": 0,
"id": 5,
@ -42,23 +27,14 @@
"updated_date": null
},
{
"assignee": null,
"comments": [
{
"created_date": "2022-03-16T12:40:00.767000Z",
"id": 5,
"issue": 4,
"message": "Issue with empty frame",
"owner": {
"first_name": "User",
"id": 2,
"last_name": "First",
"url": "http://localhost:8080/api/users/2",
"username": "user1"
},
"updated_date": "2022-03-16T12:40:00.767000Z"
}
],
"assignee": {
"first_name": "User",
"id": 2,
"last_name": "First",
"url": "http://localhost:8080/api/users/2",
"username": "user1"
},
"comments": "http://localhost:8080/api/comments?issue_id=4",
"created_date": "2022-03-16T12:40:00.764000Z",
"frame": 5,
"id": 4,
@ -81,22 +57,7 @@
},
{
"assignee": null,
"comments": [
{
"created_date": "2022-03-16T11:08:18.370000Z",
"id": 4,
"issue": 3,
"message": "Another one issue",
"owner": {
"first_name": "Business",
"id": 11,
"last_name": "Second",
"url": "http://localhost:8080/api/users/11",
"username": "business2"
},
"updated_date": "2022-03-16T11:08:18.370000Z"
}
],
"comments": "http://localhost:8080/api/comments?issue_id=3",
"created_date": "2022-03-16T11:08:18.367000Z",
"frame": 5,
"id": 3,
@ -119,22 +80,7 @@
},
{
"assignee": null,
"comments": [
{
"created_date": "2022-03-16T11:07:22.173000Z",
"id": 3,
"issue": 2,
"message": "Something should be here",
"owner": {
"first_name": "Business",
"id": 11,
"last_name": "Second",
"url": "http://localhost:8080/api/users/11",
"username": "business2"
},
"updated_date": "2022-03-16T11:07:22.173000Z"
}
],
"comments": "http://localhost:8080/api/comments?issue_id=2",
"created_date": "2022-03-16T11:07:22.170000Z",
"frame": 0,
"id": 2,
@ -157,36 +103,7 @@
},
{
"assignee": null,
"comments": [
{
"created_date": "2022-03-16T11:04:39.447000Z",
"id": 1,
"issue": 1,
"message": "Why are we still here?",
"owner": {
"first_name": "User",
"id": 2,
"last_name": "First",
"url": "http://localhost:8080/api/users/2",
"username": "user1"
},
"updated_date": "2022-03-16T11:04:39.447000Z"
},
{
"created_date": "2022-03-16T11:04:49.821000Z",
"id": 2,
"issue": 1,
"message": "Just to suffer?",
"owner": {
"first_name": "User",
"id": 2,
"last_name": "First",
"url": "http://localhost:8080/api/users/2",
"username": "user1"
},
"updated_date": "2022-03-16T11:04:49.821000Z"
}
],
"comments": "http://localhost:8080/api/comments?issue_id=1",
"created_date": "2022-03-16T11:04:39.444000Z",
"frame": 0,
"id": 1,
@ -214,7 +131,7 @@
244.58581235698148,
319.63386727689067
],
"resolved": false,
"resolved": true,
"updated_date": null
}
]

@ -10,6 +10,7 @@
"data_compressed_chunk_type": "imageset",
"dimension": "2d",
"id": 19,
"issues": "http://localhost:8080/api/issues?job_id=19",
"labels": [
{
"attributes": [],
@ -43,11 +44,12 @@
},
{
"assignee": null,
"bug_tracker": "",
"bug_tracker": "https://bugtracker.localhost/task/12345",
"data_chunk_size": 72,
"data_compressed_chunk_type": "imageset",
"dimension": "2d",
"id": 18,
"issues": "http://localhost:8080/api/issues?job_id=18",
"labels": [
{
"attributes": [],
@ -245,6 +247,7 @@
"data_compressed_chunk_type": "imageset",
"dimension": "2d",
"id": 17,
"issues": "http://localhost:8080/api/issues?job_id=17",
"labels": [
{
"attributes": [],
@ -289,6 +292,7 @@
"data_compressed_chunk_type": "imageset",
"dimension": "2d",
"id": 16,
"issues": "http://localhost:8080/api/issues?job_id=16",
"labels": [
{
"attributes": [],
@ -327,6 +331,7 @@
"data_compressed_chunk_type": "imageset",
"dimension": "2d",
"id": 14,
"issues": "http://localhost:8080/api/issues?job_id=14",
"labels": [
{
"attributes": [
@ -378,6 +383,7 @@
"data_compressed_chunk_type": "imageset",
"dimension": "2d",
"id": 13,
"issues": "http://localhost:8080/api/issues?job_id=13",
"labels": [
{
"attributes": [
@ -429,6 +435,7 @@
"data_compressed_chunk_type": "imageset",
"dimension": "2d",
"id": 12,
"issues": "http://localhost:8080/api/issues?job_id=12",
"labels": [
{
"attributes": [
@ -486,6 +493,7 @@
"data_compressed_chunk_type": "imageset",
"dimension": "2d",
"id": 11,
"issues": "http://localhost:8080/api/issues?job_id=11",
"labels": [
{
"attributes": [
@ -543,6 +551,7 @@
"data_compressed_chunk_type": "imageset",
"dimension": "2d",
"id": 10,
"issues": "http://localhost:8080/api/issues?job_id=10",
"labels": [
{
"attributes": [],
@ -581,6 +590,7 @@
"data_compressed_chunk_type": "imageset",
"dimension": "2d",
"id": 9,
"issues": "http://localhost:8080/api/issues?job_id=9",
"labels": [
{
"attributes": [],
@ -619,6 +629,7 @@
"data_compressed_chunk_type": "imageset",
"dimension": "3d",
"id": 8,
"issues": "http://localhost:8080/api/issues?job_id=8",
"labels": [
{
"attributes": [],
@ -654,6 +665,7 @@
"data_compressed_chunk_type": "imageset",
"dimension": "2d",
"id": 7,
"issues": "http://localhost:8080/api/issues?job_id=7",
"labels": [
{
"attributes": [],
@ -689,6 +701,7 @@
"data_compressed_chunk_type": "imageset",
"dimension": "2d",
"id": 2,
"issues": "http://localhost:8080/api/issues?job_id=2",
"labels": [
{
"attributes": [],

@ -50,9 +50,7 @@
"location": "local"
},
"task_subsets": [],
"tasks": [
15
],
"tasks": "http://localhost:8080/api/tasks?project_id=8",
"updated_date": "2022-12-01T12:53:34.917000Z",
"url": "http://localhost:8080/api/projects/8"
},
@ -100,7 +98,7 @@
"location": "local"
},
"task_subsets": [],
"tasks": [],
"tasks": "http://localhost:8080/api/tasks?project_id=7",
"updated_date": "2022-09-28T12:26:29.285000Z",
"url": "http://localhost:8080/api/projects/7"
},
@ -148,7 +146,7 @@
"location": "local"
},
"task_subsets": [],
"tasks": [],
"tasks": "http://localhost:8080/api/tasks?project_id=6",
"updated_date": "2022-09-28T12:25:54.563000Z",
"url": "http://localhost:8080/api/projects/6"
},
@ -358,9 +356,7 @@
"location": "local"
},
"task_subsets": [],
"tasks": [
14
],
"tasks": "http://localhost:8080/api/tasks?project_id=5",
"updated_date": "2022-09-28T12:26:49.493000Z",
"url": "http://localhost:8080/api/projects/5"
},
@ -403,9 +399,7 @@
"status": "annotation",
"target_storage": null,
"task_subsets": [],
"tasks": [
13
],
"tasks": "http://localhost:8080/api/tasks?project_id=4",
"updated_date": "2022-12-05T07:47:01.518000Z",
"url": "http://localhost:8080/api/projects/4"
},
@ -435,7 +429,7 @@
"status": "annotation",
"target_storage": null,
"task_subsets": [],
"tasks": [],
"tasks": "http://localhost:8080/api/tasks?project_id=3",
"updated_date": "2022-03-28T13:06:09.283000Z",
"url": "http://localhost:8080/api/projects/3"
},
@ -494,9 +488,7 @@
"task_subsets": [
"Train"
],
"tasks": [
11
],
"tasks": "http://localhost:8080/api/tasks?project_id=2",
"updated_date": "2022-06-30T08:56:45.601000Z",
"url": "http://localhost:8080/api/projects/2"
},
@ -558,9 +550,7 @@
"status": "annotation",
"target_storage": null,
"task_subsets": [],
"tasks": [
9
],
"tasks": "http://localhost:8080/api/tasks?project_id=1",
"updated_date": "2022-11-03T13:57:25.895000Z",
"url": "http://localhost:8080/api/projects/1"
}

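In the fixture above, `tasks` is now a link to the filtered collection rather than a list of ids; the same data is reachable through the plain filter query used elsewhere in these tests. A minimal sketch, with the project id taken from the asset:

from shared.utils.config import get_method

# Equivalent to following the "tasks" link stored on project 8.
response = get_method("admin1", "/tasks", project_id=8)
assert response.status_code == 200
task_ids = [t["id"] for t in response.json()["results"]]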
@ -14,6 +14,7 @@
"dimension": "2d",
"id": 15,
"image_quality": 70,
"jobs": "http://localhost:8080/api/jobs?task_id=15",
"labels": [
{
"attributes": [],
@ -81,7 +82,7 @@
},
{
"assignee": null,
"bug_tracker": "",
"bug_tracker": "https://bugtracker.localhost/task/12345",
"created_date": "2022-09-22T14:22:25.820000Z",
"data": 13,
"data_chunk_size": 72,
@ -90,6 +91,7 @@
"dimension": "2d",
"id": 14,
"image_quality": 70,
"jobs": "http://localhost:8080/api/jobs?task_id=14",
"labels": [
{
"attributes": [],
@ -325,6 +327,7 @@
"dimension": "2d",
"id": 13,
"image_quality": 70,
"jobs": "http://localhost:8080/api/jobs?task_id=13",
"labels": [
{
"attributes": [],
@ -388,6 +391,7 @@
"created_date": "2022-03-14T13:24:05.852000Z",
"dimension": "2d",
"id": 12,
"jobs": "http://localhost:8080/api/jobs?task_id=12",
"labels": [
{
"attributes": [],
@ -437,6 +441,7 @@
"dimension": "2d",
"id": 11,
"image_quality": 70,
"jobs": "http://localhost:8080/api/jobs?task_id=11",
"labels": [
{
"attributes": [],
@ -525,6 +530,7 @@
"dimension": "2d",
"id": 9,
"image_quality": 70,
"jobs": "http://localhost:8080/api/jobs?task_id=9",
"labels": [
{
"attributes": [
@ -660,6 +666,7 @@
"dimension": "2d",
"id": 8,
"image_quality": 70,
"jobs": "http://localhost:8080/api/jobs?task_id=8",
"labels": [
{
"attributes": [],
@ -740,6 +747,7 @@
"dimension": "2d",
"id": 7,
"image_quality": 70,
"jobs": "http://localhost:8080/api/jobs?task_id=7",
"labels": [
{
"attributes": [],
@ -808,6 +816,7 @@
"dimension": "3d",
"id": 6,
"image_quality": 70,
"jobs": "http://localhost:8080/api/jobs?task_id=6",
"labels": [
{
"attributes": [],
@ -873,6 +882,7 @@
"dimension": "2d",
"id": 5,
"image_quality": 70,
"jobs": "http://localhost:8080/api/jobs?task_id=5",
"labels": [
{
"attributes": [],
@ -944,6 +954,7 @@
"dimension": "2d",
"id": 2,
"image_quality": 70,
"jobs": "http://localhost:8080/api/jobs?task_id=2",
"labels": [
{
"attributes": [],
