Add REST API GET, POST, PATCH tests for cloud storage (#4353)

Co-authored-by: kirill.sizov <kirill.sizov@intel.com>
Maria Khrustaleva committed by GitHub
parent 96af4f18c8
commit 2a05316496

@@ -74,12 +74,15 @@ jobs:
       - name: Running REST API tests
         env:
           API_ABOUT_PAGE: "localhost:8080/api/server/about"
+          # Access key length should be at least 3, and secret key length at least 8 characters
+          MINIO_ACCESS_KEY: "minio_access_key"
+          MINIO_SECRET_KEY: "minio_secret_key"
         run: |
-          docker-compose -f docker-compose.yml -f docker-compose.dev.yml -f components/serverless/docker-compose.serverless.yml -f components/analytics/docker-compose.analytics.yml up -d
+          docker-compose -f docker-compose.yml -f docker-compose.dev.yml -f components/serverless/docker-compose.serverless.yml -f components/analytics/docker-compose.analytics.yml -f tests/rest_api/docker-compose.minio.yml up -d
           /bin/bash -c 'while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' ${API_ABOUT_PAGE})" != "401" ]]; do sleep 5; done'
           pip3 install --user -r tests/rest_api/requirements.txt
           pytest tests/rest_api/
-          docker-compose -f docker-compose.yml -f docker-compose.dev.yml -f components/serverless/docker-compose.serverless.yml -f components/analytics/docker-compose.analytics.yml down -v
+          docker-compose -f docker-compose.yml -f docker-compose.dev.yml -f components/serverless/docker-compose.serverless.yml -f components/analytics/docker-compose.analytics.yml -f tests/rest_api/docker-compose.minio.yml down -v
       - name: Running unit tests
         env:
           HOST_COVERAGE_DATA_DIR: ${{ github.workspace }}

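For context, the readiness check in the run step above polls the about page until the server answers 401 (unauthorized), which means CVAT is up and enforcing authentication. A rough Python equivalent of that bash loop, assuming the same API_ABOUT_PAGE address:

```python
# Sketch of the CI readiness wait: poll /api/server/about every 5 seconds
# until the server responds with HTTP 401, i.e. it is up and serving requests.
import time
import urllib.error
import urllib.request

def wait_for_cvat(url='http://localhost:8080/api/server/about'):
    while True:
        try:
            urllib.request.urlopen(url)
        except urllib.error.HTTPError as e:
            if e.code == 401:  # server is alive and requires auth
                return
        except urllib.error.URLError:
            pass  # server is not accepting connections yet
        time.sleep(5)
```
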
@@ -48,6 +48,7 @@ interface CloudStorageForm {
     prefix?: string;
     project_id?: string;
     manifests: string[];
+    endpoint_url?: string;
 }

 const { Dragger } = Upload;
@@ -117,16 +118,20 @@ export default function CreateCloudStorageForm(props: Props): JSX.Element {
         const location = parsedOptions.get('region') || parsedOptions.get('location');
         const prefix = parsedOptions.get('prefix');
         const projectId = parsedOptions.get('project_id');
+        const endpointUrl = parsedOptions.get('endpoint_url');
         if (location) {
             setSelectedRegion(location);
         }
         if (prefix) {
             fieldsValue.prefix = prefix;
         }
         if (projectId) {
             fieldsValue.project_id = projectId;
         }
+        if (endpointUrl) {
+            fieldsValue.endpoint_url = endpointUrl;
+        }
     }

     form.setFieldsValue(fieldsValue);
@@ -222,6 +227,10 @@ export default function CreateCloudStorageForm(props: Props): JSX.Element {
         delete cloudStorageData.project_id;
         specificAttributes.append('project_id', formValues.project_id);
     }
+    if (formValues.endpoint_url) {
+        delete cloudStorageData.endpoint_url;
+        specificAttributes.append('endpoint_url', formValues.endpoint_url);
+    }

     cloudStorageData.specific_attributes = specificAttributes.toString();
@@ -489,6 +498,14 @@ export default function CreateCloudStorageForm(props: Props): JSX.Element {
             </Select>
         </Form.Item>
         {credentialsBlok()}
+        <Form.Item
+            label='Endpoint URL'
+            help='You can specify an endpoint for your storage when using an AWS S3-compatible API'
+            name='endpoint_url'
+            {...internalCommonProps}
+        >
+            <Input />
+        </Form.Item>
         <S3Region
             selectedRegion={selectedRegion}
             onSelectRegion={onSelectRegion}

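The form code above packs the endpoint into the `specific_attributes` query string via `URLSearchParams`, so the URL is percent-encoded on the way to the server. A small Python sketch of the same encoding, which matches the `endpoint_url=http%3A%2F%2Fminio%3A9000` values in the test fixtures below:

```python
# How 'http://minio:9000' becomes the specific_attributes string stored
# by the server (urlencode percent-encodes ':' and '/').
from urllib.parse import urlencode

specific_attributes = urlencode({'endpoint_url': 'http://minio:9000'})
print(specific_attributes)  # endpoint_url=http%3A%2F%2Fminio%3A9000
```
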
@@ -99,7 +99,7 @@ class _CloudStorage(ABC):
     def content(self):
         return list(map(lambda x: x['name'] , self._files))

-def get_cloud_storage_instance(cloud_provider, resource, credentials, specific_attributes=None):
+def get_cloud_storage_instance(cloud_provider, resource, credentials, specific_attributes=None, endpoint=None):
     instance = None
     if cloud_provider == CloudProviderChoice.AWS_S3:
         instance = AWS_S3(
@@ -107,7 +107,8 @@ def get_cloud_storage_instance(cloud_provider, resource, credentials, specific_a
             access_key_id=credentials.key,
             secret_key=credentials.secret_key,
             session_token=credentials.session_token,
-            region=specific_attributes.get('region', 'us-east-2')
+            region=specific_attributes.get('region'),
+            endpoint_url=specific_attributes.get('endpoint_url'),
         )
     elif cloud_provider == CloudProviderChoice.AZURE_CONTAINER:
         instance = AzureBlobContainer(
@@ -137,7 +138,8 @@ class AWS_S3(_CloudStorage):
                  region,
                  access_key_id=None,
                  secret_key=None,
-                 session_token=None):
+                 session_token=None,
+                 endpoint_url=None):
         super().__init__()
         if all([access_key_id, secret_key, session_token]):
             self._s3 = boto3.resource(
@@ -145,20 +147,22 @@ class AWS_S3(_CloudStorage):
                 aws_access_key_id=access_key_id,
                 aws_secret_access_key=secret_key,
                 aws_session_token=session_token,
-                region_name=region
+                region_name=region,
+                endpoint_url=endpoint_url
             )
         elif access_key_id and secret_key:
             self._s3 = boto3.resource(
                 's3',
                 aws_access_key_id=access_key_id,
                 aws_secret_access_key=secret_key,
-                region_name=region
+                region_name=region,
+                endpoint_url=endpoint_url
             )
         elif any([access_key_id, secret_key, session_token]):
             raise Exception('Insufficient data for authorization')
         # anonymous access
         if not any([access_key_id, secret_key, session_token]):
-            self._s3 = boto3.resource('s3', region_name=region)
+            self._s3 = boto3.resource('s3', region_name=region, endpoint_url=endpoint_url)
             self._s3.meta.client.meta.events.register('choose-signer.s3.*', disable_signing)
         self._client_s3 = self._s3.meta.client
         self._bucket = self._s3.Bucket(bucket)

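The change above works because boto3 accepts a custom `endpoint_url` at resource creation; when it is `None`, boto3 falls back to the real AWS endpoint, so existing storages are unaffected. A minimal sketch, assuming the MinIO container from this PR is running locally with the keys from the CI workflow above:

```python
# Minimal sketch: use the new endpoint_url path to talk to MinIO instead
# of AWS. Assumes the docker-compose.minio.yml services below are up and
# port 9000 is published on localhost.
import boto3

s3 = boto3.resource(
    's3',
    aws_access_key_id='minio_access_key',
    aws_secret_access_key='minio_secret_key',
    endpoint_url='http://localhost:9000',  # route all S3 calls to MinIO
)
# The mc service below seeds a 'public' bucket with manifest files
for obj in s3.Bucket('public').objects.all():
    print(obj.key)
```
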
@@ -0,0 +1,18 @@
# Generated by Django 3.2.12 on 2022-03-14 10:51

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('engine', '0051_auto_20220220_1824'),
    ]

    operations = [
        migrations.AlterField(
            model_name='cloudstorage',
            name='specific_attributes',
            field=models.CharField(blank=True, max_length=1024),
        ),
    ]

@@ -673,7 +673,7 @@ class CloudStorage(models.Model):
     updated_date = models.DateTimeField(auto_now=True)
     credentials = models.CharField(max_length=500)
     credentials_type = models.CharField(max_length=29, choices=CredentialsTypeChoice.choices())#auth_type
-    specific_attributes = models.CharField(max_length=128, blank=True)
+    specific_attributes = models.CharField(max_length=1024, blank=True)
     description = models.TextField(blank=True)
     organization = models.ForeignKey(Organization, null=True, default=None,
         blank=True, on_delete=models.SET_NULL, related_name="cloudstorages")

@@ -972,6 +972,22 @@ class CloudStorageWriteSerializer(serializers.ModelSerializer):
                 raise serializers.ValidationError('Account name for Azure container was not specified')
         return attrs

+    @staticmethod
+    def _manifests_validation(storage, manifests):
+        # check availability of the manifest files
+        for manifest in manifests:
+            file_status = storage.get_file_status(manifest)
+            if file_status == Status.NOT_FOUND:
+                raise serializers.ValidationError({
+                    'manifests': "The '{}' file does not exist on '{}' cloud storage" \
+                        .format(manifest, storage.name)
+                })
+            elif file_status == Status.FORBIDDEN:
+                raise serializers.ValidationError({
+                    'manifests': "The '{}' file is not available on '{}' cloud storage. Access denied" \
+                        .format(manifest, storage.name)
+                })
+
     def create(self, validated_data):
         provider_type = validated_data.get('provider_type')
         should_be_created = validated_data.pop('should_be_created', None)
@@ -1008,20 +1024,8 @@ class CloudStorageWriteSerializer(serializers.ModelSerializer):
         storage_status = storage.get_status()
         if storage_status == Status.AVAILABLE:
-            manifests = validated_data.pop('manifests')
-            # check manifest files availability
-            for manifest in manifests:
-                file_status = storage.get_file_status(manifest.get('filename'))
-                if file_status == Status.NOT_FOUND:
-                    raise serializers.ValidationError({
-                        'manifests': "The '{}' file does not exist on '{}' cloud storage" \
-                            .format(manifest.get('filename'), storage.name)
-                    })
-                elif file_status == Status.FORBIDDEN:
-                    raise serializers.ValidationError({
-                        'manifests': "The '{}' file does not available on '{}' cloud storage. Access denied" \
-                            .format(manifest.get('filename'), storage.name)
-                    })
+            manifests = [m.get('filename') for m in validated_data.pop('manifests')]
+            self._manifests_validation(storage, manifests)

         db_storage = models.CloudStorage.objects.create(
             credentials=credentials.convert_to_db(),
@@ -1029,7 +1033,7 @@ class CloudStorageWriteSerializer(serializers.ModelSerializer):
         )
         db_storage.save()

-        manifest_file_instances = [models.Manifest(**manifest, cloud_storage=db_storage) for manifest in manifests]
+        manifest_file_instances = [models.Manifest(filename=manifest, cloud_storage=db_storage) for manifest in manifests]
         models.Manifest.objects.bulk_create(manifest_file_instances)
         cloud_storage_path = db_storage.get_storage_dirname()
@@ -1105,18 +1109,7 @@ class CloudStorageWriteSerializer(serializers.ModelSerializer):
             instance.manifests.filter(filename__in=delta_to_delete).delete()
         if delta_to_create:
             # check manifest files existing
-            for manifest in delta_to_create:
-                file_status = storage.get_file_status(manifest)
-                if file_status == Status.NOT_FOUND:
-                    raise serializers.ValidationError({
-                        'manifests': "The '{}' file does not exist on '{}' cloud storage"
-                            .format(manifest, storage.name)
-                    })
-                elif file_status == Status.FORBIDDEN:
-                    raise serializers.ValidationError({
-                        'manifests': "The '{}' file does not available on '{}' cloud storage. Access denied" \
-                            .format(manifest.get('filename'), storage.name)
-                    })
+            self._manifests_validation(storage, delta_to_create)
             manifest_instances = [models.Manifest(filename=f, cloud_storage=instance) for f in delta_to_create]
             models.Manifest.objects.bulk_create(manifest_instances)
         if temporary_file:

@@ -11,6 +11,8 @@ import sys
 import traceback
 import subprocess
 import os
+import urllib.parse

 from av import VideoFrame
 from PIL import Image
@@ -102,7 +104,7 @@ def md5_hash(frame):

 def parse_specific_attributes(specific_attributes):
     assert isinstance(specific_attributes, str), 'Specific attributes must be a string'
+    parsed_specific_attributes = urllib.parse.parse_qsl(specific_attributes)
     return {
-        item.split('=')[0].strip(): item.split('=')[1].strip()
-        for item in specific_attributes.split('&')
-    } if specific_attributes else dict()
+        key: value for (key, value) in parsed_specific_attributes
+    } if parsed_specific_attributes else dict()

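The switch to `urllib.parse.parse_qsl` matters because the old manual split neither percent-decoded values nor tolerated '=' inside them. With `parse_qsl`, the encoded endpoint stored in `specific_attributes` round-trips back to a usable URL:

```python
# parse_qsl splits on '&'/'=' and also percent-decodes values, unlike the
# old str.split() implementation, which would keep '%3A%2F%2F' literally.
from urllib.parse import parse_qsl

attrs = dict(parse_qsl('endpoint_url=http%3A%2F%2Fminio%3A9000&region=us-east-1'))
print(attrs)  # {'endpoint_url': 'http://minio:9000', 'region': 'us-east-1'}
```
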
@@ -19,14 +19,20 @@ the server calling REST API directly (as it done by users).

 ## How to run?

-Please look at documentation for [pytest](https://docs.pytest.org/en/6.2.x/).
-Generally you have to install requirements and run the following command from
-the root directory of the cloned CVAT repository:
+1. Execute the commands below to run the docker containers:
+   ```console
+   export MINIO_ACCESS_KEY="minio_access_key"
+   export MINIO_SECRET_KEY="minio_secret_key"
+   docker-compose -f docker-compose.yml -f docker-compose.dev.yml -f components/analytics/docker-compose.analytics.yml -f tests/rest_api/docker-compose.minio.yml up -d --build
+   ```
+1. After that, please look at the documentation for [pytest](https://docs.pytest.org/en/6.2.x/).
+   Generally, you have to install the requirements and run the following command from
+   the root directory of the cloned CVAT repository:

 ```console
 pip3 install --user -r tests/rest_api/requirements.txt
 pytest tests/rest_api/
 ```

 ## How to upgrade testing assets?
@@ -151,7 +157,7 @@ Assets directory has two parts:
 ```
 1. If your tests was failed due to date field incompatibility and you have
 error message like this:
-```
+   ```
 assert {'values_chan...34.908528Z'}}} == {}
 E Left contains 1 more item:
@@ -182,7 +188,7 @@ error message like this:
 ```
 1. If for some reason you need to recreate cvat database, but using `dropdb`
 you have error message:
-```
+   ```
 ERROR: database "cvat" is being accessed by other users
 DETAIL: There are 1 other session(s) using the database.

@@ -0,0 +1,51 @@
{
  "count": 2,
  "next": null,
  "previous": null,
  "results": [
    {
      "created_date": "2022-03-17T07:23:59.305000Z",
      "credentials_type": "KEY_SECRET_KEY_PAIR",
      "description": "",
      "display_name": "Bucket 2",
      "id": 2,
      "manifests": [
        "manifest.jsonl"
      ],
      "organization": 2,
      "owner": {
        "first_name": "Business",
        "id": 11,
        "last_name": "Second",
        "url": "http://localhost:8080/api/users/11",
        "username": "business2"
      },
      "provider_type": "AWS_S3_BUCKET",
      "resource": "private",
      "specific_attributes": "endpoint_url=http%3A%2F%2Fminio%3A9000",
      "updated_date": "2022-03-17T07:23:59.309000Z"
    },
    {
      "created_date": "2022-03-17T07:22:49.519000Z",
      "credentials_type": "ANONYMOUS_ACCESS",
      "description": "",
      "display_name": "Bucket 1",
      "id": 1,
      "manifests": [
        "manifest.jsonl"
      ],
      "organization": null,
      "owner": {
        "first_name": "User",
        "id": 2,
        "last_name": "First",
        "url": "http://localhost:8080/api/users/2",
        "username": "user1"
      },
      "provider_type": "AWS_S3_BUCKET",
      "resource": "public",
      "specific_attributes": "endpoint_url=http%3A%2F%2Fminio%3A9000",
      "updated_date": "2022-03-17T07:22:49.529000Z"
    }
  ]
}

@@ -1433,7 +1433,7 @@
     "pk": 2,
     "fields": {
       "password": "pbkdf2_sha256$260000$Pf2xYWXBedoAJ504jyDD8e$8sJ244Ai0xhZrUTelapPNHlEg7CV0cCUaxbcxZtfaug=",
-      "last_login": "2022-03-05T08:52:22.036Z",
+      "last_login": "2022-03-17T07:22:09.327Z",
       "is_superuser": false,
       "username": "user1",
       "first_name": "User",
@@ -1613,7 +1613,7 @@
     "pk": 11,
     "fields": {
       "password": "pbkdf2_sha256$260000$Zw76ANIvIsDngZGsTv2G8O$piTVoqHrpTskW8rI1FBT9rzM2dcpjhrcOfI3pDgtjbo=",
-      "last_login": "2022-02-21T10:29:16.518Z",
+      "last_login": "2022-03-17T07:22:55.930Z",
       "is_superuser": false,
       "username": "business2",
       "first_name": "Business",
@@ -2200,6 +2200,14 @@
       "expire_date": "2022-03-02T06:24:53.914Z"
     }
   },
+  {
+    "model": "sessions.session",
+    "pk": "dpaw6pntyqwr5l6qjv6zq3yoajp301be",
+    "fields": {
+      "session_data": ".eJxVjEsOwjAMBe-SNYrq_BxYsucMlWO7tIAaqWlXiLujSl3A9s3Me5uetnXst6ZLP4m5GABz-h0L8VPnnciD5nu1XOd1mYrdFXvQZm9V9HU93L-Dkdq41550gJRLCCmJyoBdJmKMmaEEjpwDqmh0wuhc8Q68cmJA7M4OMJjPFyYSODI:1nUkTH:BLJ2UbMcN1AmDEbuOLS5T_O-vbrAzEW_K8oRkQZYt4M",
+      "expire_date": "2022-03-31T07:22:55.934Z"
+    }
+  },
   {
     "model": "sessions.session",
     "pk": "gcz795933839j3g0t3rjgmikzkzlwse3",
@@ -2288,6 +2296,14 @@
       "created": "2022-03-05T10:31:48.838Z"
     }
   },
+  {
+    "model": "authtoken.token",
+    "pk": "53da3ff9e514d84b56b5170059ff0f595c34157b",
+    "fields": {
+      "user": 11,
+      "created": "2022-03-17T07:22:55.921Z"
+    }
+  },
   {
     "model": "sites.site",
     "pk": 1,
@@ -5629,6 +5645,56 @@
       "rating": 0.0
     }
   },
+  {
+    "model": "engine.manifest",
+    "pk": 1,
+    "fields": {
+      "filename": "manifest.jsonl",
+      "cloud_storage": 1
+    }
+  },
+  {
+    "model": "engine.manifest",
+    "pk": 2,
+    "fields": {
+      "filename": "manifest.jsonl",
+      "cloud_storage": 2
+    }
+  },
+  {
+    "model": "engine.cloudstorage",
+    "pk": 1,
+    "fields": {
+      "provider_type": "AWS_S3_BUCKET",
+      "resource": "public",
+      "display_name": "Bucket 1",
+      "owner": 2,
+      "created_date": "2022-03-17T07:22:49.519Z",
+      "updated_date": "2022-03-17T07:22:49.529Z",
+      "credentials": "",
+      "credentials_type": "ANONYMOUS_ACCESS",
+      "specific_attributes": "endpoint_url=http%3A%2F%2Fminio%3A9000",
+      "description": "",
+      "organization": null
+    }
+  },
+  {
+    "model": "engine.cloudstorage",
+    "pk": 2,
+    "fields": {
+      "provider_type": "AWS_S3_BUCKET",
+      "resource": "private",
+      "display_name": "Bucket 2",
+      "owner": 11,
+      "created_date": "2022-03-17T07:23:59.305Z",
+      "updated_date": "2022-03-17T07:23:59.309Z",
+      "credentials": "minio_access_key minio_secret_key",
+      "credentials_type": "KEY_SECRET_KEY_PAIR",
+      "specific_attributes": "endpoint_url=http%3A%2F%2Fminio%3A9000",
+      "description": "",
+      "organization": 2
+    }
+  },
   {
     "model": "engine.issue",
     "pk": 1,

@@ -150,7 +150,7 @@
     "is_active": true,
     "is_staff": false,
     "is_superuser": false,
-    "last_login": "2022-02-21T10:29:16.518000Z",
+    "last_login": "2022-03-17T07:22:55.930000Z",
     "last_name": "Second",
     "url": "http://localhost:8080/api/users/11",
     "username": "business2"
@@ -294,7 +294,7 @@
     "is_active": true,
     "is_staff": false,
     "is_superuser": false,
-    "last_login": "2022-03-05T08:52:22.036000Z",
+    "last_login": "2022-03-17T07:22:09.327000Z",
     "last_name": "First",
     "url": "http://localhost:8080/api/users/2",
     "username": "user1"

@@ -103,6 +103,11 @@ def annotations():
     with open(osp.join(ASSETS_DIR, 'annotations.json')) as f:
         return json.load(f)

+@pytest.fixture(scope='module')
+def cloud_storages():
+    with open(osp.join(ASSETS_DIR, 'cloudstorages.json')) as f:
+        return Container(json.load(f)['results'])
+
 @pytest.fixture(scope='module')
 def issues():
     with open(osp.join(ASSETS_DIR, 'issues.json')) as f:

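A hypothetical usage sketch of the new fixture: `Container` (a helper assumed to allow lookup by id) lets tests pull the matching asset record directly, as the cloud storage tests further below do. The expected values come from cloudstorages.json above:

```python
# Hypothetical test: cloud_storages is assumed indexable by storage id,
# mirroring how the new REST API tests use it.
def test_storage_assets_match(cloud_storages):
    assert cloud_storages[1]['resource'] == 'public'
    assert cloud_storages[2]['resource'] == 'private'
```
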
@@ -0,0 +1,58 @@
version: '3.3'

services:
  minio:
    image: quay.io/minio/minio
    hostname: minio
    restart: always
    command: server /data --console-address ":9001"
    expose:
      - "9000"
      - "9001"
    ports:
      - 9000:9000
      - 9001:9001
    environment:
      MINIO_ROOT_USER: ${MINIO_ACCESS_KEY}
      MINIO_ROOT_PASSWORD: ${MINIO_SECRET_KEY}
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 20s
      retries: 3
    networks:
      cvat:
        aliases:
          - minio

  mc:
    image: minio/mc
    depends_on:
      - minio
    environment:
      MC_PATH: "/usr/bin/mc"
      MINIO_HOST: "http://minio:9000"
      MINIO_ACCESS_KEY:
      MINIO_SECRET_KEY:
      MINIO_ALIAS: "local_minio"
      PRIVATE_BUCKET: "private"
      PUBLIC_BUCKET: "public"
      TEST_BUCKET: "test"
    volumes:
      - ./tests/cypress/integration/actions_tasks/assets/case_65_manifest/:/storage
    networks:
      - cvat
    entrypoint: >
      /bin/sh -c "
      $${MC_PATH} config host add --quiet --api s3v4 $${MINIO_ALIAS} $${MINIO_HOST} $${MINIO_ACCESS_KEY} $${MINIO_SECRET_KEY};
      $${MC_PATH} mb $${MINIO_ALIAS}/$${PRIVATE_BUCKET} $${MINIO_ALIAS}/$${PUBLIC_BUCKET} $${MINIO_ALIAS}/$${TEST_BUCKET};
      for BUCKET in $${MINIO_ALIAS}/$${PRIVATE_BUCKET} $${MINIO_ALIAS}/$${PUBLIC_BUCKET} $${MINIO_ALIAS}/$${TEST_BUCKET};
      do
      $${MC_PATH} cp --recursive /storage/ $${BUCKET};
      for i in 1 2;
      do
      $${MC_PATH} cp /storage/manifest.jsonl $${BUCKET}/manifest_$${i}.jsonl;
      done;
      done;
      $${MC_PATH} policy set public $${MINIO_ALIAS}/$${PUBLIC_BUCKET};
      exit 0;
      "

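The healthcheck above probes MinIO's liveness endpoint; the same probe can be reused from setup code to block until the object storage is ready. A minimal sketch, assuming port 9000 is published on localhost as configured above:

```python
# Readiness probe mirroring the compose healthcheck: MinIO answers 200 on
# /minio/health/live once the server is up.
import urllib.request

with urllib.request.urlopen('http://localhost:9000/minio/health/live') as resp:
    assert resp.status == 200
```
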
@@ -0,0 +1,188 @@
# Copyright (C) 2022 Intel Corporation
#
# SPDX-License-Identifier: MIT

import pytest
from http import HTTPStatus
from deepdiff import DeepDiff

from .utils.config import get_method, patch_method, post_method


class TestGetCloudStorage:
    def _test_can_see(self, user, storage_id, data, **kwargs):
        response = get_method(user, f'cloudstorages/{storage_id}', **kwargs)
        response_data = response.json()
        response_data = response_data.get('results', response_data)

        assert response.status_code == HTTPStatus.OK
        assert DeepDiff(data, response_data, ignore_order=True) == {}

    def _test_cannot_see(self, user, storage_id, **kwargs):
        response = get_method(user, f'cloudstorages/{storage_id}', **kwargs)

        assert response.status_code == HTTPStatus.FORBIDDEN

    @pytest.mark.parametrize('storage_id', [1])
    @pytest.mark.parametrize('group, is_owner, is_allow', [
        ('admin', False, True),
        ('business', False, False),
        ('user', True, True),
    ])
    def test_sandbox_user_get_cloud_storage(self, storage_id, group, is_owner, is_allow, users, cloud_storages):
        org = ''
        cloud_storage = cloud_storages[storage_id]
        username = cloud_storage['owner']['username'] if is_owner else \
            next((u for u in users if group in u['groups'] and u['id'] != cloud_storage['owner']['id']))['username']

        if is_allow:
            self._test_can_see(username, storage_id, cloud_storage, org=org)
        else:
            self._test_cannot_see(username, storage_id, org=org)

    @pytest.mark.parametrize('org_id', [2])
    @pytest.mark.parametrize('storage_id', [2])
    @pytest.mark.parametrize('role, is_owner, is_allow', [
        ('worker', True, True),
        ('supervisor', False, True),
        ('worker', False, False),
    ])
    def test_org_user_get_cloud_storage(self, org_id, storage_id, role, is_owner, is_allow, find_users, cloud_storages):
        cloud_storage = cloud_storages[storage_id]
        username = cloud_storage['owner']['username'] if is_owner else \
            next((u for u in find_users(role=role, org=org_id) if u['id'] != cloud_storage['owner']['id']))['username']

        if is_allow:
            self._test_can_see(username, storage_id, cloud_storage, org_id=org_id)
        else:
            self._test_cannot_see(username, storage_id, org_id=org_id)


class TestPostCloudStorage:
    _SPEC = {
        'provider_type': 'AWS_S3_BUCKET',
        'resource': 'test',
        'display_name': 'Bucket',
        'credentials_type': 'KEY_SECRET_KEY_PAIR',
        'key': 'minio_access_key', 'secret_key': 'minio_secret_key',
        'specific_attributes': 'endpoint_url=http://minio:9000',
        'description': 'Some description',
        'manifests': [
            'manifest.jsonl'
        ],
    }
    _EXCLUDE_PATHS = [
        f"root['{extra_field}']" for extra_field in {
            # unchanged fields
            'created_date', 'id', 'organization', 'owner', 'updated_date',
            # credentials that the server doesn't return
            'key', 'secret_key',
        }]

    def _test_can_create(self, user, spec, **kwargs):
        response = post_method(user, 'cloudstorages', spec, **kwargs)
        response_data = response.json()
        response_data = response_data.get('results', response_data)

        assert response.status_code == HTTPStatus.CREATED
        assert DeepDiff(self._SPEC, response_data, ignore_order=True,
            exclude_paths=self._EXCLUDE_PATHS) == {}

    def _test_cannot_create(self, user, spec, **kwargs):
        response = post_method(user, 'cloudstorages', spec, **kwargs)

        assert response.status_code == HTTPStatus.FORBIDDEN

    @pytest.mark.parametrize('group, is_allow', [
        ('user', True), ('worker', False)
    ])
    def test_sandbox_user_create_cloud_storage(self, group, is_allow, users):
        org = ''
        username = [u for u in users if group in u['groups']][0]['username']

        if is_allow:
            self._test_can_create(username, self._SPEC, org=org)
        else:
            self._test_cannot_create(username, self._SPEC, org=org)

    @pytest.mark.parametrize('org_id', [2])
    @pytest.mark.parametrize('role, is_allow', [
        ('owner', True), ('maintainer', True),
        ('worker', False), ('supervisor', False),
    ])
    def test_org_user_create_cloud_storage(self, org_id, role, is_allow, find_users):
        username = find_users(role=role, org=org_id)[0]['username']

        if is_allow:
            self._test_can_create(username, self._SPEC, org_id=org_id)
        else:
            self._test_cannot_create(username, self._SPEC, org_id=org_id)


class TestPatchCloudStorage:
    _SPEC = {
        'display_name': 'New display name',
        'description': 'New description',
        'manifests': [
            'manifest_1.jsonl',
            'manifest_2.jsonl',
        ],
    }
    _EXCLUDE_PATHS = [
        f"root['{extra_field}']" for extra_field in {
            # unchanged fields
            'created_date', 'credentials_type', 'id', 'organization', 'owner',
            'provider_type', 'resource', 'specific_attributes', 'updated_date',
        }]

    def _test_can_update(self, user, storage_id, spec, **kwargs):
        response = patch_method(user, f'cloudstorages/{storage_id}', spec, **kwargs)
        response_data = response.json()
        response_data = response_data.get('results', response_data)

        assert response.status_code == HTTPStatus.OK
        assert DeepDiff(self._SPEC, response_data, ignore_order=True,
            exclude_paths=self._EXCLUDE_PATHS) == {}

    def _test_cannot_update(self, user, storage_id, spec, **kwargs):
        response = patch_method(user, f'cloudstorages/{storage_id}', spec, **kwargs)

        assert response.status_code == HTTPStatus.FORBIDDEN

    @pytest.mark.parametrize('storage_id', [1])
    @pytest.mark.parametrize('group, is_owner, is_allow', [
        ('admin', False, True),
        ('business', False, False),
        ('worker', True, True),
    ])
    def test_sandbox_user_update_cloud_storage(self, storage_id, group, is_owner, is_allow, users, cloud_storages):
        org = ''
        cloud_storage = cloud_storages[storage_id]
        username = cloud_storage['owner']['username'] if is_owner else \
            next((u for u in users if group in u['groups'] and u['id'] != cloud_storage['owner']['id']))['username']

        if is_allow:
            self._test_can_update(username, storage_id, self._SPEC, org=org)
        else:
            self._test_cannot_update(username, storage_id, self._SPEC, org=org)

    @pytest.mark.parametrize('org_id', [2])
    @pytest.mark.parametrize('storage_id', [2])
    @pytest.mark.parametrize('role, is_owner, is_allow', [
        ('worker', True, True),
        ('maintainer', False, True),
        ('supervisor', False, False),
    ])
    def test_org_user_update_cloud_storage(self, org_id, storage_id, role, is_owner, is_allow, find_users, cloud_storages):
        cloud_storage = cloud_storages[storage_id]
        username = cloud_storage['owner']['username'] if is_owner else \
            next((u for u in find_users(role=role, org=org_id) if u['id'] != cloud_storage['owner']['id']))['username']

        if is_allow:
            self._test_can_update(username, storage_id, self._SPEC, org_id=org_id)
        else:
            self._test_cannot_update(username, storage_id, self._SPEC, org_id=org_id)

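To run only these tests rather than the whole suite, pytest's `-k` filter can select the three classes by name. A sketch, assuming the file lives under tests/rest_api/ as the README describes:

```python
# Hypothetical invocation: run just the cloud storage REST API tests.
import pytest

pytest.main(['tests/rest_api/', '-k', 'CloudStorage'])
```
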
@@ -4,7 +4,7 @@ import json

 annotations = {}
 for obj in ['user', 'project', 'task', 'job', 'organization', 'membership',
-            'invitation', 'issue']:
+            'invitation', 'cloudstorage', 'issue']:
     response = get_method('admin1', f'{obj}s', page_size='all')
     with open(osp.join(ASSETS_DIR, f'{obj}s.json'), 'w') as f:
         json.dump(response.json(), f, indent=2, sort_keys=True)
