Fix pylint issues (#100)

Branch: main
Author: Maxim Zhiltsov, committed 4 years ago via GitHub
Parent: 57bc0e9c90
Commit: bddd44642d

@@ -70,8 +70,8 @@ class AnnotationIR:
         prev_shape = None
         for shape in track['shapes']:
             if prev_shape and not prev_shape['outside'] and \
                     has_overlap(prev_shape['frame'], shape['frame']):
                 return True
             prev_shape = shape
         if not prev_shape['outside'] and prev_shape['frame'] <= stop:

@@ -1434,7 +1434,8 @@ def import_dm_annotations(dm_dataset: Dataset, instance_data: Union[TaskData, Pr
     if isinstance(instance_data, ProjectData):
         for sub_dataset, task_data in instance_data.split_dataset(dm_dataset):
-            # FIXME: temporary workaround for cvat format, will be removed after migration importer to datumaro
+            # FIXME: temporary workaround for cvat format
+            # will be removed after migration importer to datumaro
             sub_dataset._format = dm_dataset.format
             import_dm_annotations(sub_dataset, task_data)
         return
@@ -1477,6 +1478,12 @@ def import_dm_annotations(dm_dataset: Dataset, instance_data: Union[TaskData, Pr
         try:
             if hasattr(ann, 'label') and ann.label is None:
                 raise CvatImportError("annotation has no label")
+            attributes = [
+                instance_data.Attribute(name=n, value=str(v))
+                for n, v in ann.attributes.items()
+            ]
             if ann.type in shapes:
                 if ann.type == datum_annotation.AnnotationType.cuboid_3d:
                     try:
@@ -1485,44 +1492,50 @@ def import_dm_annotations(dm_dataset: Dataset, instance_data: Union[TaskData, Pr
                         ann.points = ann.points
                         ann.z_order = 0
+                # Use safe casting to bool instead of plain reading
+                # because in some formats return type can be different
+                # from bool / None
+                # https://github.com/openvinotoolkit/datumaro/issues/719
+                occluded = cast(ann.attributes.pop('occluded', None), bool) is True
+                keyframe = cast(ann.attributes.get('keyframe', None), bool) is True
+                outside = cast(ann.attributes.pop('outside', None), bool) is True
                 track_id = ann.attributes.pop('track_id', None)
+                source = ann.attributes.pop('source').lower() \
+                    if ann.attributes.get('source', '').lower() in {'auto', 'manual'} else 'manual'
                 if track_id is None or dm_dataset.format != 'cvat' :
                     instance_data.add_shape(instance_data.LabeledShape(
                         type=shapes[ann.type],
                         frame=frame_number,
                         points=ann.points,
                         label=label_cat.items[ann.label].name,
-                        occluded=ann.attributes.pop('occluded', None) == True,
+                        occluded=occluded,
                         z_order=ann.z_order,
                         group=group_map.get(ann.group, 0),
-                        source=str(ann.attributes.pop('source')).lower() \
-                            if str(ann.attributes.get('source', None)).lower() in {'auto', 'manual'} else 'manual',
-                        attributes=[instance_data.Attribute(name=n, value=str(v))
-                            for n, v in ann.attributes.items()],
+                        source=source,
+                        attributes=attributes,
                     ))
                     continue
-                if ann.attributes.get('keyframe', None) == True or ann.attributes.get('outside', None) == True:
+                if keyframe or outside:
                     track = instance_data.TrackedShape(
                         type=shapes[ann.type],
                         frame=frame_number,
-                        occluded=ann.attributes.pop('occluded', None) == True,
-                        outside=ann.attributes.pop('outside', None) == True,
-                        keyframe=ann.attributes.get('keyframe', None) == True,
+                        occluded=occluded,
+                        outside=outside,
+                        keyframe=keyframe,
                         points=ann.points,
                         z_order=ann.z_order,
-                        source=str(ann.attributes.pop('source')).lower() \
-                            if str(ann.attributes.get('source', None)).lower() in {'auto', 'manual'} else 'manual',
-                        attributes=[instance_data.Attribute(name=n, value=str(v))
-                            for n, v in ann.attributes.items()],
+                        source=source,
+                        attributes=attributes,
                     )
                     if track_id not in tracks:
                         tracks[track_id] = instance_data.Track(
                             label=label_cat.items[ann.label].name,
                             group=group_map.get(ann.group, 0),
-                            source=str(ann.attributes.pop('source')).lower() \
-                                if str(ann.attributes.get('source', None)).lower() in {'auto', 'manual'} else 'manual',
+                            source=source,
                             shapes=[],
                         )
@@ -1534,8 +1547,7 @@ def import_dm_annotations(dm_dataset: Dataset, instance_data: Union[TaskData, Pr
                     label=label_cat.items[ann.label].name,
                     group=group_map.get(ann.group, 0),
                     source='manual',
-                    attributes=[instance_data.Attribute(name=n, value=str(v))
-                        for n, v in ann.attributes.items()],
+                    attributes=attributes,
                 ))
         except Exception as e:
             raise CvatImportError("Image {}: can't import annotation "

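Note on the boolean handling introduced above: the cast() helper itself is not part of this diff, so the following is only a minimal sketch, assuming a signature like cast(value, type_conv, default=None) that returns the default when conversion is not possible; the real helper in the codebase may treat string values from other formats differently.

```python
def cast(value, type_conv, default=None):
    """Hypothetical safe-cast helper: convert value, or return default on failure."""
    if value is None:
        return default
    try:
        if type_conv is bool and isinstance(value, str):
            # bool('false') would be True, so map common string spellings explicitly
            return value.strip().lower() in {'1', 'true', 'yes'}
        return type_conv(value)
    except (TypeError, ValueError):
        return default

# With such a helper, "cast(..., bool) is True" holds only for values that clearly mean "yes":
assert cast('true', bool) is True
assert cast('0', bool) is False
assert cast(None, bool) is None
assert cast(1, bool) is True
```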
@@ -38,4 +38,4 @@ def _import(src_file, instance_data, load_data_callback=None):
         dataset = Dataset.import_from(tmp_dir, 'imagenet', env=dm_env)
         if load_data_callback is not None:
             load_data_callback(dataset, instance_data)
         import_dm_annotations(dataset, instance_data)

@@ -50,7 +50,7 @@ class LabelAttrToAttr(ItemTransform):
     def transform_item(self, item):
         annotations = list(item.annotations)
         attributes = dict(item.attributes)
-        if self._label != None:
+        if self._label is not None:
             labels = [ann for ann in annotations
                 if ann.type == AnnotationType.label \
                 and ann.label == self._label]

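Most of the remaining hunks replace comparisons such as `x != None` or `x == True` with identity checks, which is what pylint's C0121 (singleton-comparison) asks for. A small illustration of why `is (not) None` is safer than `== None`:

```python
class AlwaysEqual:
    """Toy class whose __eq__ always says yes."""
    def __eq__(self, other):
        return True

x = AlwaysEqual()
print(x == None)       # True  -- equality can be overridden and lie
print(x is None)       # False -- identity cannot
print(x is not None)   # True
```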
@@ -32,7 +32,7 @@ def _import_task(dataset, task_data):
                 type='rectangle',
                 label=label_cat.items[ann.label].name,
                 points=ann.points,
-                occluded=ann.attributes.get('occluded') == True,
+                occluded=ann.attributes.get('occluded') is True,
                 z_order=ann.z_order,
                 group=0,
                 frame=frame_number,
@@ -44,7 +44,7 @@ def _import_task(dataset, task_data):
             shape = task_data.TrackedShape(
                 type='rectangle',
                 points=ann.points,
-                occluded=ann.attributes.get('occluded') == True,
+                occluded=ann.attributes.get('occluded') is True,
                 outside=False,
                 keyframe=True,
                 z_order=ann.z_order,

@@ -52,7 +52,7 @@ def _import_task(dataset, task_data):
             shape = task_data.TrackedShape(
                 type='polygon',
                 points=ann.points,
-                occluded=ann.attributes.get('occluded') == True,
+                occluded=ann.attributes.get('occluded') is True,
                 outside=False,
                 keyframe=True,
                 z_order=ann.z_order,

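In these importer hunks `== True` becomes `is True`. The two are not always interchangeable: `is True` matches only the bool singleton, so integer or string attribute values no longer count as occluded, which is exactly why the CVAT importer hunk further above switched to an explicit cast instead of a plain read. A quick comparison:

```python
for value in (True, 1, 'true', None):
    print(repr(value), value == True, value is True)
# True    True   True
# 1       True   False
# 'true'  False  False
# None    False  False
```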
@@ -19,7 +19,6 @@ from .registry import exporter, importer
 @exporter(name='Kitti Raw Format', ext='ZIP', version='1.0', dimension=DimensionType.DIM_3D)
 def _export_images(dst_file, task_data, save_images=False):
     dataset = Dataset.from_extractors(GetCVATDataExtractor(
         task_data, include_images=save_images, format_type="kitti_raw", dimension=DimensionType.DIM_3D), env=dm_env)
@@ -33,14 +32,10 @@ def _export_images(dst_file, task_data, save_images=False):
 def _import(src_file, instance_data, load_data_callback=None):
     with TemporaryDirectory() as tmp_dir:
         if zipfile.is_zipfile(src_file):
             zipfile.ZipFile(src_file).extractall(tmp_dir)
-            dataset = Dataset.import_from(
-                tmp_dir, 'kitti_raw', env=dm_env)
+            dataset = Dataset.import_from(tmp_dir, 'kitti_raw', env=dm_env)
         else:
-            dataset = Dataset.import_from(
-                src_file.name, 'kitti_raw', env=dm_env)
+            dataset = Dataset.import_from(src_file.name, 'kitti_raw', env=dm_env)
         if load_data_callback is not None:
             load_data_callback(dataset, instance_data)
         import_dm_annotations(dataset, instance_data)

@@ -34,4 +34,4 @@ def bulk_create(db_model, objects, flt_param):
         else:
             return db_model.objects.bulk_create(objects)
     return []

@@ -463,7 +463,9 @@ def update_states():
         try:
             get(db_git.task_id, db_user)
         except Exception:
-            slogger.glob("Exception occurred during a status updating for db_git with tid: {}".format(db_git.task_id))
+            slogger.glob.exception("Exception occurred during a status "
+                "updating for db_git with tid: {}".format(db_git.task_id),
+                exc_info=True)

 @transaction.atomic
 def _onsave(jid, data, action):

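The git status hunk switches to slogger.glob.exception(...). Assuming slogger.glob behaves like a standard logging.Logger, .exception() already attaches the active traceback, so the explicit exc_info=True is redundant but harmless. A minimal sketch of the pattern:

```python
import logging

logger = logging.getLogger('git')

try:
    raise RuntimeError('repository unreachable')
except Exception:
    # logging.Logger.exception() logs at ERROR level and appends the current
    # traceback; it is intended to be called from inside an except block.
    logger.exception('Exception occurred during a status updating for db_git with tid: %s', 42)
```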
@@ -1,3 +1,3 @@
 # Copyright (C) 2018 Intel Corporation
 #
 # SPDX-License-Identifier: MIT

@@ -1,3 +1,3 @@
 # Copyright (C) 2018 Intel Corporation
 #
 # SPDX-License-Identifier: MIT

@@ -494,7 +494,7 @@ class FragmentMediaReader:
             if idx < self._start_chunk_frame_number:
                 continue
             elif idx < self._end_chunk_frame_number and \
-                    not ((idx - self._start_chunk_frame_number) % self._step):
+                    not (idx - self._start_chunk_frame_number) % self._step:
                 frame_range.append(idx)
             elif (idx - self._start_chunk_frame_number) % self._step:
                 continue
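The FragmentMediaReader hunk only drops a redundant pair of parentheses; the logic is unchanged: a frame is kept when its offset from the chunk start is an exact multiple of self._step. For example:

```python
start_chunk_frame_number, end_chunk_frame_number, step = 10, 20, 3

frame_range = [
    idx for idx in range(start_chunk_frame_number, end_chunk_frame_number)
    # 'not offset % step' keeps frames whose offset is a multiple of step
    if not (idx - start_chunk_frame_number) % step
]
print(frame_range)  # [10, 13, 16, 19]
```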
@@ -654,20 +654,20 @@ class Mpeg4ChunkWriter(IChunkWriter):
     }

     def _create_av_container(self, path, w, h, rate, options, f='mp4'):
         # x264 requires width and height must be divisible by 2 for yuv420p
         if h % 2:
             h += 1
         if w % 2:
             w += 1

         container = av.open(path, 'w',format=f)
         video_stream = container.add_stream(self._codec_name, rate=rate)
         video_stream.pix_fmt = "yuv420p"
         video_stream.width = w
         video_stream.height = h
         video_stream.options = options

         return container, video_stream

     def save_as_chunk(self, images, chunk_path):
         if not images:
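_create_av_container pads odd frame sizes because yuv420p subsamples chroma by 2 in both directions, so x264 rejects odd widths or heights. The rounding amounts to:

```python
def round_up_to_even(w, h):
    # x264 with yuv420p needs even dimensions; pad by one pixel where necessary
    return w + (w % 2), h + (h % 2)

print(round_up_to_even(1919, 1079))  # (1920, 1080)
print(round_up_to_even(1920, 1080))  # (1920, 1080)
```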
@@ -779,7 +779,7 @@ def _is_zip(path):
 # 'mode': 'annotation' or 'interpolation' - mode of task that should be created.
 # 'unique': True or False - describes how the type can be combined with other.
 #   True - only one item of this type and no other is allowed
-#   False - this media types can be combined with other which have unique == False
+#   False - this media types can be combined with other which have unique is False
 MEDIA_TYPES = {
     'image': {

@@ -14,4 +14,4 @@ class TusUploadParser(BaseParser):
         # exception because a parser for the request with the non-standard
         # content media type isn't defined.
         # https://github.com/imtapps/django-rest-framework/blob/master/docs/api-guide/parsers.md
         return {}

@@ -5,4 +5,4 @@
 from rest_framework.renderers import JSONRenderer

 class CVATAPIRenderer(JSONRenderer):
     media_type = 'application/vnd.cvat+json'

@@ -2,4 +2,4 @@
 #
 # SPDX-License-Identifier: MIT

-from .schema import *
+from .schema import * # force import of extensions

@@ -23,4 +23,4 @@ class CustomGroupAdmin(GroupAdmin):
 admin.site.unregister(User)
 admin.site.unregister(Group)
 admin.site.register(User, CustomUserAdmin)
 admin.site.register(Group, CustomGroupAdmin)

@@ -41,4 +41,4 @@ class SignatureAuthenticationScheme(OpenApiAuthenticationExtension):
             'type': 'apiKey',
             'in': 'query',
             'name': 'sign',
         }

@@ -38,4 +38,4 @@ if settings.IAM_TYPE == 'BASIC':
             name='account_email_verification_sent'),
     ]

 urlpatterns = [path('auth/', include(urlpatterns))]

@@ -32,13 +32,13 @@ def get_context(request):
     org_id = request.GET.get('org_id')
     org_header = request.headers.get('X-Organization')
-    if org_id != None and (org_slug != None or org_header != None):
-        raise BadRequest('You cannot specify "org_id" query parameter with ' +
+    if org_id is not None and (org_slug is not None or org_header is not None):
+        raise BadRequest('You cannot specify "org_id" query parameter with '
             '"org" query parameter or "X-Organization" HTTP header at the same time.')

-    if org_slug != None and org_header != None and org_slug != org_header:
-        raise BadRequest('You cannot specify "org" query parameter and ' +
+    if org_slug is not None and org_header is not None and org_slug != org_header:
+        raise BadRequest('You cannot specify "org" query parameter and '
             '"X-Organization" HTTP header with different values.')

-    org_slug = org_slug if org_slug != None else org_header
+    org_slug = org_slug if org_slug is not None else org_header
     org_filter = None
     if org_slug:

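The get_context hunk keeps the same precedence rules while switching to identity checks. Distilled into a standalone, purely illustrative helper, the rules are:

```python
def resolve_org_slug(org_id, org_slug, org_header):
    """Hypothetical condensation of the organization checks in get_context()."""
    if org_id is not None and (org_slug is not None or org_header is not None):
        raise ValueError('org_id conflicts with org / X-Organization')
    if org_slug is not None and org_header is not None and org_slug != org_header:
        raise ValueError('org and X-Organization disagree')
    # the query parameter wins when only one way of naming the organization is used
    return org_slug if org_slug is not None else org_header

print(resolve_org_slug(None, None, 'acme'))    # 'acme'
print(resolve_org_slug(None, 'acme', 'acme'))  # 'acme'
```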
@@ -382,7 +382,7 @@ class LambdaQueue:
     def fetch_job(self, pk):
         queue = self._get_queue()
         job = queue.fetch_job(pk)
-        if job == None or not job.meta.get("lambda"):
+        if job is None or not job.meta.get("lambda"):
             raise ValidationError("{} lambda job is not found".format(pk),
                 code=status.HTTP_404_NOT_FOUND)

@@ -199,4 +199,4 @@ class InvitationViewSet(viewsets.ModelViewSet):
         if 'accepted' in self.request.query_params:
             serializer.instance.accept()
         else:
             super().perform_update(serializer)

@@ -1,7 +1,7 @@
 from django.apps import apps

 if apps.is_installed('silk'):
-    from silk.profiling.profiler import silk_profile
+    from silk.profiling.profiler import silk_profile # pylint: disable=unused-import
 else:
     from functools import wraps
     def silk_profile(name=None):
@@ -10,4 +10,4 @@ else:
             def wrapped(*args, **kwargs):
                 return f(*args, **kwargs)
             return wrapped
         return profile

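Pieced together from the two hunks above, the fallback used when django-silk is not installed is a no-op decorator, so call sites can keep @silk_profile(...) unconditionally; roughly:

```python
from functools import wraps

def silk_profile(name=None):
    # No-op stand-in for silk.profiling.profiler.silk_profile: it returns the
    # wrapped function unchanged, so profiling decorators stay in place even
    # when the 'silk' Django app is absent.
    def profile(f):
        @wraps(f)
        def wrapped(*args, **kwargs):
            return f(*args, **kwargs)
        return wrapped
    return profile

@silk_profile(name='example')
def work():
    return 42

print(work())  # 42
```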
@@ -213,7 +213,7 @@ def org_staff(memberships):
             return set()
         else:
             return set(m['user']['id'] for m in memberships
-                if m['role'] in ['maintainer', 'owner'] and m['user'] != None
+                if m['role'] in ['maintainer', 'owner'] and m['user'] is not None
                 and m['organization'] == org_id)
     return find
@@ -224,7 +224,7 @@ def is_org_member(memberships):
             return True
         else:
             return user_id in set(m['user']['id'] for m in memberships
-                if m['user'] != None and m['organization'] == org_id)
+                if m['user'] is not None and m['organization'] == org_id)
     return check

 @pytest.fixture(scope='session')
@@ -273,4 +273,4 @@ def filter_tasks_with_shapes(annotations):

 @pytest.fixture(scope='session')
 def tasks_with_shapes(tasks, filter_tasks_with_shapes):
     return filter_tasks_with_shapes(tasks)

@@ -22,7 +22,7 @@ class TestCreateInvitations:
     @staticmethod
     def get_non_member_users(memberships, users):
-        organization_users = set(m['user']['id'] for m in memberships if m['user'] != None)
+        organization_users = set(m['user']['id'] for m in memberships if m['user'] is not None)
         non_member_users = [u for u in users if u['id'] not in organization_users]
         return non_member_users
@@ -30,7 +30,7 @@ class TestCreateInvitations:
     @staticmethod
    def get_member(role, memberships, org_id):
         member = [m['user'] for m in memberships if m['role'] == role and
-            m['organization'] == org_id and m['user'] != None][0]
+            m['organization'] == org_id and m['user'] is not None][0]
         return member

@@ -1,4 +1,4 @@
 # Copyright (C) 2021 Intel Corporation
 #
 # SPDX-License-Identifier: MIT
 from .core import VideoManifestManager, ImageManifestManager, is_manifest

@@ -522,7 +522,7 @@ class VideoManifestManager(_ManifestManager):
     @property
     def data(self):
-        return (self.video_name)
+        return self.video_name

     def get_subset(self, subset_names):
         raise NotImplementedError()

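The VideoManifestManager change is purely cosmetic: parentheses around a single expression do not create a tuple, so return (self.video_name) and return self.video_name are equivalent; only a trailing comma would change the type:

```python
name = 'video.mp4'
print((name))    # 'video.mp4'    -- just a parenthesized string
print((name,))   # ('video.mp4',) -- the comma makes it a tuple
```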
@@ -57,7 +57,7 @@ def main():
         # If the source is a glob expression, we need additional processing
         abs_root = source
-        while abs_root and re.search('[*?\[\]]', abs_root):
+        while abs_root and re.search(r'[*?\[\]]', abs_root):
             abs_root = os.path.split(abs_root)[0]

         related_images = detect_related_images(sources, abs_root)

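The last hunk only adds an r-prefix: inside a normal string, \[ and \] are invalid escape sequences (pylint W1401, and a warning in newer Pythons), while the compiled regex is the same either way. For instance:

```python
import re

glob_expr = 'frames/*.png'
# the raw string avoids anomalous-backslash-in-string warnings; the pattern
# still matches any of the glob metacharacters * ? [ ]
print(bool(re.search(r'[*?\[\]]', glob_expr)))         # True
print(bool(re.search(r'[*?\[\]]', 'frames/img.png')))  # False
```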