Refactoring

main · Maya, 6 years ago · commit 2144c4aadd, parent 43c2c6ea87

@@ -21,14 +21,6 @@ class CacheInteraction:
self.save_chunk(db_data.id, chunk_number, quality, chunk, tag)
return chunk, tag
def get_buff(self, chunk_number, quality, db_data):
chunk, tag = self._cache.get('{}_{}_{}'.format(db_data.id, chunk_number, quality), tag=True)
if not chunk:
chunk, tag = self.prepare_chunk_buff(db_data, quality, chunk_number)
self.save_chunk(db_data.id, chunk_number, quality, chunk, tag)
return chunk
def prepare_chunk_buff(self, db_data, quality, chunk_number):
from cvat.apps.engine.frame_provider import FrameProvider
extractor_classes = {
@@ -37,24 +29,22 @@ class CacheInteraction:
}
image_quality = 100 if extractor_classes[quality] in [Mpeg4ChunkWriter, ZipChunkWriter] else db_data.image_quality
file_extension = 'mp4' if extractor_classes[quality] in [Mpeg4ChunkWriter, Mpeg4CompressedChunkWriter] else 'jpeg'
mime_type = 'video/mp4' if extractor_classes[quality] in [Mpeg4ChunkWriter, Mpeg4CompressedChunkWriter] else 'application/zip'
extractor = extractor_classes[quality](image_quality)
#if 'interpolation' == task_mode:
if os.path.exists(db_data.get_meta_path()):
meta = PrepareInfo(source_path=os.path.join(db_data.get_upload_dirname(), db_data.video.path),
meta_path=db_data.get_meta_path())
frames = []
for frame in meta.decode_needed_frames(chunk_number, db_data):#db_data.chunk_size
for frame in meta.decode_needed_frames(chunk_number, db_data):
frames.append(frame)
buff = extractor.save_as_chunk_to_buff(frames, file_extension)
buff = extractor.save_as_chunk_to_buff(frames)
else:
img_paths = None
with open(db_data.get_dummy_chunk_path(chunk_number), 'r') as dummy_file:
img_paths = [os.path.join(db_data.get_upload_dirname(), line.strip()) for line in dummy_file]
buff = extractor.save_as_chunk_to_buff(img_paths, file_extension)
buff = extractor.save_as_chunk_to_buff(img_paths)
return buff, mime_type
def save_chunk(self, db_data_id, chunk_number, quality, buff, mime_type):

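Note: the two hunks above fold the old buffer/MIME pair of lookups into a single get_buff that returns (chunk, mime_type), with the container format and MIME type derived from the writer class picked for the requested quality. A minimal, self-contained sketch of that selection logic follows; the writer class names come from the diff, while the placeholder class bodies and the pick_chunk_format helper are illustrative only.

# Sketch only: mirrors the image_quality / file_extension / mime_type
# choice made in prepare_chunk_buff; not the CVAT code itself.
from collections import namedtuple

class Mpeg4ChunkWriter: pass
class Mpeg4CompressedChunkWriter: pass
class ZipChunkWriter: pass
class ZipCompressedChunkWriter: pass

ChunkFormat = namedtuple('ChunkFormat', 'image_quality file_extension mime_type')

def pick_chunk_format(writer_cls, db_image_quality):
    is_video = writer_cls in (Mpeg4ChunkWriter, Mpeg4CompressedChunkWriter)
    return ChunkFormat(
        # original-quality writers ignore the task's image quality setting
        image_quality=100 if writer_cls in (Mpeg4ChunkWriter, ZipChunkWriter)
                      else db_image_quality,
        file_extension='mp4' if is_video else 'jpeg',
        mime_type='video/mp4' if is_video else 'application/zip',
    )

# e.g. a compressed image chunk keeps the task quality and is served as a zip
print(pick_chunk_format(ZipCompressedChunkWriter, 70))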
@@ -66,9 +66,8 @@ class FrameProvider:
return self.chunk_reader
class BuffChunkLoader(ChunkLoader):
def __init__(self, reader_class, path_getter, buff_mime_getter, quality, db_data):
def __init__(self, reader_class, path_getter, quality, db_data):
super().__init__(reader_class, path_getter)
self.get_chunk = buff_mime_getter
self.quality = quality
self.db_data = db_data
@@ -76,7 +75,7 @@ class FrameProvider:
if self.chunk_id != chunk_id:
self.chunk_id = chunk_id
self.chunk_reader = RandomAccessIterator(
self.reader_class([self.get_chunk_path(chunk_id, self.quality, self.db_data)]))
self.reader_class([self.get_chunk_path(chunk_id, self.quality, self.db_data)[0]]))
return self.chunk_reader
def __init__(self, db_data):
@@ -93,13 +92,11 @@ class FrameProvider:
self._loaders[self.Quality.COMPRESSED] = self.BuffChunkLoader(
reader_class[db_data.compressed_chunk_type],
cache.get_buff,
cache.get_buff_mime,
self.Quality.COMPRESSED,
self._db_data)
self._loaders[self.Quality.ORIGINAL] = self.BuffChunkLoader(
reader_class[db_data.original_chunk_type],
cache.get_buff,
cache.get_buff_mime,
self.Quality.ORIGINAL,
self._db_data)
@@ -161,7 +158,7 @@ class FrameProvider:
def get_chunk(self, chunk_number, quality=Quality.ORIGINAL):
chunk_number = self._validate_chunk_number(chunk_number)
if self._db_data.storage_method == StorageMethodChoice.CACHE:
return self._loaders[quality].get_chunk(chunk_number, quality, self._db_data)
return self._loaders[quality].get_chunk_path(chunk_number, quality, self._db_data)
return self._loaders[quality].get_chunk_path(chunk_number)
def get_frame(self, frame_number, quality=Quality.ORIGINAL,

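Note: with the separate MIME getter removed, BuffChunkLoader is built with cache.get_buff as its only getter and unpacks the (buff, mime_type) pair itself, passing just the buffer to the media reader; the CACHE branch of FrameProvider.get_chunk likewise switches to get_chunk_path, so cache.get_buff becomes the single entry point for cached chunks. Pieced together from the hunks above, the loader plausibly reads as the sketch below; the stand-in base classes and the load method name are assumptions, the rest is quoted from the diff.

# Reading aid only. ChunkLoader and RandomAccessIterator really live in
# cvat.apps.engine.frame_provider; these stand-ins just make the snippet
# self-contained.
class ChunkLoader:
    def __init__(self, reader_class, path_getter):
        self.chunk_id = None
        self.chunk_reader = None
        self.reader_class = reader_class
        self.get_chunk_path = path_getter

class RandomAccessIterator:
    def __init__(self, iterable):
        self.iterable = iterable

class BuffChunkLoader(ChunkLoader):
    def __init__(self, reader_class, path_getter, quality, db_data):
        super().__init__(reader_class, path_getter)
        self.quality = quality
        self.db_data = db_data

    def load(self, chunk_id):  # method name assumed; body taken from the diff
        if self.chunk_id != chunk_id:
            self.chunk_id = chunk_id
            # get_chunk_path is cache.get_buff here, so it returns a
            # (buff, mime_type) pair; only the buffer ([0]) reaches the reader
            self.chunk_reader = RandomAccessIterator(
                self.reader_class(
                    [self.get_chunk_path(chunk_id, self.quality, self.db_data)[0]]))
        return self.chunk_reader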
@@ -287,12 +287,12 @@ class ZipChunkWriter(IChunkWriter):
# and does not decode it to know img size.
return []
def save_as_chunk_to_buff(self, images, format_='jpeg'):
def save_as_chunk_to_buff(self, images):
buff = io.BytesIO()
with zipfile.ZipFile(buff, 'w') as zip_file:
for idx, image in enumerate(images):
arcname = '{:06d}.{}'.format(idx, format_)
arcname = '{:06d}.{}'.format(idx, os.path.splitext(image)[1])
if isinstance(image, av.VideoFrame):
zip_file.writestr(arcname, image.to_image().tobytes().getvalue())
else:
@@ -312,12 +312,12 @@ class ZipCompressedChunkWriter(IChunkWriter):
return image_sizes
def save_as_chunk_to_buff(self, images, format_='jpeg'):
def save_as_chunk_to_buff(self, images):
buff = io.BytesIO()
with zipfile.ZipFile(buff, 'x') as zip_file:
for idx, image in enumerate(images):
(_, _, image_buf) = self._compress_image(image, self._image_quality)
arcname = '{:06d}.{}'.format(idx, format_)
arcname = '{:06d}.jpeg'.format(idx)
zip_file.writestr(arcname, image_buf.getvalue())
buff.seek(0)
return buff
@@ -366,7 +366,7 @@ class Mpeg4ChunkWriter(IChunkWriter):
output_container.close()
return [(input_w, input_h)]
def save_as_chunk_to_buff(self, frames, format_):
def save_as_chunk_to_buff(self, frames):
if not frames:
raise Exception('no images to save')
@@ -383,7 +383,7 @@ class Mpeg4ChunkWriter(IChunkWriter):
"crf": str(self._image_quality),
"preset": "ultrafast",
},
f=format_,
f='mp4',
)
for frame in frames:
@@ -454,7 +454,7 @@ class Mpeg4CompressedChunkWriter(Mpeg4ChunkWriter):
output_container.close()
return [(input_w, input_h)]
def save_as_chunk_to_buff(self, frames, format_):
def save_as_chunk_to_buff(self, frames):
if not frames:
raise Exception('no images to save')
@@ -482,7 +482,7 @@ class Mpeg4CompressedChunkWriter(Mpeg4ChunkWriter):
'wpredp': '0',
'flags': '-loop'
},
f=format_,
f='mp4',
)
for frame in frames:

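Note: every save_as_chunk_to_buff above loses its format_ parameter; the Mpeg4 writers hardcode f='mp4' and the zip writers fix the archive entry extension instead of taking it from the caller. The snippet below is a self-contained illustration of the zip-to-buffer pattern the zip writers follow, run on plain bytes rather than real frames:

# Writes numbered .jpeg entries into an in-memory zip and rewinds the buffer
# before returning it, as ZipCompressedChunkWriter.save_as_chunk_to_buff does.
import io
import zipfile

def save_as_chunk_to_buff(image_blobs):
    buff = io.BytesIO()
    with zipfile.ZipFile(buff, 'w') as zip_file:
        for idx, blob in enumerate(image_blobs):
            # arcname is now a fixed '<index>.jpeg', no caller-supplied format_
            zip_file.writestr('{:06d}.jpeg'.format(idx), blob)
    buff.seek(0)
    return buff

# usage: an in-memory zip holding 000000.jpeg and 000001.jpeg
chunk = save_as_chunk_to_buff([b'jpeg-bytes-0', b'jpeg-bytes-1'])
print(zipfile.ZipFile(chunk).namelist())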
@@ -93,7 +93,7 @@ class PrepareInfo(WorkWithVideo):
key_frames_copy = self.key_frames.copy()
for _, key_frame in key_frames_copy.items():
for index, key_frame in key_frames_copy.items():
container.seek(offset=key_frame.pts, stream=video_stream)
flag = True
for packet in container.demux(video_stream):

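Note: the PrepareInfo loop over the copied key-frame dictionary now keeps the key alongside the frame. The likely motivation (not visible in this hunk) is that the live self.key_frames can then be updated or pruned by key while iterating over the snapshot; the toy example below shows only that copy-then-mutate pattern and is not CVAT code.

# Iterate a snapshot so entries can safely be dropped from the live dict;
# seek_ok is a stand-in for the real container.seek()/demux() check.
key_frames = {0: 'keyframe-0', 42: 'keyframe-42', 90: 'keyframe-90'}

def seek_ok(frame):
    return frame != 'keyframe-42'  # pretend this key frame cannot be reached

for index, key_frame in key_frames.copy().items():
    if not seek_ok(key_frame):
        key_frames.pop(index)

print(key_frames)  # {0: 'keyframe-0', 90: 'keyframe-90'}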
@@ -304,9 +304,7 @@ def _create_thread(tid, data):
frame = meta_info.key_frames.get(next(iter(meta_info.key_frames)))
video_size = (frame.width, frame.height)
except AssertionError as ex:
db_data.storage_method = StorageMethodChoice.FILE_SYSTEM
except Exception as ex:
except Exception:
db_data.storage_method = StorageMethodChoice.FILE_SYSTEM
else:#images,archive

@@ -408,7 +408,6 @@ class TaskViewSet(auth.TaskGetQuerySetMixin, viewsets.ModelViewSet):
if data_type == 'chunk':
data_id = int(data_id)
quality = data_quality
data_quality = FrameProvider.Quality.COMPRESSED \
if data_quality == 'compressed' else FrameProvider.Quality.ORIGINAL

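Note: in TaskViewSet the raw query-string value is kept in quality before data_quality is mapped onto FrameProvider.Quality, presumably so the string form stays available alongside the enum further down the chunk branch. A self-contained sketch of that mapping, using a placeholder enum (the real FrameProvider.Quality is defined in cvat.apps.engine.frame_provider, not in this commit):

# Placeholder enum: member names match the diff, the values are arbitrary.
from enum import Enum

class Quality(Enum):
    COMPRESSED = 'compressed'
    ORIGINAL = 'original'

def resolve_quality(data_quality):
    # anything other than the literal 'compressed' is served at original quality
    return Quality.COMPRESSED if data_quality == 'compressed' else Quality.ORIGINAL

assert resolve_quality('compressed') is Quality.COMPRESSED
assert resolve_quality('original') is Quality.ORIGINAL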