Data streaming using chunks (#1007)
A huge feature (200+ commits from different developers). It completely changes the data layout (expect a very long DB migration if you have a lot of tasks). The primary idea is to send data as zip chunks (e.g. 36 images in one chunk) or as encoded video chunks and decode them on the client side. This solves the latency problem when you quickly jump to a separate frame in the UI (play mode). Another important feature of the patch is access to the original images: for annotation the client uses compressed chunks, but when you export a dataset Datumaro uses the original chunks (video is decoded at original quality and re-encoded with maximum/optimal quality in any case).
parent ecad0231c9
commit e7808cfb03
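Below is a minimal sketch (not part of the commit itself) of how a client could use the new `cvat-data` module added by this patch to fetch a compressed chunk and read individual frames from it. The request URL, task id, chunk size and render size are only assumptions for the example; the actual wiring lives in cvat-core.

```js
const { FrameProvider, BlockType } = require('cvat-data');

const CHUNK_SIZE = 36; // e.g. 36 frames per chunk, as in the description above
const provider = new FrameProvider(BlockType.MP4VIDEO, CHUNK_SIZE, 9);
provider.setRenderSize(1920, 1080); // frame size of the task (assumed for the example)

async function getFrame(frameNumber) {
    const cached = await provider.frame(frameNumber);
    if (cached !== null) return cached; // the frame is already decoded

    // The chunk is not cached yet: fetch the compressed chunk (illustrative URL)
    const chunkNumber = Math.floor(frameNumber / CHUNK_SIZE);
    const response = await fetch(
        `/api/v1/tasks/1/data?type=chunk&quality=compressed&number=${chunkNumber}`,
    );
    const block = await response.arrayBuffer();
    const start = chunkNumber * CHUNK_SIZE;
    const end = start + CHUNK_SIZE - 1;

    // Ask the provider to decode the whole block; frames become available one by one
    return new Promise((resolve, reject) => {
        provider.requestDecodeBlock(block, start, end,
            (decodedFrame) => {
                if (decodedFrame === frameNumber) {
                    provider.frame(frameNumber).then(resolve);
                }
            },
            reject);
    });
}
```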
@ -1,4 +1,5 @@
exclude_paths:
- '**/3rdparty/**'
- '**/engine/js/cvat-core.min.js'
- '**/engine/js/unzip_imgs.js'
- CHANGELOG.md
@ -0,0 +1,33 @@
/*
 * Copyright (C) 2019 Intel Corporation
 * SPDX-License-Identifier: MIT
 */

/* global
    require:false
*/

const Axios = require('axios');

Axios.defaults.withCredentials = true;
Axios.defaults.xsrfHeaderName = 'X-CSRFTOKEN';
Axios.defaults.xsrfCookieName = 'csrftoken';

// Web worker entry point: downloads the requested URL with Axios and posts
// the result (or the error) back to the main thread together with the request id.
onmessage = (e) => {
    Axios.get(e.data.url, e.data.config)
        .then((response) => {
            postMessage({
                responseData: response.data,
                id: e.data.id,
                isSuccess: true,
            });
        })
        .catch((error) => {
            postMessage({
                id: e.data.id,
                error,
                isSuccess: false,
            });
        });
};
@ -0,0 +1 @@
**/3rdparty/*.js
@ -0,0 +1,56 @@
|
||||
/*
|
||||
* Copyright (C) 2018-2020 Intel Corporation
|
||||
*
|
||||
* SPDX-License-Identifier: MIT
|
||||
*/
|
||||
|
||||
module.exports = {
|
||||
"env": {
|
||||
"node": false,
|
||||
"browser": true,
|
||||
"es6": true,
|
||||
"jquery": true,
|
||||
"qunit": true,
|
||||
},
|
||||
"parserOptions": {
|
||||
"parser": "babel-eslint",
|
||||
"sourceType": "module",
|
||||
"ecmaVersion": 2018,
|
||||
},
|
||||
"plugins": [
|
||||
"security",
|
||||
"no-unsanitized",
|
||||
"no-unsafe-innerhtml",
|
||||
],
|
||||
"extends": [
|
||||
"eslint:recommended",
|
||||
"plugin:security/recommended",
|
||||
"plugin:no-unsanitized/DOM",
|
||||
"airbnb-base",
|
||||
],
|
||||
"rules": {
|
||||
"no-await-in-loop": [0],
|
||||
"global-require": [0],
|
||||
"no-new": [0],
|
||||
"class-methods-use-this": [0],
|
||||
"no-restricted-properties": [0, {
|
||||
"object": "Math",
|
||||
"property": "pow",
|
||||
}],
|
||||
"no-plusplus": [0],
|
||||
"no-param-reassign": [0],
|
||||
"no-underscore-dangle": ["error", { "allowAfterThis": true }],
|
||||
"no-restricted-syntax": [0, {"selector": "ForOfStatement"}],
|
||||
"no-continue": [0],
|
||||
"no-unsafe-innerhtml/no-unsafe-innerhtml": 1,
|
||||
        // This rule is mainly relevant for user input data in a node.js environment.
|
||||
"security/detect-object-injection": 0,
|
||||
"indent": ["warn", 4],
|
||||
"no-useless-constructor": 0,
|
||||
"func-names": [0],
|
||||
"valid-typeof": [0],
|
||||
"no-console": [0], // this rule deprecates console.log, console.warn etc. because "it is not good in production code"
|
||||
"max-classes-per-file": [0],
|
||||
"quotes": ["warn", "single"],
|
||||
},
|
||||
};
|
||||
@ -0,0 +1 @@
dist
@ -0,0 +1,7 @@
# cvat-data module

```bash
npm run build # build with minification
npm run build -- --mode=development # build without minification
npm run server # run debug server
```
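For context, the webpack config added in this patch emits the bundle as `dist/cvat-data.min.js` and exposes it on the page as a `cvatData` global (library target `window`), so a page that includes the bundle can use it roughly like this sketch (the chunk type and sizes are arbitrary example values):

```js
// assumes dist/cvat-data.min.js has already been loaded via a <script> tag
const { FrameProvider, BlockType } = window.cvatData;

// one provider per task; 36 frames per chunk is just an example value
const provider = new FrameProvider(BlockType.ARCHIVE, 36, 9);
provider.setRenderSize(1920, 1080);
```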
File diff suppressed because it is too large
@ -0,0 +1,34 @@
|
||||
{
|
||||
"name": "cvat-data",
|
||||
"version": "0.1.0",
|
||||
"description": "",
|
||||
"main": "src/js/cvat-data.js",
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.4.4",
|
||||
"@babel/core": "^7.4.4",
|
||||
"@babel/preset-env": "^7.4.4",
|
||||
"babel-loader": "^8.0.6",
|
||||
"copy-webpack-plugin": "^5.0.5",
|
||||
"eslint": "^6.4.0",
|
||||
"eslint-config-airbnb-base": "^14.0.0",
|
||||
"eslint-plugin-import": "^2.18.2",
|
||||
"eslint-plugin-no-unsafe-innerhtml": "^1.0.16",
|
||||
"eslint-plugin-no-unsanitized": "^3.0.2",
|
||||
"eslint-plugin-security": "^1.4.0",
|
||||
"nodemon": "^1.19.2",
|
||||
"webpack": "^4.39.3",
|
||||
"webpack-cli": "^3.3.7",
|
||||
"worker-loader": "^2.0.0"
|
||||
},
|
||||
"dependencies": {
|
||||
"async-mutex": "^0.1.4",
|
||||
"jszip": "3.1.5"
|
||||
},
|
||||
"scripts": {
|
||||
"patch": "cd src/js && patch --dry-run --forward -p0 < 3rdparty_patch.diff >> /dev/null && patch -p0 < 3rdparty_patch.diff; true",
|
||||
"build": "npm run patch; webpack --config ./webpack.config.js",
|
||||
"server": "npm run patch; nodemon --watch config --exec 'webpack-dev-server --config ./webpack.config.js --mode=development --open'"
|
||||
},
|
||||
"author": "Intel",
|
||||
"license": "MIT"
|
||||
}
|
||||
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@ -0,0 +1,88 @@
## 3rdparty components

These files are from the [Broadway.js](https://github.com/mbebenita/Broadway) repository:
- Decoder.js
- mp4.js
- avc.wasm

### Why do we store them here?

The authors don't provide an npm package, so we have to store these components in our repository.
We use this dependency to decode video chunks received from the server and split them into frames on the client side.

We also need to run this package in a node environment (for example for debugging, or for running unit tests),
but there is no way to do that out of the box (even with a synthetic environment such as the one provided by the ``browser-env`` package).
For example, there are issues with canvas usage (webpack doesn't work with the binary canvas package for node.js), among others.
So we decided to write a patch file for this library.
It modifies the source code slightly to support our usage scenario.
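For illustration, here is a rough sketch of how `cvat-data.js` (added later in this patch) drives these components to turn an mp4 chunk into decoded frames. It is a simplified view of the real code, assumes it runs inside the webpack/worker-loader build, and the `decodeChunk`/`onFrame` names exist only for this example:

```js
const { MP4Reader, Bytestream } = require('./3rdparty/mp4');
const H264Decoder = require('./3rdparty/Decoder.worker');

function decodeChunk(mp4ArrayBuffer, onFrame) {
    const worker = new H264Decoder();
    worker.onmessage = (e) => {
        if (e.data.consoleLog) return; // skip the decoder initialization message
        onFrame(e.data.buf, e.data.width, e.data.height); // raw decoded picture
    };
    worker.postMessage({
        type: 'Broadway.js - Worker init',
        options: { rgb: true, reuseMemory: false },
    });

    // Parse the mp4 container and feed the H264 payload to the decoder
    const reader = new MP4Reader(new Bytestream(mp4ArrayBuffer));
    reader.read();
    const video = reader.tracks[1];
    const avc = video.trak.mdia.minf.stbl.stsd.avc1.avcC;

    // Sequence and picture parameter sets first, then every NAL unit
    [avc.sps[0], avc.pps[0]].forEach((buf) => {
        worker.postMessage({ buf, offset: 0, length: buf.length });
    });
    for (let sample = 0; sample < video.getSampleCount(); sample++) {
        video.getSampleNALUnits(sample).forEach((nal) => {
            worker.postMessage({ buf: nal, offset: 0, length: nal.length });
        });
    }
}
```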
### How to build avc.wasm and Decoder.js
1. Clone the Emscripten SDK, install and activate the latest fastcomp SDK:
```sh
git clone https://github.com/emscripten-core/emsdk.git && cd emsdk
```
```sh
./emsdk install latest-fastcomp
```
```sh
./emsdk activate latest-fastcomp
```

1. Clone Broadway.js:
```sh
git clone https://github.com/mbebenita/Broadway.git && cd Broadway/Decoder
```

1. Edit `make.py`:
   - Remove or comment out the following options:
     `'-s', 'NO_BROWSER=1',`\
     `'-s', 'PRECISE_I64_MATH=0',`
   - Remove `"HEAP8", "HEAP16", "HEAP32"` from the `EXPORTED_FUNCTIONS` list.
   - Increase the total memory to make it possible to decode 4K videos
     (or try to enable `ALLOW_MEMORY_GROWTH`, but this option has not been tested):\
     `'-s', 'TOTAL_MEMORY=' + str(100*1024*1024),`
   - Add the following options:\
     `'-s', "ENVIRONMENT='worker'",`\
     `'-s', 'WASM=1',`

1. Activate the emsdk environment and build Broadway.js:
```sh
. /tmp/emsdk/emsdk_env.sh
```
```sh
python2 make.py
```

1. Copy the following files to the cvat-data 3rdparty source folder (`cvat-data/src/js/3rdparty`):
```sh
cd ..
```
```sh
cp Player/avc.wasm Player/Decoder.js Player/mp4.js <CVAT_FOLDER>/cvat-data/src/js/3rdparty
```

### How to work with the patch file
```bash
# from cvat-data/src/js
cp -r 3rdparty 3rdparty_edited
# edit 3rdparty_edited as needed
diff -u 3rdparty 3rdparty_edited/ > 3rdparty_patch.diff
patch -p0 < 3rdparty_patch.diff # apply the patch from cvat-data/src/js
```

These files have also been added to git's ignore list for all future revisions:
```bash
# from the cvat-data dir
git update-index --skip-worktree src/js/3rdparty/*.js
```

This behaviour can be reset with:
```bash
# from the cvat-data dir
git update-index --no-skip-worktree src/js/3rdparty/*.js
```

[Stackoverflow issue](https://stackoverflow.com/questions/4348590/how-can-i-make-git-ignore-future-revisions-to-a-file)
Binary file not shown.
@ -0,0 +1,977 @@
|
||||
module.exports = (function(){
|
||||
|
||||
'use strict';
|
||||
|
||||
|
||||
function assert(condition, message) {
|
||||
if (!condition) {
|
||||
error(message);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
* Represents a 2-dimensional size value.
|
||||
*/
|
||||
var Size = (function size() {
|
||||
function constructor(w, h) {
|
||||
this.w = w;
|
||||
this.h = h;
|
||||
}
|
||||
constructor.prototype = {
|
||||
toString: function () {
|
||||
return "(" + this.w + ", " + this.h + ")";
|
||||
},
|
||||
getHalfSize: function() {
|
||||
return new Size(this.w >>> 1, this.h >>> 1);
|
||||
},
|
||||
length: function() {
|
||||
return this.w * this.h;
|
||||
}
|
||||
};
|
||||
return constructor;
|
||||
})();
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
var Bytestream = (function BytestreamClosure() {
|
||||
function constructor(arrayBuffer, start, length) {
|
||||
this.bytes = new Uint8Array(arrayBuffer);
|
||||
this.start = start || 0;
|
||||
this.pos = this.start;
|
||||
this.end = (start + length) || this.bytes.length;
|
||||
}
|
||||
constructor.prototype = {
|
||||
get length() {
|
||||
return this.end - this.start;
|
||||
},
|
||||
get position() {
|
||||
return this.pos;
|
||||
},
|
||||
get remaining() {
|
||||
return this.end - this.pos;
|
||||
},
|
||||
readU8Array: function (length) {
|
||||
if (this.pos > this.end - length)
|
||||
return null;
|
||||
var res = this.bytes.subarray(this.pos, this.pos + length);
|
||||
this.pos += length;
|
||||
return res;
|
||||
},
|
||||
readU32Array: function (rows, cols, names) {
|
||||
cols = cols || 1;
|
||||
if (this.pos > this.end - (rows * cols) * 4)
|
||||
return null;
|
||||
if (cols == 1) {
|
||||
var array = new Uint32Array(rows);
|
||||
for (var i = 0; i < rows; i++) {
|
||||
array[i] = this.readU32();
|
||||
}
|
||||
return array;
|
||||
} else {
|
||||
var array = new Array(rows);
|
||||
for (var i = 0; i < rows; i++) {
|
||||
var row = null;
|
||||
if (names) {
|
||||
row = {};
|
||||
for (var j = 0; j < cols; j++) {
|
||||
row[names[j]] = this.readU32();
|
||||
}
|
||||
} else {
|
||||
row = new Uint32Array(cols);
|
||||
for (var j = 0; j < cols; j++) {
|
||||
row[j] = this.readU32();
|
||||
}
|
||||
}
|
||||
array[i] = row;
|
||||
}
|
||||
return array;
|
||||
}
|
||||
},
|
||||
read8: function () {
|
||||
return this.readU8() << 24 >> 24;
|
||||
},
|
||||
readU8: function () {
|
||||
if (this.pos >= this.end)
|
||||
return null;
|
||||
return this.bytes[this.pos++];
|
||||
},
|
||||
read16: function () {
|
||||
return this.readU16() << 16 >> 16;
|
||||
},
|
||||
readU16: function () {
|
||||
if (this.pos >= this.end - 1)
|
||||
return null;
|
||||
var res = this.bytes[this.pos + 0] << 8 | this.bytes[this.pos + 1];
|
||||
this.pos += 2;
|
||||
return res;
|
||||
},
|
||||
read24: function () {
|
||||
return this.readU24() << 8 >> 8;
|
||||
},
|
||||
readU24: function () {
|
||||
var pos = this.pos;
|
||||
var bytes = this.bytes;
|
||||
if (pos > this.end - 3)
|
||||
return null;
|
||||
var res = bytes[pos + 0] << 16 | bytes[pos + 1] << 8 | bytes[pos + 2];
|
||||
this.pos += 3;
|
||||
return res;
|
||||
},
|
||||
peek32: function (advance) {
|
||||
var pos = this.pos;
|
||||
var bytes = this.bytes;
|
||||
if (pos > this.end - 4)
|
||||
return null;
|
||||
var res = bytes[pos + 0] << 24 | bytes[pos + 1] << 16 | bytes[pos + 2] << 8 | bytes[pos + 3];
|
||||
if (advance) {
|
||||
this.pos += 4;
|
||||
}
|
||||
return res;
|
||||
},
|
||||
read32: function () {
|
||||
return this.peek32(true);
|
||||
},
|
||||
readU32: function () {
|
||||
return this.peek32(true) >>> 0;
|
||||
},
|
||||
read4CC: function () {
|
||||
var pos = this.pos;
|
||||
if (pos > this.end - 4)
|
||||
return null;
|
||||
var res = "";
|
||||
for (var i = 0; i < 4; i++) {
|
||||
res += String.fromCharCode(this.bytes[pos + i]);
|
||||
}
|
||||
this.pos += 4;
|
||||
return res;
|
||||
},
|
||||
readFP16: function () {
|
||||
return this.read32() / 65536;
|
||||
},
|
||||
readFP8: function () {
|
||||
return this.read16() / 256;
|
||||
},
|
||||
readISO639: function () {
|
||||
var bits = this.readU16();
|
||||
var res = "";
|
||||
for (var i = 0; i < 3; i++) {
|
||||
var c = (bits >>> (2 - i) * 5) & 0x1f;
|
||||
res += String.fromCharCode(c + 0x60);
|
||||
}
|
||||
return res;
|
||||
},
|
||||
readUTF8: function (length) {
|
||||
var res = "";
|
||||
for (var i = 0; i < length; i++) {
|
||||
res += String.fromCharCode(this.readU8());
|
||||
}
|
||||
return res;
|
||||
},
|
||||
readPString: function (max) {
|
||||
var len = this.readU8();
|
||||
assert (len <= max);
|
||||
var res = this.readUTF8(len);
|
||||
this.reserved(max - len - 1, 0);
|
||||
return res;
|
||||
},
|
||||
skip: function (length) {
|
||||
this.seek(this.pos + length);
|
||||
},
|
||||
reserved: function (length, value) {
|
||||
for (var i = 0; i < length; i++) {
|
||||
assert (this.readU8() == value);
|
||||
}
|
||||
},
|
||||
seek: function (index) {
|
||||
if (index < 0 || index > this.end) {
|
||||
error("Index out of bounds (bounds: [0, " + this.end + "], index: " + index + ").");
|
||||
}
|
||||
this.pos = index;
|
||||
},
|
||||
subStream: function (start, length) {
|
||||
return new Bytestream(this.bytes.buffer, start, length);
|
||||
}
|
||||
};
|
||||
return constructor;
|
||||
})();
|
||||
|
||||
|
||||
var PARANOID = true; // Heavy-weight assertions.
|
||||
|
||||
/**
|
||||
* Reads an mp4 file and constructs a object graph that corresponds to the box/atom
|
||||
* structure of the file. Mp4 files are based on the ISO Base Media format, which in
|
||||
* turn is based on the Apple Quicktime format. The Quicktime spec is available at:
|
||||
* http://developer.apple.com/library/mac/#documentation/QuickTime/QTFF. An mp4 spec
|
||||
* also exists, but I cannot find it freely available.
|
||||
*
|
||||
* Mp4 files contain a tree of boxes (or atoms in Quicktime). The general structure
|
||||
* is as follows (in a pseudo regex syntax):
|
||||
*
|
||||
* Box / Atom Structure:
|
||||
*
|
||||
* [size type [version flags] field* box*]
|
||||
* <32> <4C> <--8--> <24-> <-?-> <?>
|
||||
* <------------- box size ------------>
|
||||
*
|
||||
* The box size indicates the entire size of the box and its children, we can use it
|
||||
* to skip over boxes that are of no interest. Each box has a type indicated by a
|
||||
* four character code (4C), this describes how the box should be parsed and is also
|
||||
* used as an object key name in the resulting box tree. For example, the expression:
|
||||
* "moov.trak[0].mdia.minf" can be used to access individual boxes in the tree based
|
||||
* on their 4C name. If two or more boxes with the same 4C name exist in a box, then
|
||||
* an array is built with that name.
|
||||
*
|
||||
*/
|
||||
var MP4Reader = (function reader() {
|
||||
var BOX_HEADER_SIZE = 8;
|
||||
var FULL_BOX_HEADER_SIZE = BOX_HEADER_SIZE + 4;
|
||||
|
||||
function constructor(stream) {
|
||||
this.stream = stream;
|
||||
this.tracks = {};
|
||||
}
|
||||
|
||||
constructor.prototype = {
|
||||
readBoxes: function (stream, parent) {
|
||||
while (stream.peek32()) {
|
||||
var child = this.readBox(stream);
|
||||
if (child.type in parent) {
|
||||
var old = parent[child.type];
|
||||
if (!(old instanceof Array)) {
|
||||
parent[child.type] = [old];
|
||||
}
|
||||
parent[child.type].push(child);
|
||||
} else {
|
||||
parent[child.type] = child;
|
||||
}
|
||||
}
|
||||
},
|
||||
readBox: function readBox(stream) {
|
||||
var box = { offset: stream.position };
|
||||
|
||||
function readHeader() {
|
||||
box.size = stream.readU32();
|
||||
box.type = stream.read4CC();
|
||||
}
|
||||
|
||||
function readFullHeader() {
|
||||
box.version = stream.readU8();
|
||||
box.flags = stream.readU24();
|
||||
}
|
||||
|
||||
function remainingBytes() {
|
||||
return box.size - (stream.position - box.offset);
|
||||
}
|
||||
|
||||
function skipRemainingBytes () {
|
||||
stream.skip(remainingBytes());
|
||||
}
|
||||
|
||||
var readRemainingBoxes = function () {
|
||||
var subStream = stream.subStream(stream.position, remainingBytes());
|
||||
this.readBoxes(subStream, box);
|
||||
stream.skip(subStream.length);
|
||||
}.bind(this);
|
||||
|
||||
readHeader();
|
||||
|
||||
switch (box.type) {
|
||||
case 'ftyp':
|
||||
box.name = "File Type Box";
|
||||
box.majorBrand = stream.read4CC();
|
||||
box.minorVersion = stream.readU32();
|
||||
box.compatibleBrands = new Array((box.size - 16) / 4);
|
||||
for (var i = 0; i < box.compatibleBrands.length; i++) {
|
||||
box.compatibleBrands[i] = stream.read4CC();
|
||||
}
|
||||
break;
|
||||
case 'moov':
|
||||
box.name = "Movie Box";
|
||||
readRemainingBoxes();
|
||||
break;
|
||||
case 'mvhd':
|
||||
box.name = "Movie Header Box";
|
||||
readFullHeader();
|
||||
assert (box.version == 0);
|
||||
box.creationTime = stream.readU32();
|
||||
box.modificationTime = stream.readU32();
|
||||
box.timeScale = stream.readU32();
|
||||
box.duration = stream.readU32();
|
||||
box.rate = stream.readFP16();
|
||||
box.volume = stream.readFP8();
|
||||
stream.skip(10);
|
||||
box.matrix = stream.readU32Array(9);
|
||||
stream.skip(6 * 4);
|
||||
box.nextTrackId = stream.readU32();
|
||||
break;
|
||||
case 'trak':
|
||||
box.name = "Track Box";
|
||||
readRemainingBoxes();
|
||||
this.tracks[box.tkhd.trackId] = new Track(this, box);
|
||||
break;
|
||||
case 'tkhd':
|
||||
box.name = "Track Header Box";
|
||||
readFullHeader();
|
||||
assert (box.version == 0);
|
||||
box.creationTime = stream.readU32();
|
||||
box.modificationTime = stream.readU32();
|
||||
box.trackId = stream.readU32();
|
||||
stream.skip(4);
|
||||
box.duration = stream.readU32();
|
||||
stream.skip(8);
|
||||
box.layer = stream.readU16();
|
||||
box.alternateGroup = stream.readU16();
|
||||
box.volume = stream.readFP8();
|
||||
stream.skip(2);
|
||||
box.matrix = stream.readU32Array(9);
|
||||
box.width = stream.readFP16();
|
||||
box.height = stream.readFP16();
|
||||
break;
|
||||
case 'mdia':
|
||||
box.name = "Media Box";
|
||||
readRemainingBoxes();
|
||||
break;
|
||||
case 'mdhd':
|
||||
box.name = "Media Header Box";
|
||||
readFullHeader();
|
||||
assert (box.version == 0);
|
||||
box.creationTime = stream.readU32();
|
||||
box.modificationTime = stream.readU32();
|
||||
box.timeScale = stream.readU32();
|
||||
box.duration = stream.readU32();
|
||||
box.language = stream.readISO639();
|
||||
stream.skip(2);
|
||||
break;
|
||||
case 'hdlr':
|
||||
box.name = "Handler Reference Box";
|
||||
readFullHeader();
|
||||
stream.skip(4);
|
||||
box.handlerType = stream.read4CC();
|
||||
stream.skip(4 * 3);
|
||||
var bytesLeft = box.size - 32;
|
||||
if (bytesLeft > 0) {
|
||||
box.name = stream.readUTF8(bytesLeft);
|
||||
}
|
||||
break;
|
||||
case 'minf':
|
||||
box.name = "Media Information Box";
|
||||
readRemainingBoxes();
|
||||
break;
|
||||
case 'stbl':
|
||||
box.name = "Sample Table Box";
|
||||
readRemainingBoxes();
|
||||
break;
|
||||
case 'stsd':
|
||||
box.name = "Sample Description Box";
|
||||
readFullHeader();
|
||||
box.sd = [];
|
||||
var entries = stream.readU32();
|
||||
readRemainingBoxes();
|
||||
break;
|
||||
case 'avc1':
|
||||
stream.reserved(6, 0);
|
||||
box.dataReferenceIndex = stream.readU16();
|
||||
assert (stream.readU16() == 0); // Version
|
||||
assert (stream.readU16() == 0); // Revision Level
|
||||
stream.readU32(); // Vendor
|
||||
stream.readU32(); // Temporal Quality
|
||||
stream.readU32(); // Spatial Quality
|
||||
box.width = stream.readU16();
|
||||
box.height = stream.readU16();
|
||||
box.horizontalResolution = stream.readFP16();
|
||||
box.verticalResolution = stream.readFP16();
|
||||
assert (stream.readU32() == 0); // Reserved
|
||||
box.frameCount = stream.readU16();
|
||||
box.compressorName = stream.readPString(32);
|
||||
box.depth = stream.readU16();
|
||||
assert (stream.readU16() == 0xFFFF); // Color Table Id
|
||||
readRemainingBoxes();
|
||||
break;
|
||||
case 'mp4a':
|
||||
stream.reserved(6, 0);
|
||||
box.dataReferenceIndex = stream.readU16();
|
||||
box.version = stream.readU16();
|
||||
stream.skip(2);
|
||||
stream.skip(4);
|
||||
box.channelCount = stream.readU16();
|
||||
box.sampleSize = stream.readU16();
|
||||
box.compressionId = stream.readU16();
|
||||
box.packetSize = stream.readU16();
|
||||
box.sampleRate = stream.readU32() >>> 16;
|
||||
|
||||
// TODO: Parse other version levels.
|
||||
assert (box.version == 0);
|
||||
readRemainingBoxes();
|
||||
break;
|
||||
case 'esds':
|
||||
box.name = "Elementary Stream Descriptor";
|
||||
readFullHeader();
|
||||
// TODO: Do we really need to parse this?
|
||||
skipRemainingBytes();
|
||||
break;
|
||||
case 'avcC':
|
||||
box.name = "AVC Configuration Box";
|
||||
box.configurationVersion = stream.readU8();
|
||||
box.avcProfileIndication = stream.readU8();
|
||||
box.profileCompatibility = stream.readU8();
|
||||
box.avcLevelIndication = stream.readU8();
|
||||
box.lengthSizeMinusOne = stream.readU8() & 3;
|
||||
assert (box.lengthSizeMinusOne == 3, "TODO");
|
||||
var count = stream.readU8() & 31;
|
||||
box.sps = [];
|
||||
for (var i = 0; i < count; i++) {
|
||||
box.sps.push(stream.readU8Array(stream.readU16()));
|
||||
}
|
||||
var count = stream.readU8() & 31;
|
||||
box.pps = [];
|
||||
for (var i = 0; i < count; i++) {
|
||||
box.pps.push(stream.readU8Array(stream.readU16()));
|
||||
}
|
||||
skipRemainingBytes();
|
||||
break;
|
||||
case 'btrt':
|
||||
box.name = "Bit Rate Box";
|
||||
box.bufferSizeDb = stream.readU32();
|
||||
box.maxBitrate = stream.readU32();
|
||||
box.avgBitrate = stream.readU32();
|
||||
break;
|
||||
case 'stts':
|
||||
box.name = "Decoding Time to Sample Box";
|
||||
readFullHeader();
|
||||
box.table = stream.readU32Array(stream.readU32(), 2, ["count", "delta"]);
|
||||
break;
|
||||
case 'stss':
|
||||
box.name = "Sync Sample Box";
|
||||
readFullHeader();
|
||||
box.samples = stream.readU32Array(stream.readU32());
|
||||
break;
|
||||
case 'stsc':
|
||||
box.name = "Sample to Chunk Box";
|
||||
readFullHeader();
|
||||
box.table = stream.readU32Array(stream.readU32(), 3,
|
||||
["firstChunk", "samplesPerChunk", "sampleDescriptionId"]);
|
||||
break;
|
||||
case 'stsz':
|
||||
box.name = "Sample Size Box";
|
||||
readFullHeader();
|
||||
box.sampleSize = stream.readU32();
|
||||
var count = stream.readU32();
|
||||
if (box.sampleSize == 0) {
|
||||
box.table = stream.readU32Array(count);
|
||||
}
|
||||
break;
|
||||
case 'stco':
|
||||
box.name = "Chunk Offset Box";
|
||||
readFullHeader();
|
||||
box.table = stream.readU32Array(stream.readU32());
|
||||
break;
|
||||
case 'smhd':
|
||||
box.name = "Sound Media Header Box";
|
||||
readFullHeader();
|
||||
box.balance = stream.readFP8();
|
||||
stream.reserved(2, 0);
|
||||
break;
|
||||
case 'mdat':
|
||||
box.name = "Media Data Box";
|
||||
assert (box.size >= 8, "Cannot parse large media data yet.");
|
||||
box.data = stream.readU8Array(remainingBytes());
|
||||
break;
|
||||
default:
|
||||
skipRemainingBytes();
|
||||
break;
|
||||
};
|
||||
return box;
|
||||
},
|
||||
read: function () {
|
||||
var start = (new Date).getTime();
|
||||
this.file = {};
|
||||
this.readBoxes(this.stream, this.file);
|
||||
console.info("Parsed stream in " + ((new Date).getTime() - start) + " ms");
|
||||
},
|
||||
traceSamples: function () {
|
||||
var video = this.tracks[1];
|
||||
var audio = this.tracks[2];
|
||||
|
||||
console.info("Video Samples: " + video.getSampleCount());
|
||||
console.info("Audio Samples: " + audio.getSampleCount());
|
||||
|
||||
var vi = 0;
|
||||
var ai = 0;
|
||||
|
||||
for (var i = 0; i < 100; i++) {
|
||||
var vo = video.sampleToOffset(vi);
|
||||
var ao = audio.sampleToOffset(ai);
|
||||
|
||||
var vs = video.sampleToSize(vi, 1);
|
||||
var as = audio.sampleToSize(ai, 1);
|
||||
|
||||
if (vo < ao) {
|
||||
console.info("V Sample " + vi + " Offset : " + vo + ", Size : " + vs);
|
||||
vi ++;
|
||||
} else {
|
||||
console.info("A Sample " + ai + " Offset : " + ao + ", Size : " + as);
|
||||
ai ++;
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
return constructor;
|
||||
})();
|
||||
|
||||
var Track = (function track () {
|
||||
function constructor(file, trak) {
|
||||
this.file = file;
|
||||
this.trak = trak;
|
||||
}
|
||||
|
||||
constructor.prototype = {
|
||||
getSampleSizeTable: function () {
|
||||
return this.trak.mdia.minf.stbl.stsz.table;
|
||||
},
|
||||
getSampleCount: function () {
|
||||
return this.getSampleSizeTable().length;
|
||||
},
|
||||
/**
|
||||
* Computes the size of a range of samples, returns zero if length is zero.
|
||||
*/
|
||||
sampleToSize: function (start, length) {
|
||||
var table = this.getSampleSizeTable();
|
||||
var size = 0;
|
||||
for (var i = start; i < start + length; i++) {
|
||||
size += table[i];
|
||||
}
|
||||
return size;
|
||||
},
|
||||
/**
|
||||
* Computes the chunk that contains the specified sample, as well as the offset of
|
||||
* the sample in the computed chunk.
|
||||
*/
|
||||
sampleToChunk: function (sample) {
|
||||
|
||||
/* Samples are grouped in chunks which may contain a variable number of samples.
|
||||
* The sample-to-chunk table in the stsc box describes how samples are arranged
|
||||
* in chunks. Each table row corresponds to a set of consecutive chunks with the
|
||||
* same number of samples and description ids. For example, the following table:
|
||||
*
|
||||
* +-------------+-------------------+----------------------+
|
||||
* | firstChunk | samplesPerChunk | sampleDescriptionId |
|
||||
* +-------------+-------------------+----------------------+
|
||||
* | 1 | 3 | 23 |
|
||||
* | 3 | 1 | 23 |
|
||||
* | 5 | 1 | 24 |
|
||||
* +-------------+-------------------+----------------------+
|
||||
*
|
||||
* describes 5 chunks with a total of (2 * 3) + (2 * 1) + (1 * 1) = 9 samples,
|
||||
* each chunk containing samples 3, 3, 1, 1, 1 in chunk order, or
|
||||
* chunks 1, 1, 1, 2, 2, 2, 3, 4, 5 in sample order.
|
||||
*
|
||||
* This function determines the chunk that contains a specified sample by iterating
|
||||
* over every entry in the table. It also returns the position of the sample in the
|
||||
* chunk which can be used to compute the sample's exact position in the file.
|
||||
*
|
||||
* TODO: Determine if we should memoize this function.
|
||||
*/
|
||||
|
||||
var table = this.trak.mdia.minf.stbl.stsc.table;
|
||||
|
||||
if (table.length === 1) {
|
||||
var row = table[0];
|
||||
assert (row.firstChunk === 1);
|
||||
return {
|
||||
index: Math.floor(sample / row.samplesPerChunk),
|
||||
offset: sample % row.samplesPerChunk
|
||||
};
|
||||
}
|
||||
|
||||
var totalChunkCount = 0;
|
||||
for (var i = 0; i < table.length; i++) {
|
||||
var row = table[i];
|
||||
if (i > 0) {
|
||||
var previousRow = table[i - 1];
|
||||
var previousChunkCount = row.firstChunk - previousRow.firstChunk;
|
||||
var previousSampleCount = previousRow.samplesPerChunk * previousChunkCount;
|
||||
if (sample >= previousSampleCount) {
|
||||
sample -= previousSampleCount;
|
||||
if (i == table.length - 1) {
|
||||
return {
|
||||
index: totalChunkCount + previousChunkCount + Math.floor(sample / row.samplesPerChunk),
|
||||
offset: sample % row.samplesPerChunk
|
||||
};
|
||||
}
|
||||
} else {
|
||||
return {
|
||||
index: totalChunkCount + Math.floor(sample / previousRow.samplesPerChunk),
|
||||
offset: sample % previousRow.samplesPerChunk
|
||||
};
|
||||
}
|
||||
totalChunkCount += previousChunkCount;
|
||||
}
|
||||
}
|
||||
assert(false);
|
||||
},
|
||||
chunkToOffset: function (chunk) {
|
||||
var table = this.trak.mdia.minf.stbl.stco.table;
|
||||
return table[chunk];
|
||||
},
|
||||
sampleToOffset: function (sample) {
|
||||
var res = this.sampleToChunk(sample);
|
||||
var offset = this.chunkToOffset(res.index);
|
||||
return offset + this.sampleToSize(sample - res.offset, res.offset);
|
||||
},
|
||||
/**
|
||||
* Computes the sample at the specified time.
|
||||
*/
|
||||
timeToSample: function (time) {
|
||||
/* In the time-to-sample table samples are grouped by their duration. The count field
|
||||
* indicates the number of consecutive samples that have the same duration. For example,
|
||||
* the following table:
|
||||
*
|
||||
* +-------+-------+
|
||||
* | count | delta |
|
||||
* +-------+-------+
|
||||
* | 4 | 3 |
|
||||
* | 2 | 1 |
|
||||
* | 3 | 2 |
|
||||
* +-------+-------+
|
||||
*
|
||||
* describes 9 samples with a total time of (4 * 3) + (2 * 1) + (3 * 2) = 20.
|
||||
*
|
||||
* This function determines the sample at the specified time by iterating over every
|
||||
* entry in the table.
|
||||
*
|
||||
* TODO: Determine if we should memoize this function.
|
||||
*/
|
||||
var table = this.trak.mdia.minf.stbl.stts.table;
|
||||
var sample = 0;
|
||||
for (var i = 0; i < table.length; i++) {
|
||||
var delta = table[i].count * table[i].delta;
|
||||
if (time >= delta) {
|
||||
time -= delta;
|
||||
sample += table[i].count;
|
||||
} else {
|
||||
return sample + Math.floor(time / table[i].delta);
|
||||
}
|
||||
}
|
||||
},
|
||||
/**
|
||||
* Gets the total time of the track.
|
||||
*/
|
||||
getTotalTime: function () {
|
||||
if (PARANOID) {
|
||||
var table = this.trak.mdia.minf.stbl.stts.table;
|
||||
var duration = 0;
|
||||
for (var i = 0; i < table.length; i++) {
|
||||
duration += table[i].count * table[i].delta;
|
||||
}
|
||||
assert (this.trak.mdia.mdhd.duration == duration);
|
||||
}
|
||||
return this.trak.mdia.mdhd.duration;
|
||||
},
|
||||
getTotalTimeInSeconds: function () {
|
||||
return this.timeToSeconds(this.getTotalTime());
|
||||
},
|
||||
getTimeScale: function () {
|
||||
return this.trak.mdia.mdhd.timeScale;
|
||||
},
|
||||
/**
|
||||
* Converts time units to real time (seconds).
|
||||
*/
|
||||
timeToSeconds: function (time) {
|
||||
return time / this.getTimeScale();
|
||||
},
|
||||
/**
|
||||
* Converts real time (seconds) to time units.
|
||||
*/
|
||||
secondsToTime: function (seconds) {
|
||||
return seconds * this.getTimeScale();
|
||||
},
|
||||
foo: function () {
|
||||
/*
|
||||
for (var i = 0; i < this.getSampleCount(); i++) {
|
||||
var res = this.sampleToChunk(i);
|
||||
console.info("Sample " + i + " -> " + res.index + " % " + res.offset +
|
||||
" @ " + this.chunkToOffset(res.index) +
|
||||
" @@ " + this.sampleToOffset(i));
|
||||
}
|
||||
console.info("Total Time: " + this.timeToSeconds(this.getTotalTime()));
|
||||
var total = this.getTotalTimeInSeconds();
|
||||
for (var i = 50; i < total; i += 0.1) {
|
||||
// console.info("Time: " + i.toFixed(2) + " " + this.secondsToTime(i));
|
||||
|
||||
console.info("Time: " + i.toFixed(2) + " " + this.timeToSample(this.secondsToTime(i)));
|
||||
}
|
||||
*/
|
||||
},
|
||||
/**
|
||||
* AVC samples contain one or more NAL units each of which have a length prefix.
|
||||
* This function returns an array of NAL units without their length prefixes.
|
||||
*/
|
||||
getSampleNALUnits: function (sample) {
|
||||
var bytes = this.file.stream.bytes;
|
||||
var offset = this.sampleToOffset(sample);
|
||||
var end = offset + this.sampleToSize(sample, 1);
|
||||
var nalUnits = [];
|
||||
while(end - offset > 0) {
|
||||
var length = (new Bytestream(bytes.buffer, offset)).readU32();
|
||||
nalUnits.push(bytes.subarray(offset + 4, offset + length + 4));
|
||||
offset = offset + length + 4;
|
||||
}
|
||||
return nalUnits;
|
||||
}
|
||||
};
|
||||
return constructor;
|
||||
})();
|
||||
|
||||
|
||||
// Only add setZeroTimeout to the window object, and hide everything
|
||||
// else in a closure. (http://dbaron.org/log/20100309-faster-timeouts)
|
||||
(function() {
|
||||
var timeouts = [];
|
||||
var messageName = "zero-timeout-message";
|
||||
|
||||
// Like setTimeout, but only takes a function argument. There's
|
||||
// no time argument (always zero) and no arguments (you have to
|
||||
// use a closure).
|
||||
function setZeroTimeout(fn) {
|
||||
timeouts.push(fn);
|
||||
window.postMessage(messageName, "*");
|
||||
}
|
||||
|
||||
function handleMessage(event) {
|
||||
if (event.source == window && event.data == messageName) {
|
||||
event.stopPropagation();
|
||||
if (timeouts.length > 0) {
|
||||
var fn = timeouts.shift();
|
||||
fn();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
window.addEventListener("message", handleMessage, true);
|
||||
|
||||
// Add the one thing we want added to the window object.
|
||||
window.setZeroTimeout = setZeroTimeout;
|
||||
})();
|
||||
|
||||
var MP4Player = (function reader() {
|
||||
var defaultConfig = {
|
||||
filter: "original",
|
||||
filterHorLuma: "optimized",
|
||||
filterVerLumaEdge: "optimized",
|
||||
getBoundaryStrengthsA: "optimized"
|
||||
};
|
||||
|
||||
function constructor(stream, useWorkers, webgl, render) {
|
||||
this.stream = stream;
|
||||
this.useWorkers = useWorkers;
|
||||
this.webgl = webgl;
|
||||
this.render = render;
|
||||
|
||||
this.statistics = {
|
||||
videoStartTime: 0,
|
||||
videoPictureCounter: 0,
|
||||
windowStartTime: 0,
|
||||
windowPictureCounter: 0,
|
||||
fps: 0,
|
||||
fpsMin: 1000,
|
||||
fpsMax: -1000,
|
||||
webGLTextureUploadTime: 0
|
||||
};
|
||||
|
||||
this.onStatisticsUpdated = function () {};
|
||||
|
||||
this.avc = new Player({
|
||||
useWorker: useWorkers,
|
||||
reuseMemory: true,
|
||||
webgl: webgl,
|
||||
size: {
|
||||
width: 640,
|
||||
height: 368
|
||||
}
|
||||
});
|
||||
|
||||
this.webgl = this.avc.webgl;
|
||||
|
||||
var self = this;
|
||||
this.avc.onPictureDecoded = function(){
|
||||
updateStatistics.call(self);
|
||||
};
|
||||
|
||||
this.canvas = this.avc.canvas;
|
||||
}
|
||||
|
||||
function updateStatistics() {
|
||||
var s = this.statistics;
|
||||
s.videoPictureCounter += 1;
|
||||
s.windowPictureCounter += 1;
|
||||
var now = Date.now();
|
||||
if (!s.videoStartTime) {
|
||||
s.videoStartTime = now;
|
||||
}
|
||||
var videoElapsedTime = now - s.videoStartTime;
|
||||
s.elapsed = videoElapsedTime / 1000;
|
||||
if (videoElapsedTime < 1000) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (!s.windowStartTime) {
|
||||
s.windowStartTime = now;
|
||||
return;
|
||||
} else if ((now - s.windowStartTime) > 1000) {
|
||||
var windowElapsedTime = now - s.windowStartTime;
|
||||
var fps = (s.windowPictureCounter / windowElapsedTime) * 1000;
|
||||
s.windowStartTime = now;
|
||||
s.windowPictureCounter = 0;
|
||||
|
||||
if (fps < s.fpsMin) s.fpsMin = fps;
|
||||
if (fps > s.fpsMax) s.fpsMax = fps;
|
||||
s.fps = fps;
|
||||
}
|
||||
|
||||
var fps = (s.videoPictureCounter / videoElapsedTime) * 1000;
|
||||
s.fpsSinceStart = fps;
|
||||
this.onStatisticsUpdated(this.statistics);
|
||||
return;
|
||||
}
|
||||
|
||||
constructor.prototype = {
|
||||
readAll: function(callback) {
|
||||
console.info("MP4Player::readAll()");
|
||||
this.stream.readAll(null, function (buffer) {
|
||||
this.reader = new MP4Reader(new Bytestream(buffer));
|
||||
this.reader.read();
|
||||
var video = this.reader.tracks[1];
|
||||
this.size = new Size(video.trak.tkhd.width, video.trak.tkhd.height);
|
||||
console.info("MP4Player::readAll(), length: " + this.reader.stream.length);
|
||||
if (callback) callback();
|
||||
}.bind(this));
|
||||
},
|
||||
play: function() {
|
||||
var reader = this.reader;
|
||||
|
||||
if (!reader) {
|
||||
this.readAll(this.play.bind(this));
|
||||
return;
|
||||
};
|
||||
|
||||
var video = reader.tracks[1];
|
||||
var audio = reader.tracks[2];
|
||||
|
||||
var avc = reader.tracks[1].trak.mdia.minf.stbl.stsd.avc1.avcC;
|
||||
var sps = avc.sps[0];
|
||||
var pps = avc.pps[0];
|
||||
|
||||
/* Decode Sequence & Picture Parameter Sets */
|
||||
this.avc.decode(sps);
|
||||
this.avc.decode(pps);
|
||||
|
||||
/* Decode Pictures */
|
||||
var pic = 0;
|
||||
setTimeout(function foo() {
|
||||
var avc = this.avc;
|
||||
video.getSampleNALUnits(pic).forEach(function (nal) {
|
||||
avc.decode(nal);
|
||||
});
|
||||
pic ++;
|
||||
if (pic < 3000) {
|
||||
setTimeout(foo.bind(this), 1);
|
||||
};
|
||||
}.bind(this), 1);
|
||||
}
|
||||
};
|
||||
|
||||
return constructor;
|
||||
})();
|
||||
|
||||
var Broadway = (function broadway() {
|
||||
function constructor(div) {
|
||||
var src = div.attributes.src ? div.attributes.src.value : undefined;
|
||||
var width = div.attributes.width ? div.attributes.width.value : 640;
|
||||
var height = div.attributes.height ? div.attributes.height.value : 480;
|
||||
|
||||
var controls = document.createElement('div');
|
||||
controls.setAttribute('style', "z-index: 100; position: absolute; bottom: 0px; background-color: rgba(0,0,0,0.8); height: 30px; width: 100%; text-align: left;");
|
||||
this.info = document.createElement('div');
|
||||
this.info.setAttribute('style', "font-size: 14px; font-weight: bold; padding: 6px; color: lime;");
|
||||
controls.appendChild(this.info);
|
||||
div.appendChild(controls);
|
||||
|
||||
var useWorkers = div.attributes.workers ? div.attributes.workers.value == "true" : false;
|
||||
var render = div.attributes.render ? div.attributes.render.value == "true" : false;
|
||||
|
||||
var webgl = "auto";
|
||||
if (div.attributes.webgl){
|
||||
if (div.attributes.webgl.value == "true"){
|
||||
webgl = true;
|
||||
};
|
||||
if (div.attributes.webgl.value == "false"){
|
||||
webgl = false;
|
||||
};
|
||||
};
|
||||
|
||||
var infoStrPre = "Click canvas to load and play - ";
|
||||
var infoStr = "";
|
||||
if (useWorkers){
|
||||
infoStr += "worker thread ";
|
||||
}else{
|
||||
infoStr += "main thread ";
|
||||
};
|
||||
|
||||
this.player = new MP4Player(new Stream(src), useWorkers, webgl, render);
|
||||
this.canvas = this.player.canvas;
|
||||
this.canvas.onclick = function () {
|
||||
this.play();
|
||||
}.bind(this);
|
||||
div.appendChild(this.canvas);
|
||||
|
||||
|
||||
infoStr += " - webgl: " + this.player.webgl;
|
||||
this.info.innerHTML = infoStrPre + infoStr;
|
||||
|
||||
|
||||
this.score = null;
|
||||
this.player.onStatisticsUpdated = function (statistics) {
|
||||
if (statistics.videoPictureCounter % 10 != 0) {
|
||||
return;
|
||||
}
|
||||
var info = "";
|
||||
if (statistics.fps) {
|
||||
info += " fps: " + statistics.fps.toFixed(2);
|
||||
}
|
||||
if (statistics.fpsSinceStart) {
|
||||
info += " avg: " + statistics.fpsSinceStart.toFixed(2);
|
||||
}
|
||||
var scoreCutoff = 1200;
|
||||
if (statistics.videoPictureCounter < scoreCutoff) {
|
||||
this.score = scoreCutoff - statistics.videoPictureCounter;
|
||||
} else if (statistics.videoPictureCounter == scoreCutoff) {
|
||||
this.score = statistics.fpsSinceStart.toFixed(2);
|
||||
}
|
||||
// info += " score: " + this.score;
|
||||
|
||||
this.info.innerHTML = infoStr + info;
|
||||
}.bind(this);
|
||||
}
|
||||
constructor.prototype = {
|
||||
play: function () {
|
||||
this.player.play();
|
||||
}
|
||||
};
|
||||
return constructor;
|
||||
})();
|
||||
|
||||
|
||||
return {
|
||||
Size,
|
||||
Track,
|
||||
MP4Reader,
|
||||
MP4Player,
|
||||
Bytestream,
|
||||
Broadway,
|
||||
}
|
||||
|
||||
})();
|
||||
File diff suppressed because one or more lines are too long
@ -0,0 +1,350 @@
|
||||
/*
|
||||
* Copyright (C) 2019 Intel Corporation
|
||||
* SPDX-License-Identifier: MIT
|
||||
*/
|
||||
|
||||
/* global
|
||||
require:true
|
||||
*/
|
||||
|
||||
const { Mutex } = require('async-mutex');
|
||||
// eslint-disable-next-line max-classes-per-file
|
||||
const { MP4Reader, Bytestream } = require('./3rdparty/mp4');
|
||||
const ZipDecoder = require('./unzip_imgs.worker');
|
||||
const H264Decoder = require('./3rdparty/Decoder.worker');
|
||||
|
||||
const BlockType = Object.freeze({
|
||||
MP4VIDEO: 'mp4video',
|
||||
ARCHIVE: 'archive',
|
||||
});
|
||||
|
||||
class FrameProvider {
|
||||
constructor(blockType, blockSize, cachedBlockCount,
|
||||
decodedBlocksCacheSize = 5, maxWorkerThreadCount = 2) {
|
||||
this._frames = {};
|
||||
this._cachedBlockCount = Math.max(1, cachedBlockCount); // number of stored blocks
|
||||
this._decodedBlocksCacheSize = decodedBlocksCacheSize;
|
||||
this._blocksRanges = [];
|
||||
this._blocks = {};
|
||||
this._blockSize = blockSize;
|
||||
this._running = false;
|
||||
this._blockType = blockType;
|
||||
this._currFrame = -1;
|
||||
this._requestedBlockDecode = null;
|
||||
this._width = null;
|
||||
this._height = null;
|
||||
this._decodingBlocks = {};
|
||||
this._decodeThreadCount = 0;
|
||||
this._timerId = setTimeout(this._worker.bind(this), 100);
|
||||
this._mutex = new Mutex();
|
||||
this._promisedFrames = {};
|
||||
this._maxWorkerThreadCount = maxWorkerThreadCount;
|
||||
}
|
||||
|
||||
async _worker() {
|
||||
if (this._requestedBlockDecode !== null
|
||||
&& this._decodeThreadCount < this._maxWorkerThreadCount) {
|
||||
await this.startDecode();
|
||||
}
|
||||
this._timerId = setTimeout(this._worker.bind(this), 100);
|
||||
}
|
||||
|
||||
isChunkCached(start, end) {
|
||||
return (`${start}:${end}` in this._blocksRanges);
|
||||
}
|
||||
|
||||
    /* This method removes extra data from the cache when it overflows */
|
||||
async _cleanup() {
|
||||
if (this._blocksRanges.length > this._cachedBlockCount) {
|
||||
const shifted = this._blocksRanges.shift(); // get the oldest block
|
||||
const [start, end] = shifted.split(':').map((el) => +el);
|
||||
delete this._blocks[start / this._blockSize];
|
||||
for (let i = start; i <= end; i++) {
|
||||
delete this._frames[i];
|
||||
}
|
||||
}
|
||||
|
||||
        // delete frames that are outside the area around the current frame
|
||||
const distance = Math.floor(this._decodedBlocksCacheSize / 2);
|
||||
for (let i = 0; i < this._blocksRanges.length; i++) {
|
||||
const [start, end] = this._blocksRanges[i].split(':').map((el) => +el);
|
||||
if (end < this._currFrame - distance * this._blockSize
|
||||
|| start > this._currFrame + distance * this._blockSize) {
|
||||
for (let j = start; j <= end; j++) {
|
||||
delete this._frames[j];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async requestDecodeBlock(block, start, end, resolveCallback, rejectCallback) {
|
||||
const release = await this._mutex.acquire();
|
||||
try {
|
||||
if (this._requestedBlockDecode !== null) {
|
||||
if (start === this._requestedBlockDecode.start
|
||||
&& end === this._requestedBlockDecode.end) {
|
||||
this._requestedBlockDecode.resolveCallback = resolveCallback;
|
||||
this._requestedBlockDecode.rejectCallback = rejectCallback;
|
||||
} else if (this._requestedBlockDecode.rejectCallback) {
|
||||
this._requestedBlockDecode.rejectCallback();
|
||||
}
|
||||
}
|
||||
if (!(`${start}:${end}` in this._decodingBlocks)) {
|
||||
this._requestedBlockDecode = {
|
||||
block: block || this._blocks[Math.floor(start / this._blockSize)],
|
||||
start,
|
||||
end,
|
||||
resolveCallback,
|
||||
rejectCallback,
|
||||
};
|
||||
} else {
|
||||
this._decodingBlocks[`${start}:${end}`].rejectCallback = rejectCallback;
|
||||
this._decodingBlocks[`${start}:${end}`].resolveCallback = resolveCallback;
|
||||
}
|
||||
} finally {
|
||||
release();
|
||||
}
|
||||
}
|
||||
|
||||
isRequestExist() {
|
||||
return this._requestedBlockDecode !== null;
|
||||
}
|
||||
|
||||
setRenderSize(width, height) {
|
||||
this._width = width;
|
||||
this._height = height;
|
||||
}
|
||||
|
||||
    /* Returns the requested frame from the collection, or null if its block has not been requested for decoding */
|
||||
async frame(frameNumber) {
|
||||
this._currFrame = frameNumber;
|
||||
return new Promise((resolve, reject) => {
|
||||
if (frameNumber in this._frames) {
|
||||
if (this._frames[frameNumber] !== null) {
|
||||
resolve(this._frames[frameNumber]);
|
||||
} else {
|
||||
this._promisedFrames[frameNumber] = {
|
||||
resolve,
|
||||
reject,
|
||||
};
|
||||
}
|
||||
} else {
|
||||
resolve(null);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
isNextChunkExists(frameNumber) {
|
||||
const nextChunkNum = Math.floor(frameNumber / this._blockSize) + 1;
|
||||
if (this._blocks[nextChunkNum] === 'loading') {
|
||||
return true;
|
||||
}
|
||||
|
||||
return nextChunkNum in this._blocks;
|
||||
}
|
||||
|
||||
    /*
        Starts asynchronous decoding of a block of data

        @param block - data from the server as is (an mp4 video chunk or an archive)
        @param start {number} - the first frame of the block
        @param end {number} - the last frame of the block (inclusive)
        @param resolveCallback - called with the index of each decoded frame
        @param rejectCallback - called if decoding fails
    */
|
||||
|
||||
setReadyToLoading(chunkNumber) {
|
||||
this._blocks[chunkNumber] = 'loading';
|
||||
}
|
||||
|
||||
static cropImage(imageBuffer, imageWidth, imageHeight, xOffset, yOffset, width, height) {
|
||||
if (xOffset === 0 && width === imageWidth
|
||||
&& yOffset === 0 && height === imageHeight) {
|
||||
return new ImageData(new Uint8ClampedArray(imageBuffer), width, height);
|
||||
}
|
||||
const source = new Uint32Array(imageBuffer);
|
||||
|
||||
const bufferSize = width * height * 4;
|
||||
const buffer = new ArrayBuffer(bufferSize);
|
||||
const rgbaInt32 = new Uint32Array(buffer);
|
||||
const rgbaInt8Clamped = new Uint8ClampedArray(buffer);
|
||||
|
||||
if (imageWidth === width) {
|
||||
return new ImageData(
|
||||
new Uint8ClampedArray(imageBuffer, yOffset * 4, bufferSize),
|
||||
width,
|
||||
height,
|
||||
);
|
||||
}
|
||||
|
||||
let writeIdx = 0;
|
||||
for (let row = yOffset; row < height; row++) {
|
||||
const start = row * imageWidth + xOffset;
|
||||
rgbaInt32.set(source.subarray(start, start + width), writeIdx);
|
||||
writeIdx += width;
|
||||
}
|
||||
|
||||
return new ImageData(rgbaInt8Clamped, width, height);
|
||||
}
|
||||
|
||||
async startDecode() {
|
||||
const release = await this._mutex.acquire();
|
||||
try {
|
||||
const height = this._height;
|
||||
const width = this._width;
|
||||
const { start, end, block } = this._requestedBlockDecode;
|
||||
|
||||
this._blocksRanges.push(`${start}:${end}`);
|
||||
this._decodingBlocks[`${start}:${end}`] = this._requestedBlockDecode;
|
||||
this._requestedBlockDecode = null;
|
||||
this._blocks[Math.floor((start + 1) / this._blockSize)] = block;
|
||||
for (let i = start; i <= end; i++) {
|
||||
this._frames[i] = null;
|
||||
}
|
||||
this._cleanup();
|
||||
if (this._blockType === BlockType.MP4VIDEO) {
|
||||
const worker = new H264Decoder();
|
||||
let index = start;
|
||||
|
||||
worker.onmessage = (e) => {
|
||||
if (e.data.consoleLog) { // ignore initialization message
|
||||
return;
|
||||
}
|
||||
|
||||
const scaleFactor = Math.ceil(this._height / e.data.height);
|
||||
this._frames[index] = FrameProvider.cropImage(
|
||||
e.data.buf, e.data.width, e.data.height, 0, 0,
|
||||
Math.floor(width / scaleFactor), Math.floor(height / scaleFactor),
|
||||
);
|
||||
|
||||
if (this._decodingBlocks[`${start}:${end}`].resolveCallback) {
|
||||
this._decodingBlocks[`${start}:${end}`].resolveCallback(index);
|
||||
}
|
||||
|
||||
if (index in this._promisedFrames) {
|
||||
this._promisedFrames[index].resolve(this._frames[index]);
|
||||
delete this._promisedFrames[index];
|
||||
}
|
||||
if (index === end) {
|
||||
this._decodeThreadCount--;
|
||||
delete this._decodingBlocks[`${start}:${end}`];
|
||||
worker.terminate();
|
||||
}
|
||||
index++;
|
||||
};
|
||||
|
||||
worker.onerror = (e) => {
|
||||
worker.terminate();
|
||||
this._decodeThreadCount--;
|
||||
|
||||
for (let i = index; i <= end; i++) {
|
||||
if (i in this._promisedFrames) {
|
||||
this._promisedFrames[i].reject();
|
||||
delete this._promisedFrames[i];
|
||||
}
|
||||
}
|
||||
|
||||
if (this._decodingBlocks[`${start}:${end}`].rejectCallback) {
|
||||
this._decodingBlocks[`${start}:${end}`].rejectCallback(Error(e));
|
||||
}
|
||||
delete this._decodingBlocks[`${start}:${end}`];
|
||||
};
|
||||
|
||||
worker.postMessage({
|
||||
type: 'Broadway.js - Worker init',
|
||||
options: {
|
||||
rgb: true,
|
||||
reuseMemory: false,
|
||||
},
|
||||
});
|
||||
|
||||
const reader = new MP4Reader(new Bytestream(block));
|
||||
reader.read();
|
||||
const video = reader.tracks[1];
|
||||
|
||||
const avc = reader.tracks[1].trak.mdia.minf.stbl.stsd.avc1.avcC;
|
||||
const sps = avc.sps[0];
|
||||
const pps = avc.pps[0];
|
||||
|
||||
/* Decode Sequence & Picture Parameter Sets */
|
||||
worker.postMessage({ buf: sps, offset: 0, length: sps.length });
|
||||
worker.postMessage({ buf: pps, offset: 0, length: pps.length });
|
||||
|
||||
/* Decode Pictures */
|
||||
for (let sample = 0; sample < video.getSampleCount(); sample++) {
|
||||
video.getSampleNALUnits(sample).forEach((nal) => {
|
||||
worker.postMessage({ buf: nal, offset: 0, length: nal.length });
|
||||
});
|
||||
}
|
||||
this._decodeThreadCount++;
|
||||
} else {
|
||||
const worker = new ZipDecoder();
|
||||
let index = start;
|
||||
|
||||
worker.onerror = (e) => {
|
||||
for (let i = start; i <= end; i++) {
|
||||
if (i in this._promisedFrames) {
|
||||
this._promisedFrames[i].reject();
|
||||
delete this._promisedFrames[i];
|
||||
}
|
||||
}
|
||||
if (this._decodingBlocks[`${start}:${end}`].rejectCallback) {
|
||||
this._decodingBlocks[`${start}:${end}`].rejectCallback(Error(e));
|
||||
}
|
||||
this._decodeThreadCount--;
|
||||
worker.terminate();
|
||||
};
|
||||
|
||||
worker.onmessage = (event) => {
|
||||
this._frames[event.data.index] = event.data.data;
|
||||
|
||||
if (this._decodingBlocks[`${start}:${end}`].resolveCallback) {
|
||||
this._decodingBlocks[`${start}:${end}`].resolveCallback(event.data.index);
|
||||
}
|
||||
|
||||
if (event.data.index in this._promisedFrames) {
|
||||
this._promisedFrames[event.data.index].resolve(
|
||||
this._frames[event.data.index],
|
||||
);
|
||||
delete this._promisedFrames[event.data.index];
|
||||
}
|
||||
|
||||
if (index === end) {
|
||||
worker.terminate();
|
||||
delete this._decodingBlocks[`${start}:${end}`];
|
||||
this._decodeThreadCount--;
|
||||
}
|
||||
index++;
|
||||
};
|
||||
|
||||
worker.postMessage({ block, start, end });
|
||||
this._decodeThreadCount++;
|
||||
}
|
||||
} finally {
|
||||
release();
|
||||
}
|
||||
}
|
||||
|
||||
get decodeThreadCount() {
|
||||
return this._decodeThreadCount;
|
||||
}
|
||||
|
||||
get decodedBlocksCacheSize() {
|
||||
return this._decodedBlocksCacheSize;
|
||||
}
|
||||
|
||||
    /*
        Returns the list of cached block ranges,
        sorted as an array of strings of the form "start:end"
    */
|
||||
get cachedFrames() {
|
||||
return [...this._blocksRanges].sort(
|
||||
(a, b) => a.split(':')[0] - b.split(':')[0],
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
FrameProvider,
|
||||
BlockType,
|
||||
};
|
||||
@ -0,0 +1,35 @@
|
||||
/*
|
||||
* Copyright (C) 2019 Intel Corporation
|
||||
* SPDX-License-Identifier: MIT
|
||||
*/
|
||||
|
||||
/* global
|
||||
require:true
|
||||
*/
|
||||
|
||||
const JSZip = require('jszip');
|
||||
|
||||
onmessage = (e) => {
|
||||
const zip = new JSZip();
|
||||
if (e.data) {
|
||||
const { start, end, block } = e.data;
|
||||
|
||||
zip.loadAsync(block).then((_zip) => {
|
||||
let index = start;
|
||||
_zip.forEach((relativePath) => {
|
||||
const fileIndex = index++;
|
||||
if (fileIndex <= end) {
|
||||
_zip.file(relativePath).async('blob').then((fileData) => {
|
||||
createImageBitmap(fileData).then((img) => {
|
||||
postMessage({
|
||||
fileName: relativePath,
|
||||
index: fileIndex,
|
||||
data: img,
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
};
|
||||
@ -0,0 +1,64 @@
|
||||
/* global
|
||||
require:true,
|
||||
__dirname:true,
|
||||
*/
|
||||
|
||||
const path = require('path');
|
||||
const CopyPlugin = require('copy-webpack-plugin');
|
||||
|
||||
const cvatData = {
|
||||
target: 'web',
|
||||
mode: 'production',
|
||||
entry: './src/js/cvat-data.js',
|
||||
output: {
|
||||
path: path.resolve(__dirname, 'dist'),
|
||||
filename: 'cvat-data.min.js',
|
||||
library: 'cvatData',
|
||||
libraryTarget: 'window',
|
||||
},
|
||||
module: {
|
||||
rules: [
|
||||
{
|
||||
test: /.js?$/,
|
||||
exclude: /node_modules/,
|
||||
use: {
|
||||
loader: 'babel-loader',
|
||||
options: {
|
||||
presets: [
|
||||
['@babel/preset-env', {
|
||||
targets: '> 2.5%', // https://github.com/browserslist/browserslist
|
||||
}],
|
||||
],
|
||||
sourceType: 'unambiguous',
|
||||
},
|
||||
},
|
||||
}, {
|
||||
test: /\.worker\.js$/,
|
||||
exclude: /3rdparty/,
|
||||
use: {
|
||||
loader: 'worker-loader',
|
||||
options: {
|
||||
publicPath: '/',
|
||||
name: '[name].js',
|
||||
},
|
||||
},
|
||||
}, {
|
||||
test: /3rdparty\/.*\.worker\.js$/,
|
||||
use: {
|
||||
loader: 'worker-loader',
|
||||
options: {
|
||||
publicPath: '/3rdparty/',
|
||||
name: '3rdparty/[name].js',
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
plugins: [
|
||||
new CopyPlugin([
|
||||
'./src/js/3rdparty/avc.wasm',
|
||||
]),
|
||||
],
|
||||
};
|
||||
|
||||
module.exports = cvatData;
|
||||
@ -0,0 +1,149 @@
|
||||
# Copyright (C) 2019 Intel Corporation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
import math
|
||||
import math
from enum import Enum
from io import BytesIO

import numpy as np
from PIL import Image

from cvat.apps.engine.media_extractors import VideoReader, ZipReader
from cvat.apps.engine.models import DataChoice
from cvat.apps.engine.mime_types import mimetypes


class FrameProvider():
    class Quality(Enum):
        COMPRESSED = 0
        ORIGINAL = 100

    class Type(Enum):
        BUFFER = 0
        PIL = 1
        NUMPY_ARRAY = 2

    def __init__(self, db_data):
        self._db_data = db_data
        # Pick reader implementations according to how chunks are stored on disk
        if db_data.compressed_chunk_type == DataChoice.IMAGESET:
            self._compressed_chunk_reader_class = ZipReader
        elif db_data.compressed_chunk_type == DataChoice.VIDEO:
            self._compressed_chunk_reader_class = VideoReader
        else:
            raise Exception('Unsupported chunk type')

        if db_data.original_chunk_type == DataChoice.IMAGESET:
            self._original_chunk_reader_class = ZipReader
        elif db_data.original_chunk_type == DataChoice.VIDEO:
            self._original_chunk_reader_class = VideoReader
        else:
            raise Exception('Unsupported chunk type')

        self._extracted_compressed_chunk = None
        self._compressed_chunk_reader = None
        self._extracted_original_chunk = None
        self._original_chunk_reader = None

    def __len__(self):
        return self._db_data.size

    def _validate_frame_number(self, frame_number):
        frame_number_ = int(frame_number)
        if frame_number_ < 0 or frame_number_ >= self._db_data.size:
            raise Exception('Incorrect requested frame number: {}'.format(frame_number_))

        chunk_number = frame_number_ // self._db_data.chunk_size
        frame_offset = frame_number_ % self._db_data.chunk_size

        return frame_number_, chunk_number, frame_offset

    def _validate_chunk_number(self, chunk_number):
        chunk_number_ = int(chunk_number)
        if chunk_number_ < 0 or chunk_number_ >= math.ceil(self._db_data.size / self._db_data.chunk_size):
            raise Exception('requested chunk does not exist')

        return chunk_number_

    @staticmethod
    def _av_frame_to_png_bytes(av_frame):
        pil_img = av_frame.to_image()
        buf = BytesIO()
        pil_img.save(buf, format='PNG')
        buf.seek(0)
        return buf

    def _get_frame(self, frame_number, chunk_path_getter, extracted_chunk, chunk_reader, reader_class):
        _, chunk_number, frame_offset = self._validate_frame_number(frame_number)
        chunk_path = chunk_path_getter(chunk_number)
        # Re-open the reader only when a different chunk is requested
        if chunk_number != extracted_chunk:
            extracted_chunk = chunk_number
            chunk_reader = reader_class([chunk_path])

        frame, frame_name = chunk_reader[frame_offset]
        if reader_class is VideoReader:
            return (self._av_frame_to_png_bytes(frame), 'image/png')

        return (frame, mimetypes.guess_type(frame_name))

    def _get_frames(self, chunk_path_getter, reader_class, out_type):
        for chunk_idx in range(math.ceil(self._db_data.size / self._db_data.chunk_size)):
            chunk_path = chunk_path_getter(chunk_idx)
            chunk_reader = reader_class([chunk_path])
            for frame, _ in chunk_reader:
                if out_type == self.Type.BUFFER:
                    yield self._av_frame_to_png_bytes(frame) if reader_class is VideoReader else frame
                elif out_type == self.Type.PIL:
                    yield frame.to_image() if reader_class is VideoReader else Image.open(frame)
                elif out_type == self.Type.NUMPY_ARRAY:
                    if reader_class is VideoReader:
                        image = np.array(frame.to_image())
                    else:
                        image = np.array(Image.open(frame))
                    if len(image.shape) == 3 and image.shape[2] in {3, 4}:
                        image[:, :, :3] = image[:, :, 2::-1]  # RGB to BGR
                    yield image
                else:
                    raise Exception('unsupported output type')

    def get_preview(self):
        return self._db_data.get_preview_path()

    def get_chunk(self, chunk_number, quality=Quality.ORIGINAL):
        chunk_number = self._validate_chunk_number(chunk_number)
        if quality == self.Quality.ORIGINAL:
            return self._db_data.get_original_chunk_path(chunk_number)
        elif quality == self.Quality.COMPRESSED:
            return self._db_data.get_compressed_chunk_path(chunk_number)

    def get_frame(self, frame_number, quality=Quality.ORIGINAL):
        if quality == self.Quality.ORIGINAL:
            return self._get_frame(
                frame_number=frame_number,
                chunk_path_getter=self._db_data.get_original_chunk_path,
                extracted_chunk=self._extracted_original_chunk,
                chunk_reader=self._original_chunk_reader,
                reader_class=self._original_chunk_reader_class,
            )
        elif quality == self.Quality.COMPRESSED:
            return self._get_frame(
                frame_number=frame_number,
                chunk_path_getter=self._db_data.get_compressed_chunk_path,
                extracted_chunk=self._extracted_compressed_chunk,
                chunk_reader=self._compressed_chunk_reader,
                reader_class=self._compressed_chunk_reader_class,
            )

    def get_frames(self, quality=Quality.ORIGINAL, out_type=Type.BUFFER):
        if quality == self.Quality.ORIGINAL:
            return self._get_frames(
                chunk_path_getter=self._db_data.get_original_chunk_path,
                reader_class=self._original_chunk_reader_class,
                out_type=out_type,
            )
        elif quality == self.Quality.COMPRESSED:
            return self._get_frames(
                chunk_path_getter=self._db_data.get_compressed_chunk_path,
                reader_class=self._compressed_chunk_reader_class,
                out_type=out_type,
            )
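A minimal sketch of how a server-side view might serve a compressed frame through FrameProvider. The view name, URL parameters and Task lookup are assumptions made for this illustration; they are not part of the patch itself.

from django.http import HttpResponse

from cvat.apps.engine.frame_provider import FrameProvider
from cvat.apps.engine.models import Task

def serve_frame(request, task_id, frame_number):
    # Hypothetical lookup; real views resolve the task and check permissions first
    db_task = Task.objects.get(pk=task_id)
    provider = FrameProvider(db_task.data)
    buf, mime = provider.get_frame(int(frame_number), quality=FrameProvider.Quality.COMPRESSED)
    if isinstance(mime, tuple):  # mimetypes.guess_type() returns a (type, encoding) pair
        mime = mime[0]
    data = buf.getvalue() if hasattr(buf, 'getvalue') else buf
    return HttpResponse(data, content_type=mime)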
@ -0,0 +1,461 @@
# Generated by Django 2.2.4 on 2019-10-23 10:25

import os
import re
import shutil
import glob
import logging
import sys
import traceback
import itertools
import multiprocessing
import time

from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings

from cvat.apps.engine.media_extractors import (VideoReader, ArchiveReader, ZipReader,
    PdfReader, ImageListReader, Mpeg4ChunkWriter,
    ZipChunkWriter, ZipCompressedChunkWriter, get_mime)
from cvat.apps.engine.models import DataChoice

MIGRATION_THREAD_COUNT = 2

def fix_path(path):
    # Strip everything up to and including the old '.upload' directory
    ind = path.find('.upload')
    if ind != -1:
        path = path[ind + len('.upload') + 1:]
    return path

def get_frame_step(frame_filter):
    # frame_filter looks like 'step=N'; default to a step of 1
    match = re.search(r"step\s*=\s*([1-9]\d*)", frame_filter)
    return int(match.group(1)) if match else 1

def get_task_on_disk():
    folders = [os.path.relpath(f, settings.DATA_ROOT)
        for f in glob.glob(os.path.join(settings.DATA_ROOT, '*'), recursive=False)]

    return set(int(f) for f in folders if f.isdigit())

def get_frame_path(task_data_dir, frame):
    # Old per-frame layout: <task_data_dir>/<frame//10000>/<frame//100>/<frame>.jpg
    d1 = str(int(frame) // 10000)
    d2 = str(int(frame) // 100)
    path = os.path.join(task_data_dir, d1, d2,
        str(frame) + '.jpg')

    return path

def slice_by_size(frames, size):
    it = itertools.islice(frames, 0, None)
    frames = list(itertools.islice(it, 0, size, 1))
    while frames:
        yield frames
        frames = list(itertools.islice(it, 0, size, 1))
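# Illustration added for clarity (not in the original migration): under the
# definitions above the helpers behave like this, e.g.
#   get_frame_step('step=5')          -> 5
#   get_frame_step('')                -> 1
#   list(slice_by_size(range(5), 2))  -> [[0, 1], [2, 3], [4]]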
def migrate_task_data(db_task_id, db_data_id, original_video, original_images, size, start_frame,
        stop_frame, frame_filter, image_quality, chunk_size, return_dict):
    try:
        db_data_dir = os.path.join(settings.MEDIA_DATA_ROOT, str(db_data_id))
        compressed_cache_dir = os.path.join(db_data_dir, 'compressed')
        original_cache_dir = os.path.join(db_data_dir, 'original')
        old_db_task_dir = os.path.join(settings.DATA_ROOT, str(db_task_id))
        old_task_data_dir = os.path.join(old_db_task_dir, 'data')
        if os.path.exists(old_task_data_dir) and size != 0:
            if original_video:
                if os.path.exists(original_video):
                    # Re-encode the source video into original (mp4) and compressed (zip) chunks
                    reader = VideoReader([original_video], get_frame_step(frame_filter), start_frame, stop_frame)
                    original_chunk_writer = Mpeg4ChunkWriter(100)
                    compressed_chunk_writer = ZipCompressedChunkWriter(image_quality)

                    for chunk_idx, chunk_images in enumerate(reader.slice_by_size(chunk_size)):
                        original_chunk_path = os.path.join(original_cache_dir, '{}.mp4'.format(chunk_idx))
                        original_chunk_writer.save_as_chunk(chunk_images, original_chunk_path)

                        compressed_chunk_path = os.path.join(compressed_cache_dir, '{}.zip'.format(chunk_idx))
                        compressed_chunk_writer.save_as_chunk(chunk_images, compressed_chunk_path)

                    reader.save_preview(os.path.join(db_data_dir, 'preview.jpeg'))
                else:
                    # The source video is gone: rebuild chunks from the extracted frames on disk
                    original_chunk_writer = ZipChunkWriter(100)
                    for chunk_idx, chunk_image_ids in enumerate(slice_by_size(range(size), chunk_size)):
                        chunk_images = []
                        for image_id in chunk_image_ids:
                            image_path = get_frame_path(old_task_data_dir, image_id)
                            chunk_images.append((image_path, image_path))

                        original_chunk_path = os.path.join(original_cache_dir, '{}.zip'.format(chunk_idx))
                        original_chunk_writer.save_as_chunk(chunk_images, original_chunk_path)

                        compressed_chunk_path = os.path.join(compressed_cache_dir, '{}.zip'.format(chunk_idx))
                        os.symlink(original_chunk_path, compressed_chunk_path)
                    shutil.copyfile(get_frame_path(old_task_data_dir, image_id), os.path.join(db_data_dir, 'preview.jpeg'))
            else:
                reader = None
                if os.path.exists(original_images[0]): # task created from images
                    reader = ImageListReader(original_images)
                else: # task created from an archive, zip or pdf
                    archives = []
                    pdfs = []
                    zips = []
                    for p in glob.iglob(os.path.join(db_data_dir, 'raw', '**', '*'), recursive=True):
                        mime_type = get_mime(p)
                        if mime_type == 'archive':
                            archives.append(p)
                        elif mime_type == 'pdf':
                            pdfs.append(p)
                        elif mime_type == 'zip':
                            zips.append(p)
                    if archives:
                        reader = ArchiveReader(archives, get_frame_step(frame_filter), start_frame, stop_frame)
                    elif zips:
                        reader = ZipReader(zips, get_frame_step(frame_filter), start_frame, stop_frame)
                    elif pdfs:
                        reader = PdfReader(pdfs, get_frame_step(frame_filter), start_frame, stop_frame)

                if not reader:
                    # No readable source left: fall back to the extracted frames on disk
                    original_chunk_writer = ZipChunkWriter(100)
                    for chunk_idx, chunk_image_ids in enumerate(slice_by_size(range(size), chunk_size)):
                        chunk_images = []
                        for image_id in chunk_image_ids:
                            image_path = get_frame_path(old_task_data_dir, image_id)
                            chunk_images.append((image_path, image_path))

                        original_chunk_path = os.path.join(original_cache_dir, '{}.zip'.format(chunk_idx))
                        original_chunk_writer.save_as_chunk(chunk_images, original_chunk_path)

                        compressed_chunk_path = os.path.join(compressed_cache_dir, '{}.zip'.format(chunk_idx))
                        os.symlink(original_chunk_path, compressed_chunk_path)
                    shutil.copyfile(get_frame_path(old_task_data_dir, image_id), os.path.join(db_data_dir, 'preview.jpeg'))
                else:
                    original_chunk_writer = ZipChunkWriter(100)
                    compressed_chunk_writer = ZipCompressedChunkWriter(image_quality)

                    for chunk_idx, chunk_images in enumerate(reader.slice_by_size(chunk_size)):
                        compressed_chunk_path = os.path.join(compressed_cache_dir, '{}.zip'.format(chunk_idx))
                        compressed_chunk_writer.save_as_chunk(chunk_images, compressed_chunk_path)

                        original_chunk_path = os.path.join(original_cache_dir, '{}.zip'.format(chunk_idx))
                        original_chunk_writer.save_as_chunk(chunk_images, original_chunk_path)

                    reader.save_preview(os.path.join(db_data_dir, 'preview.jpeg'))
            shutil.rmtree(old_db_task_dir)
        return_dict[db_task_id] = (True, '')
    except Exception as e:
        traceback.print_exc(file=sys.stderr)
        return_dict[db_task_id] = (False, str(e))
    return 0
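# For orientation (an illustration, not code from this migration): after
# migrate_task_data() finishes for a Data object with ID <id>, the layout under
# settings.MEDIA_DATA_ROOT is expected to look roughly like
#   <id>/raw/...            original uploaded files
#   <id>/original/0.mp4     original-quality chunks (0.zip for image tasks)
#   <id>/compressed/0.zip   compressed chunks used by the UI
#   <id>/preview.jpeg       task preview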
def migrate_task_schema(db_task, Data, log):
    log.info('Start schema migration of task ID {}.'.format(db_task.id))
    try:
        # create folders
        new_task_dir = os.path.join(settings.TASKS_ROOT, str(db_task.id))
        os.makedirs(new_task_dir, exist_ok=True)
        os.makedirs(os.path.join(new_task_dir, 'artifacts'), exist_ok=True)
        new_task_logs_dir = os.path.join(new_task_dir, 'logs')
        os.makedirs(new_task_logs_dir, exist_ok=True)

        # create Data object
        db_data = Data.objects.create(
            size=db_task.size,
            image_quality=db_task.image_quality,
            start_frame=db_task.start_frame,
            stop_frame=db_task.stop_frame,
            frame_filter=db_task.frame_filter,
            compressed_chunk_type=DataChoice.IMAGESET,
            original_chunk_type=DataChoice.VIDEO if db_task.mode == 'interpolation' else DataChoice.IMAGESET,
        )
        db_data.save()

        db_task.data = db_data

        db_data_dir = os.path.join(settings.MEDIA_DATA_ROOT, str(db_data.id))
        os.makedirs(db_data_dir, exist_ok=True)
        compressed_cache_dir = os.path.join(db_data_dir, 'compressed')
        os.makedirs(compressed_cache_dir, exist_ok=True)

        original_cache_dir = os.path.join(db_data_dir, 'original')
        os.makedirs(original_cache_dir, exist_ok=True)

        old_db_task_dir = os.path.join(settings.DATA_ROOT, str(db_task.id))

        # move logs
        for log_file in ('task.log', 'client.log'):
            task_log_file = os.path.join(old_db_task_dir, log_file)
            if os.path.isfile(task_log_file):
                shutil.move(task_log_file, new_task_logs_dir)

        if hasattr(db_task, 'video'):
            db_task.video.data = db_data
            db_task.video.path = fix_path(db_task.video.path)
            db_task.video.save()

        for db_image in db_task.image_set.all():
            db_image.data = db_data
            db_image.path = fix_path(db_image.path)
            db_image.save()

        old_raw_dir = os.path.join(old_db_task_dir, '.upload')
        new_raw_dir = os.path.join(db_data_dir, 'raw')

        for client_file in db_task.clientfile_set.all():
            client_file.file = client_file.file.path.replace(old_raw_dir, new_raw_dir)
            client_file.save()

        for server_file in db_task.serverfile_set.all():
            server_file.file = server_file.file.replace(old_raw_dir, new_raw_dir)
            server_file.save()

        for remote_file in db_task.remotefile_set.all():
            remote_file.file = remote_file.file.replace(old_raw_dir, new_raw_dir)
            remote_file.save()

        db_task.save()

        # move old raw data
        if os.path.exists(old_raw_dir):
            shutil.move(old_raw_dir, new_raw_dir)

        return (db_task.id, db_data.id)

    except Exception as e:
        log.error('Cannot migrate schema for the task: {}'.format(db_task.id))
        log.error(str(e))
        traceback.print_exc(file=sys.stderr)
def create_data_objects(apps, schema_editor):
    migration_name = os.path.splitext(os.path.basename(__file__))[0]
    migration_log_file = '{}.log'.format(migration_name)
    stdout = sys.stdout
    stderr = sys.stderr
    # redirect all stdout to the file
    log_file_object = open(os.path.join(settings.MIGRATIONS_LOGS_ROOT, migration_log_file), 'w')
    sys.stdout = log_file_object
    sys.stderr = log_file_object

    log = logging.getLogger(migration_name)
    log.addHandler(logging.StreamHandler(stdout))
    log.addHandler(logging.StreamHandler(log_file_object))
    log.setLevel(logging.INFO)

    disk_tasks = get_task_on_disk()

    Task = apps.get_model('engine', 'Task')
    Data = apps.get_model('engine', 'Data')

    db_tasks = Task.objects
    task_count = db_tasks.count()
    log.info('\nStart schema migration...')
    migrated_db_tasks = []
    for counter, db_task in enumerate(db_tasks.all().iterator()):
        res = migrate_task_schema(db_task, Data, log)
        log.info('Schema migration for the task {} completed. Progress {}/{}'.format(db_task.id, counter + 1, task_count))
        if res:
            migrated_db_tasks.append(res)

    log.info('\nSchema migration is finished...')
    log.info('\nStart data migration...')

    manager = multiprocessing.Manager()
    return_dict = manager.dict()

    def create_process(db_task_id, db_data_id):
        db_data = Data.objects.get(pk=db_data_id)
        db_data_dir = os.path.join(settings.MEDIA_DATA_ROOT, str(db_data_id))
        new_raw_dir = os.path.join(db_data_dir, 'raw')

        original_video = None
        original_images = None
        if hasattr(db_data, 'video'):
            original_video = os.path.join(new_raw_dir, db_data.video.path)
        else:
            original_images = [os.path.realpath(os.path.join(new_raw_dir, db_image.path)) for db_image in db_data.images.all()]

        args = (db_task_id, db_data_id, original_video, original_images, db_data.size,
            db_data.start_frame, db_data.stop_frame, db_data.frame_filter, db_data.image_quality, db_data.chunk_size, return_dict)

        return multiprocessing.Process(target=migrate_task_data, args=args)

    # Run data migration in a small pool of worker processes
    results = {}
    task_idx = 0
    while True:
        # Collect finished workers and report their status
        for res_idx in list(results.keys()):
            res = results[res_idx]
            if not res.is_alive():
                del results[res_idx]
                if res.exitcode == 0:
                    ret_code, message = return_dict[res_idx]
                    if ret_code:
                        counter = (task_idx - len(results))
                        progress = (100 * counter) / task_count
                        log.info('Data migration for the task {} completed. Progress: {:.02f}% | {}/{}.'.format(res_idx, progress, counter, task_count))
                    else:
                        log.error('Cannot migrate data for the task: {}'.format(res_idx))
                        log.error(str(message))
                    if res_idx in disk_tasks:
                        disk_tasks.remove(res_idx)
                else:
                    log.error('#Cannot migrate data for the task: {}'.format(res_idx))

        # Start new workers while there are tasks left and free slots in the pool
        while task_idx < len(migrated_db_tasks) and len(results) < MIGRATION_THREAD_COUNT:
            log.info('Start data migration for the task {}, data ID {}'.format(migrated_db_tasks[task_idx][0], migrated_db_tasks[task_idx][1]))
            results[migrated_db_tasks[task_idx][0]] = create_process(*migrated_db_tasks[task_idx])
            results[migrated_db_tasks[task_idx][0]].start()
            task_idx += 1

        if len(results) == 0:
            break

        time.sleep(5)

    if disk_tasks:
        # Task data that exists on disk but has no corresponding database record
        suspicious_tasks_dir = os.path.join(settings.DATA_ROOT, 'suspicious_tasks')
        os.makedirs(suspicious_tasks_dir, exist_ok=True)
        for tid in disk_tasks:
            suspicious_task_path = os.path.join(settings.DATA_ROOT, str(tid))
            try:
                shutil.move(suspicious_task_path, suspicious_tasks_dir)
            except Exception as e:
                log.error('Cannot move data for the suspicious task {}, \
                    that is not represented in the database.'.format(suspicious_task_path))
                log.error(str(e))

    # DL models migration
    if apps.is_installed('auto_annotation'):
        DLModel = apps.get_model('auto_annotation', 'AnnotationModel')

        for db_model in DLModel.objects.all():
            try:
                old_location = os.path.join(settings.BASE_DIR, 'models', str(db_model.id))
                new_location = os.path.join(settings.BASE_DIR, 'data', 'models', str(db_model.id))

                if os.path.isdir(old_location):
                    shutil.move(old_location, new_location)

                db_model.model_file.name = db_model.model_file.name.replace(old_location, new_location)
                db_model.weights_file.name = db_model.weights_file.name.replace(old_location, new_location)
                db_model.labelmap_file.name = db_model.labelmap_file.name.replace(old_location, new_location)
                db_model.interpretation_file.name = db_model.interpretation_file.name.replace(old_location, new_location)

                db_model.save()
            except Exception as e:
                log.error('Cannot migrate data for the DL model: {}'.format(db_model.id))
                log.error(str(e))

    log_file_object.close()
    sys.stdout = stdout
    sys.stderr = stderr
class Migration(migrations.Migration):

    dependencies = [
        ('engine', '0023_auto_20200113_1323'),
    ]

    operations = [
        migrations.CreateModel(
            name='Data',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('chunk_size', models.PositiveIntegerField(default=36)),
                ('size', models.PositiveIntegerField(default=0)),
                ('image_quality', models.PositiveSmallIntegerField(default=50)),
                ('start_frame', models.PositiveIntegerField(default=0)),
                ('stop_frame', models.PositiveIntegerField(default=0)),
                ('frame_filter', models.CharField(blank=True, default='', max_length=256)),
                ('compressed_chunk_type', models.CharField(choices=[('video', 'VIDEO'), ('imageset', 'IMAGESET'), ('list', 'LIST')], default=DataChoice('imageset'), max_length=32)),
                ('original_chunk_type', models.CharField(choices=[('video', 'VIDEO'), ('imageset', 'IMAGESET'), ('list', 'LIST')], default=DataChoice('imageset'), max_length=32)),
            ],
            options={
                'default_permissions': (),
            },
        ),
        migrations.AddField(
            model_name='task',
            name='data',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='tasks', to='engine.Data'),
        ),
        migrations.AddField(
            model_name='image',
            name='data',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='images', to='engine.Data'),
        ),
        migrations.AddField(
            model_name='video',
            name='data',
            field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='video', to='engine.Data'),
        ),
        migrations.AddField(
            model_name='clientfile',
            name='data',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='client_files', to='engine.Data'),
        ),
        migrations.AddField(
            model_name='remotefile',
            name='data',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='remote_files', to='engine.Data'),
        ),
        migrations.AddField(
            model_name='serverfile',
            name='data',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='server_files', to='engine.Data'),
        ),
        migrations.RunPython(
            code=create_data_objects
        ),
        migrations.RemoveField(
            model_name='image',
            name='task',
        ),
        migrations.RemoveField(
            model_name='remotefile',
            name='task',
        ),
        migrations.RemoveField(
            model_name='serverfile',
            name='task',
        ),
        migrations.RemoveField(
            model_name='task',
            name='frame_filter',
        ),
        migrations.RemoveField(
            model_name='task',
            name='image_quality',
        ),
        migrations.RemoveField(
            model_name='task',
            name='size',
        ),
        migrations.RemoveField(
            model_name='task',
            name='start_frame',
        ),
        migrations.RemoveField(
            model_name='task',
            name='stop_frame',
        ),
        migrations.RemoveField(
            model_name='video',
            name='task',
        ),
        migrations.AlterField(
            model_name='image',
            name='path',
            field=models.CharField(default='', max_length=1024),
        ),
        migrations.AlterField(
            model_name='video',
            name='path',
            field=models.CharField(default='', max_length=1024),
        ),
        migrations.AlterUniqueTogether(
            name='clientfile',
            unique_together={('data', 'file')},
        ),
        migrations.RemoveField(
            model_name='clientfile',
            name='task',
        ),
    ]
@ -0,0 +1,18 @@
# Generated by Django 2.2.10 on 2020-03-24 12:22

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('engine', '0024_auto_20191023_1025'),
    ]

    operations = [
        migrations.AlterField(
            model_name='data',
            name='chunk_size',
            field=models.PositiveIntegerField(null=True),
        ),
    ]
@ -0,0 +1,13 @@
# Copyright (C) 2019 Intel Corporation
#
# SPDX-License-Identifier: MIT

import os
import mimetypes

_SCRIPT_DIR = os.path.realpath(os.path.dirname(__file__))
MEDIA_MIMETYPES_FILES = [
    os.path.join(_SCRIPT_DIR, "media.mimetypes"),
]
mimetypes.init(files=MEDIA_MIMETYPES_FILES)
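# Illustration of typical use (not part of the module): once mimetypes.init()
# has loaded media.mimetypes above, callers such as FrameProvider can classify
# frame files by name, e.g.
#   mimetypes.guess_type('frame_000001.jpg')   # -> ('image/jpeg', None)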
File diff suppressed because one or more lines are too long
Binary file not shown.
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@ -1,25 +0,0 @@
# Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: MIT

import os.path as osp

from django.test import TestCase
from cvat.apps.engine.models import Task


class TaskModelTest(TestCase):
    def test_frame_id_path_conversions(self):
        task_id = 1
        task = Task(task_id)

        for i in [10 ** p for p in range(6)]:
            src_path_expected = osp.join(
                str(i // 10000), str(i // 100), '%s.jpg' % i)
            src_path = task.get_frame_path(i)

            dst_frame = task.get_image_frame(src_path)

            self.assertTrue(src_path.endswith(src_path_expected),
                '%s vs. %s' % (src_path, src_path_expected))
            self.assertEqual(i, dst_frame)