OpenVINO 2020 (#1269)

* added support for OpenVINO 2020

* fixed dextr and tf_annotation

Co-authored-by: Andrey Zhavoronkov <andrey.zhavoronkov@intel.com>
main
Ben Hoff 6 years ago committed by GitHub
parent bfd300039e
commit 14084435bc
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -52,6 +52,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- React & Redux & Antd based dashboard - React & Redux & Antd based dashboard
- Yolov3 interpretation script fix and changes to mapping.json - Yolov3 interpretation script fix and changes to mapping.json
- YOLO format support ([#1151](https://github.com/opencv/cvat/pull/1151)) - YOLO format support ([#1151](https://github.com/opencv/cvat/pull/1151))
- Added support for OpenVINO 2020
### Fixed ### Fixed
- Exception in Git plugin [#826](https://github.com/opencv/cvat/issues/826) - Exception in Git plugin [#826](https://github.com/opencv/cvat/issues/826)

@ -2,7 +2,7 @@
# #
# SPDX-License-Identifier: MIT # SPDX-License-Identifier: MIT
from openvino.inference_engine import IENetwork, IEPlugin from openvino.inference_engine import IENetwork, IEPlugin, IECore, get_version
import subprocess import subprocess
import os import os
@ -19,7 +19,20 @@ def _check_instruction(instruction):
) )
def make_plugin(): def make_plugin_or_core():
version = get_version()
use_core_openvino = False
try:
major, minor, reference = [int(x) for x in version.split('.')]
if major >= 2 and minor >= 1 and reference >= 37988:
use_core_openvino = True
except Exception:
pass
if use_core_openvino:
ie = IECore()
return ie
if _IE_PLUGINS_PATH is None: if _IE_PLUGINS_PATH is None:
raise OSError('Inference engine plugin path env not found in the system.') raise OSError('Inference engine plugin path env not found in the system.')

@ -8,25 +8,22 @@ import cv2
import os import os
import numpy as np import numpy as np
from cvat.apps.auto_annotation.inference_engine import make_plugin, make_network from cvat.apps.auto_annotation.inference_engine import make_plugin_or_core, make_network
class ModelLoader(): class ModelLoader():
def __init__(self, model, weights): def __init__(self, model, weights):
self._model = model self._model = model
self._weights = weights self._weights = weights
IE_PLUGINS_PATH = os.getenv("IE_PLUGINS_PATH") core_or_plugin = make_plugin_or_core()
if not IE_PLUGINS_PATH:
raise OSError("Inference engine plugin path env not found in the system.")
plugin = make_plugin()
network = make_network(self._model, self._weights) network = make_network(self._model, self._weights)
supported_layers = plugin.get_supported_layers(network) if getattr(core_or_plugin, 'get_supported_layers', False):
not_supported_layers = [l for l in network.layers.keys() if l not in supported_layers] supported_layers = core_or_plugin.get_supported_layers(network)
if len(not_supported_layers) != 0: not_supported_layers = [l for l in network.layers.keys() if l not in supported_layers]
raise Exception("Following layers are not supported by the plugin for specified device {}:\n {}". if len(not_supported_layers) != 0:
format(plugin.device, ", ".join(not_supported_layers))) raise Exception("Following layers are not supported by the plugin for specified device {}:\n {}".
format(core_or_plugin.device, ", ".join(not_supported_layers)))
iter_inputs = iter(network.inputs) iter_inputs = iter(network.inputs)
self._input_blob_name = next(iter_inputs) self._input_blob_name = next(iter_inputs)
@ -45,7 +42,12 @@ class ModelLoader():
if self._input_blob_name in info_names: if self._input_blob_name in info_names:
self._input_blob_name = next(iter_inputs) self._input_blob_name = next(iter_inputs)
self._net = plugin.load(network=network, num_requests=2) if getattr(core_or_plugin, 'load_network', False):
self._net = core_or_plugin.load_network(network,
"CPU",
num_requests=2)
else:
self._net = core_or_plugin.load(network=network, num_requests=2)
input_type = network.inputs[self._input_blob_name] input_type = network.inputs[self._input_blob_name]
self._input_layout = input_type if isinstance(input_type, list) else input_type.shape self._input_layout = input_type if isinstance(input_type, list) else input_type.shape

@ -3,7 +3,7 @@
# #
# SPDX-License-Identifier: MIT # SPDX-License-Identifier: MIT
from cvat.apps.auto_annotation.inference_engine import make_plugin, make_network from cvat.apps.auto_annotation.inference_engine import make_plugin_or_core, make_network
import os import os
import cv2 import cv2
@ -32,12 +32,15 @@ class DEXTR_HANDLER:
def handle(self, im_path, points): def handle(self, im_path, points):
# Lazy initialization # Lazy initialization
if not self._plugin: if not self._plugin:
self._plugin = make_plugin() self._plugin = make_plugin_or_core()
self._network = make_network(os.path.join(_DEXTR_MODEL_DIR, 'dextr.xml'), self._network = make_network(os.path.join(_DEXTR_MODEL_DIR, 'dextr.xml'),
os.path.join(_DEXTR_MODEL_DIR, 'dextr.bin')) os.path.join(_DEXTR_MODEL_DIR, 'dextr.bin'))
self._input_blob = next(iter(self._network.inputs)) self._input_blob = next(iter(self._network.inputs))
self._output_blob = next(iter(self._network.outputs)) self._output_blob = next(iter(self._network.outputs))
self._exec_network = self._plugin.load(network=self._network) if getattr(self._plugin, 'load_network', False):
self._exec_network = self._plugin.load_network(self._network, 'CPU')
else:
self._exec_network = self._plugin.load(network=self._network)
image = PIL.Image.open(im_path) image = PIL.Image.open(im_path)
numpy_image = np.array(image) numpy_image = np.array(image)

@ -30,7 +30,7 @@ def load_image_into_numpy(image):
def run_inference_engine_annotation(image_list, labels_mapping, treshold): def run_inference_engine_annotation(image_list, labels_mapping, treshold):
from cvat.apps.auto_annotation.inference_engine import make_plugin, make_network from cvat.apps.auto_annotation.inference_engine import make_plugin_or_core, make_network
def _normalize_box(box, w, h, dw, dh): def _normalize_box(box, w, h, dw, dh):
xmin = min(int(box[0] * dw * w), w) xmin = min(int(box[0] * dw * w), w)
@ -44,11 +44,14 @@ def run_inference_engine_annotation(image_list, labels_mapping, treshold):
if MODEL_PATH is None: if MODEL_PATH is None:
raise OSError('Model path env not found in the system.') raise OSError('Model path env not found in the system.')
plugin = make_plugin() core_or_plugin = make_plugin_or_core()
network = make_network('{}.xml'.format(MODEL_PATH), '{}.bin'.format(MODEL_PATH)) network = make_network('{}.xml'.format(MODEL_PATH), '{}.bin'.format(MODEL_PATH))
input_blob_name = next(iter(network.inputs)) input_blob_name = next(iter(network.inputs))
output_blob_name = next(iter(network.outputs)) output_blob_name = next(iter(network.outputs))
executable_network = plugin.load(network=network) if getattr(core_or_plugin, 'load_network', False):
executable_network = core_or_plugin.load_network(network, 'CPU')
else:
executable_network = core_or_plugin.load(network=network)
job = rq.get_current_job() job = rq.get_current_job()
del network del network

Loading…
Cancel
Save