[Datumaro] Update model interface and OpenVino launcher (#1626)

* Refactor inference wrapper
zhiltsov-max committed 6 years ago via GitHub
parent c792c8cd60
commit 39d3c93cfd

@@ -7,45 +7,37 @@ import argparse
import logging as log
import os
import os.path as osp
import shutil
import re
from datumaro.components.config import DEFAULT_FORMAT
from ...util import add_subparser
from datumaro.components.project import Environment
from ...util import add_subparser, MultilineFormatter
from ...util.project import load_project
def build_openvino_add_parser(parser=argparse.ArgumentParser()):
parser.add_argument('-d', '--description', required=True,
help="Path to the model description file (.xml)")
parser.add_argument('-w', '--weights', required=True,
help="Path to the model weights file (.bin)")
parser.add_argument('-i', '--interpretation-script', required=True,
help="Path to the network output interpretation script (.py)")
parser.add_argument('--plugins-path', default=None,
help="Path to the custom Inference Engine plugins directory")
parser.add_argument('--copy', action='store_true',
help="Copy the model data to the project")
return parser
def openvino_args_extractor(args):
my_args = argparse.Namespace()
my_args.description = args.description
my_args.weights = args.weights
my_args.interpretation_script = args.interpretation_script
my_args.plugins_path = args.plugins_path
return my_args
def build_add_parser(parser_ctor=argparse.ArgumentParser):
parser = parser_ctor()
parser.add_argument('name',
help="Name of the model to be added")
launchers_sp = parser.add_subparsers(dest='launcher')
build_openvino_add_parser(launchers_sp.add_parser('openvino')) \
.set_defaults(launcher_args_extractor=openvino_args_extractor)
builtins = sorted(Environment().launchers.items)
parser = parser_ctor(help="Add model to project",
description="""
Registers an executable model into a project. A model requires
a launcher to be executed. Each launcher has its own options, which
are passed after the '--' separator; pass '-- -h' for more info.
|n
List of builtin launchers: %s
""" % ', '.join(builtins),
formatter_class=MultilineFormatter)
parser.add_argument('-l', '--launcher', required=True,
help="Model launcher")
parser.add_argument('extra_args', nargs=argparse.REMAINDER, default=None,
help="Additional arguments for converter (pass '-- -h' for help)")
parser.add_argument('--copy', action='store_true',
help="Copy the model to the project")
parser.add_argument('-n', '--name', default=None,
help="Name of the model to be added (default: generate automatically)")
parser.add_argument('--overwrite', action='store_true',
help="Overwrite if exists")
parser.add_argument('-p', '--project', dest='project_dir', default='.',
help="Directory of the project to operate on (default: current dir)")
parser.set_defaults(command=add_command)
@@ -55,44 +47,50 @@ def build_add_parser(parser_ctor=argparse.ArgumentParser):
def add_command(args):
project = load_project(args.project_dir)
log.info("Adding '%s' model to '%s' project" % \
(args.launcher, project.config.project_name))
options = args.launcher_args_extractor(args)
if args.launcher == 'openvino' and args.copy:
config = project.config
env_config = project.env.config
model_dir_rel = osp.join(
config.env_dir, env_config.models_dir, args.name)
model_dir = osp.join(
config.project_dir, model_dir_rel)
os.makedirs(model_dir, exist_ok=True)
shutil.copy(options.description,
osp.join(model_dir, osp.basename(options.description)))
options.description = \
osp.join(model_dir_rel, osp.basename(options.description))
shutil.copy(options.weights,
osp.join(model_dir, osp.basename(options.weights)))
options.weights = \
osp.join(model_dir_rel, osp.basename(options.weights))
shutil.copy(options.interpretation_script,
osp.join(model_dir, osp.basename(options.interpretation_script)))
options.interpretation_script = \
osp.join(model_dir_rel, osp.basename(options.interpretation_script))
if args.name:
if not args.overwrite and args.name in project.config.models:
raise CliException("Model '%s' already exists "
"(pass --overwrite to overwrite)" % args.name)
else:
existing_ids = [int(n.split('-')[1]) for n in project.config.models
if re.match(r'model-\d+', n)]
max_idx = max(existing_ids, default=len(project.config.models))
args.name = 'model-%d' % (max_idx + 1)
assert args.name not in project.config.models, args.name
try:
launcher = project.env.launchers.get(args.launcher)
except KeyError:
raise CliException("Launcher '%s' is not found" % args.launcher)
cli_plugin = launcher.cli_plugin
model_args = cli_plugin.from_cmdline(args.extra_args)
if args.copy:
try:
log.info("Copying model data")
model_dir = project.local_model_dir(args.name)
os.makedirs(model_dir, exist_ok=False)
cli_plugin.copy_model(model_dir, model_args)
except NotImplementedError:
log.error("Can't copy: copying is not available for '%s' models" % \
(args.launcher))
log.info("Adding the model")
project.add_model(args.name, {
'launcher': args.launcher,
'options': vars(options),
'options': model_args,
})
log.info("Checking the model")
project.make_executable_model(args.name)
project.save()
log.info("Model '%s' with launcher '%s' has been added to project '%s'" % \
(args.name, args.launcher, project.config.project_name))
return 0
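Note on the new CLI shape: launcher-specific options now travel through argparse.REMAINDER and are parsed by the plugin itself. A minimal standalone sketch of this mechanism (the option names and file paths are illustrative, not part of the commit):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('-l', '--launcher', required=True)
    parser.add_argument('extra_args', nargs=argparse.REMAINDER)

    args = parser.parse_args(['-l', 'openvino',
        '--', '-d', 'model.xml', '-w', 'model.bin', '-i', 'interp.py'])

    extra = args.extra_args
    if extra and extra[0] == '--':  # REMAINDER can keep the separator itself
        extra = extra[1:]
    print(extra)  # ['-d', 'model.xml', '-w', 'model.bin', '-i', 'interp.py']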
def build_remove_parser(parser_ctor=argparse.ArgumentParser):
@@ -134,7 +132,7 @@ def run_command(args):
os.makedirs(dst_dir, exist_ok=False)
project.make_dataset().apply_model(
save_dir=dst_dir,
model_name=args.model_name)
model=args.model_name)
log.info("Inference results have been saved to '%s'" % dst_dir)

@@ -6,6 +6,7 @@
import numpy as np
from datumaro.components.extractor import Transform
from datumaro.util import take_by
# pylint: disable=no-self-use
@@ -19,42 +20,30 @@ class Launcher:
def preferred_input_size(self):
return None
def get_categories(self):
def categories(self):
return None
# pylint: enable=no-self-use
class InferenceWrapper(Transform):
class ModelTransform(Transform):
def __init__(self, extractor, launcher, batch_size=1):
super().__init__(extractor)
self._launcher = launcher
self._batch_size = batch_size
def __iter__(self):
stop = False
data_iter = iter(self._extractor)
while not stop:
batch_items = []
try:
for _ in range(self._batch_size):
item = next(data_iter)
batch_items.append(item)
except StopIteration:
stop = True
if len(batch_items) == 0:
break
inputs = np.array([item.image.data for item in batch_items])
for batch in take_by(self._extractor, self._batch_size):
inputs = np.array([item.image.data for item in batch])
inference = self._launcher.launch(inputs)
for item, annotations in zip(batch_items, inference):
for item, annotations in zip(batch, inference):
yield self.wrap_item(item, annotations=annotations)
def get_subset(self, name):
subset = self._extractor.get_subset(name)
return InferenceWrapper(subset, self._launcher, self._batch_size)
return __class__(subset, self._launcher, self._batch_size)
def categories(self):
launcher_override = self._launcher.get_categories()
launcher_override = self._launcher.categories()
if launcher_override is not None:
return launcher_override
return self._extractor.categories()
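To illustrate the renamed interface, a minimal sketch of driving ModelTransform with stub components (MockExtractor and MockLauncher are hypothetical, mirroring the test at the bottom of this diff):

    import numpy as np

    from datumaro.components.extractor import DatasetItem, Extractor, Label
    from datumaro.components.launcher import Launcher, ModelTransform

    class MockLauncher(Launcher):
        def launch(self, inputs):
            # one annotation list per image in the batch
            return [[Label(0)] for _ in inputs]

    class MockExtractor(Extractor):
        def __iter__(self):
            for i in range(5):
                yield DatasetItem(id=i, image=np.zeros((4, 4, 3)))

    for item in ModelTransform(MockExtractor(), MockLauncher(), batch_size=2):
        print(item.id, item.annotations)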

@@ -19,7 +19,7 @@ from datumaro.components.config import Config, DEFAULT_FORMAT
from datumaro.components.config_model import (Model, Source,
PROJECT_DEFAULT_CONFIG, PROJECT_SCHEMA)
from datumaro.components.extractor import Extractor
from datumaro.components.launcher import InferenceWrapper
from datumaro.components.launcher import ModelTransform
from datumaro.components.dataset_filter import \
XPathDatasetFilter, XPathAnnotationsFilter
@@ -683,7 +683,7 @@ class ProjectDataset(Dataset):
if isinstance(model, str):
launcher = self._project.make_executable_model(model)
self.transform_project(InferenceWrapper, launcher=launcher,
self.transform_project(ModelTransform, launcher=launcher,
save_dir=save_dir, batch_size=batch_size)
def export_project(self, save_dir, converter,

@@ -6,15 +6,44 @@
# pylint: disable=exec-used
import cv2
import logging as log
import numpy as np
import os
import os.path as osp
import platform
import shutil
from openvino.inference_engine import IENetwork, IEPlugin
from openvino.inference_engine import IECore
from datumaro.components.cli_plugin import CliPlugin
from datumaro.components.launcher import Launcher
from datumaro.util.os_util import check_instruction_set
class OpenVinoImporter(CliPlugin):
@classmethod
def build_cmdline_parser(cls, **kwargs):
parser = super().build_cmdline_parser(**kwargs)
parser.add_argument('-d', '--description', required=True,
help="Path to the model description file (.xml)")
parser.add_argument('-w', '--weights', required=True,
help="Path to the model weights file (.bin)")
parser.add_argument('-i', '--interpreter', required=True,
help="Path to the network output interprter script (.py)")
parser.add_argument('--device', default='CPU',
help="Target device (default: %(default)s)")
return parser
@staticmethod
def copy_model(model_dir, model):
shutil.copy(model['description'],
osp.join(model_dir, osp.basename(model['description'])))
model['description'] = osp.basename(model['description'])
shutil.copy(model['weights'],
osp.join(model_dir, osp.basename(model['weights'])))
model['weights'] = osp.basename(model['weights'])
shutil.copy(model['interpreter'],
osp.join(model_dir, osp.basename(model['interpreter'])))
model['interpreter'] = osp.basename(model['interpreter'])
class InterpreterScript:
@@ -25,13 +54,16 @@ class InterpreterScript:
context = {}
exec(script, context, context)
process_outputs = context['process_outputs']
assert callable(process_outputs)
process_outputs = context.get('process_outputs')
if not callable(process_outputs):
raise Exception("Can't find 'process_outputs' function in "
"the interpreter script")
self.__dict__['process_outputs'] = process_outputs
get_categories = context.get('get_categories')
assert callable(get_categories) or get_categories is None
self.__dict__['get_categories'] = get_categories
assert get_categories is None or callable(get_categories)
if get_categories:
self.__dict__['get_categories'] = get_categories
@staticmethod
def get_categories():
@@ -39,41 +71,16 @@ class InterpreterScript:
@staticmethod
def process_outputs(inputs, outputs):
return []
raise NotImplementedError(
"Function should be implemented in the interpreter script")
class OpenVinoLauncher(Launcher):
_DEFAULT_IE_PLUGINS_PATH = "/opt/intel/openvino_2019.1.144/deployment_tools/inference_engine/lib/intel64"
_IE_PLUGINS_PATH = os.getenv("IE_PLUGINS_PATH", _DEFAULT_IE_PLUGINS_PATH)
@staticmethod
def make_plugin(device='cpu', plugins_path=_IE_PLUGINS_PATH):
if plugins_path is None or not osp.isdir(plugins_path):
raise Exception('Inference engine plugins directory "%s" not found' % \
(plugins_path))
plugin = IEPlugin(device='CPU', plugin_dirs=[plugins_path])
if (check_instruction_set('avx2')):
plugin.add_cpu_extension(os.path.join(plugins_path,
'libcpu_extension_avx2.so'))
elif (check_instruction_set('sse4')):
plugin.add_cpu_extension(os.path.join(plugins_path,
'libcpu_extension_sse4.so'))
elif platform.system() == 'Darwin':
plugin.add_cpu_extension(os.path.join(plugins_path,
'libcpu_extension.dylib'))
else:
raise Exception('Inference engine requires support of avx2 or sse4')
return plugin
@staticmethod
def make_network(model, weights):
return IENetwork.from_ir(model=model, weights=weights)
class OpenVinoLauncher(Launcher):
cli_plugin = OpenVinoImporter
def __init__(self, description, weights, interpretation_script,
plugins_path=None, model_dir=None, **kwargs):
if model_dir is None:
model_dir = ''
def __init__(self, description, weights, interpreter,
plugins_path=None, device=None, model_dir=None):
model_dir = model_dir or ''
if not osp.isfile(description):
description = osp.join(model_dir, description)
if not osp.isfile(description):
@@ -86,34 +93,37 @@ class OpenVinoLauncher(Launcher):
raise Exception('Failed to open model weights file "%s"' % \
(weights))
if not osp.isfile(interpretation_script):
interpretation_script = \
osp.join(model_dir, interpretation_script)
if not osp.isfile(interpretation_script):
raise Exception('Failed to open model interpretation script file "%s"' % \
(interpretation_script))
if not osp.isfile(interpreter):
interpreter = osp.join(model_dir, interpreter)
if not osp.isfile(interpreter):
raise Exception('Failed to open model interpreter script file "%s"' % \
(interpreter))
self._interpreter_script = InterpreterScript(interpretation_script)
self._interpreter = InterpreterScript(interpreter)
if plugins_path is None:
plugins_path = OpenVinoLauncher._IE_PLUGINS_PATH
self._device = device or 'CPU'
plugin = OpenVinoLauncher.make_plugin(plugins_path=plugins_path)
network = OpenVinoLauncher.make_network(description, weights)
self._network = network
self._plugin = plugin
self._ie = IECore()
if hasattr(self._ie, 'read_network'):
self._network = self._ie.read_network(description, weights)
else: # backward compatibility
from openvino.inference_engine import IENetwork
self._network = IENetwork.from_ir(description, weights)
self._check_model_support(self._network, self._device)
self._load_executable_net()
def _check_model_support(self, net, device):
supported_layers = set(self._ie.query_network(net, device))
not_supported_layers = set(net.layers) - supported_layers
if len(not_supported_layers) != 0:
log.error("The following layers are not supported " \
"by the plugin for device '%s': %s." % \
(device, ', '.join(not_supported_layers)))
raise NotImplementedError(
"Some layers are not supported on the device")
def _load_executable_net(self, batch_size=1):
network = self._network
plugin = self._plugin
supported_layers = plugin.get_supported_layers(network)
not_supported_layers = [l for l in network.layers.keys() if l not in supported_layers]
if len(not_supported_layers) != 0:
raise Exception('Following layers are not supported by the plugin'
' for the specified device {}:\n {}'. format( \
plugin.device, ", ".join(not_supported_layers)))
iter_inputs = iter(network.inputs)
self._input_blob_name = next(iter_inputs)
@@ -131,14 +141,14 @@ class OpenVinoLauncher(Launcher):
network.reshape({self._input_blob_name: self._input_layout})
self._batch_size = batch_size
self._net = plugin.load(network=network, num_requests=1)
self._net = self._ie.load_network(network=network, num_requests=1,
device_name=self._device)
def infer(self, inputs):
assert len(inputs.shape) == 4, \
"Expected an input image in (N, H, W, C) format, got %s" % \
(inputs.shape)
assert inputs.shape[3] == 3, \
"Expected BGR input"
(inputs.shape)
assert inputs.shape[3] == 3, "Expected BGR input, got %s" % inputs.shape
n, c, h, w = self._input_layout
if inputs.shape[1:3] != (h, w):
@@ -170,11 +180,11 @@ class OpenVinoLauncher(Launcher):
results = self.process_outputs(inputs, outputs)
return results
def get_categories(self):
return self._interpreter_script.get_categories()
def categories(self):
return self._interpreter.get_categories()
def process_outputs(self, inputs, outputs):
return self._interpreter_script.process_outputs(inputs, outputs)
return self._interpreter.process_outputs(inputs, outputs)
def preferred_input_size(self):
_, _, h, w = self._input_layout

@@ -5,6 +5,7 @@
import os
import os.path as osp
from itertools import islice
def find(iterable, pred=lambda x: True, default=None):
@@ -59,4 +60,18 @@ def to_snake_case(s):
name.append(char.lower())
else:
name.append(char)
return ''.join(name)
return ''.join(name)
def take_by(iterable, count):
"""
Returns elements from the input iterable by batches of N items.
('abcdefg', 3) -> ['a', 'b', 'c'], ['d', 'e', 'f'], ['g']
"""
it = iter(iterable)
while True:
batch = list(islice(it, count))
if len(batch) == 0:
break
yield batch
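Example usage, matching the docstring:

    from datumaro.util import take_by

    for batch in take_by('abcdefg', 3):
        print(batch)
    # ['a', 'b', 'c']
    # ['d', 'e', 'f']
    # ['g']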

@@ -6,7 +6,7 @@ from unittest import TestCase
from datumaro.components.project import Project, Environment, Dataset
from datumaro.components.config_model import Source, Model
from datumaro.components.launcher import Launcher, InferenceWrapper
from datumaro.components.launcher import Launcher, ModelTransform
from datumaro.components.converter import Converter
from datumaro.components.extractor import (Extractor, DatasetItem,
Label, Mask, Points, Polygon, PolyLine, Bbox, Caption,
@@ -153,7 +153,7 @@ class ProjectTest(TestCase):
extractor = TestExtractor()
batch_size = 3
executor = InferenceWrapper(extractor, model, batch_size=batch_size)
executor = ModelTransform(extractor, model, batch_size=batch_size)
for item in executor:
self.assertEqual(1, len(item.annotations))
