# Preparation
model_name = "mobilenet-v2-pytorch"
# Imports
import json
import sys
from pathlib import Path
from IPython.display import Markdown, display
from openvino.runtime import Core
sys.path.append("../utils")
from notebook_utils import DeviceNotFoundAlert, NotebookAlert
# Settings and Configuration
base_model_dir = Path("~/open_model_zoo_models").expanduser()
omz_cache_dir = Path("~/open_model_zoo_cache").expanduser()
precision = "FP16"
# Check if an iGPU is available on this system to use with Benchmark App
ie = Core()
gpu_available = "GPU" in ie.available_devices
print(
    f"base_model_dir: {base_model_dir}, omz_cache_dir: {omz_cache_dir}, gpu_available: {gpu_available}"
)
# Download Model from Open Model Zoo
download_command = (
f"omz_downloader --name {model_name} --output_dir {base_model_dir} --cache_dir {omz_cache_dir}"
)
display(Markdown(f"Download command: `{download_command}`"))
display(Markdown(f"Downloading {model_name}..."))
! $download_command
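# A quick sanity check that the download succeeded (a sketch; assumes the
# Open Model Zoo layout of <output_dir>/public/<model_name> for public models,
# and downloaded_model_dir is an illustrative name, not defined elsewhere):
downloaded_model_dir = base_model_dir / "public" / model_name
print(f"{downloaded_model_dir} exists: {downloaded_model_dir.exists()}")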
# Convert Model to OpenVINO IR format
convert_command = f"omz_converter --name {model_name} --precisions {precision} --download_dir {base_model_dir} --output_dir {base_model_dir}"
display(Markdown(f"Convert command: `{convert_command}`"))
display(Markdown(f"Converting {model_name}..."))
! $convert_command
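# Optionally list the converted IR files (a sketch; assumes the converter
# writes the IR to <output_dir>/public/<model_name>/<precision>/, and
# converted_ir_dir is an illustrative name):
converted_ir_dir = base_model_dir / "public" / model_name / precision
for ir_file in sorted(converted_ir_dir.glob("*")):
    print(ir_file.name)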
# Get Model Information
model_info_output = %sx omz_info_dumper --name $model_name
model_info = json.loads(model_info_output.get_nlstr())
if len(model_info) > 1:
    NotebookAlert(
        f"There are multiple IR files for the {model_name} model. The first model in the "
        "omz_info_dumper output will be used for benchmarking. Change "
        "`selected_model_info` in the cell below to select a different model from the list.",
        "warning",
    )
model_info
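# Each entry in model_info is a dict; the "subdirectory" and "name" keys are
# used below to locate the converted IR (a sketch; the exact key set may vary
# between omz_info_dumper versions):
print(sorted(model_info[0].keys()))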
selected_model_info = model_info[0]
model_path = (
    base_model_dir
    / Path(selected_model_info["subdirectory"])
    / Path(f"{precision}/{selected_model_info['name']}.xml")
)
print(model_path, "exists:", model_path.exists())
benchmark_command = f"benchmark_app -m {model_path} -t 15"
display(Markdown(f"Benchmark command: `{benchmark_command}`"))
display(Markdown(f"Benchmarking {model_name} on CPU with async inference for 15 seconds..."))
! $benchmark_command
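# The summary can also be captured for further processing with %sx instead of
# being streamed with "!"; a sketch (note this re-runs the benchmark for
# another 15 seconds, and the helper function below uses the same technique):
captured_output = %sx $benchmark_command
print([line for line in captured_output if "Throughput" in line])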
# Benchmark with Different Settings
def benchmark_model(model_xml, device="CPU", seconds=60, api="async", batch=1):
    """Benchmark the model with benchmark_app and print only the summary lines."""
    ie = Core()
    model_path = Path(model_xml)
    if ("GPU" in device) and ("GPU" not in ie.available_devices):
        DeviceNotFoundAlert("GPU")
    else:
        benchmark_command = f"benchmark_app -m {model_path} -d {device} -t {seconds} -api {api} -b {batch}"
        display(Markdown(f"**Benchmark {model_path.name} with {device} for {seconds} seconds with {api} inference**"))
        display(Markdown(f"Benchmark command: `{benchmark_command}`"))
        benchmark_output = %sx $benchmark_command
        print("command ended")
        # Keep only the summary lines: drop log lines (starting with "["),
        # indented detail lines, and blank lines.
        benchmark_result = [
            line
            for line in benchmark_output
            if not (line.startswith(r"[") or line.startswith(" ") or line == "")
        ]
        print("\n".join(benchmark_result))
ie = Core()
# Show devices available for OpenVINO Inference Engine
for device in ie.available_devices:
    device_name = ie.get_property(device, "FULL_DEVICE_NAME")
    print(f"{device}: {device_name}")
benchmark_model(model_path, device="CPU", seconds=15, api="async")
benchmark_model(model_path, device="AUTO", seconds=15, api="async")
benchmark_model(model_path, device="GPU", seconds=15, api="async")
benchmark_model(model_path, device="MULTI:CPU,GPU", seconds=15, api="async")