# Load Inference Engine and Show Info
from openvino.runtime import Core
ie = Core()
devices = ie.available_devices
for device in devices:
    device_name = ie.get_property(device_name=device, name="FULL_DEVICE_NAME")
    print(f"{device}: {device_name}")
# Loading a Model
from openvino.runtime import Core
ie = Core()
classification_model_xml = "model/classification.xml"
model = ie.read_model(model=classification_model_xml)
compiled_model = ie.compile_model(model=model, device_name="CPU")
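# Instead of hard-coding "CPU", the device can be chosen at runtime. Recent
# releases also accept the "AUTO" virtual device, which selects a device for
# you. A sketch, assuming OpenVINO 2022.1+:
device = "GPU" if "GPU" in ie.available_devices else "CPU"
compiled_model = ie.compile_model(model=model, device_name=device)
# or let OpenVINO decide: ie.compile_model(model=model, device_name="AUTO")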
from openvino.runtime import Core
ie = Core()
onnx_model_path = "model/segmentation.onnx"
model_onnx = ie.read_model(model=onnx_model_path)
compiled_model_onnx = ie.compile_model(model=model_onnx, device_name="CPU")
# serialize() saves the loaded ONNX model to OpenVINO IR format (.xml and .bin)
from openvino.offline_transformations import serialize
serialize(model=model_onnx, model_path="model/exported_onnx_model.xml", weights_path="model/exported_onnx_model.bin")
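# Sanity check (optional sketch): the serialized IR can be read back and
# compiled like any other model. The file name matches the serialize() call above.
model_ir = ie.read_model(model="model/exported_onnx_model.xml")
compiled_model_ir = ie.compile_model(model=model_ir, device_name="CPU")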
# Getting Information about a Model
from openvino.runtime import Core
ie = Core()
classification_model_xml = "model/classification.xml"
model = ie.read_model(model=classification_model_xml)
input_layer = model.input(0)
print(f"input name: {input_layer.any_name}")
print(f"input precision: {input_layer.element_type}")
print(f"input shape: {input_layer.shape}")
from openvino.runtime import Core
ie = Core()
classification_model_xml = "model/classification.xml"
model = ie.read_model(model=classification_model_xml)
output_layer = model.output(0)
print(f"output name: {output_layer.any_name}")
print(f"output precision: {output_layer.element_type}")
print(f"output shape: {output_layer.shape}")
from openvino.runtime import Core
ie = Core()
classification_model_xml = "model/classification.xml"
model = ie.read_model(model=classification_model_xml)
compiled_model = ie.compile_model(model=model, device_name="CPU")
input_layer = compiled_model.input(0)
output_layer = compiled_model.output(0)
import cv2
image_filename = "data/coco_hollywood.jpg"
image = cv2.imread(image_filename)
print(f"image shape: {image.shape}")
# N,C,H,W = batch size, number of channels, height, width
N, C, H, W = input_layer.shape
# OpenCV resize expects the destination size as (width, height)
resized_image = cv2.resize(src=image, dsize=(W, H))
print(f"resized image shape: {resized_image.shape}")
import numpy as np
input_data = np.expand_dims(np.transpose(resized_image, (2, 0, 1)), 0).astype(np.float32)
print(f"input data shape: {input_data.shape}")
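# The resize/transpose/expand_dims steps above are common enough to wrap in a
# helper. An illustrative sketch; the function name prepare_input is hypothetical
# and it assumes an NCHW float32 model input like the one used here.
def prepare_input(image, input_shape):
    # input_shape is N, C, H, W; only H and W are needed for resizing
    _, _, h, w = input_shape
    resized = cv2.resize(src=image, dsize=(w, h))
    # HWC -> CHW, add a batch dimension, cast to float32
    return np.expand_dims(np.transpose(resized, (2, 0, 1)), 0).astype(np.float32)

input_data = prepare_input(image, input_layer.shape)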
# Do Inference
# Run inference by calling the compiled model directly and indexing the result
# by the output layer:
result = compiled_model([input_data])[output_layer]
# Alternatively, create an inference request explicitly and read its output tensor:
request = compiled_model.create_infer_request()
request.infer(inputs={input_layer.any_name: input_data})
result = request.get_output_tensor(output_layer.index).data
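# Inference can also run asynchronously, which helps throughput-oriented
# pipelines. A minimal sketch, assuming the OpenVINO 2022.1 Python API where
# InferRequest exposes start_async() and wait():
request = compiled_model.create_infer_request()
request.start_async(inputs={input_layer.any_name: input_data})
request.wait()
result = request.get_output_tensor(output_layer.index).data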
# Change Image Size
from openvino.runtime import Core, PartialShape
ie = Core()
segmentation_model_xml = "model/segmentation.xml"
segmentation_model = ie.read_model(model=segmentation_model_xml)
segmentation_input_layer = segmentation_model.input(0)
segmentation_output_layer = segmentation_model.output(0)
print("~~~~ ORIGINAL MODEL ~~~~")
print(f"input shape: {segmentation_input_layer.shape}")
print(f"output shape: {segmentation_output_layer.shape}")
new_shape = PartialShape([1, 3, 544, 544])
segmentation_model.reshape({segmentation_input_layer.any_name: new_shape})
segmentation_compiled_model = ie.compile_model(model=segmentation_model, device_name="CPU")
# help(segmentation_compiled_model)
print("~~~~ RESHAPED MODEL ~~~~")
print(f"model input shape: {segmentation_input_layer.shape}")
print(
f"compiled_model input shape: "
f"{segmentation_compiled_model.input(index=0).shape}"
)
print(f"compiled_model output shape: {segmentation_output_layer.shape}")
# Change Batch Size
from openvino.runtime import Core, PartialShape
ie = Core()
segmentation_model_xml = "model/segmentation.xml"
segmentation_model = ie.read_model(model=segmentation_model_xml)
segmentation_input_layer = segmentation_model.input(0)
segmentation_output_layer = segmentation_model.output(0)
new_shape = PartialShape([2, 3, 544, 544])
segmentation_model.reshape({segmentation_input_layer.any_name: new_shape})
segmentation_compiled_model = ie.compile_model(model=segmentation_model, device_name="CPU")
print(f"input shape: {segmentation_input_layer.shape}")
print(f"output shape: {segmentation_output_layer.shape}")
import numpy as np
from openvino.runtime import Core, PartialShape
ie = Core()
segmentation_model_xml = "model/segmentation.xml"
segmentation_model = ie.read_model(model=segmentation_model_xml)
segmentation_input_layer = segmentation_model.input(0)
segmentation_output_layer = segmentation_model.output(0)
new_shape = PartialShape([2, 3, 544, 544])
segmentation_model.reshape({segmentation_input_layer.any_name: new_shape})
segmentation_compiled_model = ie.compile_model(model=segmentation_model, device_name="CPU")
# Random input data, cast to the float32 precision the model expects
input_data = np.random.rand(2, 3, 544, 544).astype(np.float32)
output = segmentation_compiled_model([input_data])
print(f"input data shape: {input_data.shape}")
print(f"result data shape: {output[segmentation_output_layer].shape}")
# Caching a Model
import time
from pathlib import Path
from openvino.runtime import Core

ie = Core()
device_name = "GPU"  # Model caching is not available for CPU
if device_name in ie.available_devices and device_name != "CPU":
    cache_path = Path("model/model_cache")
    cache_path.mkdir(exist_ok=True)
    # Enable caching for the Inference Engine. To disable caching, set enable_caching = False
    enable_caching = True
    config_dict = {"CACHE_DIR": str(cache_path)} if enable_caching else {}
    classification_model_xml = "model/classification.xml"
    model = ie.read_model(model=classification_model_xml)
    start_time = time.perf_counter()
    compiled_model = ie.compile_model(model=model, device_name=device_name, config=config_dict)
    end_time = time.perf_counter()
    print(f"Loading the network to the {device_name} device took {end_time-start_time:.2f} seconds.")
else:
    print("Model caching is not available on CPU devices.")
# Compile the model a second time; with caching enabled, the model now loads
# from the cache and compilation should be noticeably faster.
if device_name in ie.available_devices and device_name != "CPU":
    del compiled_model
    start_time = time.perf_counter()
    compiled_model = ie.compile_model(model=model, device_name=device_name, config=config_dict)
    end_time = time.perf_counter()
    print(f"Loading the network to the {device_name} device took {end_time-start_time:.2f} seconds.")