# Imports
import time
from pathlib import Path

import cv2
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import Markdown
from openvino.runtime import Core

# Settings
# The paths of the source and converted models
model_path = Path("model/v3-small_224_1.0_float.pb")
ir_path = model_path.with_suffix(".xml")

# Convert TensorFlow Model to OpenVINO IR Format
# Construct the command for Model Optimizer
mo_command = f"""mo
                 --input_model "{model_path}" 
                 --input_shape "[1,224,224,3]" 
                 --mean_values="[127.5,127.5,127.5]"
                 --scale_values="[127.5]" 
                 --data_type FP16 
                 --output_dir "{model_path.parent}"
                 """
mo_command = " ".join(mo_command.split())
print("Model Optimizer command to convert TensorFlow to OpenVINO:")
display(Markdown(f"`{mo_command}`"))

# Run Model Optimizer if the IR model file does not exist
if not ir_path.exists():
    print("Exporting TensorFlow model to IR... This may take a few minutes.")
    ! $mo_command
else:
    print(f"IR model {ir_path} already exists.")

# Load the Model
ie = Core()
model = ie.read_model(model=ir_path, weights=ir_path.with_suffix(".bin"))
compiled_model = ie.compile_model(model=model, device_name="CPU")
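
# A minimal sketch for checking which devices OpenVINO detects on this machine
# (the names depend on the local hardware; "CPU" is used above):
print("Available devices:", ie.available_devices)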

# Get Model Information
input_key = compiled_model.input(0)
output_key = compiled_model.output(0)
network_input_shape = input_key.shape
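
# For reference, the input and output ports can be inspected directly. A minimal
# sketch, assuming the tensor names exposed by this IR:
print(f"Input '{input_key.any_name}', shape: {network_input_shape}")
print(f"Output '{output_key.any_name}', shape: {output_key.shape}")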

# Load an Image
# The MobileNet network expects images in RGB format; OpenCV reads images in BGR, so convert
image = cv2.cvtColor(cv2.imread(filename="data/coco.jpg"), code=cv2.COLOR_BGR2RGB)

# Resize image to network input image shape
resized_image = cv2.resize(src=image, dsize=(224, 224))

# Add a batch dimension so the image matches the [1, 224, 224, 3] network input shape
input_image = np.expand_dims(resized_image, 0)

plt.imshow(image);
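
# The load/resize/expand steps above could be wrapped in a small helper so other
# images are preprocessed the same way. A minimal sketch (the helper name is
# illustrative, not part of the original notebook):
def preprocess(image_path, size=(224, 224)):
    img = cv2.cvtColor(cv2.imread(filename=str(image_path)), code=cv2.COLOR_BGR2RGB)
    img = cv2.resize(src=img, dsize=size)
    # Add the batch dimension expected by the network
    return np.expand_dims(img, 0)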

# Do Inference
result = compiled_model([input_image])[output_key]
result_index = np.argmax(result)

# Convert the inference result to a class name.
imagenet_classes = Path("utils/imagenet_2012.txt").read_text().splitlines()

# The model description states that for this model, class 0 is background,
# so we add background at the beginning of imagenet_classes
imagenet_classes = ['background'] + imagenet_classes

imagenet_classes[result_index]
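
# Besides the single best class, the top-5 predictions can be listed. A minimal
# sketch applying a softmax over the scores (the ranking is unchanged if the
# model already outputs probabilities):
scores = result.flatten()
probs = np.exp(scores - scores.max())
probs /= probs.sum()
for i in np.argsort(probs)[::-1][:5]:
    print(f"{imagenet_classes[i]}: {probs[i]:.3f}")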

# Timing
num_images = 1000

start = time.perf_counter()

for _ in range(num_images):
    compiled_model([input_image])

end = time.perf_counter()
time_ir = end - start

print(
    f"IR model in Inference Engine/CPU: {time_ir/num_images:.4f} "
    f"seconds per image, FPS: {num_images/time_ir:.2f}"
)
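
# The mean above hides run-to-run variance; a minimal sketch that also reports
# the median and 95th-percentile per-image latency (values depend on the machine):
latencies = []
for _ in range(100):
    t0 = time.perf_counter()
    compiled_model([input_image])
    latencies.append(time.perf_counter() - t0)
print(
    f"Median latency: {np.median(latencies) * 1000:.2f} ms, "
    f"p95: {np.percentile(latencies, 95) * 1000:.2f} ms"
)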