# Imports
import os
import time

import cv2
import matplotlib.pyplot as plt
import numpy as np
import paddlehub as hub
from IPython.display import Markdown, display
from openvino.runtime import Core
from paddle.static import InputSpec
from PIL import Image
from scipy.special import softmax

# Settings
IMAGE_FILENAME = "coco_close.png"
MODEL_NAME = "mobilenet_v3_large_imagenet_ssld"
hub.config.server = "https://paddlepaddle.org.cn/paddlehub"

# Show Inference on PaddlePaddle Model
classifier = hub.Module(name=MODEL_NAME)

# Load the image in BGR format, as specified in the model documentation
image = cv2.imread(filename=IMAGE_FILENAME)
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

result = classifier.classification(images=[image], top_k=3)
for class_name, softmax_probability in result[0].items():
    print(f"{class_name}, {softmax_probability:.5f}")

# process_image is part of the model code that PaddleHub downloads with the module
from mobilenet_v3_large_imagenet_ssld.data_feed import process_image

pil_image = Image.open(IMAGE_FILENAME)
processed_image = process_image(pil_image)
print(f"Processed image shape: {processed_image.shape}")
# The processed image is in (C,H,W) format; convert it to (H,W,C) for display
plt.imshow(np.transpose(processed_image, (1, 2, 0)))

# Preparation
input_shape = list(classifier.cpu_predictor.get_input_tensor_shape().values())
print("input shape:", input_shape)
print("mean:", classifier.get_pretrained_images_mean())
print("std:", classifier.get_pretrained_images_std())

# Convert PaddlePaddle Model to ONNX
target_height, target_width = next(iter(input_shape))[2:]
x_spec = InputSpec([1, 3, target_height, target_width], "float32", "x")
print(
    "Exporting PaddlePaddle model to ONNX with target_height "
    f"{target_height} and target_width {target_width}"
)
classifier.export_onnx_model(".", input_spec=[x_spec], opset_version=11)
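# (Optional) Sanity-check the exported ONNX model before converting it to IR.
# This is a minimal sketch, not part of the original workflow; it assumes the
# onnxruntime package is installed as an extra dependency.
import onnxruntime as ort  # assumed extra dependency

onnx_session = ort.InferenceSession(f"{MODEL_NAME}.onnx", providers=["CPUExecutionProvider"])
onnx_input_name = onnx_session.get_inputs()[0].name  # "x", set by x_spec above
dummy_input = np.zeros((1, 3, target_height, target_width), dtype=np.float32)
onnx_output = onnx_session.run(None, {onnx_input_name: dummy_input})[0]
print(f"ONNX output shape: {onnx_output.shape}")  # one logit vector per batch item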
# Convert ONNX Model to OpenVINO IR Format
model_xml = f"{MODEL_NAME}.xml"
if not os.path.exists(model_xml):
    mo_command = f'mo --input_model {MODEL_NAME}.onnx --input_shape "[1,3,{target_height},{target_width}]"'
    display(Markdown(f"Model Optimizer command to convert the ONNX model to IR: `{mo_command}`"))
    display(Markdown("_Converting model to IR. This may take a few minutes..._"))
    ! $mo_command
else:
    print(f"{model_xml} already exists.")

# Show Inference on OpenVINO Model
# Load the Inference Engine and the IR model
ie = Core()
model = ie.read_model(model=f"{MODEL_NAME}.xml", weights=f"{MODEL_NAME}.bin")
compiled_model = ie.compile_model(model=model, device_name="CPU")

# Get the model output
output_layer = compiled_model.output(0)

# Read, show, and preprocess the input image
# See the "Show Inference on PaddlePaddle Model" section for the source of process_image
image = Image.open(IMAGE_FILENAME)
plt.imshow(image)
input_image = process_image(image)[None,]

# Do inference
ie_result = compiled_model([input_image])[output_layer][0]

# Compute softmax probabilities for the inference result and find the top three values
softmax_result = softmax(ie_result)
top_indices = np.argsort(softmax_result)[-3:][::-1]
top_softmax = softmax_result[top_indices]

# Convert the inference results to class names, using the same labels as the
# PaddlePaddle classifier
for index, softmax_probability in zip(top_indices, top_softmax):
    print(f"{classifier.label_list[index]}, {softmax_probability:.5f}")

# Timing and Comparison
num_images = 50

# PaddlePaddle's classification method expects a BGR numpy array
image = cv2.imread(filename=IMAGE_FILENAME)
# The process_image function expects a PIL image
pil_image = Image.open(fp=IMAGE_FILENAME)

# Show CPU information
ie = Core()
print(f"CPU: {ie.get_property(device_name='CPU', name='FULL_DEVICE_NAME')}")

# Show inference speed on the PaddlePaddle model
start = time.perf_counter()
for _ in range(num_images):
    result = classifier.classification(images=[image], top_k=3)
end = time.perf_counter()
time_paddle = end - start
print(
    f"PaddlePaddle model on CPU: {time_paddle/num_images:.4f} "
    f"seconds per image, FPS: {num_images/time_paddle:.2f}\n"
)
print("PaddlePaddle result:")
for class_name, softmax_probability in result[0].items():
    print(f"{class_name}, {softmax_probability:.5f}")
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB));

# Show inference speed on the OpenVINO IR model
compiled_model = ie.compile_model(model=model, device_name="CPU")
output_layer = compiled_model.output(0)
start = time.perf_counter()
input_image = process_image(pil_image)[None,]
for _ in range(num_images):
    ie_result = compiled_model([input_image])[output_layer][0]
    # Do the same postprocessing as the PaddlePaddle classifier so the
    # timing comparison is fair
    result_index = np.argmax(ie_result)
    class_name = classifier.label_list[result_index]
    softmax_result = softmax(ie_result)
    top_indices = np.argsort(softmax_result)[-3:][::-1]
    top_softmax = softmax_result[top_indices]
end = time.perf_counter()
time_ir = end - start
print(
    f"IR model in Inference Engine (CPU): {time_ir/num_images:.4f} "
    f"seconds per image, FPS: {num_images/time_ir:.2f}"
)
print()
print("OpenVINO result:")
for index, softmax_probability in zip(top_indices, top_softmax):
    print(f"{classifier.label_list[index]}, {softmax_probability:.5f}")
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB));
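# (Optional) Throughput on multi-core CPUs can often be pushed higher with
# asynchronous inference. A minimal sketch using AsyncInferQueue from the
# openvino.runtime API (2022.1+); the queue size of 2 is an arbitrary choice,
# and results arrive via the callback rather than as return values.
from openvino.runtime import AsyncInferQueue

top1_indices = []

def completion_callback(request, userdata):
    # Record the top-1 class index of each completed request
    userdata.append(np.argmax(request.get_output_tensor(0).data[0]))

infer_queue = AsyncInferQueue(compiled_model, 2)
infer_queue.set_callback(completion_callback)
start = time.perf_counter()
for _ in range(num_images):
    infer_queue.start_async({0: input_image}, top1_indices)
infer_queue.wait_all()
end = time.perf_counter()
print(f"Async IR model FPS: {num_images/(end - start):.2f}")
print(f"Top-1 class: {classifier.label_list[top1_indices[0]]}")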