# Imports
import os
import sys
import zipfile
from pathlib import Path

import numpy as np
from monai.transforms import LoadImage
from openvino.inference_engine import IECore

sys.path.append("../utils")
from models.custom_segmentation import SegmentationModel
from notebook_utils import benchmark_model, download_file, show_live_inference

# Settings
# The directory that contains the IR model (xml and bin) files
MODEL_PATH = "pretrained_model/quantized_unet_kits19.xml"
# Uncomment the next line to use the FP16 model instead of the quantized model
# MODEL_PATH = "pretrained_model/unet_kits19.xml"

# Benchmark Model Performance
ie = IECore()
# By default, benchmark on MULTI:CPU,GPU if a GPU is available, otherwise on CPU.
device = "MULTI:CPU,GPU" if "GPU" in ie.available_devices else "CPU"
# Uncomment one of the options below to benchmark on other devices
# device = "GPU"
# device = "CPU"
# device = "AUTO"

# Benchmark model
benchmark_model(model_path=MODEL_PATH, device=device, seconds=15)

# Download and Prepare Data
# Directory that contains the CT scan data. This directory should contain subdirectories
# case_00XXX where XXX is between 000 and 299
BASEDIR = Path("kits19_frames_1")
# The CT scan case number. For example: 16 for data from the case_00016 directory
# Currently only 117 is supported
CASE = 117

case_path = BASEDIR / f"case_{CASE:05d}"

if not case_path.exists():
    filename = download_file(
        f"https://storage.openvinotoolkit.org/data/test_data/openvino_notebooks/kits19/case_{CASE:05d}.zip"
    )
    with zipfile.ZipFile(filename, "r") as zip_ref:
        zip_ref.extractall(path=BASEDIR)
    os.remove(filename)  # remove zipfile
    print(f"Downloaded and extracted data for case_{CASE:05d}")
else:
    print(f"Data for case_{CASE:05d} exists")

# Load Model and List of Image Files
ie = IECore()
segmentation_model = SegmentationModel(
    ie=ie, model_path=Path(MODEL_PATH), sigmoid=True, rotate_and_flip=True
)
image_paths = sorted(case_path.glob("imaging_frames/*jpg"))

print(f"{case_path.name}, {len(image_paths)} images")

# Show Inference
# Possible options for device include "CPU", "GPU", "AUTO", "MULTI"
device = "MULTI:CPU,GPU" if "GPU" in ie.available_devices else "CPU"
reader = LoadImage(image_only=True, dtype=np.uint8)
show_live_inference(
    ie=ie, image_paths=image_paths, model=segmentation_model, device=device, reader=reader
)