OpenVINO™ notebooks

# Imports
import cv2
import matplotlib.pyplot as plt
import numpy as np
from openvino.runtime import Core

# Load the model
ie = Core()
model = ie.read_model(model="model/v3-small_224_1.0_float.xml")
compiled_model = ie.compile_model(model=model, device_name="CPU")
output_layer = compiled_model.output(0)

# Load an Image
# The MobileNet model expects images in RGB format
image = cv2.cvtColor(cv2.imread(filename="data/coco.jpg"), code=cv2.COLOR_BGR2RGB)

# resize to MobileNet image shape
input_image = cv2.resize(src=image, dsize=(224, 224))

# reshape to model input shape
input_image = np.expand_dims(input_image.transpose(2, 0, 1), 0)
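# After these steps the array has shape (1, 3, 224, 224):
# batch size, channels, height, width (NCHW), matching the model input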
plt.imshow(image);

# Do Inference
result_infer = compiled_model([input_image])[output_layer]
result_index = np.argmax(result_infer)

# Convert the inference result to a class name.
imagenet_classes = open("utils/imagenet_2012.txt").read().splitlines()

# The model description states that for this model, class 0 is background,
# so we add background at the beginning of imagenet_classes
imagenet_classes = ['background'] + imagenet_classes

imagenet_classes[result_index]
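
# Besides the single best class, the runner-up predictions can be listed with
# np.argsort (a small sketch reusing the arrays above; the printed values are
# the raw network scores for this model):
scores = result_infer.squeeze()
top_five = np.argsort(scores)[-5:][::-1]
for class_index in top_five:
    print(f"{imagenet_classes[class_index]}: {scores[class_index]:.4f}")
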
# Load Inference Engine and Show Info
from openvino.runtime import Core
ie = Core()

devices = ie.available_devices

for device in devices:
    device_name = ie.get_property(device_name=device, name="FULL_DEVICE_NAME")
    print(f"{device}: {device_name}")

# Loading a Model
from openvino.runtime import Core

ie = Core()
classification_model_xml = "model/classification.xml"

model = ie.read_model(model=classification_model_xml)
compiled_model = ie.compile_model(model=model, device_name="CPU")

from openvino.runtime import Core

ie = Core()
onnx_model_path = "model/segmentation.onnx"
model_onnx = ie.read_model(model=onnx_model_path)
compiled_model_onnx = ie.compile_model(model=model_onnx, device_name="CPU")

from openvino.offline_transformations import serialize

serialize(model=model_onnx, model_path="model/exported_onnx_model.xml", weights_path="model/exported_onnx_model.bin")
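# serialize writes the model to disk as an OpenVINO IR .xml/.bin pair, so it can be reloaded later without the ONNX file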

# Getting Information about a Model
from openvino.runtime import Core

ie = Core()
classification_model_xml = "model/classification.xml"
model = ie.read_model(model=classification_model_xml)
model.input(0).any_name

input_layer = model.input(0)

print(f"input precision: {input_layer.element_type}")
print(f"input shape: {input_layer.shape}")

from openvino.runtime import Core

ie = Core()
classification_model_xml = "model/classification.xml"
model = ie.read_model(model=classification_model_xml)
model.output(0).any_name

output_layer = model.output(0)
output_layer

print(f"output precision: {output_layer.element_type}")
print(f"output shape: {output_layer.shape}")

from openvino.runtime import Core

ie = Core()
classification_model_xml = "model/classification.xml"
model = ie.read_model(model=classification_model_xml)
compiled_model = ie.compile_model(model=model, device_name="CPU")
input_layer = compiled_model.input(0)
output_layer = compiled_model.output(0)

import cv2

image_filename = "data/coco_hollywood.jpg"
image = cv2.imread(image_filename)
image.shape

# N,C,H,W = batch size, number of channels, height, width
N, C, H, W = input_layer.shape
# OpenCV resize expects the destination size as (width, height)
resized_image = cv2.resize(src=image, dsize=(W, H))
resized_image.shape

import numpy as np

input_data = np.expand_dims(np.transpose(resized_image, (2, 0, 1)), 0).astype(np.float32)
input_data.shape

# Do Inference
result = compiled_model([input_data])[output_layer]

request = compiled_model.create_infer_request()
request.infer(inputs={input_layer.any_name: input_data})
result = request.get_output_tensor(output_layer.index).data
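
# The explicit request above runs synchronously. For throughput, several
# requests can run in parallel with AsyncInferQueue (a minimal sketch, assuming
# the AsyncInferQueue class that ships with this openvino.runtime release):
from openvino.runtime import AsyncInferQueue

async_results = {}

def completion_callback(infer_request, frame_id):
    # store the first output of the finished request under its id
    async_results[frame_id] = infer_request.get_output_tensor(output_layer.index).data

infer_queue = AsyncInferQueue(compiled_model, jobs=2)
infer_queue.set_callback(completion_callback)
for frame_id in range(4):
    infer_queue.start_async({input_layer.any_name: input_data}, userdata=frame_id)
infer_queue.wait_all()  # block until all queued requests have finished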

from openvino.runtime import Core, PartialShape

ie = Core()
segmentation_model_xml = "model/segmentation.xml"
segmentation_model = ie.read_model(model=segmentation_model_xml)
segmentation_input_layer = segmentation_model.input(0)
segmentation_output_layer = segmentation_model.output(0)

print("~~~~ ORIGINAL MODEL ~~~~")
print(f"input shape: {segmentation_input_layer.shape}")
print(f"output shape: {segmentation_output_layer.shape}")

new_shape = PartialShape([1, 3, 544, 544])
segmentation_model.reshape({segmentation_input_layer.any_name: new_shape})
segmentation_compiled_model = ie.compile_model(model=segmentation_model, device_name="CPU")
# help(segmentation_compiled_model)
print("~~~~ RESHAPED MODEL ~~~~")
print(f"model input shape: {segmentation_input_layer.shape}")
print(
    f"compiled_model input shape: "
    f"{segmentation_compiled_model.input(index=0).shape}"
)
print(f"compiled_model output shape: {segmentation_output_layer.shape}")

# Change Batch Size
from openvino.runtime import Core, PartialShape

ie = Core()
segmentation_model_xml = "model/segmentation.xml"
segmentation_model = ie.read_model(model=segmentation_model_xml)
segmentation_input_layer = segmentation_model.input(0)
segmentation_output_layer = segmentation_model.output(0)
new_shape = PartialShape([2, 3, 544, 544])
segmentation_model.reshape({segmentation_input_layer.any_name: new_shape})
segmentation_compiled_model = ie.compile_model(model=segmentation_model, device_name="CPU")

print(f"input shape: {segmentation_input_layer.shape}")
print(f"output shape: {segmentation_output_layer.shape}")

import numpy as np
from openvino.runtime import Core, PartialShape

ie = Core()
segmentation_model_xml = "model/segmentation.xml"
segmentation_model = ie.read_model(model=segmentation_model_xml)
segmentation_input_layer = segmentation_model.input(0)
segmentation_output_layer = segmentation_model.output(0)
new_shape = PartialShape([2, 3, 544, 544])
segmentation_model.reshape({segmentation_input_layer.any_name: new_shape})
segmentation_compiled_model = ie.compile_model(model=segmentation_model, device_name="CPU")
input_data = np.random.rand(2, 3, 544, 544)

output = segmentation_compiled_model([input_data])

print(f"input data shape: {input_data.shape}")
print(f"result data data shape: {segmentation_output_layer.shape}")

# Caching a Model
import time
from pathlib import Path

from openvino.runtime import Core, PartialShape

ie = Core()

device_name = "GPU"  # Model Caching is not available for CPU

if device_name in ie.available_devices and device_name != "CPU":
    cache_path = Path("model/model_cache")
    cache_path.mkdir(exist_ok=True)
    # Enable caching for Inference Engine. To disable caching set enable_caching = False
    enable_caching = True
    config_dict = {"CACHE_DIR": str(cache_path)} if enable_caching else {}

    classification_model_xml = "model/classification.xml"
    model = ie.read_model(model=classification_model_xml)

    start_time = time.perf_counter()
    compiled_model = ie.compile_model(model=model, device_name=device_name, config=config_dict)
    end_time = time.perf_counter()
    print(f"Loading the network to the {device_name} device took {end_time-start_time:.2f} seconds.")
else:
    print("Model caching is not available on CPU devices.")

if device_name in ie.available_devices and device_name != "CPU":
    del compiled_model
    start_time = time.perf_counter()
    compiled_model = ie.compile_model(model=model, device_name=device_name, config=config_dict)
    end_time = time.perf_counter()
    print(f"Loading the network to the {device_name} device took {end_time-start_time:.2f} seconds.")
# Imports
import cv2
import matplotlib.pyplot as plt
import numpy as np
import sys
from openvino.runtime import Core

sys.path.append("../utils")
from notebook_utils import segmentation_map_to_image

# Load the Model
ie = Core()

model = ie.read_model(model="model/road-segmentation-adas-0001.xml")
compiled_model = ie.compile_model(model=model, device_name="CPU")

input_layer_ir = compiled_model.input(0)
output_layer_ir = compiled_model.output(0)

# Load an Image
# The segmentation network expects images in BGR format
image = cv2.imread("data/empty_road_mapillary.jpg")

rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image_h, image_w, _ = image.shape

# N,C,H,W = batch size, number of channels, height, width
N, C, H, W = input_layer_ir.shape

# OpenCV resize expects the destination size as (width, height)
resized_image = cv2.resize(image, (W, H))

# reshape to network input shape
input_image = np.expand_dims(resized_image.transpose(2, 0, 1), 0)
plt.imshow(rgb_image)

# Run the inference
result = compiled_model([input_image])[output_layer_ir]

# Prepare data for visualization
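# result has shape (1, C, H, W); argmax over the class axis C gives a
# (1, H, W) map of per-pixel class indices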
segmentation_mask = np.argmax(result, axis=1)
plt.imshow(segmentation_mask.transpose(1, 2, 0))

# Prepare Data for Visualization
# Define colormap, each color represents a class
colormap = np.array([[68, 1, 84], [48, 103, 141], [53, 183, 120], [199, 216, 52]])

# Define the transparency of the segmentation mask on the photo
alpha = 0.3

# Use function from notebook_utils.py to transform mask to an RGB image
mask = segmentation_map_to_image(segmentation_mask, colormap)
resized_mask = cv2.resize(mask, (image_w, image_h))

# Create image with mask put on
image_with_mask = cv2.addWeighted(resized_mask, alpha, rgb_image, 1 - alpha, 0)

# Visualize data
# Define titles with images
data = {"Base Photo": rgb_image, "Segmentation": mask, "Masked Photo": image_with_mask}

# Create subplot to visualize images
fig, axs = plt.subplots(1, len(data), figsize=(15, 10))

# Fill subplot
for ax, (name, image) in zip(axs, data.items()):
    ax.axis('off')
    ax.set_title(name)
    ax.imshow(image)

# Display image
plt.show()
# Imports
import cv2
import matplotlib.pyplot as plt
import numpy as np
from openvino.runtime import Core

# Load the Model
ie = Core()

model = ie.read_model(model="model/horizontal-text-detection-0001.xml")
compiled_model = ie.compile_model(model=model, device_name="CPU")

input_layer_ir = compiled_model.input(0)
output_layer_ir = compiled_model.output("boxes")

# Load an Image
# The text detection model expects images in BGR format
image = cv2.imread("data/intel_rnb.jpg")

# N,C,H,W = batch size, number of channels, height, width
N, C, H, W = input_layer_ir.shape

# Resize image to meet network expected input sizes
resized_image = cv2.resize(image, (W, H))

# Reshape to network input shape
input_image = np.expand_dims(resized_image.transpose(2, 0, 1), 0)

plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB));

# Do Inference
# Create inference request
boxes = compiled_model([input_image])[output_layer_ir]

# Remove zero only boxes
boxes = boxes[~np.all(boxes == 0, axis=1)]

# Visualize Results
# For each detection, the description has the format: [x_min, y_min, x_max, y_max, conf]
# The image passed here is in BGR format with a changed width and height. To display it
# in the colors expected by matplotlib, convert it with the cvtColor function
def convert_result_to_image(bgr_image, resized_image, boxes, threshold=0.3, conf_labels=True):
    # Define colors for boxes and descriptions
    colors = {"red": (255, 0, 0), "green": (0, 255, 0)}

    # Fetch image shapes to calculate ratio
    (real_y, real_x), (resized_y, resized_x) = bgr_image.shape[:2], resized_image.shape[:2]
    ratio_x, ratio_y = real_x / resized_x, real_y / resized_y

    # Convert base image from bgr to rgb format
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)

    # Iterate through non-zero boxes
    for box in boxes:
        # Pick confidence factor from last place in array
        conf = box[-1]
        if conf > threshold:
            # Convert float to int and multiply corner position of each box by x and y ratio
            # If a bounding box is found at the top of the image,
            # position the upper box edge a little lower so that it is visible on the image
            (x_min, y_min, x_max, y_max) = [
                int(max(corner_position * ratio_y, 10)) if idx % 2 
                else int(corner_position * ratio_x)
                for idx, corner_position in enumerate(box[:-1])
            ]

            # Draw box based on position, parameters in rectangle function are: image, start_point, end_point, color, thickness
            rgb_image = cv2.rectangle(rgb_image, (x_min, y_min), (x_max, y_max), colors["green"], 3)

            # Add text to image based on position and confidence
            # Parameters in text function are: image, text, bottom-left_corner_textfield, font, font_scale, color, thickness, line_type
            if conf_labels:
                rgb_image = cv2.putText(
                    rgb_image,
                    f"{conf:.2f}",
                    (x_min, y_min - 10),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.8,
                    colors["red"],
                    1,
                    cv2.LINE_AA,
                )

    return rgb_image

plt.figure(figsize=(10, 6))
plt.axis("off")
plt.imshow(convert_result_to_image(image, resized_image, boxes, conf_labels=False));
# Imports
import time
from pathlib import Path

import cv2
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import Markdown
from openvino.runtime import Core

# Settings
# The paths of the source and converted models
model_path = Path("model/v3-small_224_1.0_float.pb")
ir_path = model_path.with_suffix(".xml")

# Convert TensorFlow Model to OpenVINO IR Format
# Construct the command for Model Optimizer
mo_command = f"""mo
                 --input_model "{model_path}" 
                 --input_shape "[1,224,224,3]" 
                 --mean_values="[127.5,127.5,127.5]"
                 --scale_values="[127.5]" 
                 --data_type FP16 
                 --output_dir "{model_path.parent}"
                 """
mo_command = " ".join(mo_command.split())
print("Model Optimizer command to convert TensorFlow to OpenVINO:")
display(Markdown(f"`{mo_command}`"))

# Run Model Optimizer if the IR model file does not exist
if not ir_path.exists():
    print("Exporting TensorFlow model to IR... This may take a few minutes.")
    ! $mo_command
else:
    print(f"IR model {ir_path} already exists.")

# Load the Model
ie = Core()
model = ie.read_model(model=ir_path, weights=ir_path.with_suffix(".bin"))
compiled_model = ie.compile_model(model=model, device_name="CPU")

# Get Model Information
input_key = compiled_model.input(0)
output_key = compiled_model.output(0)
network_input_shape = input_key.shape

# Load an Image
# The MobileNet network expects images in RGB format
image = cv2.cvtColor(cv2.imread(filename="data/coco.jpg"), code=cv2.COLOR_BGR2RGB)

# Resize image to network input image shape
resized_image = cv2.resize(src=image, dsize=(224, 224))

# Add a batch dimension to match the network input shape [1, 224, 224, 3]
input_image = np.expand_dims(resized_image, 0)

plt.imshow(image);

# Do Inference
result = compiled_model([input_image])[output_key]
result_index = np.argmax(result)

# Convert the inference result to a class name.
imagenet_classes = open("utils/imagenet_2012.txt").read().splitlines()

# The model description states that for this model, class 0 is background,
# so we add background at the beginning of imagenet_classes
imagenet_classes = ['background'] + imagenet_classes

imagenet_classes[result_index]

# Timing
num_images = 1000

start = time.perf_counter()

for _ in range(num_images):
    compiled_model([input_image])

end = time.perf_counter()
time_ir = end - start

print(
    f"IR model in Inference Engine/CPU: {time_ir/num_images:.4f} "
    f"seconds per image, FPS: {num_images/time_ir:.2f}"
)
# Imports
import sys
import time
from pathlib import Path

import cv2
import numpy as np
import torch
from IPython.display import Markdown, display
from fastseg import MobileV3Large
from openvino.runtime import Core

sys.path.append("../utils")
from notebook_utils import CityScapesSegmentation, segmentation_map_to_image, viz_result_image

# Settings
IMAGE_WIDTH = 1024  # Suggested values: 2048, 1024 or 512. The minimum width is 512.
# Set IMAGE_HEIGHT manually for custom input sizes. Minimum height is 512
IMAGE_HEIGHT = 1024 if IMAGE_WIDTH == 2048 else 512
DIRECTORY_NAME = "model"
BASE_MODEL_NAME = DIRECTORY_NAME + f"/fastseg{IMAGE_WIDTH}"

# Paths where PyTorch, ONNX and OpenVINO IR models will be stored
model_path = Path(BASE_MODEL_NAME).with_suffix(".pth")
onnx_path = model_path.with_suffix(".onnx")
ir_path = model_path.with_suffix(".xml")

# Download the Fastseg Model
print("Downloading the Fastseg model (if it has not been downloaded before)....")
model = MobileV3Large.from_pretrained().cpu().eval()
print("Loaded PyTorch Fastseg model")

# Save the model
model_path.parent.mkdir(exist_ok=True)
torch.save(model.state_dict(), str(model_path))
print(f"Model saved at {model_path}")

# Convert PyTorch model to ONNX
if not onnx_path.exists():
    dummy_input = torch.randn(1, 3, IMAGE_HEIGHT, IMAGE_WIDTH)

    # For the Fastseg model, setting do_constant_folding to False is required
    # for PyTorch>1.5.1
    torch.onnx.export(
        model,
        dummy_input,
        onnx_path,
        opset_version=11,
        do_constant_folding=False,
    )
    print(f"ONNX model exported to {onnx_path}.")
else:
    print(f"ONNX model {onnx_path} already exists.")

# Convert ONNX Model to OpenVINO IR Format
# Construct the command for Model Optimizer
mo_command = f"""mo
                 --input_model "{onnx_path}"
                 --input_shape "[1,3, {IMAGE_HEIGHT}, {IMAGE_WIDTH}]"
                 --mean_values="[123.675, 116.28 , 103.53]"
                 --scale_values="[58.395, 57.12 , 57.375]"
                 --data_type FP16
                 --output_dir "{model_path.parent}"
                 """
mo_command = " ".join(mo_command.split())
print("Model Optimizer command to convert the ONNX model to OpenVINO:")
display(Markdown(f"`{mo_command}`"))

if not ir_path.exists():
    print("Exporting ONNX model to IR... This may take a few minutes.")
    mo_result = %sx $mo_command
    print("\n".join(mo_result))
else:
    print(f"IR model {ir_path} already exists.")

# Show results: Load and Preprocess an Input Image
def normalize(image: np.ndarray) -> np.ndarray:
    """
    Normalize the image to the given mean and standard deviation
    for CityScapes models.
    """
    image = image.astype(np.float32)
    mean = (0.485, 0.456, 0.406)
    std = (0.229, 0.224, 0.225)
    image /= 255.0
    image -= mean
    image /= std
    return image
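
# These mean/std values are the standard ImageNet statistics. The Model
# Optimizer command above bakes the equivalent normalization (scaled by 255)
# into the IR, which is why the IR model below receives the un-normalized
# input_image, while the ONNX model receives normalized_input_image.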

image_filename = "data/street.jpg"
image = cv2.cvtColor(cv2.imread(image_filename), cv2.COLOR_BGR2RGB)

resized_image = cv2.resize(image, (IMAGE_WIDTH, IMAGE_HEIGHT))
normalized_image = normalize(resized_image)

# Convert the resized images to network input shape
input_image = np.expand_dims(np.transpose(resized_image, (2, 0, 1)), 0)
normalized_input_image = np.expand_dims(np.transpose(normalized_image, (2, 0, 1)), 0)

# ONNX Model in Inference Engine
# Load network to Inference Engine
ie = Core()
model_onnx = ie.read_model(model=onnx_path)
compiled_model_onnx = ie.compile_model(model=model_onnx, device_name="CPU")

output_layer_onnx = compiled_model_onnx.output(0)

# Run inference on the input image
res_onnx = compiled_model_onnx([normalized_input_image])[output_layer_onnx]

# Convert network result to segmentation map and display the result
result_mask_onnx = np.squeeze(np.argmax(res_onnx, axis=1)).astype(np.uint8)
viz_result_image(
    image,
    segmentation_map_to_image(result_mask_onnx, CityScapesSegmentation.get_colormap()),
    resize=True,
)

# IR Model in Inference Engine
# Load the network in Inference Engine
ie = Core()
model_ir = ie.read_model(model=ir_path)
compiled_model_ir = ie.compile_model(model=model_ir, device_name="CPU")

# Get input and output layers
output_layer_ir = compiled_model_ir.output(0)

# Run inference on the input image
res_ir = compiled_model_ir([input_image])[output_layer_ir]

result_mask_ir = np.squeeze(np.argmax(res_ir, axis=1)).astype(np.uint8)
viz_result_image(
    image,
    segmentation_map_to_image(result=result_mask_ir, colormap=CityScapesSegmentation.get_colormap()),
    resize=True,
)

# PyTorch Comparison
with torch.no_grad():
    result_torch = model(torch.as_tensor(normalized_input_image).float())

result_mask_torch = torch.argmax(result_torch, dim=1).squeeze(0).numpy().astype(np.uint8)
viz_result_image(
    image,
    segmentation_map_to_image(result=result_mask_torch, colormap=CityScapesSegmentation.get_colormap()),
    resize=True,
)

# Performance Comparison
num_images = 20

start = time.perf_counter()
for _ in range(num_images):
    compiled_model_onnx([normalized_input_image])
end = time.perf_counter()
time_onnx = end - start
print(
    f"ONNX model in Inference Engine/CPU: {time_onnx/num_images:.3f} "
    f"seconds per image, FPS: {num_images/time_onnx:.2f}"
)

start = time.perf_counter()
for _ in range(num_images):
    compiled_model_ir([input_image])
end = time.perf_counter()
time_ir = end - start
print(
    f"IR model in Inference Engine/CPU: {time_ir/num_images:.3f} "
    f"seconds per image, FPS: {num_images/time_ir:.2f}"
)

with torch.no_grad():
    start = time.perf_counter()
    for _ in range(num_images):
        model(torch.as_tensor(input_image).float())
    end = time.perf_counter()
    time_torch = end - start
print(
    f"PyTorch model on CPU: {time_torch/num_images:.3f} seconds per image, "
    f"FPS: {num_images/time_torch:.2f}"
)

if "GPU" in ie.available_devices:
    compiled_model_onnx_gpu = ie.compile_model(model=model_onnx, device_name="GPU")
    start = time.perf_counter()
    for _ in range(num_images):
        compiled_model_onnx_gpu([input_image])
    end = time.perf_counter()
    time_onnx_gpu = end - start
    print(
        f"ONNX model in Inference Engine/GPU: {time_onnx_gpu/num_images:.3f} "
        f"seconds per image, FPS: {num_images/time_onnx_gpu:.2f}"
    )

    compiled_model_ir_gpu = ie.compile_model(model=model_ir, device_name="GPU")
    start = time.perf_counter()
    for _ in range(num_images):
        compiled_model_ir_gpu([input_image])
    end = time.perf_counter()
    time_ir_gpu = end - start
    print(
        f"IR model in Inference Engine/GPU: {time_ir_gpu/num_images:.3f} "
        f"seconds per image, FPS: {num_images/time_ir_gpu:.2f}"
    )

# Show Device Information
devices = ie.available_devices
for device in devices:
    device_name = ie.get_property(device_name=device, name="FULL_DEVICE_NAME")
    print(f"{device}: {device_name}")
# Imports
import os
import time

import cv2
import matplotlib.pyplot as plt
import numpy as np
import paddlehub as hub
from IPython.display import Markdown, display
from PIL import Image
from openvino.runtime import Core
from paddle.static import InputSpec
from scipy.special import softmax

# Settings
IMAGE_FILENAME = "coco_close.png"

MODEL_NAME = "mobilenet_v3_large_imagenet_ssld"
hub.config.server = "https://paddlepaddle.org.cn/paddlehub"

# Show Inference on PaddlePaddle Model
classifier = hub.Module(name=MODEL_NAME)

# Load image in BGR format, as specified in model documentation
image = cv2.imread(filename=IMAGE_FILENAME)
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
result = classifier.classification(images=[image], top_k=3)
for class_name, softmax_probability in result[0].items():
    print(f"{class_name}, {softmax_probability:.5f}")

from mobilenet_v3_large_imagenet_ssld.data_feed import process_image

pil_image = Image.open(IMAGE_FILENAME)
processed_image = process_image(pil_image)
print(f"Processed image shape: {processed_image.shape}")
# Processed image is in (C,H,W) format, convert to (H,W,C) to show the image
plt.imshow(np.transpose(processed_image, (1, 2, 0)))

# Preparation
input_shape = list(classifier.cpu_predictor.get_input_tensor_shape().values())
print("input shape:", input_shape)
print("mean:", classifier.get_pretrained_images_mean())
print("std:", classifier.get_pretrained_images_std())

# Convert PaddlePaddle Model to ONNX
target_height, target_width = input_shape[0][2:]
x_spec = InputSpec([1, 3, target_height, target_width], "float32", "x")
print(
    "Exporting PaddlePaddle model to ONNX with target_height "
    f"{target_height} and target_width {target_width}"
)
classifier.export_onnx_model(".", input_spec=[x_spec], opset_version=11)

# Convert ONNX model to OpenVINO IR Format
model_xml = f"{MODEL_NAME}.xml"
if not os.path.exists(model_xml):
    mo_command = f'mo --input_model {MODEL_NAME}.onnx --input_shape "[1,3,{target_height},{target_width}]"'
    display(Markdown(f"Model Optimizer command to convert the ONNX model to IR: `{mo_command}`"))
    display(Markdown("_Converting model to IR. This may take a few minutes..._"))
    ! $mo_command
else:
    print(f"{model_xml} already exists.")

# Show Inference on OpenVINO Model
# Load Inference Engine and IR model
ie = Core()
model = ie.read_model(model=f"{MODEL_NAME}.xml", weights=f"{MODEL_NAME}.bin")
compiled_model = ie.compile_model(model=model, device_name="CPU")

# Get model output
output_layer = compiled_model.output(0)

# Read, show, and preprocess input image
# See the "Show Inference on PaddlePaddle Model" section for source of process_image
image = Image.open(IMAGE_FILENAME)
plt.imshow(image)
input_image = process_image(image)[None,]

# Do inference
ie_result = compiled_model([input_image])[output_layer][0]

# Compute softmax probabilities for the inference result and find the top three values
softmax_result = softmax(ie_result)
top_indices = np.argsort(softmax_result)[-3:][::-1]
top_softmax = softmax_result[top_indices]

# Convert the inference results to class names, using the same labels as the PaddlePaddle classifier
for index, softmax_probability in zip(top_indices, top_softmax):
    print(f"{classifier.label_list[index]}, {softmax_probability:.5f}")

# Timing and Comparison
num_images = 50

# PaddlePaddle's classification method expects a BGR numpy array
image = cv2.imread(filename=IMAGE_FILENAME)

# The process_image function expects a PIL image
pil_image = Image.open(fp=IMAGE_FILENAME)

# Show CPU information
ie = Core()
print(f"CPU: {ie.get_property(device_name='CPU', name='FULL_DEVICE_NAME')}")

# Show inference speed on PaddlePaddle model
start = time.perf_counter()
for _ in range(num_images):
    result = classifier.classification(images=[image], top_k=3)
end = time.perf_counter()
time_ir = end - start
print(
    f"PaddlePaddle model on CPU: {time_ir/num_images:.4f} "
    f"seconds per image, FPS: {num_images/time_ir:.2f}\n"
)
print("PaddlePaddle result:")
for class_name, softmax_probability in result[0].items():
    print(f"{class_name}, {softmax_probability:.5f}")
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB));

# Show inference speed on OpenVINO IR model
compiled_model = ie.compile_model(model=model, device_name="CPU")
output_layer = compiled_model.output(0)


start = time.perf_counter()
input_image = process_image(pil_image)[None,]
for _ in range(num_images):
    ie_result = compiled_model([input_image])[output_layer][0]
    result_index = np.argmax(ie_result)
    class_name = classifier.label_list[np.argmax(ie_result)]
    softmax_result = softmax(ie_result)
    top_indices = np.argsort(softmax_result)[-3:][::-1]
    top_softmax = softmax_result[top_indices]

end = time.perf_counter()
time_ir = end - start

print(
    f"IR model in Inference Engine (CPU): {time_ir/num_images:.4f} "
    f"seconds per image, FPS: {num_images/time_ir:.2f}"
)
print()
print("OpenVINO result:")
for index, softmax_probability in zip(top_indices, top_softmax):
    print(f"{classifier.label_list[index]}, {softmax_probability:.5f}")
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB));
# Preparation
model_name = "mobilenet-v2-pytorch"

# Imports
import json
import sys
from pathlib import Path

from IPython.display import Markdown, display
from openvino.runtime import Core

sys.path.append("../utils")
from notebook_utils import DeviceNotFoundAlert, NotebookAlert

# Settings and Configuration
base_model_dir = Path("~/open_model_zoo_models").expanduser()
omz_cache_dir = Path("~/open_model_zoo_cache").expanduser()
precision = "FP16"

# Check if an iGPU is available on this system to use with Benchmark App
ie = Core()
gpu_available = "GPU" in ie.available_devices

print(
    f"base_model_dir: {base_model_dir}, omz_cache_dir: {omz_cache_dir}, gpu_availble: {gpu_available}"
)

# Download Model from Open Model Zoo
download_command = (
    f"omz_downloader --name {model_name} --output_dir {base_model_dir} --cache_dir {omz_cache_dir}"
)
display(Markdown(f"Download command: `{download_command}`"))
display(Markdown(f"Downloading {model_name}..."))
! $download_command

# Convert Model to OpenVINO IR format
convert_command = f"omz_converter --name {model_name} --precisions {precision} --download_dir {base_model_dir} --output_dir {base_model_dir}"
display(Markdown(f"Convert command: `{convert_command}`"))
display(Markdown(f"Converting {model_name}..."))

! $convert_command

# Get Model Information
model_info_output = %sx omz_info_dumper --name $model_name
model_info = json.loads(model_info_output.get_nlstr())

if len(model_info) > 1:
    NotebookAlert(
        f"There are multiple IR files for the {model_name} model. The first model in the "
        "omz_info_dumper output will be used for benchmarking. Change "
        "`selected_model_info` in the cell below to select a different model from the list.",
        "warning",
    )

model_info

selected_model_info = model_info[0]
model_path = (
    base_model_dir
    / Path(selected_model_info["subdirectory"])
    / Path(f"{precision}/{selected_model_info['name']}.xml")
)
print(model_path, "exists:", model_path.exists())

benchmark_command = f"benchmark_app -m {model_path} -t 15"
display(Markdown(f"Benchmark command: `{benchmark_command}`"))
display(Markdown(f"Benchmarking {model_name} on CPU with async inference for 15 seconds..."))

! $benchmark_command

# Benchmark with Different Settings
def benchmark_model(model_xml, device="CPU", seconds=60, api="async", batch=1):
    ie = Core()
    model_path = Path(model_xml)
    if ("GPU" in device) and ("GPU" not in ie.available_devices):
        DeviceNotFoundAlert("GPU")
    else:
        benchmark_command = f"benchmark_app -m {model_path} -d {device} -t {seconds} -api {api} -b {batch}"
        display(Markdown(f"**Benchmark {model_path.name} with {device} for {seconds} seconds with {api} inference**"))
        display(Markdown(f"Benchmark command: `{benchmark_command}`"))

        benchmark_output = %sx $benchmark_command
        print("command ended")
        benchmark_result = [line for line in benchmark_output
                            if not (line.startswith(r"[") or line.startswith("  ") or line == "")]
        print("\n".join(benchmark_result))

ie = Core()

# Show devices available for OpenVINO Inference Engine
for device in ie.available_devices:
    device_name = ie.get_property(device, "FULL_DEVICE_NAME")
    print(f"{device}: {device_name}")

benchmark_model(model_path, device="CPU", seconds=15, api="async")

benchmark_model(model_path, device="AUTO", seconds=15, api="async")

benchmark_model(model_path, device="GPU", seconds=15, api="async")

benchmark_model(model_path, device="MULTI:CPU,GPU", seconds=15, api="async")
# Imports
import os
import sys
import zipfile
from pathlib import Path

import numpy as np
from monai.transforms import LoadImage
from openvino.inference_engine import IECore

sys.path.append("../utils")
from models.custom_segmentation import SegmentationModel
from notebook_utils import benchmark_model, download_file, show_live_inference

# Settings
# The directory that contains the IR model (xml and bin) files
MODEL_PATH = "pretrained_model/quantized_unet_kits19.xml"
# Uncomment the next line to use the FP16 model instead of the quantized model
# MODEL_PATH = "pretrained_model/unet_kits19.xml"

# Benchmark Model Performance
ie = IECore()
# By default, benchmark on MULTI:CPU,GPU if a GPU is available, otherwise on CPU.
device = "MULTI:CPU,GPU" if "GPU" in ie.available_devices else "CPU"
# Uncomment one of the options below to benchmark on other devices
# device = "GPU"
# device = "CPU"
# device = "AUTO"

# Benchmark model
benchmark_model(model_path=MODEL_PATH, device=device, seconds=15)

# Download and Prepare Data
# Directory that contains the CT scan data. This directory should contain subdirectories
# case_00XXX where XXX is between 000 and 299
BASEDIR = Path("kits19_frames_1")
# The CT scan case number. For example: 16 for data from the case_00016 directory
# Currently only 117 is supported
CASE = 117

case_path = BASEDIR / f"case_{CASE:05d}"

if not case_path.exists():
    filename = download_file(
        f"https://storage.openvinotoolkit.org/data/test_data/openvino_notebooks/kits19/case_{CASE:05d}.zip"
    )
    with zipfile.ZipFile(filename, "r") as zip_ref:
        zip_ref.extractall(path=BASEDIR)
    os.remove(filename)  # remove zipfile
    print(f"Downloaded and extracted data for case_{CASE:05d}")
else:
    print(f"Data for case_{CASE:05d} exists")

# Load Model and List of Image Files
ie = IECore()
segmentation_model = SegmentationModel(
    ie=ie, model_path=Path(MODEL_PATH), sigmoid=True, rotate_and_flip=True
)
image_paths = sorted(case_path.glob("imaging_frames/*jpg"))

print(f"{case_path.name}, {len(image_paths)} images")

# Show Inference
# Possible options for device include "CPU", "GPU", "AUTO", "MULTI"
device = "MULTI:CPU,GPU" if "GPU" in ie.available_devices else "CPU"
reader = LoadImage(image_only=True, dtype=np.uint8)

show_live_inference(
    ie=ie, image_paths=image_paths, model=segmentation_model, device=device, reader=reader
)
# Preparation
# Imports
import sys
import time
from pathlib import Path

import cv2
import matplotlib.cm
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import (
    HTML,
    FileLink,
    Pretty,
    ProgressBar,
    Video,
    clear_output,
    display,
)
from openvino.runtime import Core

sys.path.append("../utils")
from notebook_utils import load_image

# Settings
DEVICE = "CPU"
MODEL_FILE = "model/MiDaS_small.xml"

model_xml_path = Path(MODEL_FILE)

# Functions
def normalize_minmax(data):
    """Normalizes the values in `data` between 0 and 1"""
    return (data - data.min()) / (data.max() - data.min())


def convert_result_to_image(result, colormap="viridis"):
    """
    Convert network result of floating point numbers to an RGB image with
    integer values from 0-255 by applying a colormap.

    `result` is expected to be a single network result in 1,H,W shape
    `colormap` is a matplotlib colormap.
    See https://matplotlib.org/stable/tutorials/colors/colormaps.html
    """
    cmap = matplotlib.cm.get_cmap(colormap)
    result = result.squeeze(0)
    result = normalize_minmax(result)
    result = cmap(result)[:, :, :3] * 255
    result = result.astype(np.uint8)
    return result


def to_rgb(image_data) -> np.ndarray:
    """
    Convert image_data from BGR to RGB
    """
    return cv2.cvtColor(image_data, cv2.COLOR_BGR2RGB)

# Load the Model
ie = Core()
model = ie.read_model(model=model_xml_path, weights=model_xml_path.with_suffix(".bin"))
compiled_model = ie.compile_model(model=model, device_name=DEVICE)

input_key = compiled_model.input(0)
output_key = compiled_model.output(0)

network_input_shape = list(input_key.shape)
network_image_height, network_image_width = network_input_shape[2:]

# Load, resize and reshape input image
IMAGE_FILE = "data/coco_bike.jpg"
image = load_image(path=IMAGE_FILE)

# resize to input shape for network; cv2.resize expects dsize as (width, height)
resized_image = cv2.resize(src=image, dsize=(network_image_width, network_image_height))

# reshape image to network input shape NCHW
input_image = np.expand_dims(np.transpose(resized_image, (2, 0, 1)), 0)

# Do inference on image
result = compiled_model([input_image])[output_key]

# convert network result of disparity map to an image that shows
# distance as colors
result_image = convert_result_to_image(result=result)

# resize back to original image shape. cv2.resize expects shape
# in (width, height), [::-1] reverses the (height, width) shape to match this
result_image = cv2.resize(result_image, image.shape[:2][::-1])

# Display monodepth image
fig, ax = plt.subplots(1, 2, figsize=(20, 15))
ax[0].imshow(to_rgb(image))
ax[1].imshow(result_image);

# Video Settings
# Video source: https://www.youtube.com/watch?v=fu1xcQdJRws (Public Domain)
VIDEO_FILE = "data/Coco Walking in Berkeley.mp4"
# Number of seconds of input video to process. Set to 0 to process
# the full video.
NUM_SECONDS = 4
# Set ADVANCE_FRAMES to 1 to process every frame from the input video
# Set ADVANCE_FRAMES to 2 to process every second frame. This reduces
# the time it takes to process the video
ADVANCE_FRAMES = 2
# Set SCALE_OUTPUT to reduce the size of the result video
# If SCALE_OUTPUT is 0.5, the width and height of the result video
# will be half the width and height of the input video
SCALE_OUTPUT = 0.5
# The format to use for video encoding. vp09 is slow,
# but it works on most systems.
# Try the THEO encoding if you have FFMPEG installed.
# FOURCC = cv2.VideoWriter_fourcc(*"THEO")
FOURCC = cv2.VideoWriter_fourcc(*"vp09")

# Create Path objects for the input video and the resulting video
output_directory = Path("output")
output_directory.mkdir(exist_ok=True)
result_video_path = output_directory / f"{Path(VIDEO_FILE).stem}_monodepth.mp4"

# Load Video
cap = cv2.VideoCapture(str(VIDEO_FILE))
ret, image = cap.read()
if not ret:
    raise ValueError(f"The video at {VIDEO_FILE} cannot be read.")
input_fps = cap.get(cv2.CAP_PROP_FPS)
input_video_frame_height, input_video_frame_width = image.shape[:2]

target_fps = input_fps / ADVANCE_FRAMES
target_frame_height = int(input_video_frame_height * SCALE_OUTPUT)
target_frame_width = int(input_video_frame_width * SCALE_OUTPUT)

cap.release()
print(
    f"The input video has a frame width of {input_video_frame_width}, "
    f"frame height of {input_video_frame_height} and runs at {input_fps:.2f} fps"
)
print(
    "The monodepth video will be scaled with a factor "
    f"{SCALE_OUTPUT}, have width {target_frame_width}, "
    f"height {target_frame_height}, and run at {target_fps:.2f} fps"
)

# Do Inference on a Video and Create Monodepth Video
# Initialize variables
input_video_frame_nr = 0
start_time = time.perf_counter()
total_inference_duration = 0

# Open input video
cap = cv2.VideoCapture(str(VIDEO_FILE))

# Create result video
out_video = cv2.VideoWriter(
    str(result_video_path),
    FOURCC,
    target_fps,
    (target_frame_width * 2, target_frame_height),
)

num_frames = int(NUM_SECONDS * input_fps)
total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT) if num_frames == 0 else num_frames
progress_bar = ProgressBar(total=total_frames)
progress_bar.display()

try:
    while cap.isOpened():
        ret, image = cap.read()
        if not ret:
            cap.release()
            break

        if input_video_frame_nr >= total_frames:
            break

        # Only process every ADVANCE_FRAMES-th frame
        # Prepare the frame for inference
        # resize to input shape for network; cv2.resize expects dsize as (width, height)
        resized_image = cv2.resize(src=image, dsize=(network_image_width, network_image_height))
        # reshape image to network input shape NCHW
        input_image = np.expand_dims(np.transpose(resized_image, (2, 0, 1)), 0)

        # Do inference
        inference_start_time = time.perf_counter()
        result = compiled_model([input_image])[output_key]
        inference_stop_time = time.perf_counter()
        inference_duration = inference_stop_time - inference_start_time
        total_inference_duration += inference_duration

        if input_video_frame_nr % (10 * ADVANCE_FRAMES) == 0:
            clear_output(wait=True)
            progress_bar.display()
            # input_video_frame_nr // ADVANCE_FRAMES gives the number of
            # frames that have been processed by the network
            display(
                Pretty(
                    f"Processed frame {input_video_frame_nr // ADVANCE_FRAMES}"
                    f"/{total_frames // ADVANCE_FRAMES}. "
                    f"Inference time per frame: {inference_duration:.2f} seconds "
                    f"({1/inference_duration:.2f} FPS)"
                )
            )

        # Transform network result to RGB image
        result_frame = to_rgb(convert_result_to_image(result))
        # Resize image and result to target frame shape
        result_frame = cv2.resize(result_frame, (target_frame_width, target_frame_height))
        image = cv2.resize(image, (target_frame_width, target_frame_height))
        # Put image and result side by side
        stacked_frame = np.hstack((image, result_frame))
        # Save frame to video
        out_video.write(stacked_frame)

        input_video_frame_nr = input_video_frame_nr + ADVANCE_FRAMES
        cap.set(cv2.CAP_PROP_POS_FRAMES, input_video_frame_nr)

        progress_bar.progress = input_video_frame_nr
        progress_bar.update()

except KeyboardInterrupt:
    print("Processing interrupted.")
finally:
    clear_output()
    processed_frames = input_video_frame_nr // ADVANCE_FRAMES
    out_video.release()
    cap.release()
    end_time = time.perf_counter()
    duration = end_time - start_time

    print(
        f"Processed {processed_frames} frames in {duration:.2f} seconds. "
        f"Total FPS (including video processing): {processed_frames/duration:.2f}. "
        f"Inference FPS: {processed_frames/total_inference_duration:.2f}"
    )
    print(f"Monodepth Video saved to '{str(result_video_path)}'.")

# Display Monodepth Video
video = Video(result_video_path, width=800, embed=True)
if not result_video_path.exists():
    plt.imshow(stacked_frame)
    raise ValueError("OpenCV was unable to write the video file. Showing one video frame.")
else:
    print(f"Showing monodepth video saved at\n{result_video_path.resolve()}")
    print(
        "If you cannot see the video in your browser, please click on the "
        "following link to download the video "
    )
    video_link = FileLink(result_video_path)
    video_link.html_link_str = "<a href='%s' download>%s</a>"
    display(HTML(video_link._repr_html_()))
    display(video)
# Imports
import collections
import os
import sys
import time

import cv2
import numpy as np
from IPython import display
from openvino.runtime import Core

sys.path.append("../utils")
import notebook_utils as utils

# Download the Model
# directory where model will be downloaded
base_model_dir = "model"

# model name as named in Open Model Zoo
model_name = "ssdlite_mobilenet_v2"

download_command = f"omz_downloader " \
                   f"--name {model_name} " \
                   f"--output_dir {base_model_dir} " \
                   f"--cache_dir {base_model_dir}"
! $download_command

# Convert the Model
precision = "FP16"

# output path for the conversion
converted_model_path = f"model/public/{model_name}/{precision}/{model_name}.xml"

if not os.path.exists(converted_model_path):
    convert_command = f"omz_converter " \
                      f"--name {model_name} " \
                      f"--download_dir {base_model_dir} " \
                      f"--precisions {precision}"
    ! $convert_command

# Load the Model
# initialize inference engine
ie_core = Core()
# read the network and corresponding weights from file
model = ie_core.read_model(model=converted_model_path)
# compile the model for the CPU (you can choose manually CPU, GPU, MYRIAD etc.)
# or let the engine choose the best available device (AUTO)
compiled_model = ie_core.compile_model(model=model, device_name="CPU")

# get input and output nodes
input_layer = compiled_model.input(0)
output_layer = compiled_model.output(0)

# get input size
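# this model keeps the TensorFlow NHWC layout, so height and width are dimensions 1 and 2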
height, width = list(input_layer.shape)[1:3]

# Process Results
# https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
classes = [
    "background", "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train",
    "truck", "boat", "traffic light", "fire hydrant", "street sign", "stop sign",
    "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant",
    "bear", "zebra", "giraffe", "hat", "backpack", "umbrella", "shoe", "eye glasses",
    "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite",
    "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle",
    "plate", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
    "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair",
    "couch", "potted plant", "bed", "mirror", "dining table", "window", "desk", "toilet",
    "door", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven",
    "toaster", "sink", "refrigerator", "blender", "book", "clock", "vase", "scissors",
    "teddy bear", "hair drier", "toothbrush", "hair brush"
]

# colors for above classes (Rainbow Color Map)
colors = cv2.applyColorMap(
    src=np.arange(0, 255, 255 / len(classes), dtype=np.float32).astype(np.uint8),
    colormap=cv2.COLORMAP_RAINBOW,
).squeeze()


def process_results(frame, results, thresh=0.6):
    # size of the original frame
    h, w = frame.shape[:2]
    # results is a tensor [1, 1, 100, 7]
    results = results.squeeze()
    boxes = []
    labels = []
    scores = []
    for _, label, score, xmin, ymin, xmax, ymax in results:
        # create a box with pixel coordinates from the box with normalized coordinates [0,1]
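        # boxes are stored as (x, y, width, height), the format cv2.dnn.NMSBoxes expects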
        boxes.append(
            tuple(map(int, (xmin * w, ymin * h, (xmax - xmin) * w, (ymax - ymin) * h)))
        )
        labels.append(int(label))
        scores.append(float(score))

    # apply non-maximum suppression to get rid of many overlapping entities
    # see https://paperswithcode.com/method/non-maximum-suppression
    # this algorithm returns indices of objects to keep
    indices = cv2.dnn.NMSBoxes(
        bboxes=boxes, scores=scores, score_threshold=thresh, nms_threshold=0.6
    )

    # if there are no boxes
    if len(indices) == 0:
        return []

    # filter detected objects
    return [(labels[idx], scores[idx], boxes[idx]) for idx in indices.flatten()]


def draw_boxes(frame, boxes):
    for label, score, box in boxes:
        # choose color for the label
        color = tuple(map(int, colors[label]))
        # draw box
        x2 = box[0] + box[2]
        y2 = box[1] + box[3]
        cv2.rectangle(img=frame, pt1=box[:2], pt2=(x2, y2), color=color, thickness=3)

        # draw label name inside the box
        cv2.putText(
            img=frame,
            text=f"{classes[label]} {score:.2f}",
            org=(box[0] + 10, box[1] + 30),
            fontFace=cv2.FONT_HERSHEY_COMPLEX,
            fontScale=frame.shape[1] / 1000,
            color=color,
            thickness=1,
            lineType=cv2.LINE_AA,
        )

    return frame

# Main Processing Function
# main processing function to run object detection
def run_object_detection(source=0, flip=False, use_popup=False, skip_first_frames=0):
    player = None
    try:
        # create video player to play with target fps
        player = utils.VideoPlayer(
            source=source, flip=flip, fps=30, skip_first_frames=skip_first_frames
        )
        # start capturing
        player.start()
        if use_popup:
            title = "Press ESC to Exit"
            cv2.namedWindow(
                winname=title, flags=cv2.WINDOW_GUI_NORMAL | cv2.WINDOW_AUTOSIZE
            )

        processing_times = collections.deque()
        while True:
            # grab the frame
            frame = player.next()
            if frame is None:
                print("Source ended")
                break
            # if frame larger than full HD, reduce size to improve the performance
            scale = 1280 / max(frame.shape)
            if scale < 1:
                frame = cv2.resize(
                    src=frame,
                    dsize=None,
                    fx=scale,
                    fy=scale,
                    interpolation=cv2.INTER_AREA,
                )

            # resize image and change dims to fit neural network input
            input_img = cv2.resize(
                src=frame, dsize=(width, height), interpolation=cv2.INTER_AREA
            )
            # create batch of images (size = 1)
            input_img = input_img[np.newaxis, ...]

            # measure processing time

            start_time = time.time()
            # get results
            results = compiled_model([input_img])[output_layer]
            stop_time = time.time()
            # get poses from network results
            boxes = process_results(frame=frame, results=results)

            # draw boxes on a frame
            frame = draw_boxes(frame=frame, boxes=boxes)

            processing_times.append(stop_time - start_time)
            # use processing times from last 200 frames
            if len(processing_times) > 200:
                processing_times.popleft()

            _, f_width = frame.shape[:2]
            # mean processing time [ms]
            processing_time = np.mean(processing_times) * 1000
            fps = 1000 / processing_time
            cv2.putText(
                img=frame,
                text=f"Inference time: {processing_time:.1f}ms ({fps:.1f} FPS)",
                org=(20, 40),
                fontFace=cv2.FONT_HERSHEY_COMPLEX,
                fontScale=f_width / 1000,
                color=(0, 0, 255),
                thickness=1,
                lineType=cv2.LINE_AA,
            )

            # use this workaround if there is flickering
            if use_popup:
                cv2.imshow(winname=title, mat=frame)
                key = cv2.waitKey(1)
                # escape = 27
                if key == 27:
                    break
            else:
                # encode numpy array to jpg
                _, encoded_img = cv2.imencode(
                    ext=".jpg", img=frame, params=[cv2.IMWRITE_JPEG_QUALITY, 100]
                )
                # create IPython image
                i = display.Image(data=encoded_img)
                # display the image in this notebook
                display.clear_output(wait=True)
                display.display(i)
    # ctrl-c
    except KeyboardInterrupt:
        print("Interrupted")
    # any different error
    except RuntimeError as e:
        print(e)
    finally:
        if player is not None:
            # stop capturing
            player.stop()
        if use_popup:
            cv2.destroyAllWindows()

# Run Live Object Detection
run_object_detection(source=0, flip=True, use_popup=False)

# Run Object Detection on a Video File
video_file = "../201-vision-monodepth/data/Coco Walking in Berkeley.mp4"

run_object_detection(source=video_file, flip=False, use_popup=False)
# Imports
import collections
import os
import sys
import time

import cv2
import numpy as np
from IPython import display
from numpy.lib.stride_tricks import as_strided
from openvino.runtime import Core

from decoder import OpenPoseDecoder

sys.path.append("../utils")
import notebook_utils as utils

# Download the model
# directory where model will be downloaded
base_model_dir = "model"

# model name as named in Open Model Zoo
model_name = "human-pose-estimation-0001"
# selected precision (FP32, FP16, FP16-INT8)
precision = "FP16-INT8"

model_path = f"model/intel/{model_name}/{precision}/{model_name}.xml"
model_weights_path = f"model/intel/{model_name}/{precision}/{model_name}.bin"

if not os.path.exists(model_path):
    download_command = f"omz_downloader " \
                       f"--name {model_name} " \
                       f"--precision {precision} " \
                       f"--output_dir {base_model_dir}"
    ! $download_command

# Load the model
# initialize inference engine
ie_core = Core()
# read the network and corresponding weights from file
model = ie_core.read_model(model=model_path, weights=model_weights_path)
# load the model on the CPU (you can use GPU or MYRIAD as well)
compiled_model = ie_core.compile_model(model=model, device_name="CPU")

# get input and output names of nodes
input_layer = compiled_model.input(0)
output_layers = list(compiled_model.outputs)

# get input size
height, width = list(input_layer.shape)[2:]

# Processing OpenPoseDecoder
decoder = OpenPoseDecoder()

# Process Results
# 2d pooling in numpy (from: https://stackoverflow.com/a/54966908/1624463)
def pool2d(A, kernel_size, stride, padding, pool_mode="max"):
    """
    2D Pooling

    Parameters:
        A: input 2D array
        kernel_size: int, the size of the window
        stride: int, the stride of the window
        padding: int, implicit zero paddings on both sides of the input
        pool_mode: string, 'max' or 'avg'
    """
    # Padding
    A = np.pad(A, padding, mode="constant")

    # Window view of A
    output_shape = (
        (A.shape[0] - kernel_size) // stride + 1,
        (A.shape[1] - kernel_size) // stride + 1,
    )
    kernel_size = (kernel_size, kernel_size)
    A_w = as_strided(
        A,
        shape=output_shape + kernel_size,
        strides=(stride * A.strides[0], stride * A.strides[1]) + A.strides
    )
    A_w = A_w.reshape(-1, *kernel_size)

    # Return the result of pooling
    if pool_mode == "max":
        return A_w.max(axis=(1, 2)).reshape(output_shape)
    elif pool_mode == "avg":
        return A_w.mean(axis=(1, 2)).reshape(output_shape)


# non maximum suppression
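# keep only cells that equal their local maximum (the pooled value); all other cells become zero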
def heatmap_nms(heatmaps, pooled_heatmaps):
    return heatmaps * (heatmaps == pooled_heatmaps)


# get poses from results
def process_results(img, pafs, heatmaps):
    # this processing comes from
    # https://github.com/openvinotoolkit/open_model_zoo/blob/master/demos/common/python/models/open_pose.py
    pooled_heatmaps = np.array(
        [[pool2d(h, kernel_size=3, stride=1, padding=1, pool_mode="max") for h in heatmaps[0]]]
    )
    nms_heatmaps = heatmap_nms(heatmaps, pooled_heatmaps)

    # decode poses
    poses, scores = decoder(heatmaps, nms_heatmaps, pafs)
    output_shape = list(compiled_model.output(index=0).partial_shape)
    output_scale = img.shape[1] / output_shape[3].get_length(), img.shape[0] / output_shape[2].get_length()
    # multiply coordinates by scaling factor
    poses[:, :, :2] *= output_scale
    return poses, scores

# Draw Pose Overlays
colors = ((255, 0, 0), (255, 0, 255), (170, 0, 255), (255, 0, 85), (255, 0, 170), (85, 255, 0),
          (255, 170, 0), (0, 255, 0), (255, 255, 0), (0, 255, 85), (170, 255, 0), (0, 85, 255),
          (0, 255, 170), (0, 0, 255), (0, 255, 255), (85, 0, 255), (0, 170, 255))

default_skeleton = ((15, 13), (13, 11), (16, 14), (14, 12), (11, 12), (5, 11), (6, 12), (5, 6), (5, 7),
                    (6, 8), (7, 9), (8, 10), (1, 2), (0, 1), (0, 2), (1, 3), (2, 4), (3, 5), (4, 6))


def draw_poses(img, poses, point_score_threshold, skeleton=default_skeleton):
    if poses.size == 0:
        return img

    img_limbs = np.copy(img)
    for pose in poses:
        points = pose[:, :2].astype(np.int32)
        points_scores = pose[:, 2]
        # Draw joints.
        for i, (p, v) in enumerate(zip(points, points_scores)):
            if v > point_score_threshold:
                cv2.circle(img, tuple(p), 1, colors[i], 2)
        # Draw limbs.
        for i, j in skeleton:
            if points_scores[i] > point_score_threshold and points_scores[j] > point_score_threshold:
                cv2.line(img_limbs, tuple(points[i]), tuple(points[j]), color=colors[j], thickness=4)
    cv2.addWeighted(img, 0.4, img_limbs, 0.6, 0, dst=img)
    return img

# Main Processing Function
# main processing function to run pose estimation
def run_pose_estimation(source=0, flip=False, use_popup=False, skip_first_frames=0):
    pafs_output_key = compiled_model.output("Mconv7_stage2_L1")
    heatmaps_output_key = compiled_model.output("Mconv7_stage2_L2")
    player = None
    try:
        # create video player to play with target fps
        player = utils.VideoPlayer(source, flip=flip, fps=30, skip_first_frames=skip_first_frames)
        # start capturing
        player.start()
        if use_popup:
            title = "Press ESC to Exit"
            cv2.namedWindow(title, cv2.WINDOW_GUI_NORMAL | cv2.WINDOW_AUTOSIZE)

        processing_times = collections.deque()

        while True:
            # grab the frame
            frame = player.next()
            if frame is None:
                print("Source ended")
                break
            # if frame larger than full HD, reduce size to improve the performance
            scale = 1280 / max(frame.shape)
            if scale < 1:
                frame = cv2.resize(frame, None, fx=scale, fy=scale, interpolation=cv2.INTER_AREA)

            # resize image and change dims to fit neural network input
            # (see https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/human-pose-estimation-0001)
            input_img = cv2.resize(frame, (width, height), interpolation=cv2.INTER_AREA)
            # create batch of images (size = 1)
            input_img = input_img.transpose((2,0,1))[np.newaxis, ...]

            # measure processing time
            start_time = time.time()
            # get results
            results = compiled_model([input_img])
            stop_time = time.time()

            pafs = results[pafs_output_key]
            heatmaps = results[heatmaps_output_key]
            # get poses from network results
            poses, scores = process_results(frame, pafs, heatmaps)

            # draw poses on a frame
            frame = draw_poses(frame, poses, 0.1)

            processing_times.append(stop_time - start_time)
            # use processing times from last 200 frames
            if len(processing_times) > 200:
                processing_times.popleft()

            _, f_width = frame.shape[:2]
            # mean processing time [ms]
            processing_time = np.mean(processing_times) * 1000
            fps = 1000 / processing_time
            cv2.putText(frame, f"Inference time: {processing_time:.1f}ms ({fps:.1f} FPS)", (20, 40),
                        cv2.FONT_HERSHEY_COMPLEX, f_width / 1000, (0, 0, 255), 1, cv2.LINE_AA)

            # use this workaround if there is flickering
            if use_popup:
                cv2.imshow(title, frame)
                key = cv2.waitKey(1)
                # escape = 27
                if key == 27:
                    break
            else:
                # encode numpy array to jpg
                _, encoded_img = cv2.imencode(".jpg", frame, params=[cv2.IMWRITE_JPEG_QUALITY, 90])
                # create IPython image
                i = display.Image(data=encoded_img)
                # display the image in this notebook
                display.clear_output(wait=True)
                display.display(i)
    # ctrl-c
    except KeyboardInterrupt:
        print("Interrupted")
    # any different error
    except RuntimeError as e:
        print(e)
    finally:
        if player is not None:
            # stop capturing
            player.stop()
        if use_popup:
            cv2.destroyAllWindows()

# Run Live Pose Estimation
run_pose_estimation(source=0, flip=True, use_popup=False)

# Run Pose Estimation on a Video File
video_file = "https://github.com/intel-iot-devkit/sample-videos/blob/master/store-aisle-detection.mp4?raw=true"

run_pose_estimation(video_file, flip=False, use_popup=False, skip_first_frames=500)
# Imports
import collections
import os
import sys
import time
from typing import Tuple, List

import cv2
import numpy as np
from IPython import display
from openvino.runtime import Core
from openvino.runtime.ie_api import CompiledModel

sys.path.append("../utils")
import notebook_utils as utils

# Download the models
# Directory where model will be downloaded
base_model_dir = "model"
# Model name as named in Open Model Zoo
model_name = "action-recognition-0001"
# Selected precision (FP32, FP16, FP16-INT8)
precision = "FP16"
model_path_decoder = (
    f"model/intel/{model_name}/{model_name}-decoder/{precision}/{model_name}-decoder.xml"
)
model_path_encoder = (
    f"model/intel/{model_name}/{model_name}-encoder/{precision}/{model_name}-encoder.xml"
)
if not os.path.exists(model_path_decoder) or not os.path.exists(model_path_encoder):
    download_command = f"omz_downloader " \
                       f"--name {model_name} " \
                       f"--precision {precision} " \
                       f"--output_dir {base_model_dir}"
    ! $download_command

# Load your labels
labels = "data/kinetics.txt"

with open(labels) as f:
    labels = [line.strip() for line in f]

print(labels[0:9], np.shape(labels))

# Model Initialization function
# Initialize inference engine
ie_core = Core()


def model_init(model_path: str) -> Tuple:
    """
    Read the network and weights from file, load the
    model on the CPU and get input and output names of nodes

    :param model_path: model architecture path *.xml
    :returns:
            input_keys: input node for the model
            output_keys: output node for the model
            compiled_model: compiled model
    """

    # Read the network and corresponding weights from file
    model = ie_core.read_model(model=model_path)
    # compile the model for the CPU (you can use GPU or MYRIAD as well)
    compiled_model = ie_core.compile_model(model=model, device_name="CPU")
    # Get input and output names of nodes
    input_keys = compiled_model.input(0)
    output_keys = compiled_model.output(0)
    return input_keys, output_keys, compiled_model

# Initialization for Encoder and Decoder
# Encoder initialization
input_key_en, output_keys_en, compiled_model_en = model_init(model_path_encoder)
# Decoder initialization
input_key_de, output_keys_de, compiled_model_de = model_init(model_path_decoder)

# Get input size - Encoder
height_en, width_en = list(input_key_en.shape)[2:]
# Get input size - Decoder
frames2decode = list(input_key_de.shape)[1]

# Helper functions
def center_crop(frame: np.ndarray) -> Tuple[np.ndarray, List]:
    """
    Center crop squared the original frame to standardize the input image to the encoder model

    :param frame: input frame
    :returns: center-crop-squared frame and the crop ROI [y_min, y_max, x_min, x_max]
    """
    img_h, img_w, _ = frame.shape
    min_dim = min(img_h, img_w)
    start_x = int((img_w - min_dim) / 2.0)
    start_y = int((img_h - min_dim) / 2.0)
    roi = [start_y, (start_y + min_dim), start_x, (start_x + min_dim)]
    return frame[start_y : (start_y + min_dim), start_x : (start_x + min_dim), ...], roi


def adaptive_resize(frame: np.ndarray, size: int) -> np.ndarray:
    """
    Resize the frame so that its shorter side equals "size", preserving the aspect ratio

    :param frame: input frame
    :param size: input size to encoder model
    :returns: resized frame, np.array type
    """
    h, w, _ = frame.shape
    scale = size / min(h, w)
    w_scaled, h_scaled = int(w * scale), int(h * scale)
    if w_scaled == w and h_scaled == h:
        return frame
    return cv2.resize(frame, (w_scaled, h_scaled))
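
# Shape sanity check for the two helpers above (synthetic frame, for
# illustration only; not part of the original notebook): the shorter
# side becomes "size", and the center crop then yields a square image.
demo_frame = np.zeros((480, 640, 3), dtype=np.uint8)
demo_resized = adaptive_resize(demo_frame, 224)
assert demo_resized.shape == (224, 298, 3)
demo_cropped, demo_roi = center_crop(demo_resized)
assert demo_cropped.shape == (224, 224, 3)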


def decode_output(probs: np.ndarray, labels: List, top_k: int = 3) -> Tuple[List, List]:
    """
    Decodes top probabilities into corresponding label names

    :param probs: confidence vector for 400 actions
    :param labels: list of actions
    :param top_k: The k most probable positions in the list of labels
    :returns: decoded_labels: The k most probable actions from the labels list
              decoded_top_probs: confidence for the k most probable actions
    """
    top_ind = np.argsort(-1 * probs)[:top_k]
    out_label = np.array(labels)[top_ind.astype(int)]
    decoded_labels = [out_label[0][0], out_label[0][1], out_label[0][2]]
    top_probs = np.array(probs)[0][top_ind.astype(int)]
    decoded_top_probs = [top_probs[0][0], top_probs[0][1], top_probs[0][2]]
    return decoded_labels, decoded_top_probs


def rec_frame_display(frame: np.ndarray, roi) -> np.ndarray:
    """
    Draw a rectangle marking the ROI over the actual frame

    :param frame: input frame
    :param roi: Region of interest, image section processed by the Encoder
    :returns: frame with the drawn shape

    """

    cv2.line(frame, (roi[2] + 3, roi[0] + 3), (roi[2] + 3, roi[0] + 100), (0, 200, 0), 2)
    cv2.line(frame, (roi[2] + 3, roi[0] + 3), (roi[2] + 100, roi[0] + 3), (0, 200, 0), 2)
    cv2.line(frame, (roi[3] - 3, roi[1] - 3), (roi[3] - 3, roi[1] - 100), (0, 200, 0), 2)
    cv2.line(frame, (roi[3] - 3, roi[1] - 3), (roi[3] - 100, roi[1] - 3), (0, 200, 0), 2)
    cv2.line(frame, (roi[3] - 3, roi[0] + 3), (roi[3] - 3, roi[0] + 100), (0, 200, 0), 2)
    cv2.line(frame, (roi[3] - 3, roi[0] + 3), (roi[3] - 100, roi[0] + 3), (0, 200, 0), 2)
    cv2.line(frame, (roi[2] + 3, roi[1] - 3), (roi[2] + 3, roi[1] - 100), (0, 200, 0), 2)
    cv2.line(frame, (roi[2] + 3, roi[1] - 3), (roi[2] + 100, roi[1] - 3), (0, 200, 0), 2)
    # Write ROI over actual frame
    FONT_STYLE = cv2.FONT_HERSHEY_SIMPLEX
    org = (roi[2] + 3, roi[1] - 3)
    org2 = (roi[2] + 2, roi[1] - 2)
    FONT_SIZE = 0.5
    FONT_COLOR = (0, 200, 0)
    FONT_COLOR2 = (0, 0, 0)
    cv2.putText(frame, "ROI", org2, FONT_STYLE, FONT_SIZE, FONT_COLOR2)
    cv2.putText(frame, "ROI", org, FONT_STYLE, FONT_SIZE, FONT_COLOR)
    return frame


def display_text_fnc(frame: np.ndarray, display_text: str, index: int):
    """
    Draw text on the analyzed frame

    :param frame: input frame
    :param display_text: text to add on the frame
    :param index: line index for placing the text

    """
    # Configuration for displaying images with text
    FONT_COLOR = (255, 255, 255)
    FONT_COLOR2 = (0, 0, 0)
    FONT_STYLE = cv2.FONT_HERSHEY_DUPLEX
    FONT_SIZE = 0.7
    TEXT_VERTICAL_INTERVAL = 25
    TEXT_LEFT_MARGIN = 15
    # ROI over actual frame
    (processed, roi) = center_crop(frame)
    # Draw a ROI over actual frame
    frame = rec_frame_display(frame, roi)
    # Put text over actual frame
    text_loc = (TEXT_LEFT_MARGIN, TEXT_VERTICAL_INTERVAL * (index + 1))
    text_loc2 = (TEXT_LEFT_MARGIN + 1, TEXT_VERTICAL_INTERVAL * (index + 1) + 1)
    cv2.putText(frame, display_text, text_loc2, FONT_STYLE, FONT_SIZE, FONT_COLOR2)
    cv2.putText(frame, display_text, text_loc, FONT_STYLE, FONT_SIZE, FONT_COLOR)

# AI Functions
def preprocessing(frame: np.ndarray, size: int) -> Tuple[np.ndarray, List]:
    """
    Preparing frame before Encoder.
    The image should be scaled to its shortest dimension at "size"
    and cropped, centered, and squared so that both width and
    height have lengths "size". Frame must be transposed from
    Height-Width-Channels (HWC) to Channels-Height-Width (CHW).

    :param frame: input frame
    :param size: input size to encoder model
    :returns: resized and center-cropped frame, plus the crop ROI
    """
    # Adaptive resize
    preprocessed = adaptive_resize(frame, size)
    # Center_crop
    (preprocessed, roi) = center_crop(preprocessed)
    # Transpose frame HWC -> CHW
    preprocessed = preprocessed.transpose((2, 0, 1))[None,]  # HWC -> CHW, then add batch dimension
    return preprocessed, roi


def encoder(
    preprocessed: np.ndarray,
    compiled_model: CompiledModel
) -> np.ndarray:
    """
    Encoder Inference per frame. This function calls the network previously
    configured for the encoder model (compiled_model), extracts the data
    from the output node, and appends it in an array to be used by the decoder.

    :param preprocessed: preprocessed frame
    :param compiled_model: compiled encoder model
    :returns: frame embedding, appended to the encoder_output array by the caller
    """
    output_key_en = compiled_model.output(0)
    
    # Get results on action-recognition-0001-encoder model
    infer_result_encoder = compiled_model([preprocessed])[output_key_en]
    return infer_result_encoder


def decoder(encoder_output: List, compiled_model_de: CompiledModel) -> Tuple[List, List]:
    """
    Decoder inference per set of frames. This function concatenates the embedding layers
    from the encoder output, transposes the array to match the decoder input size,
    calls the network previously configured for the decoder model (compiled_model_de),
    extracts the logits and normalizes them to get confidence values.
    Decodes top probabilities into corresponding label names.

    :param encoder_output: embedding layers for 16 frames
    :param compiled_model_de: compiled decoder model
    :returns: decoded_labels: the k most probable actions from the labels list
              decoded_top_probs: confidence for the k most probable actions
    """
    # Concatenate sample_duration frames in just one array
    decoder_input = np.concatenate(encoder_output, axis=0)
    # Organize input shape vector to the Decoder (shape: [1, 16, 512])
    decoder_input = decoder_input.transpose((2, 0, 1, 3))
    decoder_input = np.squeeze(decoder_input, axis=3)
    output_key_de = compiled_model_de.output(0)
    # Get results on action-recognition-0001-decoder model
    result_de = compiled_model_de([decoder_input])[output_key_de]
    # Normalize logits to get confidence values along specified axis
    probs = softmax(result_de - np.max(result_de))
    # Decodes top probabilities into corresponding label names
    decoded_labels, decoded_top_probs = decode_output(probs, labels, top_k=3)
    return decoded_labels, decoded_top_probs


def softmax(x: np.ndarray) -> np.ndarray:
    """
    Normalizes logits to get confidence values along specified axis
    x: np.array, axis=None
    """
    exp = np.exp(x)
    return exp / np.sum(exp, axis=None)
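
# Numerical-stability note (illustrative, not part of the original
# notebook): `decoder` above subtracts the maximum logit before calling
# softmax; this leaves the probabilities unchanged but avoids overflow
# in np.exp for large logits.
demo_logits = np.array([1000.0, 1001.0, 1002.0])
demo_probs = softmax(demo_logits - np.max(demo_logits))
assert np.isclose(demo_probs.sum(), 1.0)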

# Main Processing Function
def run_action_recognition(
    source: str = "0",
    flip: bool = True,
    use_popup: bool = False,
    compiled_model_en: CompiledModel = compiled_model_en,
    compiled_model_de: CompiledModel = compiled_model_de,
    skip_first_frames: int = 0,
):
    """
    Use the "source" webcam or video file to run the complete pipeline for action-recognition problem
    1. Create a video player to play with target fps
    2. Prepare a set of frames to be encoded-decoded
    3. Preprocess frame before Encoder
    4. Encoder Inference per frame
    5. Decoder inference per set of frames
    6. Visualize the results

    :param source: webcam "0" or video path
    :param flip: to be used by the VideoPlayer function for flipping the captured image
    :param use_popup: False for showing encoded frames over this notebook, True for creating a popup window.
    :param skip_first_frames: number of frames to skip at the beginning of the video.
    :returns: display video over the notebook or in a popup window

    """
    size = height_en  # Encoder input size - From Cell 5_9
    sample_duration = frames2decode  # Decoder input size - From Cell 5_7
    # Select frames per second of your source
    fps = 30
    player = None
    try:
        # Create a video player
        player = utils.VideoPlayer(source, flip=flip, fps=fps, skip_first_frames=skip_first_frames)
        # Start capturing
        player.start()
        if use_popup:
            title = "Press ESC to Exit"
            cv2.namedWindow(title, cv2.WINDOW_GUI_NORMAL | cv2.WINDOW_AUTOSIZE)

        processing_times = collections.deque()
        processing_time = 0
        encoder_output = []
        decoded_labels = [0, 0, 0]
        decoded_top_probs = [0, 0, 0]
        counter = 0
        # Create a text template to show inference results over video
        text_inference_template = "Infer Time:{Time:.1f}ms,{fps:.1f}FPS"
        text_template = "{label},{conf:.2f}%"

        while True:
            counter = counter + 1

            # read a frame from the video stream
            frame = player.next()
            if frame is None:
                print("Source ended")
                break

            scale = 1280 / max(frame.shape)

            # Adaptive resize for visualization
            if scale < 1:
                frame = cv2.resize(frame, None, fx=scale, fy=scale, interpolation=cv2.INTER_AREA)

            # Select one frame every two for processing through the encoder.
            # After 16 frames are processed, the decoder will find the action,
            # and the label will be printed over the frames.

            if counter % 2 == 0:
                # Preprocess frame before Encoder
                (preprocessed, _) = preprocessing(frame, size)

                # Measure processing time
                start_time = time.time()

                # Encoder Inference per frame
                encoder_output.append(encoder(preprocessed, compiled_model_en))

                # Decoder inference per set of frames
                # Wait for sample duration to work with decoder model
                if len(encoder_output) == sample_duration:
                    decoded_labels, decoded_top_probs = decoder(encoder_output, compiled_model_de)
                    encoder_output = []

                # Inference has finished; display the results
                stop_time = time.time()

                # Calculate processing time
                processing_times.append(stop_time - start_time)

                # Use processing times from last 200 frames
                if len(processing_times) > 200:
                    processing_times.popleft()

                # Mean processing time [ms]
                processing_time = np.mean(processing_times) * 1000
                fps = 1000 / processing_time

            # Visualize the results
            for i in range(0, 3):
                display_text = text_template.format(
                    label=decoded_labels[i],
                    conf=decoded_top_probs[i] * 100,
                )
                display_text_fnc(frame, display_text, i)

            display_text = text_inference_template.format(Time=processing_time, fps=fps)
            display_text_fnc(frame, display_text, 3)

            # Use this workaround if you experience flickering
            if use_popup:
                cv2.imshow(title, frame)
                key = cv2.waitKey(1)
                # escape = 27
                if key == 27:
                    break
            else:
                # Encode numpy array to jpg
                _, encoded_img = cv2.imencode(".jpg", frame, params=[cv2.IMWRITE_JPEG_QUALITY, 90])
                # Create IPython image
                i = display.Image(data=encoded_img)
                # Display the image in this notebook
                display.clear_output(wait=True)
                display.display(i)

    # ctrl-c
    except KeyboardInterrupt:
        print("Interrupted")
    # Any different error
    except RuntimeError as e:
        print(e)
    finally:
        if player is not None:
            # stop capturing
            player.stop()
        if use_popup:
            cv2.destroyAllWindows()

# Run Action Recognition on a Video File
video_file = "https://archive.org/serve/ISSVideoResourceLifeOnStation720p/ISS%20Video%20Resource_LifeOnStation_720p.mp4"
run_action_recognition(source=video_file, flip=False, use_popup=False, skip_first_frames=600)

# Run Action Recognition using your webcam
run_action_recognition(source=0, flip=False, use_popup=False, skip_first_frames=0)
# Imports
from pathlib import Path

import IPython.display as ipd
import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
import scipy
from openvino.runtime import Core

# Settings
model_folder = "model"
download_folder = "output"
data_folder = "data"

precision = "FP16"
model_name = "quartznet-15x5-en"

# Download Model
# Check if model is already downloaded in download directory
path_to_model_weights = Path(f'{download_folder}/public/{model_name}/models')
downloaded_model_file = list(path_to_model_weights.glob('*.pth'))

if not path_to_model_weights.is_dir() or len(downloaded_model_file) == 0:
    download_command = f"omz_downloader --name {model_name} --output_dir {download_folder} --precision {precision}"
    ! $download_command

# Convert Model
# Check if model is already converted in model directory
path_to_converted_weights = Path(f'{model_folder}/public/{model_name}/{precision}/{model_name}.bin')

if not path_to_converted_weights.is_file():
    convert_command = f"omz_converter --name {model_name} --precisions {precision} --download_dir {download_folder} --output_dir {model_folder}"
    ! $convert_command

# Defining constants
audio_file_name = "edge_to_cloud.ogg"
alphabet = " abcdefghijklmnopqrstuvwxyz'~"

# Load Audio File
audio, sampling_rate = librosa.load(path=f'{data_folder}/{audio_file_name}', sr=16000)
ipd.Audio(audio, rate=sampling_rate)

# Visualise Audio File
plt.figure()
librosa.display.waveplot(y=audio, sr=sampling_rate, max_points=50000.0, x_axis='time', offset=0.0, max_sr=1000);
plt.show()
specto_audio = librosa.stft(audio)
specto_audio = librosa.amplitude_to_db(np.abs(specto_audio), ref=np.max)
print(specto_audio.shape)
librosa.display.specshow(specto_audio, sr=sampling_rate, x_axis='time', y_axis='hz');

# Change Type of Data
if max(np.abs(audio)) <= 1:
    audio = (audio * (2**15 - 1))
audio = audio.astype(np.int16)

# Convert Audio to Mel Spectrum
def audio_to_mel(audio, sampling_rate):
    assert sampling_rate == 16000, "Only 16 KHz audio supported"
    preemph = 0.97
    preemphased = np.concatenate([audio[:1], audio[1:] - preemph * audio[:-1].astype(np.float32)])

    # Calculate window length
    win_length = round(sampling_rate * 0.02)

    # Based on previously calculated window length run short-time Fourier transform
    spec = np.abs(librosa.core.spectrum.stft(preemphased, n_fft=512, hop_length=round(sampling_rate * 0.01),
                  win_length=win_length, center=True, window=scipy.signal.windows.hann(win_length), pad_mode='reflect'))

    # Create mel filter-bank, produce transformation matrix to project current values onto Mel-frequency bins
    mel_basis = librosa.filters.mel(sampling_rate, 512, n_mels=64, fmin=0.0, fmax=8000.0, htk=False)
    return mel_basis, spec


def mel_to_input(mel_basis, spec, padding=16):
    # Convert to logarithmic scale
    log_melspectrum = np.log(np.dot(mel_basis, np.power(spec, 2)) + 2 ** -24)

    # Normalize output
    normalized = (log_melspectrum - log_melspectrum.mean(1)[:, None]) / (log_melspectrum.std(1)[:, None] + 1e-5)

    # Calculate padding
    remainder = normalized.shape[1] % padding
    if remainder != 0:
        return np.pad(normalized, ((0, 0), (0, padding - remainder)))[None]
    return normalized[None]
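
# Padding sanity check (synthetic spectrogram, for illustration only;
# not part of the original notebook): the time axis is padded up to the
# next multiple of `padding` frames and a batch dimension is added.
demo_mel_basis = np.random.rand(64, 257)
demo_spec = np.random.rand(257, 30)
assert mel_to_input(demo_mel_basis, demo_spec).shape == (1, 64, 32)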

# Run Conversion from Audio to Mel Format
mel_basis, spec = audio_to_mel(audio=audio.flatten(), sampling_rate=sampling_rate)

# Visualise Mel Spectrogram
librosa.display.specshow(data=spec, sr=sampling_rate, x_axis='time', y_axis='log');
plt.show();
librosa.display.specshow(data=mel_basis, sr=sampling_rate, x_axis='linear');
plt.ylabel('Mel filter');

# Adjust Mel scale to Input
audio = mel_to_input(mel_basis=mel_basis, spec=spec)

# Load Model
ie = Core()

model = ie.read_model(
    model=f"{model_folder}/public/{model_name}/{precision}/{model_name}.xml"
)
model_input_layer = model.input(0)
shape = model_input_layer.partial_shape
shape[2] = -1
model.reshape({model_input_layer: shape})
compiled_model = ie.compile_model(model=model, device_name="CPU")

# Do Inference
output_layer_ir = compiled_model.output(0)

character_probabilities = compiled_model([audio])[output_layer_ir]

# Read Output
# Remove unnecessary dimension
character_probabilities = np.squeeze(character_probabilities)

# Run argmax to pick the most probable symbols
character_probabilities = np.argmax(character_probabilities, axis=1)

# Implementation of Decoding
def ctc_greedy_decode(predictions):
    previous_letter_id = blank_id = len(alphabet) - 1
    transcription = list()
    for letter_index in predictions:
        if previous_letter_id != letter_index != blank_id:
            transcription.append(alphabet[letter_index])
        previous_letter_id = letter_index
    return ''.join(transcription)
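
# Illustrative example (not part of the original notebook): with the
# alphabet above, the blank symbol '~' has index 28. Repeated indices
# are collapsed and blanks are dropped, so the sequence below decodes
# to "hello".
assert ctc_greedy_decode([8, 8, 28, 5, 12, 28, 12, 15]) == "hello"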

# Run Decoding and Print Output
transcription = ctc_greedy_decode(character_probabilities)
print(transcription)
# Imports
import operator
import time
from urllib import parse

import numpy as np
from openvino.runtime import Core

import html_reader as reader
import tokens_bert as tokens

# Download the model
# directory where model will be downloaded
base_model_dir = "model"

# desired precision
precision = "FP16-INT8"

# model name as named in Open Model Zoo
model_name = "bert-small-uncased-whole-word-masking-squad-int8-0002"

model_path = f"model/intel/{model_name}/{precision}/{model_name}.xml"
model_weights_path = f"model/intel/{model_name}/{precision}/{model_name}.bin"

download_command = f"omz_downloader " \
                   f"--name {model_name} " \
                   f"--precision {precision} " \
                   f"--output_dir {base_model_dir} " \
                   f"--cache_dir {base_model_dir}"
! $download_command

# Load the model
# initialize inference engine
core = Core()
# read the network and corresponding weights from file
model = core.read_model(model=model_path, weights=model_weights_path)
# load the model on the CPU (you can use GPU as well)
compiled_model = core.compile_model(model=model, device_name="CPU")

# get input and output names of nodes
input_keys = list(compiled_model.inputs)
output_keys = list(compiled_model.outputs)

# get network input size
input_size = compiled_model.input(0).shape[1]

# Processing
# path to vocabulary file
vocab_file_path = "data/vocab.txt"

# create dictionary with words and their indices
vocab = tokens.load_vocab_file(vocab_file_path)

# define special tokens
cls_token = vocab["[CLS]"]
pad_token = vocab["[PAD]"]
sep_token = vocab["[SEP]"]


# function to load text from given urls
def load_context(sources):
    input_urls = []
    paragraphs = []
    for source in sources:
        result = parse.urlparse(source)
        if all([result.scheme, result.netloc]):
            input_urls.append(source)
        else:
            paragraphs.append(source)

    paragraphs.extend(reader.get_paragraphs(input_urls))
    # produce one big context string
    return "\n".join(paragraphs)

# Preprocessing
# generator of a sequence of inputs
def prepare_input(question_tokens, context_tokens):
    # length of question in tokens
    question_len = len(question_tokens)
    # context part size
    context_len = input_size - question_len - 3

    if context_len < 16:
        raise RuntimeError("Question is too long in comparison to input size. No space for context")

    # take overlapping parts of the context (50% overlap)
    for start in range(0, max(1, len(context_tokens) - context_len), context_len // 2):
        # part of context
        part_context_tokens = context_tokens[start:start + context_len]
        # input: question and context separated by special tokens
        input_ids = [cls_token] + question_tokens + [sep_token] + part_context_tokens + [sep_token]
        # attention mask: 1 for real tokens, 0 for padding
        attention_mask = [1] * len(input_ids)
        # 0 for question tokens, 1 for context part
        token_type_ids = [0] * (question_len + 2) + [1] * (len(part_context_tokens) + 1)

        # add padding at the end
        (input_ids, attention_mask, token_type_ids), pad_number = pad(input_ids=input_ids,
                                                                      attention_mask=attention_mask,
                                                                      token_type_ids=token_type_ids)

        # create input to feed the model
        input_dict = {
            "input_ids": np.array([input_ids], dtype=np.int32),
            "attention_mask": np.array([attention_mask], dtype=np.int32),
            "token_type_ids": np.array([token_type_ids], dtype=np.int32),
        }

        # some models require additional position_ids
        if "position_ids" in [i_key.any_name for i_key in input_keys]:
            position_ids = np.arange(len(input_ids))
            input_dict["position_ids"] = np.array([position_ids], dtype=np.int32)

        yield input_dict, pad_number, start
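
# Worked example of the windowing arithmetic above (toy numbers, for
# illustration only): with input_size = 384 and a 10-token question,
# each window holds 384 - 10 - 3 = 371 context tokens (the [CLS] and
# two [SEP] tokens take 3 slots), and windows start every
# 371 // 2 = 185 tokens, giving roughly 50% overlap.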


# function to add padding
def pad(input_ids, attention_mask, token_type_ids):
    # how many padding tokens
    diff_input_size = input_size - len(input_ids)

    if diff_input_size > 0:
        # add padding to all inputs
        input_ids = input_ids + [pad_token] * diff_input_size
        attention_mask = attention_mask + [0] * diff_input_size
        token_type_ids = token_type_ids + [0] * diff_input_size

    return (input_ids, attention_mask, token_type_ids), diff_input_size

# Postprocessing
# based on https://github.com/openvinotoolkit/open_model_zoo/blob/bf03f505a650bafe8da03d2747a8b55c5cb2ef16/demos/common/python/openvino/model_zoo/model_api/models/bert.py#L163
def postprocess(output_start, output_end, question_tokens, context_tokens_start_end, padding, start_idx):

    def get_score(logits):
        out = np.exp(logits)
        return out / out.sum(axis=-1)

    # get start-end scores for context
    score_start = get_score(output_start)
    score_end = get_score(output_end)

    # index of first context token in tensor
    context_start_idx = len(question_tokens) + 2
    # index of last+1 context token in tensor
    context_end_idx = input_size - padding - 1

    # find product of all start-end combinations to find the best one
    max_score, max_start, max_end = find_best_answer_window(start_score=score_start,
                                                            end_score=score_end,
                                                            context_start_idx=context_start_idx,
                                                            context_end_idx=context_end_idx)

    # convert to context text start-end index
    max_start = context_tokens_start_end[max_start + start_idx][0]
    max_end = context_tokens_start_end[max_end + start_idx][1]

    return max_score, max_start, max_end


# based on https://github.com/openvinotoolkit/open_model_zoo/blob/bf03f505a650bafe8da03d2747a8b55c5cb2ef16/demos/common/python/openvino/model_zoo/model_api/models/bert.py#L188
def find_best_answer_window(start_score, end_score, context_start_idx, context_end_idx):
    context_len = context_end_idx - context_start_idx
    score_mat = np.matmul(
        start_score[context_start_idx:context_end_idx].reshape((context_len, 1)),
        end_score[context_start_idx:context_end_idx].reshape((1, context_len)),
    )
    # reset candidates with end before start
    score_mat = np.triu(score_mat)
    # reset long candidates (spans longer than 16 tokens)
    score_mat = np.tril(score_mat, 16)
    # find the best start-end pair
    max_s, max_e = divmod(score_mat.flatten().argmax(), score_mat.shape[1])
    max_score = score_mat[max_s, max_e]

    return max_score, max_s, max_e
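
# Worked example (made-up scores, for illustration only; not from the
# notebook): with three context tokens at positions 1..3, the outer
# product of start and end scores is masked so that only pairs with
# end >= start survive; the best pair here is (1, 2) with score
# 0.7 * 0.6 = 0.42.
demo_start = np.array([0.0, 0.2, 0.7, 0.5, 0.0])
demo_end = np.array([0.0, 0.2, 0.3, 0.6, 0.0])
demo_score, demo_s, demo_e = find_best_answer_window(demo_start, demo_end, 1, 4)
assert (demo_s, demo_e) == (1, 2) and np.isclose(demo_score, 0.42)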

def get_best_answer(question, context):
    # convert context string to tokens
    context_tokens, context_tokens_start_end = tokens.text_to_tokens(text=context.lower(),
                                                                     vocab=vocab)
    # convert question string to tokens
    question_tokens, _ = tokens.text_to_tokens(text=question.lower(), vocab=vocab)

    results = []
    # iterate through different parts of context
    for network_input, padding, start_idx in prepare_input(question_tokens=question_tokens,
                                                           context_tokens=context_tokens):
        # get output layers
        output_start_key = compiled_model.output("output_s")
        output_end_key = compiled_model.output("output_e")

        # openvino inference
        result = compiled_model(network_input)
        # postprocess the result getting the score and context range for the answer
        score_start_end = postprocess(output_start=result[output_start_key][0],
                                      output_end=result[output_end_key][0],
                                      question_tokens=question_tokens,
                                      context_tokens_start_end=context_tokens_start_end,
                                      padding=padding,
                                      start_idx=start_idx)
        results.append(score_start_end)

    # find the highest score
    answer = max(results, key=operator.itemgetter(0))
    # return the part of the context that contains the answer
    return context[answer[1]:answer[2]], answer[0]

# Main Processing Function
def run_question_answering(sources):
    print(f"Context: {sources}", flush=True)
    context = load_context(sources)

    if len(context) == 0:
        print("Error: Empty context or outside paragraphs")
        return

    while True:
        question = input()
        # if no question - break
        if question == "":
            break

        # measure processing time
        start_time = time.perf_counter()
        answer, score = get_best_answer(question=question, context=context)
        end_time = time.perf_counter()

        print(f"Question: {question}")
        print(f"Answer: {answer}")
        print(f"Score: {score:.2f}")
        print(f"Time: {end_time - start_time:.2f}s")

# Run on local paragraphs
sources = ["Computational complexity theory is a branch of the theory of computation in theoretical computer "
           "science that focuses on classifying computational problems according to their inherent difficulty, "
           "and relating those classes to each other. A computational problem is understood to be a task that "
           "is in principle amenable to being solved by a computer, which is equivalent to stating that the "
           "problem may be solved by mechanical application of mathematical steps, such as an algorithm."]

run_question_answering(sources)

# Run on websites
sources = ["https://en.wikipedia.org/wiki/OpenVINO"]

run_question_answering(sources)
# Imports
import shutil
import sys
from pathlib import Path

import cv2
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import Markdown, display
from PIL import Image
from openvino.runtime import Core
from yaspin import yaspin

sys.path.append("../utils")
from notebook_utils import load_image

# Settings
ie = Core()

model_dir = Path("model")
precision = "FP16"
detection_model = "horizontal-text-detection-0001"
recognition_model = "text-recognition-resnet-fc"
base_model_dir = Path("~/open_model_zoo_models").expanduser()
omz_cache_dir = Path("~/open_model_zoo_cache").expanduser()

model_dir.mkdir(exist_ok=True)

# Download Models
download_command = f"omz_downloader --name {detection_model},{recognition_model} --output_dir {base_model_dir} --cache_dir {omz_cache_dir} --precision {precision}"
display(Markdown(f"Download command: `{download_command}`"))
with yaspin(text=f"Downloading {detection_model}, {recognition_model}") as sp:
    download_result = !$download_command
    print(download_result)
    sp.text = f"Finished downloading {detection_model}, {recognition_model}"
    sp.ok("✔")

# Convert Models
convert_command = f"omz_converter --name {recognition_model} --precisions {precision} --download_dir {base_model_dir} --output_dir {base_model_dir}"
display(Markdown(f"Convert command: `{convert_command}`"))
display(Markdown(f"Converting {recognition_model}..."))
! $convert_command

# Copy Models
models_info_output = %sx omz_info_dumper --name $detection_model,$recognition_model
print(f'omz_info_dumper --name {detection_model},{recognition_model}')
detection_model_info, recognition_model_info = [
    {
        "name": "horizontal-text-detection-0001",
        "composite_model_name": None,
        "description": "Horizontal text detector based on FCOS with light MobileNetV2 backbone",
        "framework": "dldt",
        "license_url": "https://raw.githubusercontent.com/openvinotoolkit/open_model_zoo/master/LICENSE",
        "precisions": [
            "FP16",
            "FP16-INT8",
            "FP32"
        ],
        "quantization_output_precisions": [],
        "subdirectory": "intel/horizontal-text-detection-0001",
        "task_type": "detection"
    },
    {
        "name": "text-recognition-resnet-fc",
        "composite_model_name": None,
        "description": "\"text-recognition-resnet-fc\" is a simple and preformant scene text recognition model based on ResNet with Fully Connected text recognition head. Source implementation on a PyTorch* framework could be found here <https://github.com/Media-Smart/vedastr>. Model is able to recognize alphanumeric text.",
        "framework": "pytorch",
        "license_url": "https://raw.githubusercontent.com/Media-Smart/vedastr/0fd2a0bd7819ae4daa2a161501e9f1c2ac67e96a/LICENSE",
        "precisions": [
            "FP16",
            "FP32"
        ],
        "quantization_output_precisions": [],
        "subdirectory": "public/text-recognition-resnet-fc",
        "task_type": "optical_character_recognition"
    }
]

for model_info in (detection_model_info, recognition_model_info):
    omz_dir = Path(model_info["subdirectory"])
    omz_model_dir = base_model_dir / omz_dir / precision
    print(omz_model_dir) 
    for model_file in omz_model_dir.iterdir():
        try:
            shutil.copyfile(model_file, model_dir / model_file.name)
        except FileExistsError:
            pass

detection_model_path = (model_dir / detection_model).with_suffix(".xml")
recognition_model_path = (model_dir / recognition_model).with_suffix(".xml")

# Load Detection Model
detection_model = ie.read_model(
    model=detection_model_path, weights=detection_model_path.with_suffix(".bin")
)
detection_compiled_model = ie.compile_model(model=detection_model, device_name="CPU")

detection_input_layer = detection_compiled_model.input(0)

# Load an Image
# image_file can point to a URL or local image
image_file = "https://github.com/openvinotoolkit/openvino_notebooks/raw/main/notebooks/004-hello-detection/data/intel_rnb.jpg"

image = load_image(image_file)

# N,C,H,W = batch size, number of channels, height, width
N, C, H, W = detection_input_layer.shape

# Resize image to meet network expected input sizes
resized_image = cv2.resize(image, (W, H))

# Reshape to network input shape
input_image = np.expand_dims(resized_image.transpose(2, 0, 1), 0)

plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB));

# Do Inference
output_key = detection_compiled_model.output("boxes")
boxes = detection_compiled_model([input_image])[output_key]

# Remove zero-only boxes
boxes = boxes[~np.all(boxes == 0, axis=1)]

# Get Detection Results
def multiply_by_ratio(ratio_x, ratio_y, box):
    return [
        max(shape * ratio_y, 10) if idx % 2 else shape * ratio_x
        for idx, shape in enumerate(box[:-1])
    ]
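
# Quick check (made-up box, for illustration only): even positions (x)
# are scaled by ratio_x, odd positions (y) by ratio_y with a 10-pixel
# floor, and the confidence value in the last slot is dropped.
assert multiply_by_ratio(2.0, 3.0, [10, 10, 20, 20, 0.9]) == [20.0, 30.0, 40.0, 60.0]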


def run_preprocessing_on_crop(crop, net_shape):
    temp_img = cv2.resize(crop, net_shape)
    temp_img = temp_img.reshape((1,) * 2 + temp_img.shape)
    return temp_img
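
# Shape sanity check (synthetic grayscale crop, for illustration only;
# not part of the original notebook): the reshape above prepends batch
# and channel axes, producing the NCHW layout the recognizer expects.
demo_crop = np.zeros((40, 120), dtype=np.uint8)
assert run_preprocessing_on_crop(demo_crop, (120, 32)).shape == (1, 1, 32, 120)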


def convert_result_to_image(bgr_image, resized_image, boxes, threshold=0.3, conf_labels=True):
    # Define colors for boxes and descriptions
    colors = {"red": (255, 0, 0), "green": (0, 255, 0), "white": (255, 255, 255)}

    # Fetch image shapes to calculate ratio
    (real_y, real_x), (resized_y, resized_x) = image.shape[:2], resized_image.shape[:2]
    ratio_x, ratio_y = real_x / resized_x, real_y / resized_y

    # Convert base image from bgr to rgb format
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)

    # Iterate through non-zero boxes
    for box, annotation in boxes:
        # Pick confidence factor from last place in array
        conf = box[-1]
        if conf > threshold:
            # Convert float to int and multiply position of each box by x and y ratio
            (x_min, y_min, x_max, y_max) = map(int, multiply_by_ratio(ratio_x, ratio_y, box))

            # Draw box based on position, parameters in rectangle function are: image, start_point, end_point, color, thickness
            cv2.rectangle(rgb_image, (x_min, y_min), (x_max, y_max), colors["green"], 3)

            # Add text to image based on position and confidence, parameters in putText function are: image, text, bottomleft_corner_textfield, font, font_scale, color, thickness, line_type
            if conf_labels:
                # Create background box based on annotation length
                (text_w, text_h), _ = cv2.getTextSize(
                    f"{annotation}", cv2.FONT_HERSHEY_TRIPLEX, 0.8, 1
                )
                image_copy = rgb_image.copy()
                cv2.rectangle(
                    image_copy,
                    (x_min, y_min - text_h - 10),
                    (x_min + text_w, y_min - 10),
                    colors["white"],
                    -1,
                )
                # Add weighted image copy with white boxes under text
                cv2.addWeighted(image_copy, 0.4, rgb_image, 0.6, 0, rgb_image)
                cv2.putText(
                    rgb_image,
                    f"{annotation}",
                    (x_min, y_min - 10),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.8,
                    colors["red"],
                    1,
                    cv2.LINE_AA,
                )

    return rgb_image

# Load Text Recognition Model
recognition_model = ie.read_model(
    model=recognition_model_path, weights=recognition_model_path.with_suffix(".bin")
)

recognition_compiled_model = ie.compile_model(model=recognition_model, device_name="CPU")

recognition_output_layer = recognition_compiled_model.output(0)
recognition_input_layer = recognition_compiled_model.input(0)

# Get height and width of input layer
_, _, H, W = recognition_input_layer.shape

# Do Inference
# Calculate scale for image resizing
(real_y, real_x), (resized_y, resized_x) = image.shape[:2], resized_image.shape[:2]
ratio_x, ratio_y = real_x / resized_x, real_y / resized_y

# Convert image to grayscale for text recognition model
grayscale_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Get dictionary to encode output, based on model documentation
letters = "~0123456789abcdefghijklmnopqrstuvwxyz"

# Prepare empty list for annotations
annotations = list()
cropped_images = list()
# fig, ax = plt.subplots(len(boxes), 1, figsize=(5,15), sharex=True, sharey=True)
# For each crop, based on the boxes given by the detection model, get annotations
for i, crop in enumerate(boxes):
    # Get coordinates on corners of crop
    (x_min, y_min, x_max, y_max) = map(int, multiply_by_ratio(ratio_x, ratio_y, crop))
    image_crop = run_preprocessing_on_crop(grayscale_image[y_min:y_max, x_min:x_max], (W, H))

    # Run inference with recognition model
    result = recognition_compiled_model([image_crop])[recognition_output_layer]

    # Squeeze output to remove unnecessary dimension
    recognition_results_test = np.squeeze(result)

    # Read annotation based on probabilities from output layer
    annotation = list()
    for letter in recognition_results_test:
        parsed_letter = letters[letter.argmax()]

        # An argmax result of index 0 signals the end of the string
        if parsed_letter == letters[0]:
            break
        annotation.append(parsed_letter)
    annotations.append("".join(annotation))
    cropped_image = Image.fromarray(image[y_min:y_max, x_min:x_max])
    cropped_images.append(cropped_image)

boxes_with_annotations = list(zip(boxes, annotations))

# Show Detected Text Boxes and OCR Results for the Image
plt.figure(figsize=(12, 12))
plt.imshow(convert_result_to_image(image, resized_image, boxes_with_annotations, conf_labels=True));

# Show the OCR Result per Bounding Box
for cropped_image, annotation in zip(cropped_images, annotations):
    display(cropped_image, Markdown("".join(annotation)))

# Print Annotations in Plain Text Format
[
    annotation
    for _, annotation in sorted(zip(boxes, annotations), key=lambda x: x[0][0] ** 2 + x[0][1] ** 2)
]
# Imports
from collections import namedtuple
from itertools import groupby
from pathlib import Path

import cv2
import matplotlib.pyplot as plt
import numpy as np
from openvino.runtime import Core

# Settings
# Directories where data will be placed
model_folder = "model"
data_folder = "data"
charlist_folder = f"{data_folder}/charlists"

# Precision used by model
precision = "FP16"

Language = namedtuple(
    typename="Language", field_names=["model_name", "charlist_name", "demo_image_name"]
)
chinese_files = Language(
    model_name="handwritten-simplified-chinese-recognition-0001",
    charlist_name="chinese_charlist.txt",
    demo_image_name="handwritten_chinese_test.jpg",
)
japanese_files = Language(
    model_name="handwritten-japanese-recognition-0001",
    charlist_name="japanese_charlist.txt",
    demo_image_name="handwritten_japanese_test.png",
)

# Select Language
# Select language by using either language='chinese' or language='japanese'
language = "chinese"

languages = {"chinese": chinese_files, "japanese": japanese_files}

selected_language = languages.get(language)

# Download Model
path_to_model_weights = Path(f'{model_folder}/intel/{selected_language.model_name}/{precision}/{selected_language.model_name}.bin')
if not path_to_model_weights.is_file():
    download_command = f'omz_downloader --name {selected_language.model_name} --output_dir {model_folder} --precision {precision}'
    print(download_command)
    ! $download_command

# Load Network and Execute
ie = Core()
path_to_model = path_to_model_weights.with_suffix(".xml")
model = ie.read_model(model=path_to_model)

# Select Device Name
# To check available device names run the line below
# print(ie.available_devices)

compiled_model = ie.compile_model(model=model, device_name="CPU")

# Fetch Information About Input and Output Layers
recognition_output_layer = compiled_model.output(0)
recognition_input_layer = compiled_model.input(0)

# Load an Image
# Read file name of demo file based on the selected model

file_name = selected_language.demo_image_name

# Text detection models expect an image in grayscale format
# IMPORTANT! This model reads only one line of text at a time

# Read image
image = cv2.imread(filename=f"{data_folder}/{file_name}", flags=cv2.IMREAD_GRAYSCALE)

# Fetch shape
image_height, _ = image.shape

# B,C,H,W = batch size, number of channels, height, width
_, _, H, W = recognition_input_layer.shape

# Calculate scale ratio between input shape height and image height to resize image
scale_ratio = H / image_height

# Resize image to expected input sizes
resized_image = cv2.resize(
    image, None, fx=scale_ratio, fy=scale_ratio, interpolation=cv2.INTER_AREA
)

# Pad image to match input size, without changing aspect ratio
resized_image = np.pad(
    resized_image, ((0, 0), (0, W - resized_image.shape[1])), mode="edge"
)

# Reshape to the network input shape
input_image = resized_image[None, None, :, :]

# Visualise Input Image
plt.figure(figsize=(20, 1))
plt.axis("off")
plt.imshow(resized_image, cmap="gray", vmin=0, vmax=255);

# Prepare Charlist
# Get dictionary to encode output, based on model documentation
used_charlist = selected_language.charlist_name

# With both models, a blank symbol must be added at index 0 of each charlist
blank_char = "~"

with open(f"{charlist_folder}/{used_charlist}", "r", encoding="utf-8") as charlist:
    letters = blank_char + "".join(line.strip() for line in charlist)

# Run Inference
# Run inference on the model
predictions = compiled_model([input_image])[recognition_output_layer]

# Process Output Data
# Remove batch dimension
predictions = np.squeeze(predictions)

# Run argmax to pick the symbols with the highest probability
predictions_indexes = np.argmax(predictions, axis=1)

# Use groupby to remove consecutive duplicate letters, as required by CTC greedy decoding
output_text_indexes = list(groupby(predictions_indexes))

# Remove grouper objects
output_text_indexes, _ = np.transpose(output_text_indexes, (1, 0))

# Remove blank symbols
output_text_indexes = output_text_indexes[output_text_indexes != 0]

# Assign letters to indexes from output array
output_text = [letters[letter_index] for letter_index in output_text_indexes]
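
# Tiny illustration of the decoding steps above (synthetic indices, for
# illustration only): groupby collapses repeated indices and the mask
# drops blanks (index 0), so [5, 5, 0, 3, 3] reduces to [5, 3].
demo_indexes, _ = np.transpose(list(groupby(np.array([5, 5, 0, 3, 3]))), (1, 0))
demo_indexes = demo_indexes[demo_indexes != 0]
assert list(demo_indexes) == [5, 3]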

# Print Output
plt.figure(figsize=(20, 1))
plt.axis("off")
plt.imshow(resized_image, cmap="gray", vmin=0, vmax=255)

print("".join(output_text))
# Imports
import sys
import os
import cv2
import numpy as np
import paddle
import math
import time
import collections
from PIL import Image
from pathlib import Path
import tarfile
import urllib.request

from openvino.runtime import Core
from IPython import display
import copy

sys.path.append("../utils")
import notebook_utils as utils
import pre_post_processing as processing

# Models for PaddleOCR
# Define the function to download text detection and recognition models from PaddleOCR resources

def run_model_download(model_url, model_file_path):
    """
    Download pre-trained models from PaddleOCR resources

    Parameters:
        model_url: url link to pre-trained models
        model_file_path: file path to store the downloaded model
    """
    model_name = model_url.split("/")[-1]
    
    if model_file_path.is_file(): 
        print("Model already exists")
    else:
        # Download the model from the server, and untar it.
        print("Downloading the pre-trained model... May take a while...")

        # create a directory
        os.makedirs("model", exist_ok=True)
        urllib.request.urlretrieve(model_url, f"model/{model_name}")
        print("Model Downloaded")

        # tarfile.extractall returns None and raises on failure, so
        # reaching the print statement means the archive was unpacked
        with tarfile.open(f"model/{model_name}") as file:
            file.extractall("model")
        print(f"Model extracted to {model_file_path}.")

# Download the Model for Text Detection
# Directory where model will be downloaded

det_model_url = "https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar"
det_model_file_path = Path("model/ch_ppocr_mobile_v2.0_det_infer/inference.pdmodel")

run_model_download(det_model_url, det_model_file_path)

# Load the Model for Text Detection
# initialize inference engine for text detection
core = Core()
det_model = core.read_model(model=det_model_file_path)
det_compiled_model = core.compile_model(model=det_model, device_name="CPU")

# get input and output nodes for text detection
det_input_layer = det_compiled_model.input(0)
det_output_layer = det_compiled_model.output(0)

# Download the Model for Text Recognition
rec_model_url = "https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar"
rec_model_file_path = Path("model/ch_ppocr_mobile_v2.0_rec_infer/inference.pdmodel")

run_model_download(rec_model_url, rec_model_file_path)

# Load the Model for Text Recognition with Dynamic Shape
# read the model and corresponding weights from file
rec_model = core.read_model(model=rec_model_file_path)

# assign dynamic shapes to every input layer on the last dimension
for input_layer in rec_model.inputs:
    input_shape = input_layer.partial_shape
    input_shape[3] = -1
    rec_model.reshape({input_layer: input_shape})

rec_compiled_model = core.compile_model(model=rec_model, device_name="CPU")

# get input and output nodes
rec_input_layer = rec_compiled_model.input(0)
rec_output_layer = rec_compiled_model.output(0)

# Preprocessing image functions for text detection and recognition
# Preprocess for text detection
def image_preprocess(input_image, size):
    """
    Preprocess input image for text detection

    Parameters:
        input_image: input image 
        size: value for the image to be resized for text detection model
    """
    img = cv2.resize(input_image, (size, size))
    img = np.transpose(img, [2, 0, 1]) / 255
    img = np.expand_dims(img, 0)
    # NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}
    img_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    img_std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
    img -= img_mean
    img /= img_std
    return img.astype(np.float32)
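
# Shape sanity check (synthetic frame, for illustration only; not part
# of the original notebook): the text detector expects a normalized
# NCHW float32 tensor.
demo_frame = np.zeros((720, 1280, 3), dtype=np.uint8)
assert image_preprocess(demo_frame, 640).shape == (1, 3, 640, 640)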

# Preprocess for text recognition
def resize_norm_img(img, max_wh_ratio):
    """
    Resize input image for text recognition

    Parameters:
        img: bounding box image from text detection 
        max_wh_ratio: value for the resizing for text recognition model
    """
    rec_image_shape = [3, 32, 320]
    imgC, imgH, imgW = rec_image_shape
    assert imgC == img.shape[2]
    character_type = "ch"
    if character_type == "ch":
        imgW = int((32 * max_wh_ratio))
    h, w = img.shape[:2]
    ratio = w / float(h)
    if math.ceil(imgH * ratio) > imgW:
        resized_w = imgW
    else:
        resized_w = int(math.ceil(imgH * ratio))
    resized_image = cv2.resize(img, (resized_w, imgH))
    resized_image = resized_image.astype('float32')
    resized_image = resized_image.transpose((2, 0, 1)) / 255
    resized_image -= 0.5
    resized_image /= 0.5
    padding_im = np.zeros((imgC, imgH, imgW), dtype=np.float32)
    padding_im[:, :, 0:resized_w] = resized_image
    return padding_im
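
# Shape sanity check (synthetic crop, for illustration only): the crop
# is resized to height 32 and right-padded up to the width implied by
# the batch-wide maximum width-to-height ratio.
demo_crop = np.zeros((64, 128, 3), dtype=np.float32)
assert resize_norm_img(demo_crop, max_wh_ratio=4.0).shape == (3, 32, 128)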


def prep_for_rec(dt_boxes, frame):
    """
    Preprocessing of the detected bounding boxes for text recognition

    Parameters:
        dt_boxes: detected bounding boxes from text detection 
        frame: original input frame 
    """
    ori_im = frame.copy()
    img_crop_list = [] 
    for bno in range(len(dt_boxes)):
        tmp_box = copy.deepcopy(dt_boxes[bno])
        img_crop = processing.get_rotate_crop_image(ori_im, tmp_box)
        img_crop_list.append(img_crop)
        
    img_num = len(img_crop_list)
    # Calculate the aspect ratio of all text bars
    width_list = []
    for img in img_crop_list:
        width_list.append(img.shape[1] / float(img.shape[0]))
    
    # Sorting can speed up the recognition process
    indices = np.argsort(np.array(width_list))
    return img_crop_list, img_num, indices


def batch_text_box(img_crop_list, img_num, indices, beg_img_no, batch_num):
    """
    Batch for text recognition

    Parameters:
        img_crop_list: processed detected bounding box images 
        img_num: number of bounding boxes from text detection
        indices: sorting for bounding boxes to speed up text recognition
        beg_img_no: the beginning number of bounding boxes for each batch of text recognition inference
        batch_num: number of images for each batch
    """
    norm_img_batch = []
    max_wh_ratio = 0
    end_img_no = min(img_num, beg_img_no + batch_num)
    for ino in range(beg_img_no, end_img_no):
        h, w = img_crop_list[indices[ino]].shape[0:2]
        wh_ratio = w * 1.0 / h
        max_wh_ratio = max(max_wh_ratio, wh_ratio)
    for ino in range(beg_img_no, end_img_no):
        norm_img = resize_norm_img(img_crop_list[indices[ino]], max_wh_ratio)
        norm_img = norm_img[np.newaxis, :]
        norm_img_batch.append(norm_img)

    norm_img_batch = np.concatenate(norm_img_batch)
    norm_img_batch = norm_img_batch.copy()
    return norm_img_batch

# Postprocessing image for text detection
def post_processing_detection(frame, det_results):
    """
    Postprocess the results from text detection into bounding boxes

    Parameters:
        frame: input image 
        det_results: inference results from text detection model
    """   
    ori_im = frame.copy()
    data = {'image': frame}
    data_resize = processing.DetResizeForTest(data)
    data_list = []
    keep_keys = ['image', 'shape']
    for key in keep_keys:
        data_list.append(data_resize[key])
    img, shape_list = data_list

    shape_list = np.expand_dims(shape_list, axis=0) 
    pred = det_results[0]    
    if isinstance(pred, paddle.Tensor):
        pred = pred.numpy()
    segmentation = pred > 0.3

    boxes_batch = []
    for batch_index in range(pred.shape[0]):
        src_h, src_w, ratio_h, ratio_w = shape_list[batch_index]
        mask = segmentation[batch_index]
        boxes, scores = processing.boxes_from_bitmap(pred[batch_index], mask, src_w, src_h)
        boxes_batch.append({'points': boxes})
    post_result = boxes_batch
    dt_boxes = post_result[0]['points']
    dt_boxes = processing.filter_tag_det_res(dt_boxes, ori_im.shape)    
    return dt_boxes

# Main processing function for PaddleOCR
def run_paddle_ocr(source=0, flip=False, use_popup=False, skip_first_frames=0):
    """
    Main function to run the paddleOCR inference:
    1. Create a video player to play with target fps (utils.VideoPlayer).
    2. Prepare a set of frames for text detection and recognition.
    3. Run AI inference for both text detection and recognition.
    4. Visualize the results.

    Parameters:
        source: the webcam number to feed the video stream, with the primary webcam set to 0, or the path to a video file.
        flip: whether VideoPlayer should flip the captured image (required by some webcams).
        use_popup: False to show encoded frames inside this notebook, True to create a popup window.
        skip_first_frames: number of frames to skip at the beginning of the video.
    """
    # create video player to play with target fps
    player = None
    try:
        player = utils.VideoPlayer(source=source, flip=flip, fps=30, skip_first_frames=skip_first_frames)
        # Start video capturing
        player.start()
        if use_popup:
            title = "Press ESC to Exit"
            cv2.namedWindow(winname=title, flags=cv2.WINDOW_GUI_NORMAL | cv2.WINDOW_AUTOSIZE)

        processing_times = collections.deque()
        while True:
            # grab the frame
            frame = player.next()
            if frame is None:
                print("Source ended")
                break
            # If the frame is larger than full HD, reduce its size to improve performance
            scale = 1280 / max(frame.shape)
            if scale < 1:
                frame = cv2.resize(src=frame, dsize=None, fx=scale, fy=scale,
                                   interpolation=cv2.INTER_AREA)
            # preprocess image for text detection
            test_image = image_preprocess(frame, 640)
                
            # measure processing time for text detection
            start_time = time.time()
            # perform the inference step
            det_results = det_compiled_model([test_image])[det_output_layer]
            stop_time = time.time()

            # Postprocessing for Paddle Detection
            dt_boxes = post_processing_detection(frame, det_results)

            processing_times.append(stop_time - start_time)
            # use processing times from last 200 frames
            if len(processing_times) > 200:
                processing_times.popleft()
            processing_time_det = np.mean(processing_times) * 1000

            # Preprocess detection results for recognition
            dt_boxes = processing.sorted_boxes(dt_boxes)  
            batch_num = 6
            img_crop_list, img_num, indices = prep_for_rec(dt_boxes, frame)
            
            # Storage for recognition results:
            # txts holds the recognized strings, scores the recognition confidence levels
            rec_res = [['', 0.0]] * img_num
            txts = [] 
            scores = []

            for beg_img_no in range(0, img_num, batch_num):

                # Recognition starts from here
                norm_img_batch = batch_text_box(
                    img_crop_list, img_num, indices, beg_img_no, batch_num)

                # Run inference for text recognition 
                rec_results = rec_compiled_model([norm_img_batch])[rec_output_layer]

                # Postprocessing recognition results
                postprocess_op = processing.build_post_process(processing.postprocess_params)
                rec_result = postprocess_op(rec_results)
                for rno in range(len(rec_result)):
                    rec_res[indices[beg_img_no + rno]] = rec_result[rno]   
                if rec_res:
                    txts = [rec_res[i][0] for i in range(len(rec_res))] 
                    scores = [rec_res[i][1] for i in range(len(rec_res))]
                                   
            image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            boxes = dt_boxes
            # draw text recognition results beside the image
            draw_img = processing.draw_ocr_box_txt(
                image,
                boxes,
                txts,
                scores,
                drop_score=0.5)

            # Visualize PaddleOCR results
            f_height, f_width = draw_img.shape[:2]
            fps = 1000 / processing_time_det
            cv2.putText(img=draw_img, text=f"Inference time: {processing_time_det:.1f}ms ({fps:.1f} FPS)", 
                        org=(20, 40), fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=f_width / 1000,
                        color=(0, 0, 255), thickness=1, lineType=cv2.LINE_AA)
            
            # use this workaround if there is flickering
            if use_popup: 
                draw_img = cv2.cvtColor(draw_img, cv2.COLOR_RGB2BGR)
                cv2.imshow(winname=title, mat=draw_img)
                key = cv2.waitKey(1)
                # escape = 27
                if key == 27:
                    break
            else:
                # encode numpy array to jpg
                draw_img = cv2.cvtColor(draw_img, cv2.COLOR_RGB2BGR)
                _, encoded_img = cv2.imencode(ext=".jpg", img=draw_img,
                                              params=[cv2.IMWRITE_JPEG_QUALITY, 100])
                # create IPython image
                i = display.Image(data=encoded_img)
                # display the image in this notebook
                display.clear_output(wait=True)
                display.display(i)
            
    # ctrl-c
    except KeyboardInterrupt:
        print("Interrupted")
    # any different error
    except RuntimeError as e:
        print(e)
    finally:
        if player is not None:
            # stop capturing
            player.stop()
        if use_popup:
            cv2.destroyAllWindows()

# Run Live PaddleOCR with OpenVINO
run_paddle_ocr(source=0, flip=False, use_popup=False)

# Test OCR results on video file

video_file = "https://raw.githubusercontent.com/yoyowz/classification/master/images/test.mp4"
run_paddle_ocr(source=video_file, flip=False, use_popup=False, skip_first_frames=0)
# Imports and Settings
from pathlib import Path
import logging

import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow.keras import layers
from tensorflow.keras import models

from nncf import NNCFConfig
from nncf.tensorflow.helpers.model_creation import create_compressed_model
from nncf.tensorflow.initialization import register_default_init_args
from nncf.common.utils.logger import set_log_level

set_log_level(logging.ERROR)

MODEL_DIR = Path("model")
OUTPUT_DIR = Path("output")
MODEL_DIR.mkdir(exist_ok=True)
OUTPUT_DIR.mkdir(exist_ok=True)

BASE_MODEL_NAME = "ResNet-18"

fp32_h5_path = Path(MODEL_DIR / (BASE_MODEL_NAME + "_fp32")).with_suffix(".h5")
fp32_sm_path = Path(OUTPUT_DIR / (BASE_MODEL_NAME + "_fp32"))
fp32_ir_path = Path(OUTPUT_DIR / "saved_model").with_suffix(".xml")
int8_pb_path = Path(OUTPUT_DIR / (BASE_MODEL_NAME + "_int8")).with_suffix(".pb")
int8_pb_name = Path(BASE_MODEL_NAME + "_int8").with_suffix(".pb")
int8_ir_path = int8_pb_path.with_suffix(".xml")

BATCH_SIZE = 128
IMG_SIZE = (64, 64)  # Reduced image size for this demo; ImageNet models typically use 224x224
NUM_CLASSES = 10  # For Imagenette dataset

LR = 1e-5

MEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255)  # From Imagenet dataset
STDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255)  # From Imagenet dataset

fp32_pth_url = "https://storage.openvinotoolkit.org/repositories/nncf/openvino_notebook_ckpts/305_resnet18_imagenette_fp32_v1.h5"
_ = tf.keras.utils.get_file(fp32_h5_path.resolve(), fp32_pth_url)
print(f'Absolute path where the model weights are saved:\n {fp32_h5_path.resolve()}')

# Dataset Preprocessing
datasets, datasets_info = tfds.load('imagenette/160px', shuffle_files=True, as_supervised=True, with_info=True,
                                    read_config=tfds.ReadConfig(shuffle_seed=0))
train_dataset, validation_dataset = datasets['train'], datasets['validation']
fig = tfds.show_examples(train_dataset, datasets_info)

def preprocessing(image, label):
    image = tf.image.resize(image, IMG_SIZE)
    image = image - MEAN_RGB
    image = image / STDDEV_RGB
    label = tf.one_hot(label, NUM_CLASSES)
    return image, label


train_dataset = (train_dataset.map(preprocessing, num_parallel_calls=tf.data.experimental.AUTOTUNE)
                              .batch(BATCH_SIZE)
                              .prefetch(tf.data.experimental.AUTOTUNE))

validation_dataset = (validation_dataset.map(preprocessing, num_parallel_calls=tf.data.experimental.AUTOTUNE)
                                        .batch(BATCH_SIZE)
                                        .prefetch(tf.data.experimental.AUTOTUNE))
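
# Optional sanity check for the input pipeline above: pull one batch and
# inspect its shapes (a sketch, not part of the original flow).
images, labels = next(iter(validation_dataset))
print(images.shape)  # (BATCH_SIZE, 64, 64, 3) after resizing and normalization
print(labels.shape)  # (BATCH_SIZE, NUM_CLASSES) one-hot encoded labels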

# Define a Floating-Point Model
def residual_conv_block(filters, stage, block, strides=(1, 1), cut='pre'):
    def layer(input_tensor):
        x = layers.BatchNormalization(epsilon=2e-5)(input_tensor)
        x = layers.Activation('relu')(x)

        # defining shortcut connection
        if cut == 'pre':
            shortcut = input_tensor
        elif cut == 'post':
            shortcut = layers.Conv2D(filters, (1, 1), strides=strides, kernel_initializer='he_uniform', 
                                     use_bias=False)(x)

        # continue with convolution layers
        x = layers.ZeroPadding2D(padding=(1, 1))(x)
        x = layers.Conv2D(filters, (3, 3), strides=strides, kernel_initializer='he_uniform', use_bias=False)(x)

        x = layers.BatchNormalization(epsilon=2e-5)(x)
        x = layers.Activation('relu')(x)
        x = layers.ZeroPadding2D(padding=(1, 1))(x)
        x = layers.Conv2D(filters, (3, 3), kernel_initializer='he_uniform', use_bias=False)(x)

        # add residual connection
        x = layers.Add()([x, shortcut])
        return x

    return layer


def ResNet18(input_shape=None):
    """Instantiates the ResNet18 architecture."""
    img_input = layers.Input(shape=input_shape, name='data')

    # ResNet18 bottom
    x = layers.BatchNormalization(epsilon=2e-5, scale=False)(img_input)
    x = layers.ZeroPadding2D(padding=(3, 3))(x)
    x = layers.Conv2D(64, (7, 7), strides=(2, 2), kernel_initializer='he_uniform', use_bias=False)(x)
    x = layers.BatchNormalization(epsilon=2e-5)(x)
    x = layers.Activation('relu')(x)
    x = layers.ZeroPadding2D(padding=(1, 1))(x)
    x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(x)

    # ResNet18 body
    repetitions = (2, 2, 2, 2)
    for stage, rep in enumerate(repetitions):
        for block in range(rep):
            filters = 64 * (2 ** stage)
            if block == 0 and stage == 0:
                x = residual_conv_block(filters, stage, block, strides=(1, 1), cut='post')(x)
            elif block == 0:
                x = residual_conv_block(filters, stage, block, strides=(2, 2), cut='post')(x)
            else:
                x = residual_conv_block(filters, stage, block, strides=(1, 1), cut='pre')(x)
    x = layers.BatchNormalization(epsilon=2e-5)(x)
    x = layers.Activation('relu')(x)

    # ResNet18 top
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dense(NUM_CLASSES)(x)
    x = layers.Activation('softmax')(x)

    # Create model
    model = models.Model(img_input, x)

    return model

IMG_SHAPE = IMG_SIZE + (3,)
model = ResNet18(input_shape=IMG_SHAPE)

# Pre-train Floating-Point Model
# Load the floating-point weights
model.load_weights(fp32_h5_path)

# Compile the floating-point model
model.compile(loss=tf.keras.losses.CategoricalCrossentropy(label_smoothing=0.1),
              metrics=[tf.keras.metrics.CategoricalAccuracy(name='acc@1')])

# Validate the floating-point model
test_loss, acc_fp32 = model.evaluate(validation_dataset,
                                     callbacks=tf.keras.callbacks.ProgbarLogger(stateful_metrics=['acc@1']))
print(f"\nAccuracy of FP32 model: {acc_fp32:.3f}")

model.save(fp32_sm_path)
print(f'Absolute path where the model is saved:\n {fp32_sm_path.resolve()}')

# Create and Initialize Quantization
nncf_config_dict = {
    "input_info": {"sample_size": [1, 3] + list(IMG_SIZE)},
    "log_dir": str(OUTPUT_DIR),  # log directory for NNCF-specific logging outputs
    "compression": {
        "algorithm": "quantization",  # specify the algorithm here
    },
}
nncf_config = NNCFConfig.from_dict(nncf_config_dict)

nncf_config = register_default_init_args(nncf_config=nncf_config,
                                         data_loader=train_dataset,
                                         batch_size=BATCH_SIZE)

compression_ctrl, model = create_compressed_model(model, nncf_config)

# Compile the int8 model
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=LR),
              loss=tf.keras.losses.CategoricalCrossentropy(label_smoothing=0.1),
              metrics=[tf.keras.metrics.CategoricalAccuracy(name='acc@1')])

# Validate the int8 model
test_loss, test_acc = model.evaluate(validation_dataset,
                                     callbacks=tf.keras.callbacks.ProgbarLogger(stateful_metrics=['acc@1']))
print(f"\nAccuracy of INT8 model after initialization: {test_acc:.3f}")

# Fine-tune the Compressed Model
# Train the int8 model
model.fit(train_dataset,
          epochs=2)

# Validate the int8 model
test_loss, acc_int8 = model.evaluate(validation_dataset,
                                     callbacks=tf.keras.callbacks.ProgbarLogger(stateful_metrics=['acc@1']))
print(f"\nAccuracy of INT8 model after fine-tuning: {acc_int8:.3f}")
print(f"\nAccuracy drop of tuned INT8 model over pre-trained FP32 model: {acc_fp32 - acc_int8:.3f}")

compression_ctrl.export_model(int8_pb_path, 'frozen_graph')
print(f'Absolute path where the int8 model is saved:\n {int8_pb_path.resolve()}')

# Export Frozen Graph Models to OpenVINO Intermediate Representation (IR)
!mo --framework=tf --input_shape=[1,64,64,3] --input=data --saved_model_dir=$fp32_sm_path --output_dir=$OUTPUT_DIR

!mo --framework=tf --input_shape=[1,64,64,3] --input=Placeholder --input_model=$int8_pb_path --output_dir=$OUTPUT_DIR

# Benchmark Model Performance by Computing Inference Time
def parse_benchmark_output(benchmark_output):
    parsed_output = [line for line in benchmark_output if not (line.startswith(r"[") or line.startswith("  ") or line == "")]
    print(*parsed_output, sep='\n')


print('Benchmark FP32 model (IR)')
benchmark_output = ! benchmark_app -m $fp32_ir_path -d CPU -api async -t 15
parse_benchmark_output(benchmark_output)

print('\nBenchmark INT8 model (IR)')
benchmark_output = ! benchmark_app -m $int8_ir_path -d CPU -api async -t 15
parse_benchmark_output(benchmark_output)

# Show CPU Information for reference
from openvino.runtime import Core

ie = Core()
ie.get_property(device_name='CPU', name="FULL_DEVICE_NAME")
# Imports and Settings
# On Windows, add the directory that contains cl.exe to the PATH to enable PyTorch to find the
# required C++ tools. This code assumes that Visual Studio 2019 is installed in the default
# directory. If you have a different C++ compiler, please add the correct path to os.environ["PATH"]
# directly. Note that the C++ Redistributable is not enough to run this notebook.

# Adding the path to os.environ["LIB"] is not always required - it depends on the system's configuration

import sys

if sys.platform == "win32":
    import distutils.command.build_ext
    import distutils.core
    import os
    from pathlib import Path

    VS_INSTALL_DIR = r"C:/Program Files (x86)/Microsoft Visual Studio"
    cl_paths = sorted(list(Path(VS_INSTALL_DIR).glob("**/Hostx86/x64/cl.exe")))
    if len(cl_paths) == 0:
        raise ValueError(
            "Cannot find Visual Studio. This notebook requires a C++ compiler. If you installed "
            "a C++ compiler, please add the directory that contains cl.exe to `os.environ['PATH']`."
        )
    else:
        # If multiple versions of MSVC are installed, get the most recent version
        cl_path = cl_paths[-1]
        vs_dir = str(cl_path.parent)
        os.environ["PATH"] += f"{os.pathsep}{vs_dir}"
        # Code for finding the library dirs from
        # https://stackoverflow.com/questions/47423246/get-pythons-lib-path
        d = distutils.core.Distribution()
        b = distutils.command.build_ext.build_ext(d)
        b.finalize_options()
        os.environ["LIB"] = os.pathsep.join(b.library_dirs)
        print(f"Added {vs_dir} to PATH")

import sys
import time
import warnings  # to disable warnings on export to ONNX
import zipfile
from pathlib import Path
import logging

import torch
import nncf  # Important - should be imported directly after torch

import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision.transforms as transforms

from nncf.common.utils.logger import set_log_level
set_log_level(logging.ERROR)  # Disables all NNCF info and warning messages
from nncf import NNCFConfig
from nncf.torch import create_compressed_model, register_default_init_args
from openvino.runtime import Core
from torch.jit import TracerWarning

sys.path.append("../utils")
from notebook_utils import download_file

torch.manual_seed(0)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using {device} device")

MODEL_DIR = Path("model")
OUTPUT_DIR = Path("output")
DATA_DIR = Path("data")
BASE_MODEL_NAME = "resnet18"
image_size = 64

OUTPUT_DIR.mkdir(exist_ok=True)
MODEL_DIR.mkdir(exist_ok=True)
DATA_DIR.mkdir(exist_ok=True)

# Paths where PyTorch, ONNX and OpenVINO IR models will be stored
fp32_pth_path = Path(MODEL_DIR / (BASE_MODEL_NAME + "_fp32")).with_suffix(".pth")
fp32_onnx_path = Path(OUTPUT_DIR / (BASE_MODEL_NAME + "_fp32")).with_suffix(".onnx")
fp32_ir_path = fp32_onnx_path.with_suffix(".xml")
int8_onnx_path = Path(OUTPUT_DIR / (BASE_MODEL_NAME + "_int8")).with_suffix(".onnx")
int8_ir_path = int8_onnx_path.with_suffix(".xml")

# Training the FP32 model from scratch is possible but slow, so the pre-trained weights are downloaded by default.
pretrained_on_tiny_imagenet = True
fp32_pth_url = "https://storage.openvinotoolkit.org/repositories/nncf/openvino_notebook_ckpts/302_resnet18_fp32_v1.pth"
download_file(fp32_pth_url, directory=MODEL_DIR, filename=fp32_pth_path.name)

# Download Tiny ImageNet dataset
def download_tiny_imagenet_200(
    data_dir: Path,
    url="http://cs231n.stanford.edu/tiny-imagenet-200.zip",
    archive_name="tiny-imagenet-200.zip",
):
    archive_path = data_dir / archive_name
    download_file(url, directory=data_dir, filename=archive_name)
    with zipfile.ZipFile(archive_path, "r") as zip_ref:
        zip_ref.extractall(path=data_dir)

def prepare_tiny_imagenet_200(dataset_dir: Path):
    # format validation set the same way as train set is formatted
    val_data_dir = dataset_dir / 'val'
    val_annotations_file = val_data_dir / 'val_annotations.txt'
    with open(val_annotations_file, 'r') as f:
        val_annotation_data = map(lambda line: line.split('\t')[:2], f.readlines())
    val_images_dir = val_data_dir / 'images'
    for image_filename, image_label in val_annotation_data:
        from_image_filepath = val_images_dir / image_filename
        to_image_dir = val_data_dir / image_label
        if not to_image_dir.exists():
            to_image_dir.mkdir()
        to_image_filepath = to_image_dir / image_filename
        from_image_filepath.rename(to_image_filepath)
    val_annotations_file.unlink()
    val_images_dir.rmdir()
    

DATASET_DIR = DATA_DIR / "tiny-imagenet-200"
if not DATASET_DIR.exists():
    download_tiny_imagenet_200(DATA_DIR)
    prepare_tiny_imagenet_200(DATASET_DIR)
    print(f"Successfully downloaded and prepared dataset at: {DATASET_DIR}")

# Pre-train Floating-Point Model
# Train Function
def train(train_loader, model, criterion, optimizer, epoch):
    batch_time = AverageMeter("Time", ":3.3f")
    losses = AverageMeter("Loss", ":2.3f")
    top1 = AverageMeter("Acc@1", ":2.2f")
    top5 = AverageMeter("Acc@5", ":2.2f")
    progress = ProgressMeter(
        len(train_loader), [batch_time, losses, top1, top5], prefix="Epoch:[{}]".format(epoch)
    )

    # switch to train mode
    model.train()

    end = time.time()
    for i, (images, target) in enumerate(train_loader):
        images = images.to(device)
        target = target.to(device)

        # compute output
        output = model(images)
        loss = criterion(output, target)

        # measure accuracy and record loss
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), images.size(0))
        top1.update(acc1[0], images.size(0))
        top5.update(acc5[0], images.size(0))

        # compute gradient and do opt step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        print_frequency = 50
        if i % print_frequency == 0:
            progress.display(i)

# Validate Function
def validate(val_loader, model, criterion):
    batch_time = AverageMeter("Time", ":3.3f")
    losses = AverageMeter("Loss", ":2.3f")
    top1 = AverageMeter("Acc@1", ":2.2f")
    top5 = AverageMeter("Acc@5", ":2.2f")
    progress = ProgressMeter(len(val_loader), [batch_time, losses, top1, top5], prefix="Test: ")

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        end = time.time()
        for i, (images, target) in enumerate(val_loader):
            images = images.to(device)
            target = target.to(device)

            # compute output
            output = model(images)
            loss = criterion(output, target)

            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(acc1[0], images.size(0))
            top5.update(acc5[0], images.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            print_frequency = 10
            if i % print_frequency == 0:
                progress.display(i)

        print(" * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}".format(top1=top1, top5=top5))
    return top1.avg

# Helpers
class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self, name, fmt=":f"):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
        return fmtstr.format(**self.__dict__)
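
# Tiny illustration of AverageMeter with hypothetical values: it keeps the latest
# value and a running average weighted by the batch size n.
meter = AverageMeter("Loss", ":2.3f")
meter.update(0.9, n=128)
meter.update(0.7, n=128)
print(meter)  # Loss 0.700 (0.800)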


class ProgressMeter(object):
    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        entries = [self.prefix + self.batch_fmtstr.format(batch)]
        entries += [str(meter) for meter in self.meters]
        print("\t".join(entries))

    def _get_batch_fmtstr(self, num_batches):
        num_digits = len(str(num_batches // 1))
        fmt = "{:" + str(num_digits) + "d}"
        return "[" + fmt + "/" + fmt.format(num_batches) + "]"


def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k"""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
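
# Toy check of the accuracy helper on a batch of two samples (hypothetical logits):
# the first prediction is correct at top-1, the second only at top-2.
toy_logits = torch.tensor([[0.1, 0.9, 0.0], [0.8, 0.15, 0.05]])
toy_targets = torch.tensor([1, 1])
top1, top2 = accuracy(toy_logits, toy_targets, topk=(1, 2))
print(top1.item(), top2.item())  # 50.0 100.0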

# Get a Pre-trained FP32 Model
num_classes = 200  # 200 is for Tiny ImageNet, default is 1000 for ImageNet
init_lr = 1e-4
batch_size = 128
epochs = 4

model = models.resnet18(pretrained=not pretrained_on_tiny_imagenet)
# update the last FC layer for Tiny ImageNet number of classes
model.fc = nn.Linear(in_features=512, out_features=num_classes, bias=True)
model.to(device)

# Data loading code
train_dir = DATASET_DIR / "train"
val_dir = DATASET_DIR / "val"
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

train_dataset = datasets.ImageFolder(
    train_dir,
    transforms.Compose(
        [
            transforms.Resize(image_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]
    ),
)
val_dataset = datasets.ImageFolder(
    val_dir,
    transforms.Compose(
        [
            transforms.Resize(image_size),
            transforms.ToTensor(),
            normalize,
        ]
    ),
)

train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True, sampler=None
)

val_loader = torch.utils.data.DataLoader(
    val_dataset, batch_size=batch_size, shuffle=False, num_workers=4, pin_memory=True
)

# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=init_lr)

if pretrained_on_tiny_imagenet:
    #
    # ** WARNING: torch.load functionality uses Python's pickling module that
    # may be used to perform arbitrary code execution during unpickling. Only load data that you
    # trust.
    #
    checkpoint = torch.load(str(fp32_pth_path), map_location="cpu")
    model.load_state_dict(checkpoint["state_dict"], strict=True)
    acc1_fp32 = checkpoint["acc1"]
else:
    best_acc1 = 0
    # Training loop
    for epoch in range(0, epochs):
        # run a single training epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion)

        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)

        if is_best:
            checkpoint = {"state_dict": model.state_dict(), "acc1": acc1}
            torch.save(checkpoint, fp32_pth_path)
    acc1_fp32 = best_acc1
    
print(f"Accuracy of FP32 model: {acc1_fp32:.3f}")

dummy_input = torch.randn(1, 3, image_size, image_size).to(device)

torch.onnx.export(model, dummy_input, fp32_onnx_path)
print(f"FP32 ONNX model was exported to {fp32_onnx_path}.")

# Create and Initialize Quantization
nncf_config_dict = {
    "input_info": {"sample_size": [1, 3, image_size, image_size]},
    "log_dir": str(OUTPUT_DIR),  # log directory for NNCF-specific logging outputs
    "compression": {
        "algorithm": "quantization",  # specify the algorithm here
    },
}
nncf_config = NNCFConfig.from_dict(nncf_config_dict)

nncf_config = register_default_init_args(nncf_config, train_loader)
compression_ctrl, model = create_compressed_model(model, nncf_config)
acc1 = validate(val_loader, model, criterion)
print(f"Accuracy of initialized INT8 model: {acc1:.3f}")

# Fine-tune the Compressed Model
compression_lr = init_lr / 10
optimizer = torch.optim.Adam(model.parameters(), lr=compression_lr)

# train for one epoch with NNCF
train(train_loader, model, criterion, optimizer, epoch=0)

# evaluate on validation set after Quantization-Aware Training (QAT case)
acc1_int8 = validate(val_loader, model, criterion)

print(f"Accuracy of tuned INT8 model: {acc1_int8:.3f}")
print(f"Accuracy drop of tuned INT8 model over pre-trained FP32 model: {acc1_fp32 - acc1_int8:.3f}")

# Export INT8 Model to ONNX
if not int8_onnx_path.exists():
    warnings.filterwarnings("ignore", category=TracerWarning)
    warnings.filterwarnings("ignore", category=UserWarning)
    # Export INT8 model to ONNX that is supported by the OpenVINO™ toolkit
    compression_ctrl.export_model(int8_onnx_path)
    print(f"INT8 ONNX model exported to {int8_onnx_path}.")

# Convert ONNX models to OpenVINO Intermediate Representation (IR)
if not fp32_ir_path.exists():
    !mo --input_model $fp32_onnx_path --input_shape "[1,3, $image_size, $image_size]" --mean_values "[123.675, 116.28, 103.53]" --scale_values "[58.395, 57.12, 57.375]" --data_type FP16 --output_dir $OUTPUT_DIR

if not int8_ir_path.exists():
    !mo --input_model $int8_onnx_path --input_shape "[1,3, $image_size, $image_size]" --mean_values "[123.675, 116.28, 103.53]" --scale_values "[58.395, 57.12, 57.375]" --data_type FP16 --output_dir $OUTPUT_DIR

# Benchmark Model Performance by Computing Inference Time
def parse_benchmark_output(benchmark_output):
    parsed_output = [line for line in benchmark_output if not (line.startswith(r"[") or line.startswith("  ") or line == "")]
    print(*parsed_output, sep='\n')


print('Benchmark FP32 model (IR)')
benchmark_output = ! benchmark_app -m $fp32_ir_path -d CPU -api async -t 15
parse_benchmark_output(benchmark_output)

print('Benchmark INT8 model (IR)')
benchmark_output = ! benchmark_app -m $int8_ir_path -d CPU -api async -t 15
parse_benchmark_output(benchmark_output)

# Show CPU Information for reference
ie = Core()
ie.get_property(device_name="CPU", name="FULL_DEVICE_NAME")
# Preparation
from pathlib import Path

import tensorflow as tf

model_xml = Path("model/flower/flower_ir.xml")
dataset_url = (
    "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
)
data_dir = Path(tf.keras.utils.get_file("flower_photos", origin=dataset_url, untar=True))

if not model_xml.exists():
    print("Executing training notebook. This will take a while...")
    %run 301-tensorflow-training-openvino.ipynb

# Imports
import copy
import os
import sys

import cv2
import matplotlib.pyplot as plt
import numpy as np
from addict import Dict
from openvino.tools.pot.api import Metric, DataLoader
from openvino.tools.pot.graph import load_model, save_model
from openvino.tools.pot.graph.model_utils import compress_model_weights
from openvino.tools.pot.engines.ie_engine import IEEngine
from openvino.tools.pot.pipeline.initializer import create_pipeline
from openvino.runtime import Core
from PIL import Image

sys.path.append("../utils")
from notebook_utils import benchmark_model, download_file

# Settings
model_config = Dict(
    {
        "model_name": "flower",
        "model": "model/flower/flower_ir.xml",
        "weights": "model/flower/flower_ir.bin",
    }
)

engine_config = Dict({"device": "CPU", "stat_requests_number": 2, "eval_requests_number": 2})

algorithms = [
    {
        "name": "DefaultQuantization",
        "params": {
            "target_device": "CPU",
            "preset": "performance",
            "stat_subset_size": 1000,
        },
    }
]

# Create DataLoader Class
class ClassificationDataLoader(DataLoader):
    """
    DataLoader for image data that is stored in a directory per category. For example, for
    categories _rose_ and _daisy_, rose images are expected in data_source/rose, daisy images
    in data_source/daisy.
    """

    def __init__(self, data_source):
        """
        :param data_source: path to data directory
        """
        self.data_source = Path(data_source)
        self.dataset = [p for p in self.data_source.glob("**/*") if p.suffix in (".png", ".jpg")]
        self.class_names = sorted([item.name for item in self.data_source.iterdir() if item.is_dir()])

    def __len__(self):
        """
        Returns the number of elements in the dataset
        """
        return len(self.dataset)

    def __getitem__(self, index):
        """
        Get item from self.dataset at the specified index.
        Returns (annotation, image), where annotation is a tuple (index, class_index)
        and image a preprocessed image in network shape
        """
        if index >= len(self):
            raise IndexError
        filepath = self.dataset[index]
        annotation = (index, self.class_names.index(filepath.parent.name))
        image = self._read_image(filepath)
        return annotation, image

    def _read_image(self, filepath):
        """
        Read the image at the given path into memory, convert it from BGR to RGB
        and resize it to network input size

        :param filepath: path of the image file to read
        :return ndarray representation of the image
        """
        image = cv2.imread(str(filepath))[:, :, (2, 1, 0)]
        image = cv2.resize(image, (180, 180)).astype(np.float32)
        return image

# Create Accuracy Metric Class
class Accuracy(Metric):
    def __init__(self):
        super().__init__()
        self._name = "accuracy"
        self._matches = []

    @property
    def value(self):
        """Returns accuracy metric value for the last model output."""
        return {self._name: self._matches[-1]}

    @property
    def avg_value(self):
        """
        Returns accuracy metric value for all model outputs. Results per image are stored in
        self._matches, where True means a correct prediction and False a wrong prediction.
        Accuracy is computed as the number of correct predictions divided by the total
        number of predictions.
        """
        num_correct = np.count_nonzero(self._matches)
        return {self._name: num_correct / len(self._matches)}

    def update(self, output, target):
        """Updates prediction matches.

        :param output: model output
        :param target: annotations
        """
        predict = np.argmax(output[0], axis=1)
        match = predict == target
        self._matches.append(match)

    def reset(self):
        """
        Resets the Accuracy metric. This is a required method that should initialize all
        attributes to their initial value.
        """
        self._matches = []

    def get_attributes(self):
        """
        Returns a dictionary of metric attributes {metric_name: {attribute_name: value}}.
        Required attributes: 'direction': 'higher-better' or 'higher-worse'
                             'type': metric type
        """
        return {self._name: {"direction": "higher-better", "type": "accuracy"}}
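
# Minimal illustration of the Accuracy metric outside the POT pipeline
# (hypothetical model output for a single image):
metric_demo = Accuracy()
metric_demo.update(output=[np.array([[0.1, 0.7, 0.2]])], target=[1])  # argmax is class 1
print(metric_demo.avg_value)  # {'accuracy': 1.0}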

# POT Optimization
# Step 1: Load the model
model = load_model(model_config=model_config)
original_model = copy.deepcopy(model)

# Step 2: Initialize the data loader
data_loader = ClassificationDataLoader(data_source=data_dir)

# Step 3 (Optional. Required for AccuracyAwareQuantization): Initialize the metric
#        Compute metric results on original model
metric = Accuracy()

# Step 4: Initialize the engine for metric calculation and statistics collection
engine = IEEngine(config=engine_config, data_loader=data_loader, metric=metric)

# Step 5: Create a pipeline of compression algorithms
pipeline = create_pipeline(algo_config=algorithms, engine=engine)

# Step 6: Execute the pipeline
compressed_model = pipeline.run(model=model)

# Step 7 (Optional): Compress model weights quantized precision
#                    in order to reduce the size of final .bin file
compress_model_weights(model=compressed_model)

# Step 8: Save the compressed model and get the path to the model
compressed_model_paths = save_model(
    model=compressed_model, save_path=os.path.join(os.path.curdir, "model/optimized")
)
compressed_model_xml = Path(compressed_model_paths[0]["model"])
print(f"The quantized model is stored in {compressed_model_xml}")

# Step 9 (Optional): Evaluate the original and compressed model. Print the results
original_metric_results = pipeline.evaluate(original_model)
if original_metric_results:
    print(f"Accuracy of the original model:  {next(iter(original_metric_results.values())):.5f}")

quantized_metric_results = pipeline.evaluate(compressed_model)
if quantized_metric_results:
    print(f"Accuracy of the quantized model: {next(iter(quantized_metric_results.values())):.5f}")

# Run Inference on Quantized Model
def pre_process_image(imagePath, img_height=180):
    # Model input format
    n, c, h, w = [1, 3, img_height, img_height]
    image = Image.open(imagePath)
    # PIL resize expects (width, height); both equal img_height here
    image = image.resize((w, h), resample=Image.BILINEAR)

    # Convert to array and change data layout from HWC to CHW
    image = np.array(image)

    input_image = image.reshape((n, h, w, c))

    return input_image
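
# Quick shape check for pre_process_image with a synthetic image written to disk
# (the filename is hypothetical; the notebook downloads a real photo below).
Image.new("RGB", (320, 240), color=(120, 80, 200)).save("dummy_flower.jpg")
print(pre_process_image(imagePath="dummy_flower.jpg").shape)  # (1, 180, 180, 3) - NHWC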

# Load the optimized model and get the names of the input and output layer
ie = Core()
model_pot = ie.read_model(model="model/optimized/flower_ir.xml")
compiled_model_pot = ie.compile_model(model=model_pot, device_name="CPU")
input_layer = compiled_model_pot.input(0)
output_layer = compiled_model_pot.output(0)

# Get the class names: a list of directory names in alphabetical order
class_names = sorted([item.name for item in Path(data_dir).iterdir() if item.is_dir()])

# Run inference on an input image...
inp_img_url = (
    "https://upload.wikimedia.org/wikipedia/commons/4/48/A_Close_Up_Photo_of_a_Dandelion.jpg"
)
directory = "output"
inp_file_name = "A_Close_Up_Photo_of_a_Dandelion.jpg"
file_path = Path(directory)/Path(inp_file_name)
# Download the image if it does not exist yet
if not file_path.exists():
    download_file(inp_img_url, inp_file_name, directory=directory)

# Pre-process the image and get it ready for inference.
input_image = pre_process_image(imagePath=file_path)
print(f'input image shape: {input_image.shape}')
print(f'input layer shape: {input_layer.shape}')

res = compiled_model_pot([input_image])[output_layer]

score = tf.nn.softmax(res[0])

# Show the results
image = Image.open(file_path)
plt.imshow(image)
print(
    "This image most likely belongs to {} with a {:.2f} percent confidence.".format(
        class_names[np.argmax(score)], 100 * np.max(score)
    )
)

# Compare Inference Speed
# print the available devices on this system
ie = Core()
print("Device information:")
print(ie.get_property("CPU", "FULL_DEVICE_NAME"))
if "GPU" in ie.available_devices:
    print(ie.get_property("GPU", "FULL_DEVICE_NAME"))

# Original model - CPU
benchmark_model(model_path=model_xml, device="CPU", seconds=15, api='async')

# Quantized model - CPU
benchmark_model(model_path=compressed_model_xml, device="CPU", seconds=15, api='async')

# Original model - MULTI:CPU,GPU
if "GPU" in ie.available_devices:
    benchmark_model(model_path=model_xml, device="MULTI:CPU,GPU", seconds=15, api='async')
else:
    print("A supported integrated GPU is not available on this system.")

# Quantized model - MULTI:CPU,GPU
if "GPU" in ie.available_devices:
    benchmark_model(model_path=compressed_model_xml, device="MULTI:CPU,GPU", seconds=15, api='async')
else:
    print("A supported integrated GPU is not available on this system.")

# print the available devices on this system
print("Device information:")
print(ie.get_property("CPU", "FULL_DEVICE_NAME"))
if "GPU" in ie.available_devices:
    print(ie.get_property("GPU", "FULL_DEVICE_NAME"))

# Original IR model - CPU
benchmark_output = %sx benchmark_app -m $model_xml -t 15 -api async
# Remove logging info from benchmark_app output and show only the results
benchmark_result = [line for line in benchmark_output if not (line.startswith(r"[") or line.startswith("  ") or line=="")]
print("\n".join(benchmark_result))

# Quantized IR model - CPU
benchmark_output = %sx benchmark_app -m $compressed_model_xml -t 15 -api async
# Remove logging info from benchmark_app output and show only the results
benchmark_result = [line for line in benchmark_output if not (line.startswith(r"[") or line.startswith("  ") or line=="")]
print("\n".join(benchmark_result))

# Original IR model - MULTI:CPU,GPU
ie = Core()
if "GPU" in ie.available_devices:
    benchmark_output = %sx benchmark_app -m $model_xml -d MULTI:CPU,GPU -t 15 -api async
    # Remove logging info from benchmark_app output and show only the results
    benchmark_result = [line for line in benchmark_output if not (line.startswith(r"[") or line.startswith("  ") or line=="")]
    print("\n".join(benchmark_result))
else:
    print("An integrated GPU is not available on this system.")

# Quantized IR model - MULTI:CPU,GPU
ie = Core()
if "GPU" in ie.available_devices:
    benchmark_output = %sx benchmark_app -m $compressed_model_xml -d MULTI:CPU,GPU -t 15 -api async
    # Remove logging info from benchmark_app output and show only the results
    benchmark_result = [line for line in benchmark_output if not (line.startswith(r"[") or line.startswith("  ") or line=="")]
    print("\n".join(benchmark_result))
else:
    print("An integrated GPU is not available on this system.")
# Import TensorFlow and Other Libraries
import os
import sys
from pathlib import Path

import PIL
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from PIL import Image
from openvino.runtime import Core
from openvino.tools.mo import mo_tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential

sys.path.append("../utils")
from notebook_utils import download_file

# Download and Explore the Dataset
import pathlib
dataset_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
data_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, untar=True)
data_dir = pathlib.Path(data_dir)

image_count = len(list(data_dir.glob('*/*.jpg')))
print(image_count)

roses = list(data_dir.glob('roses/*'))
PIL.Image.open(str(roses[0]))
PIL.Image.open(str(roses[1]))

tulips = list(data_dir.glob('tulips/*'))
PIL.Image.open(str(tulips[0]))
PIL.Image.open(str(tulips[1]))

# Create a Dataset
batch_size = 32
img_height = 180
img_width = 180

train_ds = tf.keras.preprocessing.image_dataset_from_directory(
  data_dir,
  validation_split=0.2,
  subset="training",
  seed=123,
  image_size=(img_height, img_width),
  batch_size=batch_size)

val_ds = tf.keras.preprocessing.image_dataset_from_directory(
  data_dir,
  validation_split=0.2,
  subset="validation",
  seed=123,
  image_size=(img_height, img_width),
  batch_size=batch_size)

class_names = train_ds.class_names
print(class_names)

# Visualize the Data
plt.figure(figsize=(10, 10))
for images, labels in train_ds.take(1):
    for i in range(9):
        ax = plt.subplot(3, 3, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.title(class_names[labels[i]])
        plt.axis("off")

for image_batch, labels_batch in train_ds:
    print(image_batch.shape)
    print(labels_batch.shape)
    break

# Configure the Dataset for Performance
# AUTOTUNE = tf.data.AUTOTUNE
AUTOTUNE = tf.data.experimental.AUTOTUNE
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)

# Standardize the Data
normalization_layer = layers.experimental.preprocessing.Rescaling(1./255)

normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
image_batch, labels_batch = next(iter(normalized_ds))
first_image = image_batch[0]
# Notice that the pixel values are now in `[0, 1]`.
print(np.min(first_image), np.max(first_image)) 

# Create the Model
num_classes = 5

model = Sequential([
  layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3)),
  layers.Conv2D(16, 3, padding='same', activation='relu'),
  layers.MaxPooling2D(),
  layers.Conv2D(32, 3, padding='same', activation='relu'),
  layers.MaxPooling2D(),
  layers.Conv2D(64, 3, padding='same', activation='relu'),
  layers.MaxPooling2D(),
  layers.Flatten(),
  layers.Dense(128, activation='relu'),
  layers.Dense(num_classes)
])

# Compile the Model
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
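
# The compiled model would then be trained with the standard Keras fit loop.
# A minimal sketch; the epoch count is illustrative, not taken from the original notebook.
history = model.fit(
    train_ds,
    validation_data=val_ds,
    epochs=10
)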
# Imports
import time
import json

import numpy as np
import tokens_bert as tokens

from openvino.runtime import Core
from openvino.runtime import Dimension

# Download the model
# directory where model will be downloaded
base_model_dir = "model"

# desired precision
precision = "FP16-INT8"

# model name as named in Open Model Zoo
model_name = "bert-small-uncased-whole-word-masking-squad-int8-0002"

model_path = f"model/intel/{model_name}/{precision}/{model_name}.xml"
model_weights_path = f"model/intel/{model_name}/{precision}/{model_name}.bin"

download_command = f"omz_downloader " \
                   f"--name {model_name} " \
                   f"--precision {precision} " \
                   f"--output_dir {base_model_dir} " \
                   f"--cache_dir {base_model_dir}"
! $download_command

# Load the model for Entity Extraction with Dynamic Shape
# initialize inference engine
ie_core = Core()
# read the network and corresponding weights from file
model = ie_core.read_model(model=model_path, weights=model_weights_path)

# assign dynamic shapes to every input layer on the last dimension
for input_layer in model.inputs:
    input_shape = input_layer.partial_shape
    input_shape[1] = Dimension(1, 384)
    model.reshape({input_layer: input_shape})
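
# Optional: verify that every input now accepts sequences of 1 to 384 tokens (a quick sketch)
for input_layer in model.inputs:
    print(input_layer.any_name, input_layer.partial_shape)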

# compile the model for the CPU
compiled_model = ie_core.compile_model(model=model, device_name="CPU")

# get input names of nodes
input_keys = list(compiled_model.inputs)

# Processing
# path to vocabulary file
vocab_file_path = "data/vocab.txt"

# create dictionary with words and their indices
vocab = tokens.load_vocab_file(vocab_file_path)

# define special tokens
cls_token = vocab["[CLS]"]
sep_token = vocab["[SEP]"]

# set a confidence score threshold
confidence_threshold = 0.4

# Preprocessing
# generator of a sequence of inputs
def prepare_input(entity_tokens, context_tokens):
    input_ids = [cls_token] + entity_tokens + [sep_token] + \
        context_tokens + [sep_token]
    # attention mask: 1 for every real (non-padded) token
    attention_mask = [1] * len(input_ids)
    # 0 for entity tokens, 1 for context part
    token_type_ids = [0] * (len(entity_tokens) + 2) + \
        [1] * (len(context_tokens) + 1)

    # create input to feed the model
    input_dict = {
        "input_ids": np.array([input_ids], dtype=np.int32),
        "attention_mask": np.array([attention_mask], dtype=np.int32),
        "token_type_ids": np.array([token_type_ids], dtype=np.int32),
    }

    # some models require additional position_ids
    if "position_ids" in [i_key.any_name for i_key in input_keys]:
        position_ids = np.arange(len(input_ids))
        input_dict["position_ids"] = np.array([position_ids], dtype=np.int32)

    return input_dict
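
# Hypothetical smoke test for prepare_input (assumes the vocabulary and the
# compiled model from the cells above are loaded).
sample_entity, _ = tokens.text_to_tokens(text="company?", vocab=vocab)
sample_context, _ = tokens.text_to_tokens(text="intel is headquartered in santa clara", vocab=vocab)
sample_input = prepare_input(sample_entity, sample_context)
print({name: arr.shape for name, arr in sample_input.items()})
# all tensors share the same (1, sequence_length) shape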

# Postprocessing
def postprocess(output_start, output_end, entity_tokens,
                context_tokens_start_end, input_size):

    def get_score(logits):
        # softmax over the logits, with the max subtracted for numerical stability
        out = np.exp(logits - logits.max(axis=-1, keepdims=True))
        return out / out.sum(axis=-1)

    # get start-end scores for context
    score_start = get_score(output_start)
    score_end = get_score(output_end)

    # index of first context token in tensor
    context_start_idx = len(entity_tokens) + 2
    # index of last+1 context token in tensor
    context_end_idx = input_size - 1

    # find product of all start-end combinations to find the best one
    max_score, max_start, max_end = find_best_entity_window(
        start_score=score_start, end_score=score_end,
        context_start_idx=context_start_idx, context_end_idx=context_end_idx
    )

    # convert to context text start-end index
    max_start = context_tokens_start_end[max_start][0]
    max_end = context_tokens_start_end[max_end][1]

    return max_score, max_start, max_end


def find_best_entity_window(start_score, end_score,
                            context_start_idx, context_end_idx):
    context_len = context_end_idx - context_start_idx
    score_mat = np.matmul(
        start_score[context_start_idx:context_end_idx].reshape(
            (context_len, 1)),
        end_score[context_start_idx:context_end_idx].reshape(
            (1, context_len)),
    )
    # reset candidates with end before start
    score_mat = np.triu(score_mat)
    # reset long candidates (>16 words)
    score_mat = np.tril(score_mat, 16)
    # find the best start-end pair
    max_s, max_e = divmod(score_mat.flatten().argmax(), score_mat.shape[1])
    max_score = score_mat[max_s, max_e]

    return max_score, max_s, max_e
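
# Toy demonstration of the window search: five positions, where indices 2-3 are
# context tokens (synthetic scores, chosen so the best span covers both of them).
toy_start = np.array([0.0, 0.0, 0.8, 0.1, 0.1])
toy_end = np.array([0.0, 0.0, 0.1, 0.7, 0.2])
best = find_best_entity_window(toy_start, toy_end, context_start_idx=2, context_end_idx=4)
print(best)  # (~0.56, 0, 1): start/end indices are relative to the context window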

def get_best_entity(entity, context, vocab):
    # convert context string to tokens
    context_tokens, context_tokens_end = tokens.text_to_tokens(
        text=context.lower(), vocab=vocab)
    # convert entity string to tokens
    entity_tokens, _ = tokens.text_to_tokens(text=entity.lower(), vocab=vocab)

    network_input = prepare_input(entity_tokens, context_tokens)
    input_size = len(context_tokens) + len(entity_tokens) + 3

    # openvino inference
    output_start_key = compiled_model.output("output_s")
    output_end_key = compiled_model.output("output_e")
    result = compiled_model(network_input)

    # postprocess the result getting the score and context range for the answer
    score_start_end = postprocess(output_start=result[output_start_key][0],
                                  output_end=result[output_end_key][0],
                                  entity_tokens=entity_tokens,
                                  context_tokens_start_end=context_tokens_end,
                                  input_size=input_size)

    # return the part of the context, which is already an answer
    return context[score_start_end[1]:score_start_end[2]], score_start_end[0]

# Set the Entity Recognition Template
template = ["building", "company", "persons", "city",
            "state", "height", "floor", "address"]

def run_analyze_entities(context):
    print(f"Context: {context}\n", flush=True)

    if len(context) == 0:
        print("Error: Empty context or outside paragraphs")
        return

    if len(context) > 380:
        print("Error: The context is too long for this particular model. "
              "Try a context shorter than 380 characters.")
        return
        return

    # measure processing time
    start_time = time.perf_counter()
    extract = []
    for field in template:
        entity_to_find = field + "?"
        entity, score = get_best_entity(entity=entity_to_find,
                                        context=context,
                                        vocab=vocab)
        if score >= confidence_threshold:
            extract.append({"Entity": entity, "Type": field,
                            "Score": f"{score:.2f}"})
    end_time = time.perf_counter()
    res = {"Extraction": extract, "Time": f"{end_time - start_time:.2f}s"}
    print("\nJSON Output:")
    print(json.dumps(res, sort_keys=False, indent=4))

# Run on Simple Text
# Sample 1
source_text = "Intel Corporation is an American multinational and technology" \
    " company headquartered in Santa Clara, California."
run_analyze_entities(source_text)

# Sample 2
source_text = "Intel was founded in Mountain View, California, " \
    "in 1968 by Gordon E. Moore, a chemist, and Robert Noyce, " \
    "a physicist and co-inventor of the integrated circuit."
run_analyze_entities(source_text)

# Sample 3
source_text = "The Robert Noyce Building in Santa Clara, California, " \
    "is the headquarters for Intel Corporation. It was constructed in 1992 " \
    "and is located at 2200 Mission College Boulevard - 95054. It has an " \
    "estimated height of 22.20 meters and 6 floors above ground."
run_analyze_entities(source_text)

Similiar Collections

Python strftime reference pandas.Period.strftime python - Formatting Quarter time in pandas columns - Stack Overflow python - Pandas: Change day - Stack Overflow python - Check if multiple columns exist in a df - Stack Overflow Pandas DataFrame apply() - sending arguments examples python - How to filter a dataframe of dates by a particular month/day? - Stack Overflow python - replace a value in the entire pandas data frame - Stack Overflow python - Replacing blank values (white space) with NaN in pandas - Stack Overflow python - get list from pandas dataframe column - Stack Overflow python - How to drop rows of Pandas DataFrame whose value in a certain column is NaN - Stack Overflow python - How to drop rows of Pandas DataFrame whose value in a certain column is NaN - Stack Overflow python - How to lowercase a pandas dataframe string column if it has missing values? - Stack Overflow How to Convert Integers to Strings in Pandas DataFrame - Data to Fish How to Convert Integers to Strings in Pandas DataFrame - Data to Fish create a dictionary of two pandas Dataframe columns? - Stack Overflow python - ValueError: No axis named node2 for object type <class 'pandas.core.frame.DataFrame'> - Stack Overflow Python Pandas iterate over rows and access column names - Stack Overflow python - Creating dataframe from a dictionary where entries have different lengths - Stack Overflow python - Deleting DataFrame row in Pandas based on column value - Stack Overflow python - How to check if a column exists in Pandas - Stack Overflow python - Import pandas dataframe column as string not int - Stack Overflow python - What is the most efficient way to create a dictionary of two pandas Dataframe columns? - Stack Overflow Python Loop through Excel sheets, place into one df - Stack Overflow python - How do I get the row count of a Pandas DataFrame? - Stack Overflow python - How to save a new sheet in an existing excel file, using Pandas? - Stack Overflow Python Loop through Excel sheets, place into one df - Stack Overflow How do I select a subset of a DataFrame? — pandas 1.2.4 documentation python - Delete column from pandas DataFrame - Stack Overflow python - Convert list of dictionaries to a pandas DataFrame - Stack Overflow How to Add or Insert Row to Pandas DataFrame? - Python Examples python - Check if a value exists in pandas dataframe index - Stack Overflow python - Set value for particular cell in pandas DataFrame using index - Stack Overflow python - Pandas Dataframe How to cut off float decimal points without rounding? - Stack Overflow python - Pandas: Change day - Stack Overflow python - Clean way to convert quarterly periods to datetime in pandas - Stack Overflow Pandas - Number of Months Between Two Dates - Stack Overflow python - MonthEnd object result in <11 * MonthEnds> instead of number - Stack Overflow python - Extracting the first day of month of a datetime type column in pandas - Stack Overflow
MySQL MULTIPLES INNER JOIN How to Use EXISTS, UNIQUE, DISTINCT, and OVERLAPS in SQL Statements - dummies postgresql - SQL OVERLAPS PostgreSQL Joins: Inner, Outer, Left, Right, Natural with Examples PostgreSQL Joins: A Visual Explanation of PostgreSQL Joins PL/pgSQL Variables ( Format Dates ) The Ultimate Guide to PostgreSQL Date By Examples Data Type Formatting Functions PostgreSQL - How to calculate difference between two timestamps? | TablePlus Date/Time Functions and Operators PostgreSQL - DATEDIFF - Datetime Difference in Seconds, Days, Months, Weeks etc - SQLines CASE Statements in PostgreSQL - DataCamp SQL Optimizations in PostgreSQL: IN vs EXISTS vs ANY/ALL vs JOIN PostgreSQL DESCRIBE TABLE Quick and best way to Compare Two Tables in SQL - DWgeek.com sql - Best way to select random rows PostgreSQL - Stack Overflow PostgreSQL: Documentation: 13: 70.1. Row Estimation Examples Faster PostgreSQL Counting How to Add a Default Value to a Column in PostgreSQL - PopSQL How to Add a Default Value to a Column in PostgreSQL - PopSQL SQL Subquery - Dofactory SQL IN - SQL NOT IN - JournalDev DROP FUNCTION (Transact-SQL) - SQL Server | Microsoft Docs SQL : Multiple Row and Column Subqueries - w3resource PostgreSQL: Documentation: 9.5: CREATE FUNCTION PostgreSQL CREATE FUNCTION By Practical Examples datetime - PHP Sort a multidimensional array by element containing date - Stack Overflow database - Oracle order NULL LAST by default - Stack Overflow PostgreSQL: Documentation: 9.5: Modifying Tables PostgreSQL: Documentation: 14: SELECT postgresql - sql ORDER BY multiple values in specific order? - Stack Overflow How do I get the current unix timestamp from PostgreSQL? - Database Administrators Stack Exchange SQL MAX() with HAVING, WHERE, IN - w3resource linux - Which version of PostgreSQL am I running? - Stack Overflow Copying Data Between Tables in a Postgres Database php - How to remove all numbers from string? - Stack Overflow sql - How to get a list column names and datatypes of a table in PostgreSQL? - Stack Overflow postgresql - How do I remove all spaces from a field in a Postgres database in an update query? - Stack Overflow sql - How to get a list column names and datatypes of a table in PostgreSQL? - Stack Overflow How to change PRIMARY KEY of an existing PostgreSQL table? · GitHub Drop tables w Dependency Tracking ( constraints ) Import CSV File Into PosgreSQL Table How To Import a CSV into a PostgreSQL Database How can I drop all the tables in a PostgreSQL database? - Stack Overflow How can I drop all the tables in a PostgreSQL database? - Stack Overflow PostgreSQL CASE Statements & Examples using WHEN-THEN, if-else and switch | DataCamp PostgreSQL LEFT: Get First N Characters in a String How can I drop all the tables in a PostgreSQL database? - Stack Overflow How can I drop all the tables in a PostgreSQL database? - Stack Overflow PostgreSQL - Copy Table - GeeksforGeeks PostgreSQL BETWEEN Query with Example sql - Postgres Query: finding values that are not numbers - Stack Overflow PostgreSQL UPDATE Join with A Practical Example
Request API Data with JavaScript or PHP (Access a Json data with PHP API) PHPUnit – The PHP Testing Framework phpspec array_column How to get closest date compared to an array of dates in PHP Calculating past and future dates < PHP | The Art of Web PHP: How to check which item in an array is closest to a given number? - Stack Overflow implode php - Calculate difference between two dates using Carbon and Blade php - Create a Laravel Request object on the fly testing - How can I measure the speed of code written in PHP? What to include in gitignore for a Laravel and PHPStorm project Laravel Chunk Eloquent Method Example - Tuts Make html - How to solve PHP error 'Notice: Array to string conversion in...' - Stack Overflow PHP - Merging two arrays into one array (also Remove Duplicates) - Stack Overflow php - Check if all values in array are the same - Stack Overflow PHP code - 6 lines - codepad php - Convert array of single-element arrays to one a dimensional array - Stack Overflow datetime - PHP Sort a multidimensional array by element containing date - Stack Overflow sql - Division ( / ) not giving my answer in postgresql - Stack Overflow Get current date, given a timezone in PHP? - Stack Overflow php - Get characters after last / in url - Stack Overflow Add space after 7 characters - PHP Coding Help - PHP Freaks php - Laravel Advanced Wheres how to pass variable into function? - Stack Overflow php - How can I manually return or throw a validation error/exception in Laravel? - Stack Overflow php - How to add meta data in laravel api resource - Stack Overflow php - How do I create a webhook? - Stack Overflow Webhooks - Examples | SugarOutfitters Accessing cells - PhpSpreadsheet Documentation Reading and writing to file - PhpSpreadsheet Documentation PHP 7.1: Numbers shown with scientific notation even if explicitely formatted as text · Issue #357 · PHPOffice/PhpSpreadsheet · GitHub How do I install Java on Ubuntu? nginx - How to execute java command from php page with shell_exec() function? - Stack Overflow exec - Executing a java .jar file with php - Stack Overflow Measuring script execution time in PHP - GeeksforGeeks How to CONVERT seconds to minutes? PHP: Check if variable exist but also if has a value equal to something - Stack Overflow How to declare a global variable in php? - Stack Overflow How to zip a whole folder using PHP - Stack Overflow php - Saving file into a prespecified directory using FPDF - Stack Overflow PHP 7.0 get_magic_quotes_runtime error - Stack Overflow How to Create an Object Without Class in PHP ? - GeeksforGeeks Recursion in PHP | PHPenthusiast PHP PDO Insert Tutorial Example - DEV Community PHP Error : Unparenthesized `a ? b : c ? d : e` is deprecate | Laravel.io mysql - Which is faster: multiple single INSERTs or one multiple-row INSERT? - Stack Overflow Display All PHP Errors: Basic & Advanced Usage Need to write at beginning of file with PHP - Stack Overflow Append at the beginning of the file in PHP - Stack Overflow PDO – Insert, update, and delete records in PHP – BrainBell php - How to execute a raw sql query with multiple statement with laravel - Stack Overflow
Clear config cache Eloquent DB::Table RAW Query / WhereNull Laravel Eloquent "IN" Query get single column value in laravel eloquent php - How to use CASE WHEN in Eloquent ORM? - Stack Overflow AND-OR-AND + brackets with Eloquent - Laravel Daily Database: Query Builder - Laravel - The PHP Framework For Web Artisans ( RAW ) Combine Foreach Loop and Eloquent to perform a search | Laravel.io Access Controller method from another controller in Laravel 5 How to Call a controller function in another Controller in Laravel 5 php - Create a Laravel Request object on the fly php - Laravel 5.6 Upgrade caused Logging to break Artisan Console - Laravel - The PHP Framework For Web Artisans What to include in gitignore for a Laravel and PHPStorm project Process big DB table with chunk() method - Laravel Daily How to insert big data on the laravel? - Stack Overflow php - How can I build a condition based query in Laravel? - Stack Overflow Laravel Chunk Eloquent Method Example - Tuts Make Database: Migrations - Laravel - The PHP Framework For Web Artisans php - Laravel Model Error Handling when Creating - Exception Laravel - Inner Join with Multiple Conditions Example using Query Builder - ItSolutionStuff.com laravel cache disable phpunit code example | Newbedev In PHP, how to check if a multidimensional array is empty? · Humblix php - Laravel firstOrNew how to check if it's first or new? - Stack Overflow get base url laravel 8 Code Example Using gmail smtp via Laravel: Connection could not be established with host smtp.gmail.com [Connection timed out #110] - Stack Overflow php - Get the Last Inserted Id Using Laravel Eloquent - Stack Overflow php - Laravel-5 'LIKE' equivalent (Eloquent) - Stack Overflow Accessing cells - PhpSpreadsheet Documentation How to update chunk records in Laravel php - How to execute external shell commands from laravel controller? - Stack Overflow How to convert php array to laravel collection object 3 Best Laravel Redis examples to make your site load faster How to Create an Object Without Class in PHP ? - GeeksforGeeks Case insensitive search with Eloquent | Laravel.io How to Run Specific Seeder in Laravel? - ItSolutionStuff.com PHP Error : Unparenthesized `a ? b : c ? d : e` is deprecate | Laravel.io How to chunk query results in Laravel php - How to execute a raw sql query with multiple statement with laravel - Stack Overflow
PostgreSQL POSITION() function PostgresQL ANY / SOME Operator ( IN vs ANY ) PostgreSQL Substring - Extracting a substring from a String How to add an auto-incrementing primary key to an existing table, in PostgreSQL PostgreSQL STRING_TO_ARRAY() function mysql FIND_IN_SET equivalent to postgresql PL/pgSQL Variables ( Format Dates ) The Ultimate Guide to PostgreSQL Date By Examples Data Type Formatting Functions PostgreSQL - How to calculate difference between two timestamps? | TablePlus Date/Time Functions and Operators PostgreSQL - DATEDIFF - Datetime Difference in Seconds, Days, Months, Weeks etc - SQLines CASE Statements in PostgreSQL - DataCamp SQL Optimizations in PostgreSQL: IN vs EXISTS vs ANY/ALL vs JOIN PL/pgSQL Variables PostgreSQL: Documentation: 11: CREATE PROCEDURE Reading a Postgres EXPLAIN ANALYZE Query Plan Faster PostgreSQL Counting sql - Fast way to discover the row count of a table in PostgreSQL - Stack Overflow PostgreSQL: Documentation: 9.1: tablefunc PostgreSQL DESCRIBE TABLE Quick and best way to Compare Two Tables in SQL - DWgeek.com sql - Best way to select random rows PostgreSQL - Stack Overflow How to Add a Default Value to a Column in PostgreSQL - PopSQL PL/pgSQL IF Statement PostgreSQL: Documentation: 9.1: Declarations SQL Subquery - Dofactory SQL IN - SQL NOT IN - JournalDev PostgreSQL - IF Statement - GeeksforGeeks How to work with control structures in PostgreSQL stored procedures: Using IF, CASE, and LOOP statements | EDB How to combine multiple selects in one query - Databases - ( loop reference ) DROP FUNCTION (Transact-SQL) - SQL Server | Microsoft Docs SQL : Multiple Row and Column Subqueries - w3resource PostgreSQL: Documentation: 9.5: CREATE FUNCTION PostgreSQL CREATE FUNCTION By Practical Examples datetime - PHP Sort a multidimensional array by element containing date - Stack Overflow database - Oracle order NULL LAST by default - Stack Overflow PostgreSQL: Documentation: 9.5: Modifying Tables PostgreSQL: Documentation: 14: SELECT PostgreSQL Array: The ANY and Contains trick - Postgres OnLine Journal postgresql - sql ORDER BY multiple values in specific order? - Stack Overflow sql - How to aggregate two PostgreSQL columns to an array separated by brackets - Stack Overflow How do I get the current unix timestamp from PostgreSQL? - Database Administrators Stack Exchange SQL MAX() with HAVING, WHERE, IN - w3resource linux - Which version of PostgreSQL am I running? - Stack Overflow Postgres login: How to log into a Postgresql database | alvinalexander.com Copying Data Between Tables in a Postgres Database php - How to remove all numbers from string? - Stack Overflow sql - How to get a list column names and datatypes of a table in PostgreSQL? - Stack Overflow postgresql - How do I remove all spaces from a field in a Postgres database in an update query? - Stack Overflow A Step-by-Step Guide To PostgreSQL Temporary Table How to change PRIMARY KEY of an existing PostgreSQL table? · GitHub PostgreSQL UPDATE Join with A Practical Example PostgreSQL: Documentation: 15: CREATE SEQUENCE How can I drop all the tables in a PostgreSQL database? - Stack Overflow
PostgreSQL Show Tables Drop tables w Dependency Tracking ( constraints ) Import CSV File Into PostgreSQL Table How To Import a CSV into a PostgreSQL Database PostgreSQL CASE Statements & Examples using WHEN-THEN, if-else and switch | DataCamp PostgreSQL LEFT: Get First N Characters in a String postgresql - Binary path in the pgAdmin preferences - Database Administrators Stack Exchange PostgreSQL - Copy Table - GeeksforGeeks postgresql duplicate key violates unique constraint - Stack Overflow PostgreSQL BETWEEN Query with Example VACUUM FULL - PostgreSQL wiki How To Remove Spaces Between Characters In PostgreSQL? - Database Administrators Stack Exchange sql - Postgres Query: finding values that are not numbers - Stack Overflow unaccent: Getting rid of umlauts, accents and special characters
How much is left for free shipping, shown in both cart and checkout Add a newsletter-consent checkbox at checkout Hide other shipping options when free shipping is available Skip the address fields when local pickup is selected Change the options in the WooCommerce sort-by menu Change the "Out of stock" text Personal note at the end of the cart page Limit purchases to a maximum of 1 of each product Get a product name by ID using a shortcode Add a WhatsApp buy button to the product archive loop Make the checkout postcode field optional Go straight to checkout when clicking add-to-cart (skip the cart) Free-shipping progress notice on the cart page, version 1 Free-shipping progress notice on the cart page, version 2 Set a minimum order amount (shown in cart and checkout) Move the coupon code to the ORDER REVIEW Kadence WooCommerce Email Designer Set the Assistant font for the whole email in the plugin Out-of-stock products stay marked on the site but move to the bottom of the archive Add a "Buy Now" button to products Hide other shipping options when free shipping is available, method 2 Change the shekel currency symbol to ILS Automatically switch order status from "On hold" to "Completed" Show the discount as a percentage Change the "Select options" text for variable products Search for a product by SKU Change the product image by variation after a single variation is chosen when there are multiple variations Fixed discount by user role, flat rate Fixed discount by user role, percentage Remove shipping fields for virtual products Hide tabs on the product page Show an "Out of stock" badge in the product loop Make checkout fields optional Change the "Out of stock" text for variations Change the color of WooCommerce's built-in notices Show product category IDs on the category page Out of stock: change the message, the loop badge, the product-page notice, and add out-of-stock on a variation Add a supplier-price field to the product edit page Change the out-of-stock text Vertical product thumbnails beside the main product image in Elementor Add a Buy Now button to the product page "On this purchase you saved XX ILS" Let the shop manager clear the WP Rocket cache Allow only one product in the cart Add a gift-wrap checkbox and an instructions area to the WooCommerce checkout Show the discount as a number (discount amount) Add a terms-acceptance checkbox to the checkout page Show the product attributes list on the front end Change product quantities at checkout Remove checkout fields Change checkout field labels and placeholders
Replace text across the site (also good for one-off translations) Remove Google Fonts from the KAVA theme Disable email notifications for automatic WordPress updates Add support for VCF files to the site (VCARD contact files) - part 1 Exclude a specific category from search results Pull content from a repeater Create a mobile share button Detect which elements cause horizontal scrolling Set up SMTP Set image alt text from the file name Add plugin compatibility with the WP version Add an ID column to the users list Remove the title in the HELLO theme Remove comments site-wide Allow SVG uploads Extract the last part of the current page URL Extract the page slug Extract the current page URL Prevent generation of resized image copies Show category IDs on the category page Remove Pages from the admin menu Add a different favicon for each page Add a duplicate-post option and more (by Shimon Savir) Remove comments site-wide 2 "On this purchase you saved XX ILS" Find misbehaving elements causing sideways overflow on mobile, method 1 Allow only one product in the cart Show the discount as a number (discount amount) Add a terms-acceptance checkbox to the checkout page Change the admin color by page/post status Change the admin color for everyone using WordPress's color schemes Show view counts in the WordPress dashboard Show the user role on the front end Infinite scroll in the media library Match the Elementor interface language to the user's language Custom excerpt length
Custom error message in forms Make any section/column clickable - method 1 Make any section/column clickable - method 2 Change the server memory limit Add a link for downloading a document from the site to the email sent to the customer Make any section/column clickable - method 3 Create a mobile share button Open the thank-you page in a new tab when an Elementor form is submitted - single form on the page Open the thank-you page in a new tab when an Elementor form is submitted - multiple forms on the page Bye-bye Eric Jones (blocking form spam) Detect which elements cause horizontal scrolling Floating labels in Elementor forms Create a news-ticker (marquee) animation in Jet Change fonts dynamically in Jet A function that pulls meta fields from JET and drops them all into a SELECT field in an Elementor form Add a divider line between desktop menu items Phone-number validation in Elementor forms Combine two form fields into one field Pull a value from the URL into a form field, with Hebrew encoding Mobile media query (Media Query) Vertical product thumbnails beside the main product image in Elementor Show the Hebrew date Custom date format Fix the date field in Elementor forms on mobile Pull a parameter from the URL and feed it into a form field (PARAMETER, URL, INPUT) Full-width columns in Elementor Sticky column in Elementor Create an artistic "shadow" Code for a switcher: two buttons and two elements Script that closes a menu popup after clicking one of the pages Add a read-more button Match the Elementor interface language to the user's language Run JS code after an Elementor form is submitted successfully Centered mode for the Elementor image carousel Floating labels in Fluent Forms
What is the fastest or most elegant way to compute a set difference using Javascript arrays? - Stack Overflow javascript - Class Binding ternary operator - Stack Overflow Class and Style Bindings — Vue.js How to remove an item from an Array in JavaScript javascript - How to create a GUID / UUID - Stack Overflow json - Remove properties from objects (JavaScript) - Stack Overflow javascript - Remove property for all objects in array - Stack Overflow convert array to dictionary javascript Code Example JavaScript: Map an array of objects to a dictionary - DEV Community javascript - How to replace item in array? - Stack Overflow How can I replace Space with %20 in javascript? - Stack Overflow JavaScript: Check If Array Has All Elements From Another Array - Designcise javascript - How to use format() on a moment.js duration? - Stack Overflow javascript - Elegant method to generate array of random dates within two dates - Stack Overflow Compare two dates in JavaScript using moment.js - Poopcode javascript - Can ES6 template literals be substituted at runtime (or reused)? - Stack Overflow javascript - How to check if array is empty or does not exist? - Stack Overflow How To Use .map() to Iterate Through Array Items in JavaScript | DigitalOcean Check if a Value Is Object in JavaScript | Delft Stack vuejs onclick open url in new tab Code Example javascript - Trying to use fetch and pass in mode: no-cors - Stack Overflow Here are the most popular ways to make an HTTP request in JavaScript JavaScript Array Distinct(). Ever wanted to get distinct element - List of object to a Set How to get distinct values from an array of objects in JavaScript? - Stack Overflow Sorting an array by multiple criteria with vanilla JavaScript | Go Make Things javascript - Map and filter an array at the same time - Stack Overflow ecmascript 6 - JavaScript Reduce Array of objects to object dictionary - Stack Overflow javascript - Convert js Array to Dictionary/Hashmap - Stack Overflow javascript - Is there any way to use a numeric type as an object key? - Stack Overflow
Hooks Cheatsheet React Tutorial Testing Overview – React performance Animating Between Views in React | CSS-Tricks - CSS-Tricks Building an Animated Counter with React and CSS - DEV Community Animate When Element is In-View with Framer Motion | by Chad Murobayashi | JavaScript in Plain English Handling Scroll Based Animation in React (2-ways) - Ryosuke react-animate-on-scroll - npm Bring Life to Your Website | Parallax Scrolling using React and CSS - YouTube react-cool-inview: React hook to monitor an element enters or leaves the viewport (or another element) - DEV Community Improve React Performance using Lazy Loading💤 and Suspense | by Chidume Nnamdi 🔥💻🎵🎮 | Bits and Pieces Mithi's Epic React Exercises React Hooks - Understanding Component Re-renders | by Gupta Garuda | Medium reactjs - When to use useImperativeHandle, useLayoutEffect, and useDebugValue - Stack Overflow useEffect vs useLayoutEffect When to useMemo and useCallback useLockBody hook hooks React animate fade on mount How to Make a React Website with Page Transitions using Framer Motion - YouTube Build a React Image Slider Carousel from Scratch Tutorial - YouTube React Router: useParams | by Megan Lo | Geek Culture | Medium useEffect Fetch POST in React | React Tutorial Updating Arrays in State Background Images in React React Context API : A complete guide | by Pulkit Sharma | Medium Code-Splitting – React Lazy Loading React Components (with react.lazy and suspense) | by Nwose Lotanna | Bits and Pieces Lazy loading React components - LogRocket Blog Code Splitting a Redux Application | Pluralsight react.js folder structure | by Vinoth kumar | Medium react boilerplate React State Management best practices for 2021 (aka no Redux) | by Emmanuel Meric de Bellefon | Medium Create an advanced scroll lock React Hook - LogRocket Blog UseOnClickOutside : Custom hook to detect the mouse click on outside (typescript) - Hashnode react topics 8 Awesome React Hooks web-vitals: Essential metrics for a healthy site. scripts explained warnings and dependency errors Learn the useContext Hook in React - Programming with Mosh Learn useReducer Hook in React - Programming with Mosh Guide To Learn useEffect Hook in React - Programming with Mosh Getting Started With Jest - Testing is Fun - Programming with Mosh Building an accessible React Modal component - Programming with Mosh Kent C. Dodds's free React course
SAVED SEARCH DATE FORMAT (page wise) AND LOOP THROUGH EACH DATA || With Pagination || Avoids 4000 data Limit Rest in Suitelet CALL ANOTHER SCRIPT | SUITELET FROM ANY OTHER SCRIPT | SUITELET | ALSO PASSING OF PARAMETERS CALLING RESTLET | FROM SUITELET Where Is The “IF” Statement In A Saved Search Formula? – > script everything ClientScript Add Sublist Line items etc Saving Record In different way. BFO AND FreeMarker || advance pdf html template Join In Lookup Field Search || Alternative of saved search Use Saved Search or Lookup field search for Getting value or id of Fields not visible On UI or xml https://www.xmlvalidation.com/ XML Validate Load Record--->selectLine--->CommitLine--->Save [Set Sublist field Data After Submit] Null Check Custom Check SEND MAIL WITH FILE ATTACHED EXECUTION CONTEXT || runtime.context In BeforeLoad Use This When Trying to Get Value In BeforeLoad Convert String Into JSON Freemarker Below 2.3.31 use ?eval || above use ?eval_json Design Of Full Advance PDF Template using Suitescript PART 1: Design Of Full Advance PDF Template using Suitescript PART 2: Iterate Through Complex Nested JSON Object Freemarker || BFO || Advance PDF HTML Template WORKING JSON OBJ ITERATE FreeMarker get String convert to JSON, Iterate through it, Print Values In Table Series Addition Freemarker Using Loop(List) Modified Null Check || Keep Updated One Here Navigations In Netsuite || Records || etc DATE FORMAT Netsuite Javascript Transform Invoice To Credit Memo NULLCHECK Freemarker||BFO||XML||Template Before Accessing Any Value Refresh Page Without Pop Up || client script || Reload Page Easy Manner To Format Date || Date to simple Format || MM/DD/YYYY Format || Simple Date CUSTOM ERROR MESSAGE CREATE HOW TO STOP MAP/REDUCE SCRIPT RUNNING FILTER ON JSON OBJECT SAVED SEARCH CRITERIA LOGIC FOR WITHIN DATE || WITHIN START DATE END DATE TO GET Line no. also of Error NETSUITE SERVER TIMEZONE : (GMT-05:00) Eastern Time (US & Canada) || Problem resolved for Map/reduce Timezone issue for Start Date in map/reduce || RESOLVED THE ISSUE compare/check that a date lies between two dates || uses Date's .getTime() (see the sketch below) TO FIND ALL TASKS WITHIN SO START DATE AND END DATE || SAVED SEARCH CODE WORDS Saved Search Get only one Result || getRange netsuite - SuiteScript 2.0 Add filters to saved search in script - Stack Overflow || Addition in saved search filter MASS DELETE ALL RECORDS FROM SAVED SEARCH DATA IN PARAMETER || ADD SS IN THE DEPLOYMENT PARAMETER SAVED SEARCH DATE COMPARE AND GET RESULT IN DAYS HOURS MINUTES Multiple Formula Columns Comment In Saved Search || Saved search uses SQL language for writing formula Logic Set Addressbook Values || Addressbook is a subrecord SuiteQL || N/Query Module || Support SQL query VALIDATE 2 DATES LIE BETWEEN 2 DATES OR NOT || OVERLAPPING CASE ALSO Weeks Within two dates:
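A minimal JavaScript sketch of the date-range checks noted above (SuiteScript is JavaScript, but nothing here is a NetSuite API; the function names and sample dates are made up):

// Check whether a date lies between two dates (inclusive),
// by comparing millisecond timestamps from Date.prototype.getTime().
function isWithinRange(date, start, end) {
    const t = date.getTime();
    return t >= start.getTime() && t <= end.getTime();
}

// Validate whether two date ranges overlap (covers the overlapping case too).
function rangesOverlap(startA, endA, startB, endB) {
    return startA.getTime() <= endB.getTime() && startB.getTime() <= endA.getTime();
}

console.log(isWithinRange(new Date('2023-03-15'), new Date('2023-03-01'), new Date('2023-03-31'))); // true
console.log(rangesOverlap(new Date('2023-03-01'), new Date('2023-03-10'),
                          new Date('2023-03-05'), new Date('2023-03-20'))); // true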
Delete Duplication SQL Using Subquery to find salary above average Sorting Rank Number In Sql find nth Salary in SQL sql - Counting rows in the table which have 1 or more missing values - Stack Overflow How to create customized quarter based on a given date column sql sql server - Stack Overflow get total sales per quarter, every year delete duplicate for table without primary key SQL Server REPLACE Function By Practical Examples adding row_number row_id to table find total sales per category each year change empty string into null values Case when to update table update table using replace function How to Use CASE WHEN With SUM() in SQL | LearnSQL.com find each year sale in each city and state, then sum them all using regexp_replace to remove special characters postgresql find quarter sale in each state, and sum it using unbounded preceding, and using percent rank create delimiter for the phone number regexp_replace to remove special characters using row between to cumulative sum update null values using WHERE filter insert into table command sum cumulative value in SQL SQL Important syntax Auto Increment in SQL read json file on postgresql Moving Average and Moving Total pivot on postgresql Rollup and Cube date function in postgresql select specific part of string using substring change column data type exclude null values in sql how to exclude zero in our counting finding outlier in sql how to import data with identifier (primary key) email and name filtering in sql trimming and removing particular string from sql replace string on sql regexp to find string in SQL answers only about email, identifier, lower, substring, date_part, to_char find percentage of null values in total remove duplicate on both tables v2 any and in operator string function more bucket function find performance index in sql find top max and top min finding world gdp formula (SUM OVER) find month percentage change find highest height in sql with row_number and subquery JOIN AND UNION offset and limit in sql function and variables using views on postgresql find cumulative sums in postgresql find null and not null percentage find specific strings or number in SQL using case when and CAST function Lpad function in Postgresql and string function in general Regexp function in postgresql regular expressions example in postgresql And FUZZYSTRMATCH updated method of deleting duplicates determine column types
Initiate MS Teams Group Chat via PowerApps Save the current user's email as a variable Creates a variable with a theme colour palette Send an email from the current user Filters a data source based on the currently logged-in user Patch data fields, including a choice column, to a data source Changes the colour of a selected item in a Gallery Filter and search a data source via a search box and dropdown Changes visibility based on the selection of another Gallery - used for "Tabs" Display the current user's first name Fix: Combobox/Search is empty check not working - Power Platform Community Retrieve a user photo from SharePoint Get a user photo from the office365 connector from a gallery Set a variable with the current user's first name, from the currentUser variable Extract values from a collection Extract value from combo box Extract value from combo box and convert to string/text Convert collection to JSON Combo box values to collection Show newly created items first / sort by most recent entry, will only show items created today Validate/Validation text box length and/or combo boxes contain data Text input validation - turns border red Lookup value against a text input and disable or enable displaymode Lookup items from a textbox Sets items to choices drop down or combo box Change text value based on lookup results Returns top 10 results and sorts by most recent created date Sets a variable with split text from a link - YouTube in this case Pass a null or empty value from Power Apps to a flow
Linuxteaching | linux console browser javascript Debugging in Visual Studio Code C# - Visual Studio Marketplace dotnet-install scripts - .NET CLI | Microsoft Docs .NET Tutorial | Hello World in 5 minutes Configuration files – Nordic Developer Academy CacheStorage.open() - Web APIs | MDN TransIP API Install .NET Core SDK on Linux | Snap Store Creating Your First Application in Python - GeeksforGeeks Riverbank Computing | Download Managing Application Dependencies — Python Packaging User Guide Building your first mobile application using Python | Engineering Education (EngEd) Program | Section ActivePython-2.7 - ActiveState - Builds - ActiveState Platform Installation guidance for SQL Server on Linux - SQL Server | Microsoft Docs Ellabusby2006/Anzelmo2022 I want to update the PHP version on Ubuntu / Debian | TransIP W3Schools Tryit Editor Install .NET on Debian - .NET | Microsoft Learn How To Build A Simple Star Rating System - Simon Ugorji | Tealfeed Visual Studio Code language identifiers Running Visual Studio Code on Linux HTML Forms StarCoderEx (AI code generator) - Visual Studio Marketplace Install .NET on Linux without using a package manager - .NET | Microsoft Learn ASP.NET Tutorial | Hello World in 5 minutes | .NET Deploy and connect to SQL Server Linux containers - SQL Server | Microsoft Learn Settings Sync in Visual Studio Code Monitoring as Code
.NET Tutorial | Hello World in 5 minutes docx2html - npm Running Visual Studio Code on Linux Connect to an ODBC Data Source (SQL Server Import and Export Wizard) - SQL Server Integration Services (SSIS) | Microsoft Docs Install .NET on Linux without a package manager - .NET | Microsoft Docs TransIP API Install .NET on Alpine - .NET | Microsoft Docs Install .NET on Ubuntu - .NET | Microsoft Docs Check installed .NET versions on Windows, Linux, and macOS - .NET | Microsoft Docs Install .NET Core SDK on Linux | Snap Store Riverbank Computing | Download Managing Application Dependencies — Python Packaging User Guide Building your first mobile application using Python | Engineering Education (EngEd) Program | Section ActivePython-2.7 - ActiveState - Builds - ActiveState Platform html - How to get mp3 files to play in iPhone Safari web browser? - Stack Overflow Work with review data | Google Business Profile APIs | Google Developers Javascript save text file - Javascript Install .NET on Debian - .NET | Microsoft Learn Deploy and connect to SQL Server Linux containers - SQL Server | Microsoft Learn Settings Sync in Visual Studio Code
Working with JSON in Freemarker - Liferay Community Freemarker parse a String as Json - Stack Overflow Online FreeMarker Template Tester Compiler Validate XML files Convert String Into JSON Freemarker Below 2.3.31 use ?eval || above use ?eval_json java - Freemarker iterating over hashmap keys - Stack Overflow FreeMarker get String convert to JSON, Iterate through it, Print Values In Table Online FreeMarker Template Tester || freemarker compiler Series Addition Freemarker Using Loop(List) How to Convert a string to number in freemarker template - Stack Overflow javascript - Grouping JSON by values - Stack Overflow DATE FORMAT Netsuite Javascript FreeMarker || Iterate through nested JSON all Values Using Nested For Loop Nested JSON Iterate Using BFO javascript - Error parsing XHTML: The content of elements must consist of well-formed character data or markup - Stack Overflow NULLCHECK Freemarker||BFO||XML||Template Before Accessing Any Value ADVANCE PDF HTML TEMPLATE 7 Tips for Becoming a Pro at NetSuite’s Advanced PDF/HTML HTML Tag Center Does Not Work in Advanced PDF/HTML Templates || align center HTML BFO NOTES Advanced PDF/HTML Template - NetSuite (Ascendion Holdings Inc) Check Template Code Is Very Different || Bill Payment check template Intro to NetSuite Advanced PDF Source Code Mode | Tutorial | Anchor Group NETSUITE GUIDE OVER PDF/HTML TEMPLATE EDITOR suitescript - Ability to choose between multiple PDF templates on a Netsuite transaction form - Stack Overflow BFO DIV tr td etc User Guide || USEFUL IMPORTANT Border radius in advanced html/pdf templates is not supported? : Netsuite
001-hello-world: Hello Image Classification using OpenVINO™ toolkit 002-openvino-api: OpenVINO API tutorial 003-hello-segmentation: Introduction to Segmentation in OpenVINO 004-hello-detection: Introduction to Detection in OpenVINO 101-tensorflow-to-openvino: TensorFlow to OpenVINO Model Conversion Tutorial 102-pytorch-onnx-to-openvino: PyTorch to ONNX and OpenVINO IR Tutorial 103-paddle-onnx-to-openvino: Convert a PaddlePaddle Model to ONNX and OpenVINO IR 104-model-tools: Working with Open Model Zoo Models 210-ct-scan-live-inference: Live Inference and Benchmark CT-scan Data with OpenVINO 201-vision-monodepth: Monodepth Estimation with OpenVINO 401-object-detection-webcam: Live Object Detection with OpenVINO 402-pose-estimation-webcam: Live Human Pose Estimation with OpenVINO 403-action-recognition-webcam: Human Action Recognition with OpenVINO 211-speech-to-text: Speech to Text with OpenVINO 213-question-answering: Interactive Question Answering with OpenVINO 208-optical-character-recognition: Optical Character Recognition (OCR) with OpenVINO 209-handwritten-ocr: Handwritten Chinese and Japanese OCR 405-paddle-ocr-webcam: PaddleOCR with OpenVINO 305-tensorflow-quantization-aware-training: Optimizing TensorFlow models with Neural Network Compression Framework of OpenVINO by 8-bit quantization 302-pytorch-quantization-aware-training: Optimizing PyTorch models with Neural Network Compression Framework of OpenVINO by 8-bit quantization 301-tensorflow-training-openvino: Post-Training Quantization with TensorFlow Classification Model 301-tensorflow-training-openvino: From Training to Deployment with TensorFlow and OpenVINO 204-named-entity-recognition: Named Entity Recognition with OpenVINO
If statement with switch delete duplicates from an array loop and find class Nullish with an Object - basic traverse classes in a list substring capitalize substring text with ellipses array.at() js media query Dynamic Filter - buttons advanced flow converted if statement Add an array to a HTML dataset getElement Function Intersection Observer template Intersection Observer basic example fetch data, display and filter for of loop - get index get random index value from an array fetch with post method get id value from url debounce for scrolling get element or elements check functions return values from functions indexOf explanation sorting basic using for-of loop for getting index value of iteration import json data into a JS module Splide slider with modal JS change active class if url path matches Pagination / array of arrays Filter array or return full array price formatting ignores wrapping element on click Create a dummy array with numbers Random Generated Number Dummy array - list items dummy id Limits the amount of text Random Number generator function format date function Remove duplicates from JSON array data Filter Posts from state remove duplicates and create object simple ternary remove transition with propertyName sorting in reverse counting items using reduce splice explanation Declaring Variables Get primitive properties on a number / string etc using proto check an array of objects exists / check using regex Destructuring nested function scope IIFE function basic switch Switch with function passing an object
Microsoft Powershell: Delete registry key or values on remote computer | vGeek - Tales from real IT system Administration environment How to Upload Files Over FTP With PowerShell Configure attack surface reduction in Microsoft Defender using Group Policy or PowerShell – 4sysops WinPE: Create bootable media | Microsoft Learn powershell - Can I get the correct date mixing Get-Date and [DateTime]::FromFileTime - Stack Overflow Search-ADAccount (ActiveDirectory) | Microsoft Docs Manual Package Download - PowerShell | Microsoft Docs Get a List of Expired User Accounts in AD Using PowerShell How to Stop an Unresponsive Hyper-V Virtual Machine | Petri IT Knowledgebase Adding PowerShell 7 to WinPE - Deployment Research Send-MailMessage (Microsoft.PowerShell.Utility) - PowerShell | Microsoft Learn How to run a PowerShell script as a Windows service – 4sysops Connect to Exchange Online with PowerShell - The Best Method Find the Full Windows Build Number with PowerShell How to find all the ADFS servers in your environment and run diagnostics against them | Michael de Blok How to find ADFS servers in the environment - Windows Server path - PowerShell script working directory (current location) - Stack Overflow How to get the path of the currently executing script in PowerShell [SOLVED] Unable to Delete Hyper-V VM Checkpoints - Spiceworks Published Applications – Carl Stalhood VMM Reporting | Aidan Finn, IT Pro Use PowerShell to search for string in registry keys and values - Stack Overflow Search for Uninstall Strings - Jose Espitia
INCOME AND SPEND SUMMARY: Monthly regular gift income (DD, CC, PayPal) INCOME AND SPEND: SO AGENCY AND PAYROLL INCOME AND SPEND: Non Monthly DD income INCOME AND SPEND SUMMARY: Donation (50020) INCOME AND SPEND SUMMARY: Appeal (50050) INCOME AND SPEND SUMMARY: FAV Donation (50060) INCOME AND SPEND SUMMARY: In Memory Of (IMO) (50170) INCOME AND SPEND SUMMARY: Single Fundraised Sales (51040) + Books (51050) INCOME AND SPEND SUMMARY: Raffle ticket sales INCOME AND SPEND SUMMARY: 51060 - Community Fundraising INCOME AND SPEND SUMMARY: 50130 - Collecting Tins INCOME AND SPEND SUMMARY: 50110 - Gift Aid REGULAR GIVING SUMMARY: Monthly regular gift payments CC/PP/DD SINGLE GIFT SUMMARY: Single gift payments (donations only) NEW SUPPORTER INCOME: Single gift payments NEW SUPPORTER INCOME: Monthly regular gift income NEW SUPPORTER INCOME: New monthly regular gifts established (new supporters only) NEW SUPPORTER INCOME: Single gift income (new supporters only) EXISTING BASE: INCOME FROM EXISTING REGULAR INSTRUCTIONS EXISTING BASE: Monthly regular gift payments (CC/PP/DD) EXISTING BASE: Existing Non monthly DD income EXISTING BASE: Other regular giving income EXISTING BASE: Cumulative monthly regular gift payments EXISTING BASE: Monthly regular gift income from new regular donors EXISTING BASE: Single gift donations income - existing supporters EXISTING BASE: Single gift donations EXISTING BASE: Single gift income - high value gifts EXISTING BASE: Workplace giving income EXISTING BASE: Volunteer fundraising & other cash income (Community Events, FB fundraiser, Lilo) EXISTING BASE: Gift aid income ES: Monthly cc/pp/dd Income existing ES: Monthly cc/pp/dd Payments existing Single gift under 500 Total Regular giving Existing WORKPLACE GIVING
Windows: 7/Vista/XP/2K tcp tunneling nbtscan: Multiple-OS command line utility: NETBIOS nameserver scanner Linux: Simulating slow internet connection Linux/Ubuntu: Changing gateway address and flush/restart network interface Linux: SSH Tunnel to local machines. Linux: Get my external ip address Linux/Ubuntu: Enable/Disable ping response Linux: Cron: Reset neorouter Linux/Ubuntu: sniff tcp communications (binary output) Linux/Ubuntu: get a list of apps that are consuming bandwidth Linux/Ubuntu: iptables: block external outgoing ip address Linux/Ubuntu: How to setup pptp vpn server Linux: NGINX: Setup alias Linux: ssh without password Linux: NGINX: Proxy reverse Linux: one way remote sync using unison and ssh Linux: Open ssh port access using a combination (knocking technique) Linux: ssh login for only one user Linux/Ubuntu: Server: Configuring proxies Linux/Ubuntu: Share folder with Windows (via samba) Linux: Get all my local IP addresses (IPv4) Linux/Ubuntu: list ufw firewall rules without enabling it Linux/Ubuntu: Connect windows to shared folder as guest Linux/Ubuntu: Avoid connection from specific ip address without using iptables Linux: Telegram: Send telegram message to channel when a user logs in via ssh Linux/Ubuntu: nginx: Configuration to send request to another server by servername Modbus/ModPoll Linux/Neorouter: watchdog for neorouter connection Linux: libgdcm: Send dicom images to PACS using gdcmscu. Ubuntu: mount full rw cifs share Linux/Ubuntu: NGINX: Basic authentication access Linux/Ubuntu: Mosquitto: Enable mqtt service Linux: Detect if port is closed from command line Linux: Get internet content from command line using wget Mac OSX: Port redirection Windows 10: Port redirection/Tunneling Python: Telegram Bot API: Ask/Answer question PHP: Post an XML File using PHP without cURL Nginx: Call php without extension PHP: Send compressed data to be used on Javascript (i.e. Json data) PHP: Download file and detect user connection aborted Linux/Proxmox: Enable /dev/net/tun for hamachi PHP: using curl for get and post NGINX: Creating .htpasswd
Linux: Free unused cache memory Linux: mounting VirtualBox VDI disk using qemu nbtscan: Multiple-OS command line utility: NETBIOS nameserver scanner Linux: Saving one or more webpages to pdf file Linux: Creating iso image from disk using one line bash command Linux/PostgreSQL: Getting service uptime Linux: Simulating slow internet connection Linux/Ubuntu: Changing gateway address and flush/restart network interface Linux: SSH Tunnel to local machines. Linux: Fast find text in specific files using wild cards Linux: Merging two or more pdf files into one, by using ghostscript Linux: Cron command for deleting old files (older than n days) Linux: Get my external ip address Linux: Get the size of a folder Lazarus/Linux: Connect to SQLServer using ZeosLib component TZConnection Linux/Ubuntu: Get ordered list of all installed packages Linux/Ubuntu: Enable/Disable ping response Linux/DICOM: Very small DICOM Server Linux: Cron: Reset neorouter Linux/Oracle: Startup script Linux/Ubuntu: detect if CD/DVD disk is present and is writeable Linux/PHP: Let www-data run other commands (permission) Linux/DICOM: Create DICOM Video Linux: Apply same command for multiple files Linux/Ubuntu: sniff tcp communications (binary output) Linux: rsync: Backup remote folder into local folder Linux: Installing Conquest Dicom Server Linux: Get number of pages of PDF document via command line Linux: split file into pieces and join pieces again Linux/Ubuntu: iptables: block external incoming ip address Linux/Ubuntu: get a list of apps that are consuming bandwidth Linux/Ubuntu: iptables: block external outgoing ip address Linux/DICOM: dcmtk: Modify data in dicom folder Linux/Docker: save/load container using tgz file (tar.gz) Linux/Ubuntu: solve apt-get problem when proxy authentication is required Docker: Clean all Linux: ImageMagick: convert first page of pdf document to small jpeg preview Linux: Convert pdf to html PostgreSQL: backup/restore remote database with pg_dump Linux/Ubuntu: How to setup pptp vpn server Linux/Xubuntu: Solve HDMI disconnection caused by non-supported resolution Linux: List all users PostgreSQL: Log all queries Linux: NGINX: Setup alias Linux: ssh without password Linux: NGINX: Proxy reverse Linux: one way remote sync using unison and ssh Linux: Deleting files keeping only lastest n-files in specific folder Linux: Open ssh port access using a combination (knocking technique) Linux: Get Memory percentages. 
Linux: Can not sudo, unknown user root (solution) Linux: PDF: How to control pdf file size Linux: ssh login for only one user Linux: get pid of the process that launched another process Linux: PHP: Fix pm.max server reached max children Linux/Ubuntu: Server: Configuring proxies Linux: Compare two files displaying differences Sox: Managing audio recording and playing Linux: VirtualBox: Explore VDI disk without running virtualbox Linux: Get machine unique ID Linux: rsync only files earlier than N days Linux: Create Virtual Filesystem Linux/Ubuntu: Server: Add disks to lvm Python/Ubuntu: connect to sqlserver and oracle Linux/Ubuntu: Share folder with Windows (via samba) Linux: Get all my local IP addresses (IPv4) Linux/Ubuntu: list ufw firewall rules without enabling it Linux/Ubuntu: Connect windows to shared folder as guest Linux: delete tons of files from folder with one command Linux/Ubuntu: Avoid connection from specific ip address without using iptables Linux: Telegram: Send telegram message to channel when a user logs in via ssh Linux/Ubuntu: Create barcode from command line. Linux: PHP/Python: Install python dependencies for python scripts and run scripts from php Linux/Ubuntu: nginx: Configuration to send request to another server by servername Linux/Ubuntu: Fix Imagemagick "not authorized" exception Linux/Neorouter: watchdog for neorouter connection Linux: libgdcm: Send dicom images to PACS using gdcmscu. Ubuntu: mount full rw cifs share Linux/Ubuntu: NGINX: Basic authentication access PostgreSQL: Backup/Restore database from/to postgres docker. Linux/Ubuntu: Mosquitto: Enable mqtt service Linux/PHP: Connect php7.0 with sqlserver using pdo. Linux: Detect if port is closed from command line Linux: PHP: Run shell command without waiting for output (untested) Linux/Ubuntu: OS Installation date Linux/Ubuntu: Join pdf files from command line using pdftk Linux: Get internet content from command line using wget Linux/PHP/SQL: Ubuntu/Php/Sybase: Connecting sybase database with php7 Linux/Ubuntu: Solve LC_ALL file not found error Linux/Ubuntu: Run windows program with wine headless on server Linux/Docker: List ip addresses of all containers. Linux: sysmon script (memory usage) Linux: Firebird: Create admin user from command line Linux/Firebird: Backup/Restore database Git: Update folder linux/dfm-to-json/docker Linux/Oracle/Docker: 19c-ee Linux/Docker: SQL Server in docker Linux/PHP/Docker: Run docker command from php/web Linux/Docker: Oracle 12-ee docker Linux: Oracle: Backup using expdp Linux/PHP/NGINX: Increase timeout Lazarus/Fastreport: Install on Linux Linux/Ubuntu: fswatch: watch file changes in folder Linux/Docker: SQL Server 2019 in docker Linux/Docker: SQLServer: mssql-scripter: backup/restore Linux/Ubuntu: Enable/disable screensaver Linux: SQLServer: Detect MDF version Linux/Docker: Oracle install 19c (II) on docker FirebirdSQL: Restore/Backup Linux/NGINX: Redirect to another server/port by domain name Linux/Proxmox: Enable /dev/net/tun for hamachi Linux/Ubuntu: Create sudoer user Linux: PDF-url to text without downloading pdf file Docker: Reset logs
Linux/PostgreSQL: Getting service uptime Lazarus/Linux: Connect to SQLServer using ZeosLib component TZConnection Linux/Oracle: Startup script PostgreSQL: backup/restore remote database with pg_dump PostgreSQL: Log all queries PostgreSQL: Create DBLINK PostgreSQL: Database replication Python/Ubuntu: connect to sqlserver and oracle Oracle/SQL: Generate range of dates PostgreSQL: Convert records to json string and revert to records PostgreSQL: Extract function DDL PostgreSQL: Backup/Restore database from/to postgres docker. Linux/PHP: Connect php7.0 with sqlserver using pdo. PostgreSQL: Trigger template PostgreSQL: Count all records in spite of using offset/limit PHP/SQL: Reverse SQL Order in sentence PostgreSQL: Filter using ilike and string with spaces PostgreSQL: Create log table and trigger Postgres: Get string all matches between {} Linux/PHP/SQL: Ubuntu/Php/Sybase: Connecting sybase database with php7 PostgreSQL: Must know PostgreSQL: Debito, Credito, Saldo PostgreSQL: Count total rows when range is empty PostgreSQL: Extremely fast text search using tsvector PostgreSQL: Create a copy of DB in same host PHP/PostgreSQL: Event Listener Linux: Firebird: Create admin user from command line SQL: CTE Parent-child recursive Windows/Docker/Firebird: Backup remote database Linux/Firebird: Backup/Restore database PostgreSQL: Set search path (schemas) for user Firebird on Docker Firebird: Find holes in sequence (missing number) Linux/Oracle/Docker: 19c-ee Firebird: Create an Array of integers from String Oracle: Change Sys/System user password Oracle: Create/drop tablespace Oracle: Create User/Schema Linux: Oracle: Backup using expdp Linux/Docker: SQL Server 2019 in docker Oracle: Get slow queries Linux: SQLServer: Detect MDF version Linux/Docker: Oracle install 19c (II) on docker FirebirdSQL: Restore/Backup Firebird: Age Calculation using Ymd format Postgresql: Age calculation to Ymd Firebird: Create Range of dates, fraction in days, hours, minutes, months or years Firebird: DATE_TO_CHAR Function (like Oracle's TO_CHAR) MS SQLServer Backup Database MS SQLServer: Detect long duration SQL sentences Firebird: Fast Search Firebird: Code Generator FirebirdSQL 2.x: DATE_TO_CHAR Procedure (like Oracle's TO_CHAR) MSSQLServer: Sequences using Date based prefix format
CSS: Setup a perfect wallpaper using background image CSS: Ellipsis at end of long string in element Javascript: Is my browser on line? CSS: Crossbrowser blur effect Javascript: calculating similarity % between two strings Javascript: vanilla and crossbrowser event handler accepting arguments Javascript: convert native browser event to jQuery event. Linux: Convert pdf to html Javascript: Convert proper name to Capital/Title Case Javascript: Disable Back Button - (untested) HTML/CSS: Login Page HTML/CSS: Circular progress bar using css HTML/CSS: Data column organized as top-down-right structure HTML/CSS: Printing page in letter size Javascript: Get file name from fullpath. HTML/CSS: Header/Body/Footer layout using flex Windows: Chrome: Avoid prompt on custom url calling by changing registry Javascript: Get filename and path from full path filename Javascript: Clone array/object. CSS + FontAwesome: Battery charging animation CSS: spin element HTML/CSS: Switch with input CSS: Transparent event clicks having translucent front div element CSS: Blurry Glass Effect CSS: Grow element size when mouse hover Javascript/jQuery: Input with fixed prefix. Javascript: ProtocolCheck Javascript: Beep Telegram: Chat/html (personal) PHP: php-imagick: Thumbnail from thispersondoesnotexists Javascript: Get host info Javascript: Vanilla async get HTML/CSS: Rotating circle loader PHP: Post JSON object to API without curl Javascript: Post data to new window. CSS/Android Ripple Effect in Pure CSS OSRM/API: OpenStreet Maps Calculate distance between points OSM/OpenLayers: Place marker on coordinates using custom image. PHP: Send compressed data to be used on Javascript (i.e. Json data) PHP/JSignature: Base30 to PNG Javascript: Query Params to JSON Javascript/CSS: Ripple Effect - Vanilla Delphi/UniGUI: Disable load animation PHP: Send basic authentication credentials Delphi: Post JSON to API PHP: Receiving Bearer Authorization token from header Linux/NGINX: Redirect to another server/port by domain name Javascript: Async/Await Web Programming: Fastest way for appending DOM elements CSS: Animated progress bar CSS: Shake element CSS: Align elements like windows explorer icons layout style Javascript: Submit JSON object from form values Javascript: Fetch POST JSON Linux: PDF-url to text without downloading pdf file Javascript/in Browser: Jump to anchor Javascript/NodeJS: Uglify js files CSS: Two of the best readable fonts in web CSS: Dark/Theater background Javascript: Detect inactivity / idle time Svelte: Dynamic component rendering CSS: Responsive Grid NGINX: Creating .htpasswd Javascript: Wait until all rendered Javascript: Print PDF directly having URL Sveltekit: Create component dynamically Sveltekit: Import component dynamically CSS: Animation/Gelatine
substr(): takes two arguments, the starting index and the number of characters to slice.
substring(): takes two arguments, the starting index and the stopping index; the character at the stopping index is not included.
split(): splits a string at a specified place.
includes(): takes a substring argument and checks whether that substring exists in the string; returns a boolean, true if the substring exists and false otherwise.
replace(): takes as parameters the old substring and a new substring.
charAt(): takes an index and returns the character at that index.
indexOf(): takes a substring; if the substring exists in the string it returns the first position of the substring, otherwise it returns -1.
lastIndexOf(): takes a substring; if the substring exists it returns the last position of the substring, otherwise it returns -1.
concat(): takes many substrings and joins them.
startsWith(): takes a substring as an argument and checks whether the string starts with that substring; returns a boolean (true or false).
endsWith(): takes a substring as an argument and checks whether the string ends with that substring; returns a boolean (true or false).
search(): takes a substring or a regular-expression pattern as an argument and returns the index of the first match.
match(): takes a substring or regular-expression pattern as an argument and returns an array if there is a match, otherwise null. A regular-expression pattern starts with a / sign and ends with a / sign.
repeat(): takes a number as an argument and returns the string repeated that many times.
Concatenating arrays using concat.
indexOf (arrays): checks whether an item exists in an array; if it exists it returns the index, otherwise it returns -1.
lastIndexOf (arrays): gives the position of the last occurrence of an item in the array; if it exists it returns the index, otherwise it returns -1.
includes (arrays): checks whether an item exists in an array; returns true if it exists and false otherwise.
Array.isArray(): checks whether the value is an array.
toString(): converts an array to a string.
join(): joins the elements of the array into a string; the argument passed to join is placed between the items. By default it joins with a comma, but a different separator string can be passed.
slice(): cuts out multiple items in a range; takes two parameters, the starting and ending positions, and does not include the ending position.
splice(): takes three parameters: the starting position, the number of items to remove, and the items to add.
push(): adds an item to the end of an existing array.
pop(): removes the last item.
shift(): removes one element from the beginning of the array.
unshift(): adds an element at the beginning of the array.
for...of loop
Unlimited number of parameters in a regular function
Unlimited number of parameters in an arrow function
Expression functions are anonymous functions: we create a function without a name and assign it to a variable; to return a value from the function we call the variable.
Self-invoking functions are anonymous functions that do not need to be called to return a value.
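A quick illustration of several of these string and array methods in plain JavaScript (all values are illustrative):

// String methods
const s = 'JavaScript';
console.log(s.substr(4, 6));             // 'Script' — start index 4, take 6 characters
console.log(s.substring(4, 10));         // 'Script' — stop index 10 is excluded
console.log('30 days of JS'.split(' ')); // ['30', 'days', 'of', 'JS']
console.log(s.includes('Script'));       // true
console.log(s.replace('Java', 'Type'));  // 'TypeScript'
console.log(s.charAt(0));                // 'J'
console.log(s.indexOf('a'));             // 1
console.log(s.lastIndexOf('a'));         // 3
console.log(s.startsWith('Java'));       // true
console.log(s.endsWith('Script'));       // true
console.log(s.search(/script/i));        // 4 — index of the first pattern match
console.log(s.match(/a/g));              // ['a', 'a'] — all matches with the g flag
console.log('ha'.repeat(3));             // 'hahaha'

// Array methods
const arr = [1, 2, 3];
console.log(arr.indexOf(2));             // 1
console.log(arr.includes(4));            // false
console.log(Array.isArray(arr));         // true
console.log(arr.join('-'));              // '1-2-3'
console.log(arr.slice(0, 2));            // [1, 2] — ending position excluded
const spliced = [1, 2, 3, 4];
spliced.splice(1, 2, 9);                 // remove 2 items at index 1, add 9 → [1, 9, 4]
arr.push(4);                             // [1, 2, 3, 4]
arr.pop();                               // [1, 2, 3]
arr.unshift(0);                          // [0, 1, 2, 3]
arr.shift();                             // [1, 2, 3]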
Arrow Function
Object.assign(): copies an object without modifying the original object.
Object.keys(): gets the keys or properties of an object as an array.
Object.values(): gets the values of an object as an array.
Object.entries(): gets the keys and values as an array of pairs.
hasOwnProperty(): checks whether a specific key or property exists in an object.
forEach(): iterates over array elements; we use forEach only with arrays. It takes a callback function with the element, the index, and the array itself as parameters; the index and the array are optional.
map(): iterates over array elements and modifies them. It takes a callback function with the element, index, and array parameters and returns a new array.
filter(): filters out the items that fulfill the filtering conditions and returns a new array.
reduce(): takes a callback function. The callback takes an accumulator, the current value, and an optional initial value as parameters and returns a single value. It is good practice to define an initial value for the accumulator; if you do not specify it, the accumulator defaults to the first array value, and if the array is empty JavaScript will throw an error.
every(): checks whether all the elements are similar in one aspect; returns a boolean.
find(): returns the first element that satisfies the condition.
findIndex(): returns the position of the first element that satisfies the condition.
some(): checks whether some of the elements are similar in one aspect; returns a boolean.
sort(): arranges the array elements in either ascending or descending order. By default, sort() sorts values as strings; this works well for string items but gives wrong results for numbers, and sort modifies the original array. Use a compare callback inside sort that returns a negative number, zero, or a positive number; whenever we sort objects in an array, we compare by an object key.
Destructuring arrays: to skip one of the values in the array, use an additional comma; the comma omits the value at that specific index.
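A short sketch of the object helpers, iteration methods, sort with a compare callback, and array destructuring described above (plain JavaScript; the sample data is made up):

// Object helpers
const user = { name: 'Jane', age: 30 };
const copy = Object.assign({}, user);        // shallow copy; user itself is untouched
console.log(Object.keys(user));              // ['name', 'age']
console.log(Object.values(user));            // ['Jane', 30]
console.log(Object.entries(user));           // [['name', 'Jane'], ['age', 30]]
console.log(user.hasOwnProperty('age'));     // true

// Iteration methods (callbacks written as arrow functions)
const nums = [1, 2, 3, 4, 5];
nums.forEach((n, i) => console.log(i, n));           // index and array args are optional
const doubled = nums.map(n => n * 2);                // [2, 4, 6, 8, 10]
const evens = nums.filter(n => n % 2 === 0);         // [2, 4]
const sum = nums.reduce((acc, cur) => acc + cur, 0); // 15 — 0 is the explicit initial value
console.log(nums.every(n => n > 0));                 // true
console.log(nums.find(n => n > 3));                  // 4 — first match
console.log(nums.findIndex(n => n > 3));             // 3 — position of the first match
console.log(nums.some(n => n > 4));                  // true

// sort() compares as strings by default, so numbers need a compare callback;
// note that sort() mutates the array it is called on.
console.log([10, 2, 33].sort((a, b) => a - b));      // [2, 10, 33]

// Destructuring with a skipped element: the extra comma omits index 1.
const [first, , third] = ['a', 'b', 'c'];
console.log(first, third);                           // 'a' 'c'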
openssh
GitLab.com / GitLab Infrastructure Team / next.gitlab.com · GitLab
Use Apple touch icon | webhint documentation
How to get GPU Rasterization
Migrating to Manifest V3 - Chrome Developers
Manifest - Web Accessible Resources - Chrome Developers
chrome.webRequest - Chrome Developers
Cross-site scripting – Wikipedia
Export-ModuleMember (Microsoft.PowerShell.Core) - PowerShell | Microsoft Learn
Sourcegraph GraphQL API - Sourcegraph docs
Winge19/vscode-abl: An extension for VS Code which provides support for the Progress OpenEdge ABL language. https://marketplace.visualstudio.com/items?itemName=chriscamicas.openedge-abl
New File
Cache · Actions · GitHub Marketplace
Winge19/cache: Cache dependencies and build outputs in GitHub Actions
history.state during a bfcache traversal · web-platform-tests/wpt@7d60342
Configure CI/CD pipeline with YAML file - MSIX | Microsoft Learn
ASP.NET Core 6.0 Blazor Server APP and Working with MySQL DB - CodeProject
json
Process Herpaderping – Windows Defender Evasion | Pentest Laboratories
0x7c13/Notepads: A modern, lightweight text editor with a minimalist design.
Share data - UWP applications | Microsoft Learn
What is ie_to_edge_bho_64.dll?
Message passing - Chrome Developers
parsing - Parse (split) a string in C++ using string delimiter (standard C++) - Stack Overflow
arrays - Convert a hexadecimal to a float and viceversa in C - Stack Overflow
Why does C++ require breaks in switch statements? - Stack Overflow
coding style - Switch statement fall-through...should it be allowed? - Stack Overflow
performance - Convert a hexadecimal string to an integer efficiently in C? - Stack Overflow
C/C++ struct pointer initialization - zlQ_'s blog - CSDN
C++ returning a const reference to a local variable - SegmentFault
C++ removing const - 最后冰吻free's blog - CSDN
Trailing return type with decltype - 最后冰吻free's blog - CSDN
Variadic template functions - 最后冰吻free's blog - CSDN
Variadic expressions - 最后冰吻free's blog - CSDN
Variadic subscripts - 最后冰吻free's blog - CSDN
Variadic base classes - 最后冰吻free's blog - CSDN
typename usage - 最后冰吻free's blog - CSDN
Zero initialization - 最后冰吻free's blog - CSDN
Using this-> - 最后冰吻free's blog - CSDN
Variable templates - 最后冰吻free's blog - CSDN
Using enable_if - 最后冰吻free's blog - CSDN
Perfect forwarding functions - 最后冰吻free's blog - CSDN
C++ functions returning the address of, or a reference to, a local variable - 最后冰吻free's blog - CSDN
Initializing a struct array at definition
cJSON data structures
Difference between C++ union and struct - 嗨客网 (HaiCoder)
Queue implementation in C - 51CTO blog
Stack implementation in C - 51CTO blog
[Professional skills] How to write elegant C code? - Tencent Cloud Developer Community
C++ short - the short integer type and its value range - 嗨客网 (HaiCoder)
C++ long long - the long long integer type and its value range - 嗨客网 (HaiCoder)
C++ char - the character type and its value range - 嗨客网 (HaiCoder)
C++ enum - how to define enum variables and what enums are for - 嗨客网 (HaiCoder)
C++ ternary operator - what it is and how to use it - 嗨客网 (HaiCoder)
C++ printing the multiplication table - 嗨客网 (HaiCoder)
C++ printing the multiplication table with a while loop - 嗨客网 (HaiCoder)
C++ printing the multiplication table with a do-while loop - 嗨客网 (HaiCoder)
(Repost) The difference between sizeof() and _countof() - 博客园 (cnblogs)
CRC check codes explained in detail (with code) - 面包板社区
C program to convert Hexadecimal to Decimal - Aticleworld
Conversion of Hex decimal to integer value using C language
vector<char> is too slow, so build your own CharVector - 飞鸟真人's blog - CSDN
Getting monitor-related information on Windows in C++ - 艺文笔记
Q64 Snapshot Array - LeetCode Q63 Reorganize String - LeetCode Q62 Tricky Sorting Cost | Practice | GeeksforGeeks Q62 Minimum Cost To Connect Sticks Q60 PepCoding | Longest Substring With At Most Two Distinct Characters Q59 PepCoding | Line Reflection Q58 Pairs of Non Coinciding Points | Practice | GeeksforGeeks Q57 Avoid Flood in The City - LeetCode Q56 Random Pick with Blacklist - LeetCode Q55 Insert Delete GetRandom O(1) - Duplicates allowed - LeetCode Q54 Insert Delete GetRandom O(1) - LeetCode Q53 The Skyline Problem - LeetCode Q52 Encode and Decode TinyURL - LeetCode Q51 Maximum Frequency Stack - LeetCode Q50 Brick Wall - LeetCode Q49 X of a Kind in a Deck of Cards - LeetCode Q48 First Unique Character in a String - LeetCode Q47 Subdomain Visit Count - LeetCode Q46 Powerful Integers - LeetCode Q45 4Sum II - LeetCode Q44 PepCoding | Quadruplet Sum QFind K Pairs with Smallest Sums - LeetCode Q43 PepCoding | Pairs With Given Sum In Two Sorted Matrices Q42 Completing tasks | Practice | GeeksforGeeks Q41 Degree of an Array - LeetCode Q-40 Can Make Arithmetic Progression From Sequence - LeetCode Q39 PepCoding | Double Pair Array Q38 Rabbits in Forest - LeetCode Q-37* Fraction to Recurring Decimal - LeetCode Q36 PepCoding | Pairs With Equal Sum Q35 PepCoding | Count Of Subarrays With Equal Number Of 0s 1s And 2s Q34 PepCoding | Longest Subarray With Equal Number Of 0s 1s And 2s Q-34 PepCoding | Pairs With Equal Sum Q33 PepCoding | Count Of Subarrays With Equal Number Of Zeroes And Ones Q32 Contiguous Array - LeetCode Q31 Subarray Sums Divisible by K - LeetCode Q30 PepCoding | Longest Subarray With Sum Divisible By K Q29 Subarray Sum Equals K - LeetCode Q27 Word Pattern - LeetCode Q-26 Isomorphic Strings - LeetCode Q-25 PepCoding | Group Shifted String Q24 Group Anagrams - LeetCode Q23 Valid Anagram - LeetCode Q22 PepCoding | Find Anagram Mappings Q21 PepCoding | K Anagrams Q20 Find All Anagrams in a String - LeetCode Q-19 Binary String With Substrings Representing 1 To N - LeetCode Q-18 PepCoding | Count Of Substrings Having At Most K Unique Characters Q-17 PepCoding | Longest Substring With At Most K Unique Characters Q-16 PepCoding | Maximum Consecutive Ones - 2 Q15 PepCoding | Maximum Consecutive Ones - 1 Q-14 PepCoding | Equivalent Subarrays Q13 PepCoding | Count Of Substrings With Exactly K Unique Characters Q-12 PepCoding | Longest Substring With Exactly K Unique Characters Q-11 PepCoding | Count Of Substrings Having All Unique Characters Q-10 PepCoding | Longest Substring With Non Repeating Characters Q-9 PepCoding | Smallest Substring Of A String Containing All Unique Characters Of Itself Q8 PepCoding | Smallest Substring Of A String Containing All Characters Of Another String | leetcode76 Q-7 PepCoding | Largest Subarray With Contiguous Elements Q-6 PepCoding | Count Of All Subarrays With Zero Sum Q5 PepCoding | Largest Subarray With Zero Sum Q4 PepCoding | Count Distinct Elements In Every Window Of Size K Q-3 PepCoding | Check If An Array Can Be Divided Into Pairs Whose Sum Is Divisible By K Q2 PepCoding | Find Itinerary From Tickets Q1 PepCoding | Number Of Employees Under Every Manager 2653. Sliding Subarray Beauty
DSA 1.8 : Pointers DSA 1.10 : Reference DSA 1.12 : Pointer to structure DSA 1.15 : Parameter passing method - by value DSA 1.15 : Parameter passing method - by address DSA 1.15 : Parameter passing method - by reference DSA 1.18 : Returning an array from a function DSA 1.20 : Pointer to structure DSA 1.23 : Monolithic program DSA 1.24 : Procedural or modular programming DSA 1.25 : Procedural programming using structures and functions DSA 1.26 : Object-oriented programming approach DSA 1.30 : Template classes DSA 5.52 : Recursion using static variable DSA 5.56 : Tree recursion DSA 5.58 : Indirect recursion DSA 5.56 : Nested recursion DSA 5.68 : Taylor series using recursion DSA 5.70 : Taylor series using Horner's rule DSA 5.73 : Fibonacci using iteration DSA 5.73 : Fibonacci using recursion DSA 5.73 : Fibonacci using memoization and recursion DSA 5.75 : nCr using recursion DSA 5.76 : Tower of Hanoi DSA 7 : Array ADT DSA 7.99 : Delete function in an array DSA 7.102 : Linear Search DSA 146 : C++ class for Diagonal matrix DSA 150 : Lower Triangular matrix Diagonal matrix full code Creation of sparse matrix 175. Display for linked list 176. Recursive display for linked list 178 : Counting nodes in a linked list 179 : Sum of all elements in a linked list 181 : Find the largest element in the linked list 183 : Searching for a value in a linked list 184 : Improve searching in a linked list 186 : Inserting a new node in a linked list (logic) 186 : Insertion in a linked list (function) 189 : Creating a linked list by inserting at the end 191 : Inserting in a sorted linked list 192 : Deleting a node from a linked list 195 : Check if the linked list is sorted or not 197 : Remove duplicates from a sorted linked list
Q66 Distinct Echo Substrings - LeetCode 1316 (Rabin-Karp rolling hash -> O(n^2)) Q66 Distinct Echo Substrings - LeetCode 1316 (O(n^3) solution) Q65 Interleaving String - LeetCode 97 Q64 Frog Jump - LeetCode 403 Q63 Champagne Tower - LeetCode 799 Q62 Super Ugly Number - LeetCode 313 Q61 Ugly Number 2 - LeetCode 264 Q60 Minimum Insertion Steps to Make a String Palindrome - LeetCode 1312 Q59 Temple Offerings | Practice | GeeksforGeeks Q58 Word Break - LeetCode 139 Q57 Arithmetic Slices II - Subsequence - LeetCode 446 Q56 Arithmetic Slices - LeetCode 413 Q55 Max sum of M non-overlapping subarrays of size K - GeeksforGeeks (tabulation) Q55 Max sum of M non-overlapping subarrays of size K - GeeksforGeeks (memoization) Q54 Maximum Sum of 3 Non-Overlapping Subarrays - LeetCode 689 Q53 Maximum Sum of Two Non-Overlapping Subarrays - LeetCode 1031 Q52 Maximum difference of zeros and ones in binary string | Practice | GeeksforGeeks Q51 Mobile numeric keypad | Practice | GeeksforGeeks Q50 Distinct Transformation - LeetCode Playground (tabulation approach) Q50 Distinct Transformation - LeetCode Playground (recursion + memoization approach) Q49 Highway BillBoards - Coding Ninjas Codestudio (approach 2) Q49 Highway BillBoards - Coding Ninjas Codestudio (approach 1, LIS) Q48 Knight Probability in Chessboard - LeetCode 688 Q47 Cherry Pickup - LeetCode 741 (recursion approach) Q46 Super Egg Drop - LeetCode 887 Q45 Predict the Winner - LeetCode 486 Q45 Optimal Strategy For A Game | Practice | GeeksforGeeks Q44 Largest Sum Subarray of Size at least K | Practice | GeeksforGeeks Q42 Maximum Subarray - LeetCode 53 Q41 Minimum Cost To Make Two Strings Identical | Practice | GeeksforGeeks Q40 Minimum ASCII Delete Sum for Two Strings - LeetCode 712 Q39 Scramble String - LeetCode 87 Q38 Edit Distance - LeetCode 72 Q37 Regular Expression Matching - LeetCode 10 Q36 Wildcard Matching - LeetCode Q35 Longest Repeating Subsequence | Practice | GeeksforGeeks Q34 Longest Common Substring | Practice | GeeksforGeeks Q33 Count Different Palindromic Subsequences - LeetCode 730 Q32 Number of distinct subsequences | Practice | GeeksforGeeks Q31 Longest Palindromic Substring - LeetCode Q30 Count Palindromic Subsequences | Practice | GeeksforGeeks Q29 Longest Palindromic Subsequence - LeetCode 516 Q28 Longest Common Subsequence - LeetCode 1143 Q27 Minimum Score Triangulation of Polygon - LeetCode 1039 Q26 Optimal binary search tree | Practice | GeeksforGeeks Q24 Matrix Chain Multiplication | Practice | GeeksforGeeks Q23 Palindrome Partitioning II - LeetCode 132 Q23 Palindrome Partitioning II - LeetCode 132 (n^3 approach) Q22 Palindromic Substrings - LeetCode 647 Q21 Rod Cutting | Practice | GeeksforGeeks Q20 Minimum Score Triangulation of Polygon - LeetCode 1039 Q19 Intersecting Chords in a Circle | InterviewBit Q18 Generate Parentheses - LeetCode 22 Q17 PepCoding | Count Of Valleys And Mountains Q16 Unique Binary Search Trees - LeetCode 96 Q15 Catalan Number Minimum Score of a Path Between Two Cities - 2492 Q14 Perfect Squares - LeetCode Q13 Russian Doll Envelopes - LeetCode (LIS in NlogN - accepted solution) Q13 Russian Doll Envelopes - LeetCode 354 solution 1 (LIS in O(n^2)) Q12 PepCoding | Maximum Non-overlapping Bridges Q11 Longest Bitonic subsequence | Practice | GeeksforGeeks Q10 Maximum sum increasing subsequence | Practice | GeeksforGeeks Q9 PepCoding | Print All Longest Increasing Subsequences Q8 Longest Increasing Subsequence - LeetCode 300 Q7 2 Keys Keyboard - LeetCode 650 Q-6 PepCoding | Print All Results In 0-1 Knapsack Q5 PepCoding | Print All Paths With Target Sum Subset Q4 PepCoding | Print All Paths With Maximum Gold Q3 PepCoding | Print All Paths With Minimum Cost Q2 Jump Game II - LeetCode 45 Q1 Maximal Square - LeetCode 221 Q67 Longest Increasing Subsequence - LeetCode 300 (LIS O(nlogn) solution) Q43 K-Concatenation Maximum Sum - LeetCode 1191 Q13 Russian Doll Envelopes - LeetCode 354 (LIS -> O(nlogn) solution) Q25 Burst Balloons - LeetCode 312
Minimum Score of a Path Between Two Cities - 2492 Number of Operations to Make Network Connected - 1319 Q42 Mother Vertex | InterviewBit (Kosaraju) Q41 Count Strongly Connected Components (Kosaraju's Algorithm) Q40 LeetCode 734. Sentence Similarity Q39 Satisfiability of Equality Equations - LeetCode 990 Q38 Redundant Connection II - LeetCode 685 Q37 Redundant Connection - LeetCode 684 Q36 Minimize Malware Spread II - LeetCode 928 Q35 Minimize Malware Spread - LeetCode 924 Q34 Accounts Merge - LeetCode 721 Q33 Minimize Hamming Distance After Swap Operations - LeetCode 1722 Q32 Rank Transform of a Matrix - LeetCode 1632 Q32 Reconstruct Itinerary - LeetCode 332 (Eulerian path && Eulerian cycle) Q31 Regions Cut By Slashes - LeetCode 959 Q30 Minimum Spanning Tree | Practice | GeeksforGeeks (Kruskal's algo) Q29 Number of Islands II - Coding Ninjas (DSU) Q28 Remove Max Number of Edges to Keep Graph Fully Traversable - LeetCode 1579 Q27 Checking Existence of Edge Length Limited Paths - LeetCode 1697 Q26 Network Delay Time - LeetCode 743 Q25 Cheapest Flights Within K Stops - LeetCode 787 Q24 Distance from the Source (Bellman-Ford Algorithm) | Practice | GeeksforGeeks Q23 Connecting Cities With Minimum Cost - Coding Ninjas Q22 Swim in Rising Water - LeetCode 778 Q21 Water Supply In A Village - Coding Ninjas Q20 Minimum Spanning Tree | Practice | GeeksforGeeks (Prim's algo) Q19 Alien Dictionary | Practice | GeeksforGeeks Q18 Course Schedule - LeetCode 207 (Kahn's algorithm) Q17 Minimum edges (0-1 BFS) | Practice | GeeksforGeeks Q17 Minimum edges (0-1 BFS) | Practice | GeeksforGeeks (using Dijkstra) Q16 Sliding Puzzle - LeetCode 773 Q15 Bus Routes - LeetCode 815 Q14 Shortest Bridge - LeetCode 934 (without pair class) Q14 Shortest Bridge - LeetCode 934 (with pair class) Q13 As Far from Land as Possible - LeetCode 1162 Q12 Rotting Oranges - LeetCode 994 Q11 01 Matrix - LeetCode 542 Q10 Number of Distinct Islands | Practice | GeeksforGeeks Q9 Number of Enclaves - LeetCode 1020 Q8 Coloring A Border - LeetCode 1034 Q7 Unique Paths II - 63 Q6 Unique Paths III - LeetCode 980 Q5 Number of Provinces - LeetCode 547 Q4 Number of Islands - LeetCode 200 Q3 Number of Operations to Make Network Connected - LeetCode 1319 Q2 All Paths From Source to Target - LeetCode 797 Q1 Find if Path Exists in Graph - LeetCode 1971 Q43 Is cycle present in DAG? GFG (Topological Sort) Q43 Is cycle present in DAG? GFG (using Kahn's algo) Q44 Bellman-Ford | GFG (smaller code) Q45 Minimum Cost to Make at Least One Valid Path in a Grid - LeetCode 1368
chapter2-code-1 chapter2-code-2 chapter2-code-3 chapter3-code-1 chapter4-code-1 chapter4-code-2 chapter4-code-3 chapter4-code-4 chapter4-code-5 chapter4-code-6 chapter4-code-7 chapter4-code-8 chapter4-code-9 chapter4-code-10 chapter5-code-1 chapter5-code-2 chapter5-code-3 chapter6-code-1 chapter6-code-2 chapter6-code-3 chapter7-code-1 chapter7-code-2 chapter7-code-3 chapter7-code-4 chapter7-code-5 chapter7-code-6 chapter7-code-7 chapter7-code-8 chapter7-code-9 chapter7-code-10 chapter7-code-11 chapter7-code-12 chapter7-code-13 chapter8-code-1 chapter8-code-2 chapter8-code-3 chapter8-code-4 chapter9-code-1 chapter9-code-2 chapter9-code-3 chapter9-code-4 chapter10-code-1 chapter10-code-2 chapter10-code-3 chapter10-code-4 chapter10-code-5 chapter11-code-1 chapter11-code2 chapter11-code-3 chapter11-code-4 chapter11-code-5 chapter11-code-6 chapter12-code-1 chapter12-code-2 chapter13-code-1 chapter13-code-2 chapter13-code-3 chapter13-code-4 chapter13-code-5 chapter14-code-1 chapter14-code-2 chapter14-code-3 chapter14-code-4 chapter15-code-1 chapter15-code-2 chapter16-code-1 chapter16-code-2 chapter16-code-3 chapter16-code-4 chapter16-code-5 chapter16-code-6 chapter16-code-7 chapter16-code-8 chapter16-code-9 chapter16-code-10 chapter17-code-1 chapter17-code-2 chapter18-code-1 chapter18-code-2 chapter18-code-3 chapter18-code-4 chapter18-code-5 chapter19-code-1 chapter19-code-2 chapter19-code-3 chapter20-code-1 chapter21-code-1 chapter21-code-2 chapter21-code-3 chapter21-code-4 chapter21-code-5 chapter22-code-1 chapter22-code-2 chapter22-code-3 chapter23-code-1 chapter23-code-2 chapter23-code-3 chapter23-code-4 chapter23-code-5 chapter24-code-1 chapter24-code-2 chapter24-code-3 chapter25-code-1 chapter25-code-2 chapter25-code-3 chapter25-code-4 chapter25-code-5 chapter25-code-6 chapter25-code-7 chapter25-code-8 chapter25-code-9 chapter26-code-1 chapter26-code-2 chapter27-code-1 chapter27-code-2 chapter28-code-1 chapter28-code-2 chapter29-code-1 chapter11-code-2 chapter1-code-1
Display Custom Post By Custom Taxonomy
Hide Section If Data Empty
Custom Post URL Change
Social Share Link Generator / meta tag OG
Set Custom post type template
Custom post by current category page
List all Categories
Loop Reverse
Reverse Array in custom repeat fields
Display custom taxonomy name in post
WP Rocket Delay JavaScript execution Files
Post Filter By Taxonomy URL
Check and echo active
Exclude current post from single page
Post Sort by Custom Taxonomy in Admin
Ajax select change taxonomy and post
WordPress Ajax select
Get Category Name (wp get object())
List Display Custom Taxonomy Order Date
WooCommerce Disable notification for plugin update
Custom Ajax Form for WP
Order Expiry WooCommerce
Post By Post Taxonomy
Hide Section If Data Empty
Product SKU Search with default search
Hide WP Version
Disable REST API
Get Post Data with post or page id (WCK CPT Select field)
Custom API For Page with custom fields
Disable WordPress Update Notifications
Create Users Automatically In WordPress / create admin login
Create and Display ACF Taxonomy Data
Custom Post by Custom Taxonomy filter
jQuery Multiple Select Filter
WordPress Premium Plugins Repo
WP Premium Plugins Repo Part 2
Custom Taxonomy Template
Post Order by ASC and DESC on published date
Widget Register and Display
Display category and subcategory with Post
CF7 Successful Submit Redirect to another page
Load Fancybox on page load once a month with cookies
SVG Support in WP in functions.php
List Custom Taxonomy
WordPress Admin Login Page Layout Change
Change Posts To Blogs (Post Type)
jQuery Load More
Display Custom Post under custom taxonomy
Search Form with result page
Starter Theme style.css file
ACF option page
Call Another Custom Header
Zoho Integration for CF7 Forms
www redirection
404 WP Template
Coming Soon HTML Template
Display Custom Posts by Category With Ajax filter
Disable WordPress update notification / WP update notification
Custom breadcrumbs
Disable admin toolbar except admin
Disable Comment module on whole website
Custom Data / Product / Card Filter by page load (by get and isset function)
Mastek Resource Page for filter data
List All pages on admin dashboard
Custom Taxonomy Name on Single Page
Custom taxonomy list with child
WordPress Security and admin page Code
Js Load on Page Based
Prints I love python
Opens a comic in webbrowser
YouTube video downloader GUI in Python
A Simple Text Editor In Python
CTk Login Form GUI in Python
A Word Guessing Game In Python
A GUI Password Manager With Database Connectivity in Python
Word Counter In Python
An Adventure Game GUI In Python
A Basic Browser In Python Using PyQt5 (This doesn't store your browsing history!)
Speech command Bot (Doraemon) In Python
To-Do List Interface (With Lottie) In Python
To-Do List Interface (With Lottie) In Python: HTML Code
Rock Paper Scissors GUI In Python
Rock Paper Scissors GUI In Python: The GUI code
Your Motivator With Python And Unsplash
Dice Simulator Python CTk GUI
A PNG to WEBP Converter With Python
Mini Calculator GUI with Python
A Number-Guessing Game In Python
A Random Wikipedia Article Generator GUI In Python
Your Own Gallery In Python
A Tic Tac Toe Game In Python
AI Weather Predictor That Doesn't Predict The Weather
A Real-Time Spelling Checker In Python
How to make a simple button in customtkinter
Button like one on my website in python CTk
How to make a simple checkbox in customtkinter python
Hobby Selector Using Python CustomTkinter
A sample comment in Python
Python hub challenge 1
Single line comment in Python
Multi line comments in Python
Inline comments in Python
Demonstrating Integers in Python
Demonstrating Boolean numbers in Python
Demonstrating Float numbers in Python
Demonstrating Complex numbers in Python
Challenge 2 solution (numbers in python)
Implicit type conversion in python
Explicit type conversion python part 1
Explicit type conversion in python part 2
String formatting in Python
String Concatenation in Python
String formatting using print function Python
Format function in Python
% operator string formatting in Python
F-String in Python
How to utilize variables in strings (solution)
String indexing python
String Slicing question
String Slicing question 2
String Slicing question 3
String Slicing question 4
String Slicing question 5
String Slicing Answer 1
String Slicing Answer 2
String Slicing Answer 3
String Slicing Answer 4
String Slicing Answer 5
String part 2 challenge solution
Madlib in Python (solution)
Madlib in Python (solution) output
Madlib in Python (solution) 2
Madlib in Python (solution) output 2
Dictionary challenge solution
Dictionary challenge solution output
Single value tuple Python
Single value tuple Python output
Concatenate tuples in Python
Copy a tuple in Python
count() method tuple python
index() method tuple python
Tuple challenge
Tuple challenge output
Creating a set in Python
Fun world Dictionary challenge
Fun world Dictionary Output
If else statement
Elif ladder
Multiple if statements Python
Nested if else statements Python
If else comprehension
Simple calculator in python (if else challenge)
Simple calculator in python (if else challenge) Output
Iterating through list in Python
Iterating through list in Python using For loop
Break statement in Python
Continue statement in Python
Pass statement in Python
Else with for loop in Python
9. Write a C program that prints the English alphabet (a-z).
10. Write a C program that prints both the max and min value in an array.
1. Write a C program that takes a character from the user and prints its corresponding ASCII value.
2. Write a C program which reads an integer and checks whether the number is divisible by both 5 and 6, or neither of them, or just one of them.
4. Write a C program that checks if a number is prime or not.
5. Write a C program that stores an integer code in a variable called ‘code’. It then prompts the user to enter an integer from the standard input, which we will compare with our original ‘code’. If it matches the code, print ‘Password cracked’; otherwise, prompt the user to try again. For example, int code = 23421; //23421 is the code here.
6. Based on question 5, modify the program to keep track of the number of attempted password guesses. It then prints: “The password was cracked after ‘n’ amount of tries”, n being the tracking variable.
Bonus Questions:
1. What is the difference between a while loop and a do-while loop?
2. Is the size of an array mutable after declaration?
3. What is the purpose of passing the address of a variable in scanf?
4. What are the various possible return values for scanf?
5. Why do we declare the main method as an int function with return 0 at the end?
7. Write a C program that prompts the user to initialize an array by providing its length and its values. It then asks the user to enter the ‘focal number’. Your task is to print all the values in the array that are greater than the ‘focal number’.
8. Write a C program that prompts the user to enter 10 positive numbers and calculates their sum.
Q12 Maximum Path Sum in the matrix - Coding Ninjas (Striver DP) Q- Recursion | Memoization | Tabulation in 2d dp READ ME Q18 Partitions with Given Difference | Practice | GeeksforGeeks Q17 Perfect Sum Problem | Practice | GeeksforGeeks Q16 Minimum sum partition | Practice | GeeksforGeeks Q52 Boolean Evaluation - Coding Ninjas Q-49 Matrix Chain Multiplication - Coding Ninjas Q24 Rod Cutting | Practice | GeeksforGeeks Q23 Knapsack with Duplicate Items | Practice | GeeksforGeeks Q-19 0 - 1 Knapsack Problem | Practice | GeeksforGeeks Q14 Subset Sum Equal To K - Coding Ninjas Q14 Cherry Pickup - Coding Ninjas Q-8 Ninja’s Training - Coding Ninjas Q-6 Maximum sum of non-adjacent elements - Coding Ninjas Q-3 Frog Jump - Coding Ninjas Q55 Count Square Submatrices with All Ones - LeetCode 1277 Q55 Maximal Rectangle - LeetCode 85 Q54 Partition Array for Maximum Sum - LeetCode 1043 Q53 Palindrome Partitioning II - LeetCode 132 Q51 Burst Balloons - LeetCode 312 Q50 Minimum Cost to Cut a Stick - LeetCode 1547 Q47 Number of Longest Increasing Subsequence - LeetCode 673 Q45 Longest String Chain - LeetCode 1048 Q44 Largest Divisible Subset - LeetCode 368 Q43 Longest Increasing Subsequence - LeetCode 300 Q34 Wildcard Matching - LeetCode 44 Q33 Edit Distance - LeetCode 72 Q-32 Distinct Subsequences - LeetCode 115 Q25 Longest Common Subsequence - LeetCode 1143 Q22 Coin Change II - LeetCode 518 Q-20 Coin Change - LeetCode 322 Q-15 Target Sum - LeetCode 494 Q-12 Triangle - LeetCode 120 Q11 Minimum Path Sum - LeetCode 64 Q-10 Unique Paths II - LeetCode Q-9 Unique Paths - LeetCode 62 Q-6 House Robber II - LeetCode 213 Q-5 House Robber - LeetCode 198 Q-1 Climbing Stairs - LeetCode 70
8. Write a C program function that uses pointers to swap two numbers.
6. Write a C program that prints the English alphabet using pointers.
10. Write a C program void function that uses pointers to perform a decompose operation (print only in the main function). Decompose means breaking up a decimal number into an integer part and a double part and storing them in different variables.
1. Write a C program function called ‘changeEven’ that changes all the even numbers within an array to 0, using pointer arithmetic.
2. Write a C program function called ‘changePrime’ that changes all the prime numbers within an array to 0. Use another function within ‘changePrime’, called ‘checkPrime’, to check and return whether the number is prime or not, then update the value in ‘changePrime’ accordingly. Don’t use pointer arithmetic.
3. Write a C program that sorts an array in descending order.
4. Write a C program function called ‘factorial’ that calculates and returns the factorial of a number.
5. Write a C program that gets an array with 10 3-digit integer IDs. The program then prompts the user to enter his ID, which will be compared to the existing IDs within our array. If his ID is matched, print “Accepted”; otherwise print “Unaccepted”.
7. Write a C program that accepts three integers: a, b, and c, and prints them in ascending order.
9. After the holidays lots of people were rushing to move back to their apartments. In this scenario, even numbers represent women and odd numbers represent men. Store the sequence of entry into the building by typing even and odd numbers into an array at random. Calculate the largest sequence of women entering the building before a man enters (the largest continuous number of women that entered before a man came in). Example: 17, 4, 6, 8, 9, 2, 8, 49 (the largest continuous number of women is 3).
Write a loop that reads positive integers from console input, printing out those values that are greater than 100, and that terminates when it reads an integer that is not positive. The printed values should be separated by single blank spaces. Declare any variables that are needed.
Write a loop that reads positive integers from console input, printing out those values that are even, separating them with spaces, and that terminates when it reads an integer that is not positive.
Write a loop that reads positive integers from console input and that terminates when it reads an integer that is not positive. After the loop terminates, it prints out the sum of all the even integers read.
Given an int variable n that has been initialized to a positive value and, in addition, int variables k and total that have already been declared, use a while loop to compute the sum of the cubes of the first n counting numbers, and store this value in total. Thus if n equals 4, your code should put 1*1*1 + 2*2*2 + 3*3*3 + 4*4*4 into total. Use no variables other than n, k, and total. Do not modify n. Don't forget to initialize k and total with appropriate values (see the sketch after this list).
loop design strategies
Given a char variable c that has already been declared, write some code that repeatedly reads a value from console input into c until at last a 'Y' or 'y' or 'N' or 'n' has been entered.
Given a string variable s that has already been declared, write some code that repeatedly reads a value from console input into s until at last a "Y" or "y" or "N" or "n" has been entered.
Write a loop that reads strings from console input where the string is either "duck" or "goose". The loop terminates when "goose" is read in. After the loop, your code should print out the number of "duck" strings that were read.
Objects of the BankAccount class require a name (string) and a social security number (string) be specified (in that order) upon creation. Declare an object named account, of type BankAccount, using the values "John Smith" and "123-45-6789" as the name and social security number respectively.
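A worked sketch of the sum-of-cubes exercise (written here in TypeScript for illustration, with n fixed to the example value 4; the original exercises target console-input code in C++/Java):

// Sum of the cubes of the first n counting numbers
let n = 4;       // assume n was initialized to a positive value
let k = 1;       // loop counter, initialized before the loop
let total = 0;   // accumulator, initialized to zero
while (k <= n) {
  total += k * k * k;  // add k cubed on each pass
  k++;
}
console.log(total);    // 1 + 8 + 27 + 64 = 100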
components-of-robot
Difference between Robot Systems and AI Programs
How does computer vision contribute to robotics?
Goals of Artificial Intelligence
Four categories of AI
What is searching? What are the different parameters used to evaluate a search technique?
Uninformed Search Algorithms
First-Order Logic
Inference rules in First-Order Logic
What are the different branches of artificial intelligence? Discuss some of the branches and the progress made in their fields.
What is adversarial search? Write the steps for game problem formulation.
State and explain the minimax algorithm with the tic-tac-toe game.
Explain the role of the Intelligent Agent in AI. Also explain all types of intelligent agents in detail.
Explain PEAS. Write the PEAS description of the task environment for an automated car driving system.
Define the role of machine intelligence in human life.
Describe arguments in multiagent systems and their types.
Negotiation and bargaining
Explain information retrieval with its characteristics.
What is information extraction?
What do you mean by natural language processing? Why is it needed?
What are the applications of natural language processing?
What are the various steps in natural language processing?
Machine translation
What are the three major approaches to machine translation?
Forward Chaining and Backward Chaining, with properties
Difference between Forward Chaining and Backward Chaining
Knowledge representation
Explain the unification algorithm used for reasoning under predicate logic with an example.
State Space Search in Artificial Intelligence
Explain the hill climbing algorithm, its drawbacks, and how they can be overcome.
What is a heuristic function?
Minimax algorithm
Describe alpha-beta pruning and give the other modifications to the minimax procedure to improve its performance.