Snippets Collections
# variables

nome = "Meu Nome"  # Whenever you put the value between "", the variable is of type string
# a string variable is a type of variable that holds text

idade = "28"  # a string variable

x = 2
y = 5
# Whenever you assign a numeric value without "", the variable becomes an int
# int variables only accept whole (integer) numbers; you cannot mix text into them



# examples of what you cannot do
!var@ = 1
# you cannot (in fact, Python will not let you) use punctuation in variable names

total = x + idade
# you cannot combine different variable types in a single operation
# in other words, you cannot add the age "28", which is a string, to an int,
# which is specifically a number; it is as if that 28 were written as "twenty-eight"
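
# A minimal sketch of the fix: convert the string to an int first
total = x + int(idade)  # int("28") -> 28
print(total)  # 30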





# returned results
print(x+y)  # will print the value 7 on the screen
print("Olá Mundo")  # the print command displays anything on the screen
# Run another Python script by reading and executing its source file
exec(open("G:/AREA DE DATOS/Proyectos/Directorio/Fuentes_Informacion/Obtener_InfoDirectorio.py").read())
import calendar

calendar.monthrange(year, month)[1]  # number of days in the given month
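
# For example (illustrative values): February 2024 has 29 days
print(calendar.monthrange(2024, 2)[1])  # -> 29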
1.  abc				- This tiny module delivers the environment you need for creating abstract base classes
2.  argparse		- This module contains tools that make it easier to create user-friendly interfaces from the level of the command line
3.  asyncio			- This is a very large module that delivers the framework and environment for asynchronous programming
4.  base64			- This well-known module delivers tools for encoding binary data into a format that can be displayed – and 
					  decoding it back the other way round
5.  collections		- This module offers specialized container data types that work as an alternative to the basic general-purpose
					  container types such as dict, list, set, and tuple.
6.  copy			- Everyone uses this tiny module that contains tools for deep copying of container-type data. Its most famous function is `deepcopy` – if not for this function, copying lists and dictionaries would be a torture for developers.
7.  csv				- Delivers functionality for exporting and importing tabular data in CSV format. The module lets developers load data from and save data to CSV files.
8.  datetime		- It delivers tools that make it easier to work with dates and times
9.  decimal 		- The module delivers a data type called Decimal. Its main advantage is correct rounding of decimal numbers, which is extremely important in billing systems – even
					  a slight distortion across several hundred thousand operations can change the final result significantly
10. functools		- The functools module comes in handy for higher-order functions, i.e. functions that act on or return other functions. You can treat any callable 
					  object as a function for the purposes of this module.
11. hashlib			- This handy module implements a common interface to numerous secure hash and message digest algorithms, like the FIPS 
					  secure hash algorithms SHA1, SHA224, SHA256, SHA384, and SHA512
12. http			- This package collects several modules for working with the HyperText Transfer Protocol 
13. importlib 		- The package provides the implementation of the import statement (and the __import__() function) in Python source code
14. itertools		- This useful module implements iterator building blocks inspired by constructs 
					  from APL, Haskell, and SML (all in a form that matches Python programs)
15. inspect 		- The module offers several useful functions that help developers get information about live objects such as 
					  modules, classes, methods, functions, tracebacks, frame objects, and code objects in Python code
16. json			- Helps you work with JSON data
17. logging			- This module defines functions and classes that provide a flexible event logging system for applications and libraries
18. math			- This module gives developers access to the mathematical functions defined by the C standard
19. multiprocessing	- This handy package supports spawning processes with the help of an API similar to the threading module
20. os				- Offers a portable method of using operating system dependent functionality.
21. pdb				- This module defines an interactive source code debugger
22. random			- Implements pseudo-random number generators for various distributions
23. re				- Provides regular expression matching operations similar to those you get in Perl
24. shutil			- Provides a number of high-level operations on files and collections of files, especially functions that 
					  support file copying and removal
25. sys				- Offers access to variables used or maintained by the interpreter and functions that interact strongly 
					  with the interpreter
26. threading		- This useful module builds higher-level threading interfaces on top of the lower level _thread module
27. types			- It defines names for object types used by the standard Python interpreter but not exposed as builtins
28. unittest		- Works similarly to major unit testing frameworks in other programming languages
29. urllib			- This handy package collects several modules for working with URLs
30. uuid			- The module provides immutable UUID objects, as well as the following functions for generating version 1, 3, 4, and 5 UUIDs
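
# A quick, purely illustrative taste of two of the modules listed above:
import collections
import hashlib

print(collections.Counter("abracadabra").most_common(2))  # [('a', 5), ('b', 2)]
print(hashlib.sha256(b"hello").hexdigest())  # SHA-256 hex digest of b"hello"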
import pandas as pd
from shapely.geometry import Point
import geopandas as gpd
from geopandas import GeoDataFrame

df = pd.read_csv("Long_Lats.csv", delimiter=',', skiprows=0, low_memory=False)

geometry = [Point(xy) for xy in zip(df['Longitude'], df['Latitude'])]
gdf = GeoDataFrame(df, geometry=geometry)   

# plot the points on the simple world map that ships with geopandas
world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
gdf.plot(ax=world.plot(figsize=(10, 6)), marker='o', color='red', markersize=15);
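
# Note: in a plain script (outside a notebook) the figure may not appear on its own;
# assuming an interactive matplotlib backend, show it explicitly:
import matplotlib.pyplot as plt
plt.show()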
# Imports
import time
import json

import numpy as np
import tokens_bert as tokens

from openvino.runtime import Core
from openvino.runtime import Dimension

# Download the model
# directory where model will be downloaded
base_model_dir = "model"

# desired precision
precision = "FP16-INT8"

# model name as named in Open Model Zoo
model_name = "bert-small-uncased-whole-word-masking-squad-int8-0002"

model_path = f"model/intel/{model_name}/{precision}/{model_name}.xml"
model_weights_path = f"model/intel/{model_name}/{precision}/{model_name}.bin"

download_command = f"omz_downloader " \
                   f"--name {model_name} " \
                   f"--precision {precision} " \
                   f"--output_dir {base_model_dir} " \
                   f"--cache_dir {base_model_dir}"
! $download_command

# Load the model for Entity Extraction with Dynamic Shape
# initialize inference engine
ie_core = Core()
# read the network and corresponding weights from file
model = ie_core.read_model(model=model_path, weights=model_weights_path)

# assign dynamic shapes to every input layer on the last dimension
for input_layer in model.inputs:
    input_shape = input_layer.partial_shape
    input_shape[1] = Dimension(1, 384)
    model.reshape({input_layer: input_shape})
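
# sanity check (illustrative): every input should now report a dynamic last dimension
for input_layer in model.inputs:
    print(input_layer.any_name, input_layer.partial_shape)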

# compile the model for the CPU
compiled_model = ie_core.compile_model(model=model, device_name="CPU")

# get input names of nodes
input_keys = list(compiled_model.inputs)

# Processing
# path to vocabulary file
vocab_file_path = "data/vocab.txt"

# create dictionary with words and their indices
vocab = tokens.load_vocab_file(vocab_file_path)

# define special tokens
cls_token = vocab["[CLS]"]
sep_token = vocab["[SEP]"]

# set a confidence score threshold
confidence_threshold = 0.4

# Preprocessing
# generator of a sequence of inputs
def prepare_input(entity_tokens, context_tokens):
    input_ids = [cls_token] + entity_tokens + [sep_token] + \
        context_tokens + [sep_token]
    # attention mask: 1 for every real token in the input
    attention_mask = [1] * len(input_ids)
    # token_type_ids: 0 for the entity (question) tokens, 1 for the context part
    token_type_ids = [0] * (len(entity_tokens) + 2) + \
        [1] * (len(context_tokens) + 1)

    # create input to feed the model
    input_dict = {
        "input_ids": np.array([input_ids], dtype=np.int32),
        "attention_mask": np.array([attention_mask], dtype=np.int32),
        "token_type_ids": np.array([token_type_ids], dtype=np.int32),
    }

    # some models require additional position_ids
    if "position_ids" in [i_key.any_name for i_key in input_keys]:
        position_ids = np.arange(len(input_ids))
        input_dict["position_ids"] = np.array([position_ids], dtype=np.int32)

    return input_dict

# Postprocessing
def postprocess(output_start, output_end, entity_tokens,
                context_tokens_start_end, input_size):

    def get_score(logits):
        # softmax: exponentiate and normalize the logits into probabilities
        out = np.exp(logits)
        return out / out.sum(axis=-1)

    # get start-end scores for context
    score_start = get_score(output_start)
    score_end = get_score(output_end)

    # index of first context token in tensor
    context_start_idx = len(entity_tokens) + 2
    # index of last+1 context token in tensor
    context_end_idx = input_size - 1

    # find product of all start-end combinations to find the best one
    max_score, max_start, max_end = find_best_entity_window(
        start_score=score_start, end_score=score_end,
        context_start_idx=context_start_idx, context_end_idx=context_end_idx
    )

    # convert to context text start-end index
    max_start = context_tokens_start_end[max_start][0]
    max_end = context_tokens_start_end[max_end][1]

    return max_score, max_start, max_end


def find_best_entity_window(start_score, end_score,
                            context_start_idx, context_end_idx):
    context_len = context_end_idx - context_start_idx
    score_mat = np.matmul(
        start_score[context_start_idx:context_end_idx].reshape(
            (context_len, 1)),
        end_score[context_start_idx:context_end_idx].reshape(
            (1, context_len)),
    )
    # reset candidates with end before start
    score_mat = np.triu(score_mat)
    # reset long candidates (>16 tokens)
    score_mat = np.tril(score_mat, 16)
    # find the best start-end pair
    max_s, max_e = divmod(score_mat.flatten().argmax(), score_mat.shape[1])
    max_score = score_mat[max_s, max_e]

    return max_score, max_s, max_e
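
# Illustrative check with toy scores (not model output): a 6-token tensor whose
# context occupies indices 2..5; the strongest start-end pair sits at relative (0, 0)
_toy_start = np.array([0.0, 0.0, 0.9, 0.05, 0.03, 0.02])
_toy_end = np.array([0.0, 0.0, 0.8, 0.10, 0.05, 0.05])
print(find_best_entity_window(start_score=_toy_start, end_score=_toy_end,
                              context_start_idx=2, context_end_idx=6))  # -> (~0.72, 0, 0)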

def get_best_entity(entity, context, vocab):
    # convert context string to tokens
    context_tokens, context_tokens_end = tokens.text_to_tokens(
        text=context.lower(), vocab=vocab)
    # convert entity string to tokens
    entity_tokens, _ = tokens.text_to_tokens(text=entity.lower(), vocab=vocab)

    network_input = prepare_input(entity_tokens, context_tokens)
    input_size = len(context_tokens) + len(entity_tokens) + 3

    # openvino inference
    output_start_key = compiled_model.output("output_s")
    output_end_key = compiled_model.output("output_e")
    result = compiled_model(network_input)

    # postprocess the result getting the score and context range for the answer
    score_start_end = postprocess(output_start=result[output_start_key][0],
                                  output_end=result[output_end_key][0],
                                  entity_tokens=entity_tokens,
                                  context_tokens_start_end=context_tokens_end,
                                  input_size=input_size)

    # return the matching part of the context (the answer) together with its score
    return context[score_start_end[1]:score_start_end[2]], score_start_end[0]

# Set the Entity Recognition Template
template = ["building", "company", "persons", "city",
            "state", "height", "floor", "address"]

def run_analyze_entities(context):
    print(f"Context: {context}\n", flush=True)

    if len(context) == 0:
        print("Error: Empty context or outside paragraphs")
        return

    if len(context) > 380:
        print("Error: The context is too long for this particular model. "
              "Try with context shorter than 380 words.")
        return

    # measure processing time
    start_time = time.perf_counter()
    extract = []
    for field in template:
        entity_to_find = field + "?"
        entity, score = get_best_entity(entity=entity_to_find,
                                        context=context,
                                        vocab=vocab)
        if score >= confidence_threshold:
            extract.append({"Entity": entity, "Type": field,
                            "Score": f"{score:.2f}"})
    end_time = time.perf_counter()
    res = {"Extraction": extract, "Time": f"{end_time - start_time:.2f}s"}
    print("\nJSON Output:")
    print(json.dumps(res, sort_keys=False, indent=4))

# Run on Simple Text
# Sample 1
source_text = "Intel Corporation is an American multinational and technology" \
    " company headquartered in Santa Clara, California."
run_analyze_entities(source_text)

# Sample 2
source_text = "Intel was founded in Mountain View, California, " \
    "in 1968 by Gordon E. Moore, a chemist, and Robert Noyce, " \
    "a physicist and co-inventor of the integrated circuit."
run_analyze_entities(source_text)

# Sample 3
source_text = "The Robert Noyce Building in Santa Clara, California, " \
    "is the headquarters for Intel Corporation. It was constructed in 1992 " \
    "and is located at 2200 Mission College Boulevard - 95054. It has an " \
    "estimated height of 22.20 meters and 6 floors above ground."
run_analyze_entities(source_text)
# Import TensorFlow and Other Libraries
import os
import sys
from pathlib import Path

import PIL
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from PIL import Image
from openvino.runtime import Core
from openvino.tools.mo import mo_tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential

sys.path.append("../utils")
from notebook_utils import download_file

# Download and Explore the Dataset
import pathlib
dataset_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
data_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, untar=True)
data_dir = pathlib.Path(data_dir)

image_count = len(list(data_dir.glob('*/*.jpg')))
print(image_count)

roses = list(data_dir.glob('roses/*'))
PIL.Image.open(str(roses[0]))
PIL.Image.open(str(roses[1]))

tulips = list(data_dir.glob('tulips/*'))
PIL.Image.open(str(tulips[0]))
PIL.Image.open(str(tulips[1]))

# Create a Dataset
batch_size = 32
img_height = 180
img_width = 180

train_ds = tf.keras.preprocessing.image_dataset_from_directory(
  data_dir,
  validation_split=0.2,
  subset="training",
  seed=123,
  image_size=(img_height, img_width),
  batch_size=batch_size)

val_ds = tf.keras.preprocessing.image_dataset_from_directory(
  data_dir,
  validation_split=0.2,
  subset="validation",
  seed=123,
  image_size=(img_height, img_width),
  batch_size=batch_size)

class_names = train_ds.class_names
print(class_names)

# Visualize the Data
plt.figure(figsize=(10, 10))
for images, labels in train_ds.take(1):
    for i in range(9):
        ax = plt.subplot(3, 3, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.title(class_names[labels[i]])
        plt.axis("off")

for image_batch, labels_batch in train_ds:
    print(image_batch.shape)
    print(labels_batch.shape)
    break

# Configure the Dataset for Performance
# AUTOTUNE = tf.data.AUTOTUNE
AUTOTUNE = tf.data.experimental.AUTOTUNE
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)

# Standardize the Data
normalization_layer = layers.experimental.preprocessing.Rescaling(1./255)

normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
image_batch, labels_batch = next(iter(normalized_ds))
first_image = image_batch[0]
# Notice that the pixel values are now in `[0,1]`.
print(np.min(first_image), np.max(first_image)) 

# Create the Model
num_classes = 5

model = Sequential([
  layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3)),
  layers.Conv2D(16, 3, padding='same', activation='relu'),
  layers.MaxPooling2D(),
  layers.Conv2D(32, 3, padding='same', activation='relu'),
  layers.MaxPooling2D(),
  layers.Conv2D(64, 3, padding='same', activation='relu'),
  layers.MaxPooling2D(),
  layers.Flatten(),
  layers.Dense(128, activation='relu'),
  layers.Dense(num_classes)
])

# Compile the Model
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
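
# Training typically follows compilation; a minimal sketch (the epoch count here is illustrative):
epochs = 10
history = model.fit(train_ds, validation_data=val_ds, epochs=epochs)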
# Preparation
from pathlib import Path

import tensorflow as tf

model_xml = Path("model/flower/flower_ir.xml")
dataset_url = (
    "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
)
data_dir = Path(tf.keras.utils.get_file("flower_photos", origin=dataset_url, untar=True))

if not model_xml.exists():
    print("Executing training notebook. This will take a while...")
    %run 301-tensorflow-training-openvino.ipynb

# Imports
import copy
import os
import sys

import cv2
import matplotlib.pyplot as plt
import numpy as np
from addict import Dict
from openvino.tools.pot.api import Metric, DataLoader
from openvino.tools.pot.graph import load_model, save_model
from openvino.tools.pot.graph.model_utils import compress_model_weights
from openvino.tools.pot.engines.ie_engine import IEEngine
from openvino.tools.pot.pipeline.initializer import create_pipeline
from openvino.runtime import Core
from PIL import Image

sys.path.append("../utils")
from notebook_utils import benchmark_model, download_file

# Settings
model_config = Dict(
    {
        "model_name": "flower",
        "model": "model/flower/flower_ir.xml",
        "weights": "model/flower/flower_ir.bin",
    }
)

engine_config = Dict({"device": "CPU", "stat_requests_number": 2, "eval_requests_number": 2})

algorithms = [
    {
        "name": "DefaultQuantization",
        "params": {
            "target_device": "CPU",
            "preset": "performance",
            "stat_subset_size": 1000,
        },
    }
]

# Create DataLoader Class
class ClassificationDataLoader(DataLoader):
    """
    DataLoader for image data that is stored in a directory per category. For example, for
    categories _rose_ and _daisy_, rose images are expected in data_source/rose, daisy images
    in data_source/daisy.
    """

    def __init__(self, data_source):
        """
        :param data_source: path to data directory
        """
        self.data_source = Path(data_source)
        self.dataset = [p for p in self.data_source.glob("**/*") if p.suffix in (".png", ".jpg")]
        self.class_names = sorted([item.name for item in self.data_source.iterdir() if item.is_dir()])

    def __len__(self):
        """
        Returns the number of elements in the dataset
        """
        return len(self.dataset)

    def __getitem__(self, index):
        """
        Get item from self.dataset at the specified index.
        Returns (annotation, image), where annotation is a tuple (index, class_index)
        and image a preprocessed image in network shape
        """
        if index >= len(self):
            raise IndexError
        filepath = self.dataset[index]
        annotation = (index, self.class_names.index(filepath.parent.name))
        image = self._read_image(filepath)
        return annotation, image

    def _read_image(self, filepath):
        """
        Read the image at filepath into memory, resize, convert from BGR (the OpenCV default)
        to RGB and to network shape

        :param filepath: path of the image file to read
        :return ndarray representation of image batch
        """
        image = cv2.imread(str(filepath))[:, :, (2, 1, 0)]
        image = cv2.resize(image, (180, 180)).astype(np.float32)
        return image
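
# Quick sanity check (illustrative): inspect the first item of the data loader
_loader = ClassificationDataLoader(data_source=data_dir)
_annotation, _image = _loader[0]
print(len(_loader), _annotation, _image.shape)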

# Create Accuracy Metric Class
class Accuracy(Metric):
    def __init__(self):
        super().__init__()
        self._name = "accuracy"
        self._matches = []

    @property
    def value(self):
        """Returns accuracy metric value for the last model output."""
        return {self._name: self._matches[-1]}

    @property
    def avg_value(self):
        """
        Returns accuracy metric value for all model outputs. Results per image are stored in
        self._matches, where True means a correct prediction and False a wrong prediction.
        Accuracy is computed as the number of correct predictions divided by the total
        number of predictions.
        """
        num_correct = np.count_nonzero(self._matches)
        return {self._name: num_correct / len(self._matches)}

    def update(self, output, target):
        """Updates prediction matches.

        :param output: model output
        :param target: annotations
        """
        predict = np.argmax(output[0], axis=1)
        match = predict == target
        self._matches.append(match)

    def reset(self):
        """
        Resets the Accuracy metric. This is a required method that should initialize all
        attributes to their initial value.
        """
        self._matches = []

    def get_attributes(self):
        """
        Returns a dictionary of metric attributes {metric_name: {attribute_name: value}}.
        Required attributes: 'direction': 'higher-better' or 'higher-worse'
                             'type': metric type
        """
        return {self._name: {"direction": "higher-better", "type": "accuracy"}}
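
# Illustrative check (toy outputs, batch size 1 per update): one correct, one wrong prediction
_metric = Accuracy()
_metric.update(output=[np.array([[0.9, 0.1]])], target=np.array([0]))
_metric.update(output=[np.array([[0.3, 0.7]])], target=np.array([0]))
print(_metric.avg_value)  # {'accuracy': 0.5}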

# POT Optimization
# Step 1: Load the model
model = load_model(model_config=model_config)
original_model = copy.deepcopy(model)

# Step 2: Initialize the data loader
data_loader = ClassificationDataLoader(data_source=data_dir)

# Step 3 (Optional. Required for AccuracyAwareQuantization): Initialize the metric
#        Compute metric results on original model
metric = Accuracy()

# Step 4: Initialize the engine for metric calculation and statistics collection
engine = IEEngine(config=engine_config, data_loader=data_loader, metric=metric)

# Step 5: Create a pipeline of compression algorithms
pipeline = create_pipeline(algo_config=algorithms, engine=engine)

# Step 6: Execute the pipeline
compressed_model = pipeline.run(model=model)

# Step 7 (Optional): Compress model weights quantized precision
#                    in order to reduce the size of final .bin file
compress_model_weights(model=compressed_model)

# Step 8: Save the compressed model and get the path to the model
compressed_model_paths = save_model(
    model=compressed_model, save_path=os.path.join(os.path.curdir, "model/optimized")
)
compressed_model_xml = Path(compressed_model_paths[0]["model"])
print(f"The quantized model is stored in {compressed_model_xml}")

# Step 9 (Optional): Evaluate the original and compressed model. Print the results
original_metric_results = pipeline.evaluate(original_model)
if original_metric_results:
    print(f"Accuracy of the original model:  {next(iter(original_metric_results.values())):.5f}")

quantized_metric_results = pipeline.evaluate(compressed_model)
if quantized_metric_results:
    print(f"Accuracy of the quantized model: {next(iter(quantized_metric_results.values())):.5f}")

# Run Inference on Quantized Model
def pre_process_image(imagePath, img_height=180):
    # Model input format
    n, c, h, w = [1, 3, img_height, img_height]
    image = Image.open(imagePath)
    image = image.resize((h, w), resample=Image.BILINEAR)

    # Convert to array; the model expects NHWC layout, so keep HWC and just add the batch axis
    image = np.array(image)

    input_image = image.reshape((n, h, w, c))

    return input_image

# Load the optimized model and get the names of the input and output layer
ie = Core()
model_pot = ie.read_model(model="model/optimized/flower_ir.xml")
compiled_model_pot = ie.compile_model(model=model_pot, device_name="CPU")
input_layer = compiled_model_pot.input(0)
output_layer = compiled_model_pot.output(0)

# Get the class names: a list of directory names in alphabetical order
class_names = sorted([item.name for item in Path(data_dir).iterdir() if item.is_dir()])

# Run inference on an input image...
inp_img_url = (
    "https://upload.wikimedia.org/wikipedia/commons/4/48/A_Close_Up_Photo_of_a_Dandelion.jpg"
)
directory = "output"
inp_file_name = "A_Close_Up_Photo_of_a_Dandelion.jpg"
file_path = Path(directory)/Path(inp_file_name)
# Download the image if it does not exist yet
if not file_path.exists():
    download_file(inp_img_url, inp_file_name, directory=directory)

# Pre-process the image and get it ready for inference.
input_image = pre_process_image(imagePath=file_path)
print(f'input image shape: {input_image.shape}')
print(f'input layer shape: {input_layer.shape}')

res = compiled_model_pot([input_image])[output_layer]

score = tf.nn.softmax(res[0])

# Show the results
image = Image.open(file_path)
plt.imshow(image)
print(
    "This image most likely belongs to {} with a {:.2f} percent confidence.".format(
        class_names[np.argmax(score)], 100 * np.max(score)
    )
)

# Compare Inference Speed
# print the available devices on this system
ie = Core()
print("Device information:")
print(ie.get_property("CPU", "FULL_DEVICE_NAME"))
if "GPU" in ie.available_devices:
    print(ie.get_property("GPU", "FULL_DEVICE_NAME"))

# Original model - CPU
benchmark_model(model_path=model_xml, device="CPU", seconds=15, api='async')

# Quantized model - CPU
benchmark_model(model_path=compressed_model_xml, device="CPU", seconds=15, api='async')

# Original model - MULTI:CPU,GPU
if "GPU" in ie.available_devices:
    benchmark_model(model_path=model_xml, device="MULTI:CPU,GPU", seconds=15, api='async')
else:
    print("A supported integrated GPU is not available on this system.")

# Quantized model - MULTI:CPU,GPU
if "GPU" in ie.available_devices:
    benchmark_model(model_path=compressed_model_xml, device="MULTI:CPU,GPU", seconds=15, api='async')
else:
    print("A supported integrated GPU is not available on this system.")

# print the available devices on this system
print("Device information:")
print(ie.get_property("CPU", "FULL_DEVICE_NAME"))
if "GPU" in ie.available_devices:
    print(ie.get_property("GPU", "FULL_DEVICE_NAME"))

# Original IR model - CPU
benchmark_output = %sx benchmark_app -m $model_xml -t 15 -api async
# Remove logging info from benchmark_app output and show only the results
benchmark_result = [line for line in benchmark_output if not (line.startswith(r"[") or line.startswith("  ") or line=="")]
print("\n".join(benchmark_result))

# Quantized IR model - CPU
benchmark_output = %sx benchmark_app -m $compressed_model_xml -t 15 -api async
# Remove logging info from benchmark_app output and show only the results
benchmark_result = [line for line in benchmark_output if not (line.startswith(r"[") or line.startswith("  ") or line=="")]
print("\n".join(benchmark_result))

# Original IR model - MULTI:CPU,GPU
ie = Core()
if "GPU" in ie.available_devices:
    benchmark_output = %sx benchmark_app -m $model_xml -d MULTI:CPU,GPU -t 15 -api async
    # Remove logging info from benchmark_app output and show only the results
    benchmark_result = [line for line in benchmark_output if not (line.startswith(r"[") or line.startswith("  ") or line=="")]
    print("\n".join(benchmark_result))
else:
    print("An integrated GPU is not available on this system.")

# Quantized IR model - MULTI:CPU,GPU
ie = Core()
if "GPU" in ie.available_devices:
    benchmark_output = %sx benchmark_app -m $compressed_model_xml -d MULTI:CPU,GPU -t 15 -api async
    # Remove logging info from benchmark_app output and show only the results
    benchmark_result = [line for line in benchmark_output if not (line.startswith(r"[") or line.startswith("  ") or line=="")]
    print("\n".join(benchmark_result))
else:
    print("An integrated GPU is not available on this system.")
# Imports and Settings
# On Windows, add the directory that contains cl.exe to the PATH to enable PyTorch to find the
# required C++ tools. This code assumes that Visual Studio 2019 is installed in the default
# directory. If you have a different C++ compiler, please add the correct path to os.environ["PATH"]
# directly. Note that the C++ Redistributable is not enough to run this notebook.

# Adding the path to os.environ["LIB"] is not always required - it depends on the system's configuration

import sys

if sys.platform == "win32":
    import distutils.command.build_ext
    import os
    from pathlib import Path

    VS_INSTALL_DIR = r"C:/Program Files (x86)/Microsoft Visual Studio"
    cl_paths = sorted(list(Path(VS_INSTALL_DIR).glob("**/Hostx86/x64/cl.exe")))
    if len(cl_paths) == 0:
        raise ValueError(
            "Cannot find Visual Studio. This notebook requires a C++ compiler. If you installed "
            "a C++ compiler, please add the directory that contains cl.exe to `os.environ['PATH']`."
        )
    else:
        # If multiple versions of MSVC are installed, get the most recent version
        cl_path = cl_paths[-1]
        vs_dir = str(cl_path.parent)
        os.environ["PATH"] += f"{os.pathsep}{vs_dir}"
        # Code for finding the library dirs from
        # https://stackoverflow.com/questions/47423246/get-pythons-lib-path
        d = distutils.core.Distribution()
        b = distutils.command.build_ext.build_ext(d)
        b.finalize_options()
        os.environ["LIB"] = os.pathsep.join(b.library_dirs)
        print(f"Added {vs_dir} to PATH")

import sys
import time
import warnings  # to disable warnings on export to ONNX
import zipfile
from pathlib import Path
import logging

import torch
import nncf  # Important - should be imported directly after torch

import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision.transforms as transforms

from nncf.common.utils.logger import set_log_level
set_log_level(logging.ERROR)  # Disables all NNCF info and warning messages
from nncf import NNCFConfig
from nncf.torch import create_compressed_model, register_default_init_args
from openvino.runtime import Core
from torch.jit import TracerWarning

sys.path.append("../utils")
from notebook_utils import download_file

torch.manual_seed(0)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using {device} device")

MODEL_DIR = Path("model")
OUTPUT_DIR = Path("output")
DATA_DIR = Path("data")
BASE_MODEL_NAME = "resnet18"
image_size = 64

OUTPUT_DIR.mkdir(exist_ok=True)
MODEL_DIR.mkdir(exist_ok=True)
DATA_DIR.mkdir(exist_ok=True)

# Paths where PyTorch, ONNX and OpenVINO IR models will be stored
fp32_pth_path = Path(MODEL_DIR / (BASE_MODEL_NAME + "_fp32")).with_suffix(".pth")
fp32_onnx_path = Path(OUTPUT_DIR / (BASE_MODEL_NAME + "_fp32")).with_suffix(".onnx")
fp32_ir_path = fp32_onnx_path.with_suffix(".xml")
int8_onnx_path = Path(OUTPUT_DIR / (BASE_MODEL_NAME + "_int8")).with_suffix(".onnx")
int8_ir_path = int8_onnx_path.with_suffix(".xml")

# It's possible to train the FP32 model from scratch, but it might be slow, so the pre-trained weights are downloaded by default.
pretrained_on_tiny_imagenet = True
fp32_pth_url = "https://storage.openvinotoolkit.org/repositories/nncf/openvino_notebook_ckpts/302_resnet18_fp32_v1.pth"
download_file(fp32_pth_url, directory=MODEL_DIR, filename=fp32_pth_path.name)

# Download Tiny ImageNet dataset
def download_tiny_imagenet_200(
    data_dir: Path,
    url="http://cs231n.stanford.edu/tiny-imagenet-200.zip",
    tarname="tiny-imagenet-200.zip",
):
    archive_path = data_dir / tarname
    download_file(url, directory=data_dir, filename=tarname)
    zip_ref = zipfile.ZipFile(archive_path, "r")
    zip_ref.extractall(path=data_dir)
    zip_ref.close()

def prepare_tiny_imagenet_200(dataset_dir: Path):
    # format validation set the same way as train set is formatted
    val_data_dir = dataset_dir / 'val'
    val_annotations_file = val_data_dir / 'val_annotations.txt'
    with open(val_annotations_file, 'r') as f:
        val_annotation_data = map(lambda line: line.split('\t')[:2], f.readlines())
    val_images_dir = val_data_dir / 'images'
    for image_filename, image_label in val_annotation_data:
        from_image_filepath = val_images_dir / image_filename
        to_image_dir = val_data_dir / image_label
        if not to_image_dir.exists():
            to_image_dir.mkdir()
        to_image_filepath = to_image_dir / image_filename
        from_image_filepath.rename(to_image_filepath)
    val_annotations_file.unlink()
    val_images_dir.rmdir()
    

DATASET_DIR = DATA_DIR / "tiny-imagenet-200"
if not DATASET_DIR.exists():
    download_tiny_imagenet_200(DATA_DIR)
    prepare_tiny_imagenet_200(DATASET_DIR)
    print(f"Successfully downloaded and prepared dataset at: {DATASET_DIR}")

# Pre-train Floating-Point Model
# Train Function
def train(train_loader, model, criterion, optimizer, epoch):
    batch_time = AverageMeter("Time", ":3.3f")
    losses = AverageMeter("Loss", ":2.3f")
    top1 = AverageMeter("Acc@1", ":2.2f")
    top5 = AverageMeter("Acc@5", ":2.2f")
    progress = ProgressMeter(
        len(train_loader), [batch_time, losses, top1, top5], prefix="Epoch:[{}]".format(epoch)
    )

    # switch to train mode
    model.train()

    end = time.time()
    for i, (images, target) in enumerate(train_loader):
        images = images.to(device)
        target = target.to(device)

        # compute output
        output = model(images)
        loss = criterion(output, target)

        # measure accuracy and record loss
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), images.size(0))
        top1.update(acc1[0], images.size(0))
        top5.update(acc5[0], images.size(0))

        # compute gradient and do opt step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        print_frequency = 50
        if i % print_frequency == 0:
            progress.display(i)

# Validate Function
def validate(val_loader, model, criterion):
    batch_time = AverageMeter("Time", ":3.3f")
    losses = AverageMeter("Loss", ":2.3f")
    top1 = AverageMeter("Acc@1", ":2.2f")
    top5 = AverageMeter("Acc@5", ":2.2f")
    progress = ProgressMeter(len(val_loader), [batch_time, losses, top1, top5], prefix="Test: ")

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        end = time.time()
        for i, (images, target) in enumerate(val_loader):
            images = images.to(device)
            target = target.to(device)

            # compute output
            output = model(images)
            loss = criterion(output, target)

            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(acc1[0], images.size(0))
            top5.update(acc5[0], images.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            print_frequency = 10
            if i % print_frequency == 0:
                progress.display(i)

        print(" * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}".format(top1=top1, top5=top5))
    return top1.avg

# Helpers
class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self, name, fmt=":f"):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
        return fmtstr.format(**self.__dict__)
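
# Illustrative usage: track a running average of a value
_meter = AverageMeter("Loss", ":.2f")
_meter.update(0.5)
_meter.update(0.3)
print(_meter)  # Loss 0.30 (0.40)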


class ProgressMeter(object):
    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        entries = [self.prefix + self.batch_fmtstr.format(batch)]
        entries += [str(meter) for meter in self.meters]
        print("\t".join(entries))

    def _get_batch_fmtstr(self, num_batches):
        num_digits = len(str(num_batches))
        fmt = "{:" + str(num_digits) + "d}"
        return "[" + fmt + "/" + fmt.format(num_batches) + "]"


def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k"""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
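
# Illustrative check (toy logits): batch of 2, 4 classes, both predictions correct at top-1
_logits = torch.tensor([[5.0, 1.0, 0.0, 0.0], [0.0, 4.0, 1.0, 0.0]])
_targets = torch.tensor([0, 1])
print(accuracy(_logits, _targets, topk=(1, 2)))  # [tensor([100.]), tensor([100.])]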

# Get a Pre-trained FP32 Model
num_classes = 200  # 200 is for Tiny ImageNet, default is 1000 for ImageNet
init_lr = 1e-4
batch_size = 128
epochs = 4

model = models.resnet18(pretrained=not pretrained_on_tiny_imagenet)
# update the last FC layer for Tiny ImageNet number of classes
model.fc = nn.Linear(in_features=512, out_features=num_classes, bias=True)
model.to(device)

# Data loading code
train_dir = DATASET_DIR / "train"
val_dir = DATASET_DIR / "val"
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

train_dataset = datasets.ImageFolder(
    train_dir,
    transforms.Compose(
        [
            transforms.Resize(image_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]
    ),
)
val_dataset = datasets.ImageFolder(
    val_dir,
    transforms.Compose(
        [
            transforms.Resize(image_size),
            transforms.ToTensor(),
            normalize,
        ]
    ),
)

train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True, sampler=None
)

val_loader = torch.utils.data.DataLoader(
    val_dataset, batch_size=batch_size, shuffle=False, num_workers=4, pin_memory=True
)

# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=init_lr)

if pretrained_on_tiny_imagenet:
    #
    # ** WARNING: torch.load functionality uses Python's pickling module that
    # may be used to perform arbitrary code execution during unpickling. Only load data that you
    # trust.
    #
    checkpoint = torch.load(str(fp32_pth_path), map_location="cpu")
    model.load_state_dict(checkpoint["state_dict"], strict=True)
    acc1_fp32 = checkpoint["acc1"]
else:
    best_acc1 = 0
    # Training loop
    for epoch in range(0, epochs):
        # run a single training epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion)

        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)

        if is_best:
            checkpoint = {"state_dict": model.state_dict(), "acc1": acc1}
            torch.save(checkpoint, fp32_pth_path)
    acc1_fp32 = best_acc1
    
print(f"Accuracy of FP32 model: {acc1_fp32:.3f}")

dummy_input = torch.randn(1, 3, image_size, image_size).to(device)

torch.onnx.export(model, dummy_input, fp32_onnx_path)
print(f"FP32 ONNX model was exported to {fp32_onnx_path}.")

# Create and Initialize Quantization
nncf_config_dict = {
    "input_info": {"sample_size": [1, 3, image_size, image_size]},
    "log_dir": str(OUTPUT_DIR),  # log directory for NNCF-specific logging outputs
    "compression": {
        "algorithm": "quantization",  # specify the algorithm here
    },
}
nncf_config = NNCFConfig.from_dict(nncf_config_dict)

nncf_config = register_default_init_args(nncf_config, train_loader)
compression_ctrl, model = create_compressed_model(model, nncf_config)
acc1 = validate(val_loader, model, criterion)
print(f"Accuracy of initialized INT8 model: {acc1:.3f}")

# Fine-tune the Compressed Model
compression_lr = init_lr / 10
optimizer = torch.optim.Adam(model.parameters(), lr=compression_lr)

# train for one epoch with NNCF
train(train_loader, model, criterion, optimizer, epoch=0)

# evaluate on validation set after Quantization-Aware Training (QAT case)
acc1_int8 = validate(val_loader, model, criterion)

print(f"Accuracy of tuned INT8 model: {acc1_int8:.3f}")
print(f"Accuracy drop of tuned INT8 model over pre-trained FP32 model: {acc1_fp32 - acc1_int8:.3f}")

# Export INT8 Model to ONNX
if not int8_onnx_path.exists():
    warnings.filterwarnings("ignore", category=TracerWarning)
    warnings.filterwarnings("ignore", category=UserWarning)
    # Export INT8 model to ONNX that is supported by the OpenVINO™ toolkit
    compression_ctrl.export_model(int8_onnx_path)
    print(f"INT8 ONNX model exported to {int8_onnx_path}.")

# Convert ONNX models to OpenVINO Intermediate Representation (IR)
if not fp32_ir_path.exists():
    !mo --input_model $fp32_onnx_path --input_shape "[1,3, $image_size, $image_size]" --mean_values "[123.675, 116.28 , 103.53]" --scale_values "[58.395, 57.12 , 57.375]" --data_type FP16 --output_dir $OUTPUT_DIR

if not int8_ir_path.exists():
    !mo --input_model $int8_onnx_path --input_shape "[1,3, $image_size, $image_size]" --mean_values "[123.675, 116.28 , 103.53]" --scale_values "[58.395, 57.12 , 57.375]" --data_type FP16 --output_dir $OUTPUT_DIR

# Benchmark Model Performance by Computing Inference Time
def parse_benchmark_output(benchmark_output):
    parsed_output = [line for line in benchmark_output if not (line.startswith(r"[") or line.startswith("  ") or line == "")]
    print(*parsed_output, sep='\n')


print('Benchmark FP32 model (IR)')
benchmark_output = ! benchmark_app -m $fp32_ir_path -d CPU -api async -t 15
parse_benchmark_output(benchmark_output)

print('Benchmark INT8 model (IR)')
benchmark_output = ! benchmark_app -m $int8_ir_path -d CPU -api async -t 15
parse_benchmark_output(benchmark_output)

# Show CPU Information for reference
ie = Core()
ie.get_property(device_name="CPU", name="FULL_DEVICE_NAME")
# Imports and Settings
from pathlib import Path
import logging

import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow.python.keras import layers
from tensorflow.python.keras import models

from nncf import NNCFConfig
from nncf.tensorflow.helpers.model_creation import create_compressed_model
from nncf.tensorflow.initialization import register_default_init_args
from nncf.common.utils.logger import set_log_level

set_log_level(logging.ERROR)

MODEL_DIR = Path("model")
OUTPUT_DIR = Path("output")
MODEL_DIR.mkdir(exist_ok=True)
OUTPUT_DIR.mkdir(exist_ok=True)

BASE_MODEL_NAME = "ResNet-18"

fp32_h5_path = Path(MODEL_DIR / (BASE_MODEL_NAME + "_fp32")).with_suffix(".h5")
fp32_sm_path = Path(OUTPUT_DIR / (BASE_MODEL_NAME + "_fp32"))
fp32_ir_path = Path(OUTPUT_DIR / "saved_model").with_suffix(".xml")
int8_pb_path = Path(OUTPUT_DIR / (BASE_MODEL_NAME + "_int8")).with_suffix(".pb")
int8_pb_name = Path(BASE_MODEL_NAME + "_int8").with_suffix(".pb")
int8_ir_path = int8_pb_path.with_suffix(".xml")

BATCH_SIZE = 128
IMG_SIZE = (64, 64)  # Image size used in this example (Imagenette images are resized to 64x64)
NUM_CLASSES = 10  # For Imagenette dataset

LR = 1e-5

MEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255)  # From Imagenet dataset
STDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255)  # From Imagenet dataset

fp32_pth_url = "https://storage.openvinotoolkit.org/repositories/nncf/openvino_notebook_ckpts/305_resnet18_imagenette_fp32_v1.h5"
_ = tf.keras.utils.get_file(fp32_h5_path.resolve(), fp32_pth_url)
print(f'Absolute path where the model weights are saved:\n {fp32_h5_path.resolve()}')

# Dataset Preprocessing
datasets, datasets_info = tfds.load('imagenette/160px', shuffle_files=True, as_supervised=True, with_info=True,
                                    read_config=tfds.ReadConfig(shuffle_seed=0))
train_dataset, validation_dataset = datasets['train'], datasets['validation']
fig = tfds.show_examples(train_dataset, datasets_info)

def preprocessing(image, label):
    image = tf.image.resize(image, IMG_SIZE)
    image = image - MEAN_RGB
    image = image / STDDEV_RGB
    label = tf.one_hot(label, NUM_CLASSES)
    return image, label


train_dataset = (train_dataset.map(preprocessing, num_parallel_calls=tf.data.experimental.AUTOTUNE)
                              .batch(BATCH_SIZE)
                              .prefetch(tf.data.experimental.AUTOTUNE))

validation_dataset = (validation_dataset.map(preprocessing, num_parallel_calls=tf.data.experimental.AUTOTUNE)
                                        .batch(BATCH_SIZE)
                                        .prefetch(tf.data.experimental.AUTOTUNE))

# Define a Floating-Point Model
def residual_conv_block(filters, stage, block, strides=(1, 1), cut='pre'):
    def layer(input_tensor):
        x = layers.BatchNormalization(epsilon=2e-5)(input_tensor)
        x = layers.Activation('relu')(x)

        # defining shortcut connection
        if cut == 'pre':
            shortcut = input_tensor
        elif cut == 'post':
            shortcut = layers.Conv2D(filters, (1, 1), strides=strides, kernel_initializer='he_uniform', 
                                     use_bias=False)(x)

        # continue with convolution layers
        x = layers.ZeroPadding2D(padding=(1, 1))(x)
        x = layers.Conv2D(filters, (3, 3), strides=strides, kernel_initializer='he_uniform', use_bias=False)(x)

        x = layers.BatchNormalization(epsilon=2e-5)(x)
        x = layers.Activation('relu')(x)
        x = layers.ZeroPadding2D(padding=(1, 1))(x)
        x = layers.Conv2D(filters, (3, 3), kernel_initializer='he_uniform', use_bias=False)(x)

        # add residual connection
        x = layers.Add()([x, shortcut])
        return x

    return layer


def ResNet18(input_shape=None):
    """Instantiates the ResNet18 architecture."""
    img_input = layers.Input(shape=input_shape, name='data')

    # ResNet18 bottom
    x = layers.BatchNormalization(epsilon=2e-5, scale=False)(img_input)
    x = layers.ZeroPadding2D(padding=(3, 3))(x)
    x = layers.Conv2D(64, (7, 7), strides=(2, 2), kernel_initializer='he_uniform', use_bias=False)(x)
    x = layers.BatchNormalization(epsilon=2e-5)(x)
    x = layers.Activation('relu')(x)
    x = layers.ZeroPadding2D(padding=(1, 1))(x)
    x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(x)

    # ResNet18 body
    repetitions = (2, 2, 2, 2)
    for stage, rep in enumerate(repetitions):
        for block in range(rep):
            filters = 64 * (2 ** stage)
            if block == 0 and stage == 0:
                x = residual_conv_block(filters, stage, block, strides=(1, 1), cut='post')(x)
            elif block == 0:
                x = residual_conv_block(filters, stage, block, strides=(2, 2), cut='post')(x)
            else:
                x = residual_conv_block(filters, stage, block, strides=(1, 1), cut='pre')(x)
    x = layers.BatchNormalization(epsilon=2e-5)(x)
    x = layers.Activation('relu')(x)

    # ResNet18 top
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dense(NUM_CLASSES)(x)
    x = layers.Activation('softmax')(x)

    # Create model
    model = models.Model(img_input, x)

    return model

IMG_SHAPE = IMG_SIZE + (3,)
model = ResNet18(input_shape=IMG_SHAPE)
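
# Optional sanity check: print the layer summary of the freshly built network
model.summary()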

# Pre-train Floating-Point Model
# Load the floating-point weights
model.load_weights(fp32_h5_path)

# Compile the floating-point model
model.compile(loss=tf.keras.losses.CategoricalCrossentropy(label_smoothing=0.1),
              metrics=[tf.keras.metrics.CategoricalAccuracy(name='acc@1')])

# Validate the floating-point model
test_loss, acc_fp32 = model.evaluate(validation_dataset,
                                     callbacks=tf.keras.callbacks.ProgbarLogger(stateful_metrics=['acc@1']))
print(f"\nAccuracy of FP32 model: {acc_fp32:.3f}")

model.save(fp32_sm_path)
print(f'Absolute path where the model is saved:\n {fp32_sm_path.resolve()}')

# Create and Initialize Quantization
nncf_config_dict = {
    "input_info": {"sample_size": [1, 3] + list(IMG_SIZE)},
    "log_dir": str(OUTPUT_DIR),  # log directory for NNCF-specific logging outputs
    "compression": {
        "algorithm": "quantization",  # specify the algorithm here
    },
}
nncf_config = NNCFConfig.from_dict(nncf_config_dict)

nncf_config = register_default_init_args(nncf_config=nncf_config,
                                         data_loader=train_dataset,
                                         batch_size=BATCH_SIZE)

compression_ctrl, model = create_compressed_model(model, nncf_config)

# Compile the int8 model
model.compile(optimizer=tf.keras.optimizers.Adam(lr=LR),
              loss=tf.keras.losses.CategoricalCrossentropy(label_smoothing=0.1),
              metrics=[tf.keras.metrics.CategoricalAccuracy(name='acc@1')])

# Validate the int8 model
test_loss, test_acc = model.evaluate(validation_dataset,
                                     callbacks=tf.keras.callbacks.ProgbarLogger(stateful_metrics=['acc@1']))
print(f"\nAccuracy of INT8 model after initialization: {test_acc:.3f}")

# Fine-tune the Compressed Model
# Train the int8 model
model.fit(train_dataset,
          epochs=2)

# Validate the int8 model
test_loss, acc_int8 = model.evaluate(validation_dataset,
                                     callbacks=tf.keras.callbacks.ProgbarLogger(stateful_metrics=['acc@1']))
print(f"\nAccuracy of INT8 model after fine-tuning: {acc_int8:.3f}")
print(f"\nAccuracy drop of tuned INT8 model over pre-trained FP32 model: {acc_fp32 - acc_int8:.3f}")

compression_ctrl.export_model(int8_pb_path, 'frozen_graph')
print(f'Absolute path where the int8 model is saved:\n {int8_pb_path.resolve()}')

# Export Frozen Graph Models to OpenVINO Intermediate Representation (IR)
!mo --framework=tf --input_shape=[1,64,64,3] --input=data --saved_model_dir=$fp32_sm_path --output_dir=$OUTPUT_DIR

!mo --framework=tf --input_shape=[1,64,64,3] --input=Placeholder --input_model=$int8_pb_path --output_dir=$OUTPUT_DIR

# Benchmark Model Performance by Computing Inference Time
def parse_benchmark_output(benchmark_output):
    parsed_output = [line for line in benchmark_output if not (line.startswith(r"[") or line.startswith("  ") or line == "")]
    print(*parsed_output, sep='\n')


print('Benchmark FP32 model (IR)')
benchmark_output = ! benchmark_app -m $fp32_ir_path -d CPU -api async -t 15
parse_benchmark_output(benchmark_output)

print('\nBenchmark INT8 model (IR)')
benchmark_output = ! benchmark_app -m $int8_ir_path -d CPU -api async -t 15
parse_benchmark_output(benchmark_output)

# Show CPU Information for reference
from openvino.runtime import Core

ie = Core()
ie.get_property(device_name='CPU', name="FULL_DEVICE_NAME")
# Imports
import sys
import os
import cv2
import numpy as np
import paddle
import math
import time
import collections
from PIL import Image
from pathlib import Path
import tarfile
import urllib.request

from openvino.runtime import Core
from IPython import display
import copy

sys.path.append("../utils")
import notebook_utils as utils
import pre_post_processing as processing

# Models for PaddleOCR
# Define the function to download text detection and recognition models from PaddleOCR resources

def run_model_download(model_url, model_file_path):
    """
    Download pre-trained models from PaddleOCR resources

    Parameters:
        model_url: url link to pre-trained models
        model_file_path: file path to store the downloaded model
    """
    model_name = model_url.split("/")[-1]
    
    if model_file_path.is_file(): 
        print("Model already exists")
    else:
        # Download the model from the server, and untar it.
        print("Downloading the pre-trained model... May take a while...")

        # create a directory
        os.makedirs("model", exist_ok=True)
        urllib.request.urlretrieve(model_url, f"model/{model_name}")
        print("Model Downloaded")

        # tarfile.extractall() returns None and raises an exception on failure
        file = tarfile.open(f"model/{model_name}")
        file.extractall("model")
        file.close()
        print(f"Model Extracted to {model_file_path}.")

# Download the Model for Text Detection
# Directory where model will be downloaded

det_model_url = "https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar"
det_model_file_path = Path("model/ch_ppocr_mobile_v2.0_det_infer/inference.pdmodel")

run_model_download(det_model_url, det_model_file_path)

# Load the Model for Text Detection
# initialize inference engine for text detection
core = Core()
det_model = core.read_model(model=det_model_file_path)
det_compiled_model = core.compile_model(model=det_model, device_name="CPU")

# get input and output nodes for text detection
det_input_layer = det_compiled_model.input(0)
det_output_layer = det_compiled_model.output(0)

# Download the Model for Text Recognition
rec_model_url = "https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar"
rec_model_file_path = Path("model/ch_ppocr_mobile_v2.0_rec_infer/inference.pdmodel")

run_model_download(rec_model_url, rec_model_file_path)

# Load the Model for Text Recognition with Dynamic Shape
# read the model and corresponding weights from file
rec_model = core.read_model(model=rec_model_file_path)

# assign dynamic shapes to every input layer on the last dimension
for input_layer in rec_model.inputs:
    input_shape = input_layer.partial_shape
    input_shape[3] = -1
    rec_model.reshape({input_layer: input_shape})

rec_compiled_model = core.compile_model(model=rec_model, device_name="CPU")

# get input and output nodes
rec_input_layer = rec_compiled_model.input(0)
rec_output_layer = rec_compiled_model.output(0)

# Preprocessing image functions for text detection and recognition
# Preprocess for text detection
def image_preprocess(input_image, size):
    """
    Preprocess input image for text detection

    Parameters:
        input_image: input image 
        size: value for the image to be resized for text detection model
    """
    img = cv2.resize(input_image, (size, size))
    img = np.transpose(img, [2, 0, 1]) / 255
    img = np.expand_dims(img, 0)
    # NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}
    img_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    img_std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
    img -= img_mean
    img /= img_std
    return img.astype(np.float32)

# Preprocess for text recognition
def resize_norm_img(img, max_wh_ratio):
    """
    Resize input image for text recognition

    Parameters:
        img: bounding box image from text detection 
        max_wh_ratio: value for the resizing for text recognition model
    """
    rec_image_shape = [3, 32, 320]
    imgC, imgH, imgW = rec_image_shape
    assert imgC == img.shape[2]
    character_type = "ch"
    if character_type == "ch":
        imgW = int((32 * max_wh_ratio))
    h, w = img.shape[:2]
    ratio = w / float(h)
    if math.ceil(imgH * ratio) > imgW:
        resized_w = imgW
    else:
        resized_w = int(math.ceil(imgH * ratio))
    resized_image = cv2.resize(img, (resized_w, imgH))
    resized_image = resized_image.astype('float32')
    resized_image = resized_image.transpose((2, 0, 1)) / 255
    resized_image -= 0.5
    resized_image /= 0.5
    padding_im = np.zeros((imgC, imgH, imgW), dtype=np.float32)
    padding_im[:, :, 0:resized_w] = resized_image
    return padding_im


def prep_for_rec(dt_boxes, frame):
    """
    Preprocessing of the detected bounding boxes for text recognition

    Parameters:
        dt_boxes: detected bounding boxes from text detection 
        frame: original input frame 
    """
    ori_im = frame.copy()
    img_crop_list = [] 
    for bno in range(len(dt_boxes)):
        tmp_box = copy.deepcopy(dt_boxes[bno])
        img_crop = processing.get_rotate_crop_image(ori_im, tmp_box)
        img_crop_list.append(img_crop)
        
    img_num = len(img_crop_list)
    # Calculate the aspect ratio of all text bars
    width_list = []
    for img in img_crop_list:
        width_list.append(img.shape[1] / float(img.shape[0]))
    
    # Sorting can speed up the recognition process
    indices = np.argsort(np.array(width_list))
    return img_crop_list, img_num, indices


def batch_text_box(img_crop_list, img_num, indices, beg_img_no, batch_num):
    """
    Batch for text recognition

    Parameters:
        img_crop_list: processed detected bounding box images 
        img_num: number of bounding boxes from text detection
        indices: sorting for bounding boxes to speed up text recognition
        beg_img_no: the beginning number of bounding boxes for each batch of text recognition inference
        batch_num: number of images for each batch
    """
    norm_img_batch = []
    max_wh_ratio = 0
    end_img_no = min(img_num, beg_img_no + batch_num)
    for ino in range(beg_img_no, end_img_no):
        h, w = img_crop_list[indices[ino]].shape[0:2]
        wh_ratio = w * 1.0 / h
        max_wh_ratio = max(max_wh_ratio, wh_ratio)
    for ino in range(beg_img_no, end_img_no):
        norm_img = resize_norm_img(img_crop_list[indices[ino]], max_wh_ratio)
        norm_img = norm_img[np.newaxis, :]
        norm_img_batch.append(norm_img)

    norm_img_batch = np.concatenate(norm_img_batch)
    norm_img_batch = norm_img_batch.copy()
    return norm_img_batch
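
# Note: because prep_for_rec sorts the crops by aspect ratio, each batch
# computes max_wh_ratio only over its own crops, so every image is padded just
# to the widest crop in that batch rather than the widest crop in the frame.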

# Postprocessing image for text detection
def post_processing_detection(frame, det_results):
    """
    Postprocess the results from text detection into bounding boxes

    Parameters:
        frame: input image 
        det_results: inference results from text detection model
    """   
    ori_im = frame.copy()
    data = {'image': frame}
    data_resize = processing.DetResizeForTest(data)
    data_list = []
    keep_keys = ['image', 'shape']
    for key in keep_keys:
        data_list.append(data_resize[key])
    img, shape_list = data_list

    shape_list = np.expand_dims(shape_list, axis=0) 
    pred = det_results[0]    
    if isinstance(pred, paddle.Tensor):
        pred = pred.numpy()
    segmentation = pred > 0.3

    boxes_batch = []
    for batch_index in range(pred.shape[0]):
        src_h, src_w, ratio_h, ratio_w = shape_list[batch_index]
        mask = segmentation[batch_index]
        boxes, scores = processing.boxes_from_bitmap(pred[batch_index], mask, src_w, src_h)
        boxes_batch.append({'points': boxes})
    post_result = boxes_batch
    dt_boxes = post_result[0]['points']
    dt_boxes = processing.filter_tag_det_res(dt_boxes, ori_im.shape)    
    return dt_boxes

# Main processing function for PaddleOCR
def run_paddle_ocr(source=0, flip=False, use_popup=False, skip_first_frames=0):
    """
    Main function to run the PaddleOCR inference:
    1. Create a video player to play with target fps (utils.VideoPlayer).
    2. Prepare a set of frames for text detection and recognition.
    3. Run AI inference for both text detection and recognition.
    4. Visualize the results.

    Parameters:
        source: the webcam number to feed the video stream ("0" for the primary webcam), or a video file path.
        flip: whether the VideoPlayer should flip the captured image
        use_popup: False to show encoded frames in this notebook, True to create a popup window.
        skip_first_frames: number of frames to skip at the beginning of the video.
    """
    # create video player to play with target fps
    player = None
    try:
        player = utils.VideoPlayer(source=source, flip=flip, fps=30, skip_first_frames=skip_first_frames)
        # Start video capturing
        player.start()
        if use_popup:
            title = "Press ESC to Exit"
            cv2.namedWindow(winname=title, flags=cv2.WINDOW_GUI_NORMAL | cv2.WINDOW_AUTOSIZE)

        processing_times = collections.deque()
        while True:
            # grab the frame
            frame = player.next()
            if frame is None:
                print("Source ended")
                break
            # if the frame is larger than full HD, reduce its size to improve performance
            scale = 1280 / max(frame.shape)
            if scale < 1:
                frame = cv2.resize(src=frame, dsize=None, fx=scale, fy=scale,
                                   interpolation=cv2.INTER_AREA)
            # preprocess image for text detection
            test_image = image_preprocess(frame, 640)
                
            # measure processing time for text detection
            start_time = time.time()
            # perform the inference step
            det_results = det_compiled_model([test_image])[det_output_layer]
            stop_time = time.time()

            # Postprocessing for Paddle Detection
            dt_boxes = post_processing_detection(frame, det_results)

            processing_times.append(stop_time - start_time)
            # use processing times from last 200 frames
            if len(processing_times) > 200:
                processing_times.popleft()
            processing_time_det = np.mean(processing_times) * 1000

            # Preprocess detection results for recognition
            dt_boxes = processing.sorted_boxes(dt_boxes)  
            batch_num = 6
            img_crop_list, img_num, indices = prep_for_rec(dt_boxes, frame)
            
            # Storage for recognition results, in two parts:
            # txts holds the recognized text, scores holds the recognition confidence levels
            rec_res = [['', 0.0]] * img_num
            txts = [] 
            scores = []

            for beg_img_no in range(0, img_num, batch_num):

                # Recognition starts from here
                norm_img_batch = batch_text_box(
                    img_crop_list, img_num, indices, beg_img_no, batch_num)

                # Run inference for text recognition 
                rec_results = rec_compiled_model([norm_img_batch])[rec_output_layer]

                # Postprocessing recognition results
                postprocess_op = processing.build_post_process(processing.postprocess_params)
                rec_result = postprocess_op(rec_results)
                for rno in range(len(rec_result)):
                    rec_res[indices[beg_img_no + rno]] = rec_result[rno]   
                if rec_res:
                    txts = [rec_res[i][0] for i in range(len(rec_res))] 
                    scores = [rec_res[i][1] for i in range(len(rec_res))]
                                   
            image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            boxes = dt_boxes
            # draw text recognition results beside the image
            draw_img = processing.draw_ocr_box_txt(
                image,
                boxes,
                txts,
                scores,
                drop_score=0.5)

            # Visualize PaddleOCR results
            f_height, f_width = draw_img.shape[:2]
            fps = 1000 / processing_time_det
            cv2.putText(img=draw_img, text=f"Inference time: {processing_time_det:.1f}ms ({fps:.1f} FPS)", 
                        org=(20, 40),fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=f_width / 1000,
                        color=(0, 0, 255), thickness=1, lineType=cv2.LINE_AA)
            
            # use this workaround if there is flickering
            if use_popup: 
                draw_img = cv2.cvtColor(draw_img, cv2.COLOR_RGB2BGR)
                cv2.imshow(winname=title, mat=draw_img)
                key = cv2.waitKey(1)
                # escape = 27
                if key == 27:
                    break
            else:
                # encode numpy array to jpg
                draw_img = cv2.cvtColor(draw_img, cv2.COLOR_RGB2BGR)
                _, encoded_img = cv2.imencode(ext=".jpg", img=draw_img,
                                              params=[cv2.IMWRITE_JPEG_QUALITY, 100])
                # create IPython image
                i = display.Image(data=encoded_img)
                # display the image in this notebook
                display.clear_output(wait=True)
                display.display(i)
            
    # ctrl-c
    except KeyboardInterrupt:
        print("Interrupted")
    # any other error
    except RuntimeError as e:
        print(e)
    finally:
        if player is not None:
            # stop capturing
            player.stop()
        if use_popup:
            cv2.destroyAllWindows()

# Run Live PaddleOCR with OpenVINO
run_paddle_ocr(source=0, flip=False, use_popup=False)

# Test OCR results on video file

video_file = "https://raw.githubusercontent.com/yoyowz/classification/master/images/test.mp4"
run_paddle_ocr(source=video_file, flip=False, use_popup=False, skip_first_frames=0)
# pip3 install openvino
 
# Install ONNX Runtime for OpenVINO™ Execution Provider
# pip3 install onnxruntime-openvino==1.11.0
 
# pip3 install -r requirements.txt
# Running the ONNXRuntime OpenVINO™ Execution Provider sample
# python3 yolov4.py --device CPU_FP32 --video classroom.mp4 --model yolov4.onnx

'''
Copyright (C) 2021-2022, Intel Corporation
SPDX-License-Identifier: Apache-2.0
Major Portions of this code are copyright of their respective authors and released under the Apache License Version 2.0:
- onnx, Copyright 2021-2022. For licensing see https://github.com/onnx/models/blob/master/LICENSE
'''

import cv2
import numpy as np
from onnx import numpy_helper
import onnx
import onnxruntime as rt
import os
from PIL import Image
from scipy import special
import colorsys
import random
import argparse
import sys
import time
import platform

if platform.system() == "Windows":
    from openvino import utils
    utils.add_openvino_libs_to_path()

def image_preprocess(image, target_size, gt_boxes=None):

    ih, iw = target_size
    h, w, _ = image.shape

    scale = min(iw/w, ih/h)
    nw, nh = int(scale * w), int(scale * h)
    image_resized = cv2.resize(image, (nw, nh))

    image_padded = np.full(shape=[ih, iw, 3], fill_value=128.0)
    dw, dh = (iw - nw) // 2, (ih-nh) // 2
    image_padded[dh:nh+dh, dw:nw+dw, :] = image_resized
    image_padded = image_padded / 255.

    if gt_boxes is None:
        return image_padded

    else:
        gt_boxes[:, [0, 2]] = gt_boxes[:, [0, 2]] * scale + dw
        gt_boxes[:, [1, 3]] = gt_boxes[:, [1, 3]] * scale + dh
        return image_padded, gt_boxes

def postprocess_bbbox(pred_bbox):
    '''decode raw predictions into boxes using the anchor boxes, strides, and xy-scales'''
    for i, pred in enumerate(pred_bbox):
        conv_shape = pred.shape
        output_size = conv_shape[1]
        conv_raw_dxdy = pred[:, :, :, :, 0:2]
        conv_raw_dwdh = pred[:, :, :, :, 2:4]
        xy_grid = np.meshgrid(np.arange(output_size), np.arange(output_size))
        xy_grid = np.expand_dims(np.stack(xy_grid, axis=-1), axis=2)

        xy_grid = np.tile(np.expand_dims(xy_grid, axis=0), [1, 1, 1, 3, 1])
        xy_grid = xy_grid.astype(float)

        pred_xy = ((special.expit(conv_raw_dxdy) * XYSCALE[i]) - 0.5 * (XYSCALE[i] - 1) + xy_grid) * STRIDES[i]
        pred_wh = (np.exp(conv_raw_dwdh) * ANCHORS[i])
        pred[:, :, :, :, 0:4] = np.concatenate([pred_xy, pred_wh], axis=-1)

    pred_bbox = [np.reshape(x, (-1, np.shape(x)[-1])) for x in pred_bbox]
    pred_bbox = np.concatenate(pred_bbox, axis=0)
    return pred_bbox

def postprocess_boxes(pred_bbox, org_img_shape, input_size, score_threshold):
    '''remove bounding boxes with a low detection probability'''
    valid_scale=[0, np.inf]
    pred_bbox = np.array(pred_bbox)

    pred_xywh = pred_bbox[:, 0:4]
    pred_conf = pred_bbox[:, 4]
    pred_prob = pred_bbox[:, 5:]

    # # (1) (x, y, w, h) --> (xmin, ymin, xmax, ymax)
    pred_coor = np.concatenate([pred_xywh[:, :2] - pred_xywh[:, 2:] * 0.5,
                                pred_xywh[:, :2] + pred_xywh[:, 2:] * 0.5], axis=-1)
    # # (2) (xmin, ymin, xmax, ymax) -> (xmin_org, ymin_org, xmax_org, ymax_org)
    org_h, org_w = org_img_shape
    resize_ratio = min(input_size / org_w, input_size / org_h)

    dw = (input_size - resize_ratio * org_w) / 2
    dh = (input_size - resize_ratio * org_h) / 2

    pred_coor[:, 0::2] = 1.0 * (pred_coor[:, 0::2] - dw) / resize_ratio
    pred_coor[:, 1::2] = 1.0 * (pred_coor[:, 1::2] - dh) / resize_ratio

    # # (3) clip some boxes that are out of range
    pred_coor = np.concatenate([np.maximum(pred_coor[:, :2], [0, 0]),
                                np.minimum(pred_coor[:, 2:], [org_w - 1, org_h - 1])], axis=-1)
    invalid_mask = np.logical_or((pred_coor[:, 0] > pred_coor[:, 2]), (pred_coor[:, 1] > pred_coor[:, 3]))
    pred_coor[invalid_mask] = 0

    # # (4) discard some invalid boxes
    bboxes_scale = np.sqrt(np.multiply.reduce(pred_coor[:, 2:4] - pred_coor[:, 0:2], axis=-1))
    scale_mask = np.logical_and((valid_scale[0] < bboxes_scale), (bboxes_scale < valid_scale[1]))

    # # (5) discard some boxes with low scores
    classes = np.argmax(pred_prob, axis=-1)
    scores = pred_conf * pred_prob[np.arange(len(pred_coor)), classes]
    score_mask = scores > score_threshold
    mask = np.logical_and(scale_mask, score_mask)
    coors, scores, classes = pred_coor[mask], scores[mask], classes[mask]

    return np.concatenate([coors, scores[:, np.newaxis], classes[:, np.newaxis]], axis=-1)

def bboxes_iou(boxes1, boxes2):
    '''calculate the Intersection Over Union value'''
    boxes1 = np.array(boxes1)
    boxes2 = np.array(boxes2)

    boxes1_area = (boxes1[..., 2] - boxes1[..., 0]) * (boxes1[..., 3] - boxes1[..., 1])
    boxes2_area = (boxes2[..., 2] - boxes2[..., 0]) * (boxes2[..., 3] - boxes2[..., 1])

    left_up       = np.maximum(boxes1[..., :2], boxes2[..., :2])
    right_down    = np.minimum(boxes1[..., 2:], boxes2[..., 2:])

    inter_section = np.maximum(right_down - left_up, 0.0)
    inter_area    = inter_section[..., 0] * inter_section[..., 1]
    union_area    = boxes1_area + boxes2_area - inter_area
    ious          = np.maximum(1.0 * inter_area / union_area, np.finfo(np.float32).eps)

    return ious
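
# Worked example (made-up boxes): two 10x10 boxes offset by 5 px overlap in a
# 5x5 region, so IoU = 25 / (100 + 100 - 25) ~= 0.143.
print(bboxes_iou(np.array([0, 0, 10, 10]), np.array([5, 5, 15, 15])))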

def nms(bboxes, iou_threshold, sigma=0.3, method='nms'):
    """
    :param bboxes: (xmin, ymin, xmax, ymax, score, class)
    Note: soft-nms, https://arxiv.org/pdf/1704.04503.pdf
          https://github.com/bharatsingh430/soft-nms
    """
    classes_in_img = list(set(bboxes[:, 5]))
    best_bboxes = []

    for cls in classes_in_img:
        cls_mask = (bboxes[:, 5] == cls)
        cls_bboxes = bboxes[cls_mask]

        while len(cls_bboxes) > 0:
            max_ind = np.argmax(cls_bboxes[:, 4])
            best_bbox = cls_bboxes[max_ind]
            best_bboxes.append(best_bbox)
            cls_bboxes = np.concatenate([cls_bboxes[: max_ind], cls_bboxes[max_ind + 1:]])
            iou = bboxes_iou(best_bbox[np.newaxis, :4], cls_bboxes[:, :4])
            weight = np.ones((len(iou),), dtype=np.float32)

            assert method in ['nms', 'soft-nms']

            if method == 'nms':
                iou_mask = iou > iou_threshold
                weight[iou_mask] = 0.0

            if method == 'soft-nms':
                weight = np.exp(-(1.0 * iou ** 2 / sigma))

            cls_bboxes[:, 4] = cls_bboxes[:, 4] * weight
            score_mask = cls_bboxes[:, 4] > 0.
            cls_bboxes = cls_bboxes[score_mask]

    return best_bboxes

def read_class_names(class_file_name):
    '''load class names from a file'''
    names = {}
    with open(class_file_name, 'r') as data:
        for ID, name in enumerate(data):
            names[ID] = name.strip('\n')
    return names
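
# The class file is expected to hold one label per line; a hypothetical
# excerpt of coco.names:
#   person
#   bicycle
#   car
# read_class_names then returns {0: 'person', 1: 'bicycle', 2: 'car', ...}.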

def draw_bbox(image, bboxes, classes=read_class_names("coco.names"), show_label=True):
    """
    bboxes: [x_min, y_min, x_max, y_max, probability, cls_id] format coordinates.
    """

    num_classes = len(classes)
    image_h, image_w, _ = image.shape
    hsv_tuples = [(1.0 * x / num_classes, 1., 1.) for x in range(num_classes)]
    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))

    random.seed(0)
    random.shuffle(colors)
    random.seed(None)

    for i, bbox in enumerate(bboxes):
        coor = np.array(bbox[:4], dtype=np.int32)
        fontScale = 0.5
        score = bbox[4]
        class_ind = int(bbox[5])
        bbox_color = colors[class_ind]
        bbox_thick = int(0.6 * (image_h + image_w) / 600)
        c1, c2 = (coor[0], coor[1]), (coor[2], coor[3])
        cv2.rectangle(image, c1, c2, bbox_color, bbox_thick)

        if show_label:
            bbox_mess = '%s: %.2f' % (classes[class_ind], score)
            t_size = cv2.getTextSize(bbox_mess, 0, fontScale, thickness=bbox_thick//2)[0]
            cv2.rectangle(image, c1, (c1[0] + t_size[0], c1[1] - t_size[1] - 3), bbox_color, -1)
            cv2.putText(image, bbox_mess, (c1[0], c1[1]-2), cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale, (0, 0, 0), bbox_thick//2, lineType=cv2.LINE_AA)

    return image

def get_anchors(anchors_path, tiny=False):
    '''loads the anchors from a file'''
    with open(anchors_path) as f:
        anchors = f.readline()
    anchors = np.array(anchors.split(','), dtype=np.float32)
    return anchors.reshape(3, 3, 2)
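
# The anchors file should contain 18 comma-separated values (3 scales x 3
# anchors x width,height) on a single line; a hypothetical yolov4_anchors.txt:
#   12,16, 19,36, 40,28, 36,75, 76,55, 72,146, 142,110, 192,243, 459,401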

#Specify the path to anchors file on your machine
ANCHORS = "./yolov4_anchors.txt"    
STRIDES = [8, 16, 32]
XYSCALE = [1.2, 1.1, 1.05]
ANCHORS = get_anchors(ANCHORS)
STRIDES = np.array(STRIDES)

def parse_arguments():
    parser = argparse.ArgumentParser(description='Object Detection using YOLOv4 in OPENCV using OpenVINO Execution Provider for ONNXRuntime')
    parser.add_argument('--device', default='CPU_FP32', help="Device to perform inference on 'cpu (MLAS)' or on devices supported by OpenVINO-EP [CPU_FP32, GPU_FP32, GPU_FP16, MYRIAD_FP16, VAD-M_FP16].")
    parser.add_argument('--image', help='Path to image file.')
    parser.add_argument('--video', help='Path to video file.')
    parser.add_argument('--model', help='Path to model.')
    args = parser.parse_args()
    return args
    
def check_model_extension(fp):
  # Split the extension from the path and normalise it to lowercase.
  ext = os.path.splitext(fp)[-1].lower()

  # Now we can simply use != to check for inequality, no need for wildcards.
  if(ext != ".onnx"):
    raise Exception(fp, "is an unknown file format. Use the model ending with .onnx format")
  
  if not os.path.exists(fp):
    raise Exception("[ ERROR ] Path of the onnx model file is Invalid")

def main(): 

    # Process arguments
    args = parse_arguments()

    # Validate model file path
    check_model_extension(args.model)

    # Process inputs
    win_name = 'Object detection using ONNXRuntime OpenVINO Execution Provider using YoloV4 model'
    cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)

    output_file = "yolo_out_py.avi"
    if (args.image):
        # Open the image file
        if not os.path.isfile(args.image):
            print("Input image file ", args.image, " doesn't exist")
            sys.exit(1)
        cap = cv2.VideoCapture(args.image)
        output_file = args.image[:-4]+'_yolo_out_py.jpg'
    elif (args.video):
        # Open the video file
        if not os.path.isfile(args.video):
            print("Input video file ", args.video, " doesn't exist")
            sys.exit(1)
        cap = cv2.VideoCapture(args.video)
        output_file = args.video[:-4]+'_yolo_out_py.avi'
    else:
        # Webcam input
        cap = cv2.VideoCapture(0)

    # Get the video writer initialized to save the output video
    if (not args.image):
        vid_writer = cv2.VideoWriter(output_file, cv2.VideoWriter_fourcc('M','J','P','G'), 30, (round(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),round(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))))
    
    # Check the device information and create a session
    device = args.device
    so = rt.SessionOptions()
    so.log_severity_level = 3
    if(args.device == 'cpu'):
        print("Device type selected is 'cpu' which is the default CPU Execution Provider (MLAS)")
        #Specify the path to the ONNX model on your machine and register the CPU EP
        sess = rt.InferenceSession(args.model, so, providers=['CPUExecutionProvider'])
    else:
        #Specify the path to the ONNX model on your machine and register the OpenVINO EP
        sess = rt.InferenceSession(args.model, so, providers=['OpenVINOExecutionProvider'], provider_options=[{'device_type' : device}])
        print("Device type selected is: " + device + " using the OpenVINO Execution Provider")
        '''
        other 'device_type' options are: (any hardware target can be assigned if you have access to it)
        'CPU_FP32', 'GPU_FP32', 'GPU_FP16', 'MYRIAD_FP16', 'VAD-M_FP16'
        '''

    input_name = sess.get_inputs()[0].name
    
    while cv2.waitKey(1) < 0:
        # get frame from the video
        has_frame, frame = cap.read()
        # Stop the program if reached end of video
        if not has_frame:
            print("Done processing !!!")
            print("Output file is stored as ", output_file)
            cv2.waitKey(3000)
            # Release device
            cap.release()
            break
            
        input_size = 416
        original_image = frame
        original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
        original_image_size = original_image.shape[:2]

        image_data = image_preprocess(np.copy(original_image), [input_size, input_size])
        image_data = image_data[np.newaxis, ...].astype(np.float32)
    
        outputs = sess.get_outputs()
        output_names = list(map(lambda output: output.name, outputs))

        start = time.time()
        detections = sess.run(output_names, {input_name: image_data})
        end = time.time()
        inference_time = end - start

        pred_bbox = postprocess_bbbox(detections)
        bboxes = postprocess_boxes(pred_bbox, original_image_size, input_size, 0.25)
        bboxes = nms(bboxes, 0.213, method='nms')
        image = draw_bbox(original_image, bboxes)

        cv2.putText(image,device,(10,20),cv2.FONT_HERSHEY_COMPLEX,0.5,(255,255,255),1)
        cv2.putText(image,'FPS: {:.1f}'.format(1.0/inference_time),(10,40),cv2.FONT_HERSHEY_COMPLEX,0.5,(255,255,255),1)

        # Write the frame with the detection boxes
        if (args.image):
            cv2.imwrite(output_file, image.astype(np.uint8))
        else:
            vid_writer.write(image.astype(np.uint8))
 
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        cv2.imshow(win_name, image)

if __name__ == "__main__":
  main()
# pip3 install openvino

# Install ONNX Runtime for OpenVINO™ Execution Provider
# pip3 install onnxruntime-openvino==1.11.0

# pip3 install -r requirements.txt
# How to run the sample
# python3 tiny_yolov2_obj_detection_sample.py --h

# Running the ONNXRuntime OpenVINO™ Execution Provider sample
# python3 tiny_yolov2_obj_detection_sample.py --video face-demographics-walking-and-pause.mp4 --model tinyyolov2.onnx --device CPU_FP32

'''
Copyright (C) 2021-2022, Intel Corporation
SPDX-License-Identifier: Apache-2.0
'''

import numpy as np
import onnxruntime as rt
import cv2
import time
import os
import argparse
import platform

if platform.system() == "Windows":
    from openvino import utils
    utils.add_openvino_libs_to_path()

# color look-up table for the different classes in the object detection sample
clut = [(0,0,0),(255,0,0),(255,0,255),(0,0,255),(0,255,0),(0,255,128),
        (128,255,0),(128,128,0),(0,128,255),(128,0,128),
        (255,0,128),(128,0,255),(255,128,128),(128,255,128),(255,255,0),
        (255,128,128),(128,128,255),(255,128,128),(128,255,128),(128,255,128)]

# the 20 class labels that the tiny-yolov2 model can detect
label = ["aeroplane","bicycle","bird","boat","bottle",
         "bus","car","cat","chair","cow","diningtable",
         "dog","horse","motorbike","person","pottedplant",
          "sheep","sofa","train","tvmonitor"]

def parse_arguments():
  parser = argparse.ArgumentParser(description='Object Detection using YOLOv2 in OPENCV using OpenVINO Execution Provider for ONNXRuntime')
  parser.add_argument('--device', default='CPU_FP32', help="Device to perform inference on 'cpu (MLAS)' or on devices supported by OpenVINO-EP [CPU_FP32, GPU_FP32, GPU_FP16, MYRIAD_FP16, VAD-M_FP16].")
  parser.add_argument('--video', help='Path to video file.')
  parser.add_argument('--model', help='Path to model.')
  args = parser.parse_args()
  return args

def sigmoid(x, derivative=False):
  return x*(1-x) if derivative else 1/(1+np.exp(-x))

def softmax(x):
  score_mat_exp = np.exp(np.asarray(x))
  return score_mat_exp / score_mat_exp.sum(0)
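
# Illustrative check with made-up logits: softmax produces a probability
# distribution (sums to 1) and sigmoid squashes values into (0, 1), e.g.
#   softmax(np.array([1.0, 2.0, 3.0]))  # ~[0.09, 0.24, 0.67]
#   sigmoid(0.0)                        # 0.5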

def check_model_extension(fp):
  # Split the extension from the path and normalise it to lowercase.
  ext = os.path.splitext(fp)[-1].lower()

  # Now we can simply use != to check for inequality, no need for wildcards.
  if(ext != ".onnx"):
    raise Exception(fp, "is an unknown file format. Use the model ending with .onnx format")
  
  if not os.path.exists(fp):
    raise Exception("[ ERROR ] Path of the onnx model file is Invalid")

def check_video_file_extension(fp):
  # Split the extension from the path and normalise it to lowercase.
  ext = os.path.splitext(fp)[-1].lower()

  if ext == ".mp4" or ext == ".avi" or ext == ".mov":
    pass
  else:
    raise Exception(fp, "is an unknown file format. Use a video file with the .mp4, .avi, or .mov extension")

  if not os.path.exists(fp):
    raise Exception("[ ERROR ] Path to the video file is invalid")

def image_preprocess(frame):
  in_frame = cv2.resize(frame, (416, 416))
  preprocessed_image = np.asarray(in_frame)
  preprocessed_image = preprocessed_image.astype(np.float32)
  preprocessed_image = preprocessed_image.transpose(2,0,1)
  #Reshaping the input array to align with the input shape of the model
  preprocessed_image = preprocessed_image.reshape(1,3,416,416)
  return preprocessed_image

def postprocess_output(out, frame, x_scale, y_scale, i):
  out = out[0][0]
  num_classes = 20
  anchors = [1.08, 1.19, 3.42, 4.41, 6.63, 11.38, 9.42, 5.11, 16.62, 10.52]
  existing_labels = {l: [] for l in label}

  #Inside this loop we compute the bounding box b for grid cell (cy, cx)
  for cy in range(0,13):
    for cx in range(0,13):
      for b in range(0,5):
        # read tx, ty, width (tw), height (th) and the confidence score for the bounding box from the out array
        channel = b*(num_classes+5)
        tx = out[channel  ][cy][cx]
        ty = out[channel+1][cy][cx]
        tw = out[channel+2][cy][cx]
        th = out[channel+3][cy][cx]
        tc = out[channel+4][cy][cx]

        x = (float(cx) + sigmoid(tx))*32
        y = (float(cy) + sigmoid(ty))*32
        w = np.exp(tw) * 32 * anchors[2*b]
        h = np.exp(th) * 32 * anchors[2*b+1] 

        #calculating the confidence score
        confidence = sigmoid(tc) # The confidence value for the bounding box is given by tc
        classes = np.zeros(num_classes)
        for c in range(0,num_classes):
          classes[c] = out[channel + 5 + c][cy][cx]

        # take the softmax to turn the class scores into a probability
        # distribution, then pick the class with the largest score as the winner
        classes = softmax(classes)
        detected_class = classes.argmax()

        # compute the final score for this bounding box and keep it only when
        # the combined score is over the threshold
        if 0.60 < classes[detected_class]*confidence:
          color = clut[detected_class]
          x = (x - w/2)*x_scale
          y = (y - h/2)*y_scale
          w *= x_scale
          h *= y_scale

          labelX = int((x+x+w)/2)
          labelY = int((y+y+h)/2)
          addLabel = True
          lab_threshold = 100
          for point in existing_labels[label[detected_class]]:
            if labelX < point[0] + lab_threshold and labelX > point[0] - lab_threshold and \
               labelY < point[1] + lab_threshold and labelY > point[1] - lab_threshold:
              addLabel = False
          # add the class label to the frame and draw a rectangular bounding box around the detected object
          if addLabel:
            cv2.rectangle(frame, (int(x),int(y)),(int(x+w),int(y+h)),color,2)
            cv2.rectangle(frame, (int(x),int(y-13)),(int(x)+9*len(label[detected_class]),int(y)),color,-1)
            cv2.putText(frame,label[detected_class],(int(x)+2,int(y)-3),cv2.FONT_HERSHEY_COMPLEX,0.4,(255,255,255),1)
            existing_labels[label[detected_class]].append((labelX,labelY))
          print('{} detected in frame {}'.format(label[detected_class],i))
  

def show_bbox(device, frame, inference_time):
  cv2.putText(frame,device,(10,20),cv2.FONT_HERSHEY_COMPLEX,0.5,(255,255,255),1)
  cv2.putText(frame,'FPS: {:.1f}'.format(1.0/inference_time),(10,40),cv2.FONT_HERSHEY_COMPLEX,0.5,(255,255,255),1)
  frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
  cv2.imshow('frame',frame)

def main():
  
  # Process arguments
  args = parse_arguments()

  # Validate model file path
  check_model_extension(args.model)
  so = rt.SessionOptions()
  so.log_severity_level = 3
  if (args.device == 'cpu'):
    print("Device type selected is 'cpu' which is the default CPU Execution Provider (MLAS)")
    #Specify the path to the ONNX model on your machine and register the CPU EP
    sess = rt.InferenceSession(args.model, so, providers=['CPUExecutionProvider'])
  elif (args.device == 'CPU_FP32' or args.device == 'GPU_FP32' or args.device == 'GPU_FP16' or args.device == 'MYRIAD_FP16' or args.device == 'VADM_FP16'):
    #Specify the path to the ONNX model on your machine and register the OpenVINO EP
    sess = rt.InferenceSession(args.model, so, providers=['OpenVINOExecutionProvider'], provider_options=[{'device_type' : args.device}])
    print("Device type selected is: " + args.device + " using the OpenVINO Execution Provider")
    '''
    other 'device_type' options are: (any hardware target can be assigned if you have access to it)
    'CPU_FP32', 'GPU_FP32', 'GPU_FP16', 'MYRIAD_FP16', 'VAD-M_FP16'
    '''
  else:
    raise Exception("Device type selected is not [cpu, CPU_FP32, GPU_FP32, GPU_FP16, MYRIAD_FP16, VADM_FP16]")

  # Get the input name of the model
  input_name = sess.get_inputs()[0].name

  #validate video file input path
  check_video_file_extension(args.video)

  #Path to video file has to be provided
  cap = cv2.VideoCapture(args.video)

  # capturing different metrics of the image from the video
  fps = cap.get(cv2.CAP_PROP_FPS)
  width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
  height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
  x_scale = float(width)/416.0  # per the tiny-yolo-v2 documentation, the network input shape is (1,3,416,416)
  y_scale = float(height)/416.0
 
  # writing the inferencing output as a video to the local disk
  fourcc = cv2.VideoWriter_fourcc(*'XVID')
  output_video_name = args.device + "_output.avi"
  output_video = cv2.VideoWriter(output_video_name,fourcc, float(17.0), (640,360))

  # capturing one frame at a time from the video feed and performing the inference
  i = 0
  while cv2.waitKey(1) < 0:
    l_start = time.time()
    ret, frame = cap.read()
    if not ret:
      break
    initial_w = cap.get(3)
    initial_h = cap.get(4)
        
    # preprocess the input frame and reshape it
    # per the tiny-yolo-v2 documentation, the network input shape is (1,3,416,416), so the frame is resized to match
    preprocessed_image = image_preprocess(frame)

    start = time.time()
    #Running the session by passing in the input data of the model
    out = sess.run(None, {input_name: preprocessed_image})
    end = time.time()
    inference_time = end - start

    #Get the output
    postprocess_output(out, frame, x_scale, y_scale, i)
   
    #Show the Output
    output_video.write(frame)
    show_bbox(args.device, frame, inference_time)
        
    # press any key in the display window to quit
    print('Processed Frame {}'.format(i))
    i += 1
    l_end = time.time()
    print('Loop Time = {}'.format(l_end - l_start))

  output_video.release()
  cv2.destroyAllWindows()

if __name__ == "__main__":
  main()
# Imports
from collections import namedtuple
from itertools import groupby
from pathlib import Path

import cv2
import matplotlib.pyplot as plt
import numpy as np
from openvino.runtime import Core

# Settings
# Directories where data will be placed
model_folder = "model"
data_folder = "data"
charlist_folder = f"{data_folder}/charlists"

# Precision used by model
precision = "FP16"

Language = namedtuple(
    typename="Language", field_names=["model_name", "charlist_name", "demo_image_name"]
)
chinese_files = Language(
    model_name="handwritten-simplified-chinese-recognition-0001",
    charlist_name="chinese_charlist.txt",
    demo_image_name="handwritten_chinese_test.jpg",
)
japanese_files = Language(
    model_name="handwritten-japanese-recognition-0001",
    charlist_name="japanese_charlist.txt",
    demo_image_name="handwritten_japanese_test.png",
)

# Select Language
# Select language by using either language='chinese' or language='japanese'
language = "chinese"

languages = {"chinese": chinese_files, "japanese": japanese_files}

selected_language = languages.get(language)

# Download Model
path_to_model_weights = Path(f'{model_folder}/intel/{selected_language.model_name}/{precision}/{selected_language.model_name}.bin')
if not path_to_model_weights.is_file():
    download_command = f'omz_downloader --name {selected_language.model_name} --output_dir {model_folder} --precision {precision}'
    print(download_command)
    ! $download_command

# Load Network and Execute
ie = Core()
path_to_model = path_to_model_weights.with_suffix(".xml")
model = ie.read_model(model=path_to_model)

# Select Device Name
# To check available device names run the line below
# print(ie.available_devices)

compiled_model = ie.compile_model(model=model, device_name="CPU")

# Fetch Information About Input and Output Layers
recognition_output_layer = compiled_model.output(0)
recognition_input_layer = compiled_model.input(0)

# Load an Image
# Read file name of demo file based on the selected model

file_name = selected_language.demo_image_name

# The text recognition model expects an image in grayscale format
# IMPORTANT: this model reads only one line of text at a time

# Read image
image = cv2.imread(filename=f"{data_folder}/{file_name}", flags=cv2.IMREAD_GRAYSCALE)

# Fetch shape
image_height, _ = image.shape

# B,C,H,W = batch size, number of channels, height, width
_, _, H, W = recognition_input_layer.shape

# Calculate scale ratio between input shape height and image height to resize image
scale_ratio = H / image_height

# Resize image to expected input sizes
resized_image = cv2.resize(
    image, None, fx=scale_ratio, fy=scale_ratio, interpolation=cv2.INTER_AREA
)

# Pad image to match input size, without changing aspect ratio
resized_image = np.pad(
    resized_image, ((0, 0), (0, W - resized_image.shape[1])), mode="edge"
)

# Reshape to the network input shape
input_image = resized_image[None, None, :, :]

# Visualise Input Image
plt.figure(figsize=(20, 1))
plt.axis("off")
plt.imshow(resized_image, cmap="gray", vmin=0, vmax=255);

# Prepare Charlist
# Get the charlist used to decode the output, based on the model documentation
used_charlist = selected_language.charlist_name

# With both models, there should be blank symbol added at index 0 of each charlist
blank_char = "~"

with open(f"{charlist_folder}/{used_charlist}", "r", encoding="utf-8") as charlist:
    letters = blank_char + "".join(line.strip() for line in charlist)

# Run Inference
# Run inference on the model
predictions = compiled_model([input_image])[recognition_output_layer]

# Process Output Data
# Remove batch dimension
predictions = np.squeeze(predictions)

# Run argmax to pick the symbols with the highest probability
predictions_indexes = np.argmax(predictions, axis=1)

# Use groupby to collapse repeated symbols, as required by CTC greedy decoding
output_text_indexes = list(groupby(predictions_indexes))

# Remove grouper objects
output_text_indexes, _ = np.transpose(output_text_indexes, (1, 0))

# Remove blank symbols
output_text_indexes = output_text_indexes[output_text_indexes != 0]

# Assign letters to indexes from output array
output_text = [letters[letter_index] for letter_index in output_text_indexes]
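
# Minimal illustration of the CTC greedy decoding above (made-up indexes,
# blank symbol at index 0): repeats collapse first, then blanks are dropped,
# so [1, 1, 0, 2, 2, 0, 2] decodes to the symbols at indexes 1, 2, 2.
demo_indexes = [key for key, _ in groupby([1, 1, 0, 2, 2, 0, 2])]
print([index for index in demo_indexes if index != 0])  # -> [1, 2, 2]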

# Print Output
plt.figure(figsize=(20, 1))
plt.axis("off")
plt.imshow(resized_image, cmap="gray", vmin=0, vmax=255)

print("".join(output_text))
# Imports
import shutil
import sys
from pathlib import Path

import cv2
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import Markdown, display
from PIL import Image
from openvino.runtime import Core
from yaspin import yaspin

sys.path.append("../utils")
from notebook_utils import load_image

# Settings
ie = Core()

model_dir = Path("model")
precision = "FP16"
detection_model = "horizontal-text-detection-0001"
recognition_model = "text-recognition-resnet-fc"
base_model_dir = Path("~/open_model_zoo_models").expanduser()
omz_cache_dir = Path("~/open_model_zoo_cache").expanduser()

model_dir.mkdir(exist_ok=True)

# Download Models
download_command = f"omz_downloader --name {detection_model},{recognition_model} --output_dir {base_model_dir} --cache_dir {omz_cache_dir} --precision {precision}"
display(Markdown(f"Download command: `{download_command}`"))
with yaspin(text=f"Downloading {detection_model}, {recognition_model}") as sp:
    download_result = !$download_command
    print(download_result)
    sp.text = f"Finished downloading {detection_model}, {recognition_model}"
    sp.ok("✔")

# Convert Models
convert_command = f"omz_converter --name {recognition_model} --precisions {precision} --download_dir {base_model_dir} --output_dir {base_model_dir}"
display(Markdown(f"Convert command: `{convert_command}`"))
display(Markdown(f"Converting {recognition_model}..."))
! $convert_command

# Copy Models
models_info_output = %sx omz_info_dumper --name $detection_model,$recognition_model
print(f'sx omz_info_dumper --name {detection_model},{recognition_model}')
detection_model_info, recognition_model_info = [
    {
        "name": "horizontal-text-detection-0001",
        "composite_model_name": None,
        "description": "Horizontal text detector based on FCOS with light MobileNetV2 backbone",
        "framework": "dldt",
        "license_url": "https://raw.githubusercontent.com/openvinotoolkit/open_model_zoo/master/LICENSE",
        "precisions": [
            "FP16",
            "FP16-INT8",
            "FP32"
        ],
        "quantization_output_precisions": [],
        "subdirectory": "intel/horizontal-text-detection-0001",
        "task_type": "detection"
    },
    {
        "name": "text-recognition-resnet-fc",
        "composite_model_name": None,
        "description": "\"text-recognition-resnet-fc\" is a simple and preformant scene text recognition model based on ResNet with Fully Connected text recognition head. Source implementation on a PyTorch* framework could be found here <https://github.com/Media-Smart/vedastr>. Model is able to recognize alphanumeric text.",
        "framework": "pytorch",
        "license_url": "https://raw.githubusercontent.com/Media-Smart/vedastr/0fd2a0bd7819ae4daa2a161501e9f1c2ac67e96a/LICENSE",
        "precisions": [
            "FP16",
            "FP32"
        ],
        "quantization_output_precisions": [],
        "subdirectory": "public/text-recognition-resnet-fc",
        "task_type": "optical_character_recognition"
    }
]

for model_info in (detection_model_info, recognition_model_info):
    omz_dir = Path(model_info["subdirectory"])
    omz_model_dir = base_model_dir / omz_dir / precision
    print(omz_model_dir) 
    for model_file in omz_model_dir.iterdir():
        try:
            shutil.copyfile(model_file, model_dir / model_file.name)
        except FileExistsError:
            pass

detection_model_path = (model_dir / detection_model).with_suffix(".xml")
recognition_model_path = (model_dir / recognition_model).with_suffix(".xml")

# Load Detection Model
detection_model = ie.read_model(
    model=detection_model_path, weights=detection_model_path.with_suffix(".bin")
)
detection_compiled_model = ie.compile_model(model=detection_model, device_name="CPU")

detection_input_layer = detection_compiled_model.input(0)

# Load an Image
# image_file can point to a URL or local image
image_file = "https://github.com/openvinotoolkit/openvino_notebooks/raw/main/notebooks/004-hello-detection/data/intel_rnb.jpg"

image = load_image(image_file)

# N,C,H,W = batch size, number of channels, height, width
N, C, H, W = detection_input_layer.shape

# Resize the image to the network's expected input size
resized_image = cv2.resize(image, (W, H))

# Reshape to network input shape
input_image = np.expand_dims(resized_image.transpose(2, 0, 1), 0)

plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB));

# Do Inference
output_key = detection_compiled_model.output("boxes")
boxes = detection_compiled_model([input_image])[output_key]

# Remove zero-only boxes
boxes = boxes[~np.all(boxes == 0, axis=1)]

# Get Detection Results
def multiply_by_ratio(ratio_x, ratio_y, box):
    return [
        max(shape * ratio_y, 10) if idx % 2 else shape * ratio_x
        for idx, shape in enumerate(box[:-1])
    ]
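
# multiply_by_ratio maps box coordinates from the resized detection input back
# to the original image: even indexes (x) are scaled by ratio_x, odd indexes
# (y) by ratio_y (clamped to at least 10 px); the confidence value in box[-1]
# is dropped.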


def run_preprocessing_on_crop(crop, net_shape):
    temp_img = cv2.resize(crop, net_shape)
    # add batch and channel dimensions: (H, W) -> (1, 1, H, W)
    temp_img = temp_img.reshape((1,) * 2 + temp_img.shape)
    return temp_img


def convert_result_to_image(bgr_image, resized_image, boxes, threshold=0.3, conf_labels=True):
    # Define colors for boxes and descriptions
    colors = {"red": (255, 0, 0), "green": (0, 255, 0), "white": (255, 255, 255)}

    # Fetch image shapes to calculate ratio
    (real_y, real_x), (resized_y, resized_x) = bgr_image.shape[:2], resized_image.shape[:2]
    ratio_x, ratio_y = real_x / resized_x, real_y / resized_y

    # Convert base image from bgr to rgb format
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)

    # Iterate through non-zero boxes
    for box, annotation in boxes:
        # Pick confidence factor from last place in array
        conf = box[-1]
        if conf > threshold:
            # Convert float to int and multiply position of each box by x and y ratio
            (x_min, y_min, x_max, y_max) = map(int, multiply_by_ratio(ratio_x, ratio_y, box))

            # Draw box based on position, parameters in rectangle function are: image, start_point, end_point, color, thickness
            cv2.rectangle(rgb_image, (x_min, y_min), (x_max, y_max), colors["green"], 3)

            # Add text to image based on position and confidence, parameters in putText function are: image, text, bottomleft_corner_textfield, font, font_scale, color, thickness, line_type
            if conf_labels:
                # Create background box based on annotation length
                (text_w, text_h), _ = cv2.getTextSize(
                    f"{annotation}", cv2.FONT_HERSHEY_TRIPLEX, 0.8, 1
                )
                image_copy = rgb_image.copy()
                cv2.rectangle(
                    image_copy,
                    (x_min, y_min - text_h - 10),
                    (x_min + text_w, y_min - 10),
                    colors["white"],
                    -1,
                )
                # Add weighted image copy with white boxes under text
                cv2.addWeighted(image_copy, 0.4, rgb_image, 0.6, 0, rgb_image)
                cv2.putText(
                    rgb_image,
                    f"{annotation}",
                    (x_min, y_min - 10),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.8,
                    colors["red"],
                    1,
                    cv2.LINE_AA,
                )

    return rgb_image

# Load Text Recognition Model
recognition_model = ie.read_model(
    model=recognition_model_path, weights=recognition_model_path.with_suffix(".bin")
)

recognition_compiled_model = ie.compile_model(model=recognition_model, device_name="CPU")

recognition_output_layer = recognition_compiled_model.output(0)
recognition_input_layer = recognition_compiled_model.input(0)

# Get height and width of input layer
_, _, H, W = recognition_input_layer.shape

# Do Inference
# Calculate scale for image resizing
(real_y, real_x), (resized_y, resized_x) = image.shape[:2], resized_image.shape[:2]
ratio_x, ratio_y = real_x / resized_x, real_y / resized_y

# Convert image to grayscale for text recognition model
grayscale_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Get the charlist used to decode the output, based on the model documentation
letters = "~0123456789abcdefghijklmnopqrstuvwxyz"

# Prepare empty list for annotations
annotations = list()
cropped_images = list()
# fig, ax = plt.subplots(len(boxes), 1, figsize=(5,15), sharex=True, sharey=True)
# For each crop, based on the boxes given by the detection model, get the annotation
for i, crop in enumerate(boxes):
    # Get coordinates on corners of crop
    (x_min, y_min, x_max, y_max) = map(int, multiply_by_ratio(ratio_x, ratio_y, crop))
    image_crop = run_preprocessing_on_crop(grayscale_image[y_min:y_max, x_min:x_max], (W, H))

    # Run inference with recognition model
    result = recognition_compiled_model([image_crop])[recognition_output_layer]

    # Squeeze output to remove unnecessary dimensions
    recognition_results_test = np.squeeze(result)

    # Read annotation based on probabilities from output layer
    annotation = list()
    for letter in recognition_results_test:
        parsed_letter = letters[letter.argmax()]

        # An argmax result of index 0 signals the end of the string
        if parsed_letter == letters[0]:
            break
        annotation.append(parsed_letter)
    annotations.append("".join(annotation))
    cropped_image = Image.fromarray(image[y_min:y_max, x_min:x_max])
    cropped_images.append(cropped_image)

boxes_with_annotations = list(zip(boxes, annotations))

# Show Detected Text Boxes and OCR Results for the Image
plt.figure(figsize=(12, 12))
plt.imshow(convert_result_to_image(image, resized_image, boxes_with_annotations, conf_labels=True));

# Show the OCR Result per Bounding Box
for cropped_image, annotation in zip(cropped_images, annotations):
    display(cropped_image, Markdown("".join(annotation)))

# Print Annotations in Plain Text Format
[
    annotation
    for _, annotation in sorted(zip(boxes, annotations), key=lambda x: x[0][0] ** 2 + x[0][1] ** 2)
]
# Imports
import operator
import time
from urllib import parse

import numpy as np
from openvino.runtime import Core

import html_reader as reader
import tokens_bert as tokens

# Download the model
# directory where model will be downloaded
base_model_dir = "model"

# desired precision
precision = "FP16-INT8"

# model name as named in Open Model Zoo
model_name = "bert-small-uncased-whole-word-masking-squad-int8-0002"

model_path = f"model/intel/{model_name}/{precision}/{model_name}.xml"
model_weights_path = f"model/intel/{model_name}/{precision}/{model_name}.bin"

download_command = f"omz_downloader " \
                   f"--name {model_name} " \
                   f"--precision {precision} " \
                   f"--output_dir {base_model_dir} " \
                   f"--cache_dir {base_model_dir}"
! $download_command

# Load the model
# initialize inference engine
core = Core()
# read the network and corresponding weights from file
model = core.read_model(model=model_path, weights=model_weights_path)
# load the model on the CPU (you can use GPU as well)
compiled_model = core.compile_model(model=model, device_name="CPU")

# get input and output names of nodes
input_keys = list(compiled_model.inputs)
output_keys = list(compiled_model.outputs)

# get network input size
input_size = compiled_model.input(0).shape[1]

# Processing
# path to vocabulary file
vocab_file_path = "data/vocab.txt"

# create dictionary with words and their indices
vocab = tokens.load_vocab_file(vocab_file_path)

# define special tokens
cls_token = vocab["[CLS]"]
pad_token = vocab["[PAD]"]
sep_token = vocab["[SEP]"]


# function to load text from given urls
def load_context(sources):
    input_urls = []
    paragraphs = []
    for source in sources:
        result = parse.urlparse(source)
        if all([result.scheme, result.netloc]):
            input_urls.append(source)
        else:
            paragraphs.append(source)

    paragraphs.extend(reader.get_paragraphs(input_urls))
    # produce one big context string
    return "\n".join(paragraphs)

# Preprocessing
# generator of a sequence of inputs
def prepare_input(question_tokens, context_tokens):
    # length of question in tokens
    question_len = len(question_tokens)
    # context part size
    context_len = input_size - question_len - 3

    if context_len < 16:
        raise RuntimeError("Question is too long in comparison to input size. No space for context")

    # take parts of the context with 50% overlap
    for start in range(0, max(1, len(context_tokens) - context_len), context_len // 2):
        # part of context
        part_context_tokens = context_tokens[start:start + context_len]
        # input: question and context separated by special tokens
        input_ids = [cls_token] + question_tokens + [sep_token] + part_context_tokens + [sep_token]
        # attention mask: 1 for real tokens, 0 for padding
        attention_mask = [1] * len(input_ids)
        # 0 for question tokens, 1 for context part
        token_type_ids = [0] * (question_len + 2) + [1] * (len(part_context_tokens) + 1)

        # add padding at the end
        (input_ids, attention_mask, token_type_ids), pad_number = pad(input_ids=input_ids,
                                                                      attention_mask=attention_mask,
                                                                      token_type_ids=token_type_ids)

        # create input to feed the model
        input_dict = {
            "input_ids": np.array([input_ids], dtype=np.int32),
            "attention_mask": np.array([attention_mask], dtype=np.int32),
            "token_type_ids": np.array([token_type_ids], dtype=np.int32),
        }

        # some models require additional position_ids
        if "position_ids" in [i_key.any_name for i_key in input_keys]:
            position_ids = np.arange(len(input_ids))
            input_dict["position_ids"] = np.array([position_ids], dtype=np.int32)

        yield input_dict, pad_number, start


# function to add padding
def pad(input_ids, attention_mask, token_type_ids):
    # how many padding tokens
    diff_input_size = input_size - len(input_ids)

    if diff_input_size > 0:
        # add padding to all inputs
        input_ids = input_ids + [pad_token] * diff_input_size
        attention_mask = attention_mask + [0] * diff_input_size
        token_type_ids = token_type_ids + [0] * diff_input_size

    return (input_ids, attention_mask, token_type_ids), diff_input_size
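
# Sketch of the padding behaviour (hypothetical numbers): with input_size = 384
# and 380 tokens of question + context + special tokens, pad() appends four
# [PAD] ids, extends the attention mask with four zeros, and returns
# diff_input_size = 4 so postprocessing can ignore the padded tail.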

# Postprocessing
# based on https://github.com/openvinotoolkit/open_model_zoo/blob/bf03f505a650bafe8da03d2747a8b55c5cb2ef16/demos/common/python/openvino/model_zoo/model_api/models/bert.py#L163
def postprocess(output_start, output_end, question_tokens, context_tokens_start_end, padding, start_idx):

    def get_score(logits):
        out = np.exp(logits)
        return out / out.sum(axis=-1)

    # get start-end scores for context
    score_start = get_score(output_start)
    score_end = get_score(output_end)

    # index of first context token in tensor
    context_start_idx = len(question_tokens) + 2
    # index of last+1 context token in tensor
    context_end_idx = input_size - padding - 1

    # find product of all start-end combinations to find the best one
    max_score, max_start, max_end = find_best_answer_window(start_score=score_start,
                                                            end_score=score_end,
                                                            context_start_idx=context_start_idx,
                                                            context_end_idx=context_end_idx)

    # convert to context text start-end index
    max_start = context_tokens_start_end[max_start + start_idx][0]
    max_end = context_tokens_start_end[max_end + start_idx][1]

    return max_score, max_start, max_end


# based on https://github.com/openvinotoolkit/open_model_zoo/blob/bf03f505a650bafe8da03d2747a8b55c5cb2ef16/demos/common/python/openvino/model_zoo/model_api/models/bert.py#L188
def find_best_answer_window(start_score, end_score, context_start_idx, context_end_idx):
    context_len = context_end_idx - context_start_idx
    score_mat = np.matmul(
        start_score[context_start_idx:context_end_idx].reshape((context_len, 1)),
        end_score[context_start_idx:context_end_idx].reshape((1, context_len)),
    )
    # reset candidates with end before start
    score_mat = np.triu(score_mat)
    # reset long candidates (>16 words)
    score_mat = np.tril(score_mat, 16)
    # find the best start-end pair
    max_s, max_e = divmod(score_mat.flatten().argmax(), score_mat.shape[1])
    max_score = score_mat[max_s, max_e]

    return max_score, max_s, max_e
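
# How the window search works: the outer product above builds a score matrix
# over every (start, end) pair; np.triu zeroes pairs whose end precedes their
# start, np.tril(..., 16) zeroes answers longer than 16 tokens, and the argmax
# then lands on the best remaining window.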

def get_best_answer(question, context):
    # convert context string to tokens
    context_tokens, context_tokens_start_end = tokens.text_to_tokens(text=context.lower(),
                                                                     vocab=vocab)
    # convert question string to tokens
    question_tokens, _ = tokens.text_to_tokens(text=question.lower(), vocab=vocab)

    results = []
    # iterate through different parts of context
    for network_input, padding, start_idx in prepare_input(question_tokens=question_tokens,
                                                           context_tokens=context_tokens):
        # get output layers
        output_start_key = compiled_model.output("output_s")
        output_end_key = compiled_model.output("output_e")

        # openvino inference
        result = compiled_model(network_input)
        # postprocess the result getting the score and context range for the answer
        score_start_end = postprocess(output_start=result[output_start_key][0],
                                      output_end=result[output_end_key][0],
                                      question_tokens=question_tokens,
                                      context_tokens_start_end=context_tokens_start_end,
                                      padding=padding,
                                      start_idx=start_idx)
        results.append(score_start_end)

    # find the highest score
    answer = max(results, key=operator.itemgetter(0))
    # return the part of the context that contains the answer
    return context[answer[1]:answer[2]], answer[0]

# Main Processing Function
def run_question_answering(sources):
    print(f"Context: {sources}", flush=True)
    context = load_context(sources)

    if len(context) == 0:
        print("Error: Empty context or outside paragraphs")
        return

    while True:
        question = input()
        # if no question - break
        if question == "":
            break

        # measure processing time
        start_time = time.perf_counter()
        answer, score = get_best_answer(question=question, context=context)
        end_time = time.perf_counter()

        print(f"Question: {question}")
        print(f"Answer: {answer}")
        print(f"Score: {score:.2f}")
        print(f"Time: {end_time - start_time:.2f}s")

# Run on local paragraphs
sources = ["Computational complexity theory is a branch of the theory of computation in theoretical computer "
           "science that focuses on classifying computational problems according to their inherent difficulty, "
           "and relating those classes to each other. A computational problem is understood to be a task that "
           "is in principle amenable to being solved by a computer, which is equivalent to stating that the "
           "problem may be solved by mechanical application of mathematical steps, such as an algorithm."]

run_question_answering(sources)

# Run on websites
sources = ["https://en.wikipedia.org/wiki/OpenVINO"]

run_question_answering(sources)
# Imports
from pathlib import Path

import IPython.display as ipd
import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
import scipy
from openvino.runtime import Core

# Settings
model_folder = "model"
download_folder = "output"
data_folder = "data"

precision = "FP16"
model_name = "quartznet-15x5-en"

# Download Model
# Check if model is already downloaded in download directory
path_to_model_weights = Path(f'{download_folder}/public/{model_name}/models')
downloaded_model_file = list(path_to_model_weights.glob('*.pth'))

if not path_to_model_weights.is_dir() or len(downloaded_model_file) == 0:
    download_command = f"omz_downloader --name {model_name} --output_dir {download_folder} --precision {precision}"
    ! $download_command

# Convert Model
# Check if model is already converted in model directory
path_to_converted_weights = Path(f'{model_folder}/public/{model_name}/{precision}/{model_name}.bin')

if not path_to_converted_weights.is_file():
    convert_command = f"omz_converter --name {model_name} --precisions {precision} --download_dir {download_folder} --output_dir {model_folder}"
    ! $convert_command

# Defining constants
audio_file_name = "edge_to_cloud.ogg"
alphabet = " abcdefghijklmnopqrstuvwxyz'~"

# Load Audio File
audio, sampling_rate = librosa.load(path=f'{data_folder}/{audio_file_name}', sr=16000)
ipd.Audio(audio, rate=sampling_rate)

# Visualise Audio File
plt.figure()
librosa.display.waveplot(y=audio, sr=sampling_rate, max_points=50000.0, x_axis='time', offset=0.0, max_sr=1000);
plt.show()
specto_audio = librosa.stft(audio)
specto_audio = librosa.amplitude_to_db(np.abs(specto_audio), ref=np.max)
print(specto_audio.shape)
librosa.display.specshow(specto_audio, sr=sampling_rate, x_axis='time', y_axis='hz');

# Change Type of Data
if max(np.abs(audio)) <= 1:
    audio = (audio * (2**15 - 1))
audio = audio.astype(np.int16)

# Convert Audio to Mel Spectrum
def audio_to_mel(audio, sampling_rate):
    assert sampling_rate == 16000, "Only 16 KHz audio supported"
    preemph = 0.97
    preemphased = np.concatenate([audio[:1], audio[1:] - preemph * audio[:-1].astype(np.float32)])

    # Calculate window length
    win_length = round(sampling_rate * 0.02)

    # Based on previously calculated window length run short-time Fourier transform
    spec = np.abs(librosa.core.spectrum.stft(preemphased, n_fft=512, hop_length=round(sampling_rate * 0.01),
                  win_length=win_length, center=True, window=scipy.signal.windows.hann(win_length), pad_mode='reflect'))

    # Create mel filter-bank, produce transformation matrix to project current values onto Mel-frequency bins
    mel_basis = librosa.filters.mel(sampling_rate, 512, n_mels=64, fmin=0.0, fmax=8000.0, htk=False)
    return mel_basis, spec


def mel_to_input(mel_basis, spec, padding=16):
    # Convert to logarithmic scale
    log_melspectrum = np.log(np.dot(mel_basis, np.power(spec, 2)) + 2 ** -24)

    # Normalize output
    normalized = (log_melspectrum - log_melspectrum.mean(1)[:, None]) / (log_melspectrum.std(1)[:, None] + 1e-5)

    # Calculate padding
    remainder = normalized.shape[1] % padding
    if remainder != 0:
        return np.pad(normalized, ((0, 0), (0, padding - remainder)))[None]
    return normalized[None]
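
# Sanity check with synthetic audio (a sketch, not part of the original demo):
# mel_to_input() pads the time axis up to a multiple of `padding`.
_demo_audio = np.random.randn(16000).astype(np.float32)  # one second of noise at 16 kHz
_demo_mel_basis, _demo_spec = audio_to_mel(audio=_demo_audio, sampling_rate=16000)
_demo_input = mel_to_input(mel_basis=_demo_mel_basis, spec=_demo_spec)
print(_demo_input.shape)  # (1, 64, T) with T % 16 == 0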

# Run Conversion from Audio to Mel Format
mel_basis, spec = audio_to_mel(audio=audio.flatten(), sampling_rate=sampling_rate)

# Visualise Mel Spectrogram
librosa.display.specshow(data=spec, sr=sampling_rate, x_axis='time', y_axis='log');
plt.show();
librosa.display.specshow(data=mel_basis, sr=sampling_rate, x_axis='linear');
plt.ylabel('Mel filter');

# Adjust Mel scale to Input
audio = mel_to_input(mel_basis=mel_basis, spec=spec)

# Load Model
ie = Core()

model = ie.read_model(
    model=f"{model_folder}/public/{model_name}/{precision}/{model_name}.xml"
)
model_input_layer = model.input(0)
shape = model_input_layer.partial_shape
shape[2] = -1
model.reshape({model_input_layer: shape})
compiled_model = ie.compile_model(model=model, device_name="CPU")

# Do Inference
output_layer_ir = compiled_model.output(0)

character_probabilities = compiled_model([audio])[output_layer_ir]

# Read Output
# Remove unnecessary dimension
character_probabilities = np.squeeze(character_probabilities)

# Run argmax to pick the most probable symbols
character_probabilities = np.argmax(character_probabilities, axis=1)

# Implementation of Decoding
def ctc_greedy_decode(predictions):
    previous_letter_id = blank_id = len(alphabet) - 1
    transcription = list()
    for letter_index in predictions:
        if previous_letter_id != letter_index != blank_id:
            transcription.append(alphabet[letter_index])
        previous_letter_id = letter_index
    return ''.join(transcription)
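
# Toy example (not part of the original notebook): CTC greedy decoding collapses
# repeated symbols and drops the blank token '~' (the last alphabet entry, id 28).
print(ctc_greedy_decode([8, 8, 28, 9, 9, 28, 28, 8]))  # -> "hih"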

# Run Decoding and Print Output
transcription = ctc_greedy_decode(character_probabilities)
print(transcription)
# Imports
import collections
import os
import sys
import time
from typing import Tuple, List

import cv2
import numpy as np
from IPython import display
from openvino.runtime import Core
from openvino.runtime.ie_api import CompiledModel

sys.path.append("../utils")
import notebook_utils as utils

# Download the models
# Directory where model will be downloaded
base_model_dir = "model"
# Model name as named in Open Model Zoo
model_name = "action-recognition-0001"
# Selected precision (FP32, FP16, FP16-INT8)
precision = "FP16"
model_path_decoder = (
    f"model/intel/{model_name}/{model_name}-decoder/{precision}/{model_name}-decoder.xml"
)
model_path_encoder = (
    f"model/intel/{model_name}/{model_name}-encoder/{precision}/{model_name}-encoder.xml"
)
if not os.path.exists(model_path_decoder) or not os.path.exists(model_path_encoder):
    download_command = f"omz_downloader " \
                       f"--name {model_name} " \
                       f"--precision {precision} " \
                       f"--output_dir {base_model_dir}"
    ! $download_command

# Load your labels
labels = "data/kinetics.txt"

with open(labels) as f:
    labels = [line.strip() for line in f]

print(labels[0:9], np.shape(labels))

# Model Initialization function
# Initialize inference engine
ie_core = Core()


def model_init(model_path: str) -> Tuple:
    """
    Read the network and weights from file, load the
    model on the CPU and get input and output names of nodes

    :param: model: model architecture path *.xml
    :retuns:
            compiled_model: Compiled model 
            input_key: Input node for model
            output_key: Output node for model
    """

    # Read the network and corresponding weights from file
    model = ie_core.read_model(model=model_path)
    # compile the model for the CPU (you can use GPU or MYRIAD as well)
    compiled_model = ie_core.compile_model(model=model, device_name="CPU")
    # Get input and output names of nodes
    input_keys = compiled_model.input(0)
    output_keys = compiled_model.output(0)
    return input_keys, output_keys, compiled_model

# Initialization for Encoder and Decoder
# Encoder initialization
input_key_en, output_keys_en, compiled_model_en = model_init(model_path_encoder)
# Decoder initialization
input_key_de, output_keys_de, compiled_model_de = model_init(model_path_decoder)

# Get input size - Encoder
height_en, width_en = list(input_key_en.shape)[2:]
# Get input size - Decoder
frames2decode = list(input_key_de.shape)[1]

# Helper functions
def center_crop(frame: np.ndarray) -> Tuple[np.ndarray, List]:
    """
    Center-crop the original frame to a square to standardize the input image for the encoder model

    :param frame: input frame
    :returns: center-cropped square frame and the ROI used for the crop
    """
    img_h, img_w, _ = frame.shape
    min_dim = min(img_h, img_w)
    start_x = int((img_w - min_dim) / 2.0)
    start_y = int((img_h - min_dim) / 2.0)
    roi = [start_y, (start_y + min_dim), start_x, (start_x + min_dim)]
    return frame[start_y : (start_y + min_dim), start_x : (start_x + min_dim), ...], roi


def adaptive_resize(frame: np.ndarray, size: int) -> np.ndarray:
    """
     The frame going to be resized to have a height of size or a width of size

    :param frame: input frame
    :param size: input size to encoder model
    :returns: resized frame, np.array type
    """
    h, w, _ = frame.shape
    scale = size / min(h, w)
    w_scaled, h_scaled = int(w * scale), int(h * scale)
    if w_scaled == w and h_scaled == h:
        return frame
    return cv2.resize(frame, (w_scaled, h_scaled))


def decode_output(probs: np.ndarray, labels: np.ndarray, top_k: int = 3) -> Tuple[List, List]:
    """
    Decodes top probabilities into corresponding label names

    :param probs: confidence vector for 400 actions
    :param labels: list of actions
    :param top_k: The k most probable positions in the list of labels
    :returns: decoded_labels: The k most probable actions from the labels list
              decoded_top_probs: confidence for the k most probable actions
    """
    top_ind = np.argsort(-1 * probs)[:top_k]
    out_label = np.array(labels)[top_ind.astype(int)]
    decoded_labels = [out_label[0][0], out_label[0][1], out_label[0][2]]
    top_probs = np.array(probs)[0][top_ind.astype(int)]
    decoded_top_probs = [top_probs[0][0], top_probs[0][1], top_probs[0][2]]
    return decoded_labels, decoded_top_probs
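
# Small self-contained check (toy values, not from the notebook): probs has
# shape (1, n_actions) and the three most confident labels come back with scores.
_demo_probs = np.array([[0.05, 0.60, 0.10, 0.20, 0.05]])
_demo_labels, _demo_top = decode_output(_demo_probs, ["a", "b", "c", "d", "e"], top_k=3)
print(_demo_labels, _demo_top)  # ['b', 'd', 'c'] [0.6, 0.2, 0.1]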


def rec_frame_display(frame: np.ndarray, roi) -> np.ndarray:
    """
    Draw a rectangle over the actual frame

    :param frame: input frame
    :param roi: region of interest, the image section processed by the encoder
    :returns: frame with the drawn shape

    """

    cv2.line(frame, (roi[2] + 3, roi[0] + 3), (roi[2] + 3, roi[0] + 100), (0, 200, 0), 2)
    cv2.line(frame, (roi[2] + 3, roi[0] + 3), (roi[2] + 100, roi[0] + 3), (0, 200, 0), 2)
    cv2.line(frame, (roi[3] - 3, roi[1] - 3), (roi[3] - 3, roi[1] - 100), (0, 200, 0), 2)
    cv2.line(frame, (roi[3] - 3, roi[1] - 3), (roi[3] - 100, roi[1] - 3), (0, 200, 0), 2)
    cv2.line(frame, (roi[3] - 3, roi[0] + 3), (roi[3] - 3, roi[0] + 100), (0, 200, 0), 2)
    cv2.line(frame, (roi[3] - 3, roi[0] + 3), (roi[3] - 100, roi[0] + 3), (0, 200, 0), 2)
    cv2.line(frame, (roi[2] + 3, roi[1] - 3), (roi[2] + 3, roi[1] - 100), (0, 200, 0), 2)
    cv2.line(frame, (roi[2] + 3, roi[1] - 3), (roi[2] + 100, roi[1] - 3), (0, 200, 0), 2)
    # Write ROI over actual frame
    FONT_STYLE = cv2.FONT_HERSHEY_SIMPLEX
    org = (roi[2] + 3, roi[1] - 3)
    org2 = (roi[2] + 2, roi[1] - 2)
    FONT_SIZE = 0.5
    FONT_COLOR = (0, 200, 0)
    FONT_COLOR2 = (0, 0, 0)
    cv2.putText(frame, "ROI", org2, FONT_STYLE, FONT_SIZE, FONT_COLOR2)
    cv2.putText(frame, "ROI", org, FONT_STYLE, FONT_SIZE, FONT_COLOR)
    return frame


def display_text_fnc(frame: np.ndarray, display_text: str, index: int):
    """
    Include text on the analyzed frame

    :param frame: input frame
    :param display_text: text to add on the frame
    :param index: line index for adding text

    """
    # Configuration for displaying images with text
    FONT_COLOR = (255, 255, 255)
    FONT_COLOR2 = (0, 0, 0)
    FONT_STYLE = cv2.FONT_HERSHEY_DUPLEX
    FONT_SIZE = 0.7
    TEXT_VERTICAL_INTERVAL = 25
    TEXT_LEFT_MARGIN = 15
    # ROI over actual frame
    (processed, roi) = center_crop(frame)
    # Draw a ROI over actual frame
    frame = rec_frame_display(frame, roi)
    # Put text over actual frame
    text_loc = (TEXT_LEFT_MARGIN, TEXT_VERTICAL_INTERVAL * (index + 1))
    text_loc2 = (TEXT_LEFT_MARGIN + 1, TEXT_VERTICAL_INTERVAL * (index + 1) + 1)
    cv2.putText(frame, display_text, text_loc2, FONT_STYLE, FONT_SIZE, FONT_COLOR2)
    cv2.putText(frame, display_text, text_loc, FONT_STYLE, FONT_SIZE, FONT_COLOR)

# AI Functions
def preprocessing(frame: np.ndarray, size: int) -> Tuple[np.ndarray, List]:
    """
    Preparing frame before Encoder.
    The image should be scaled to its shortest dimension at "size"
    and cropped, centered, and squared so that both width and
    height have lengths "size". Frame must be transposed from
    Height-Width-Channels (HWC) to Channels-Height-Width (CHW).

    :param frame: input frame
    :param size: input size to encoder model
    :returns: resized and cropped frame, and the crop ROI
    """
    # Adaptive resize
    preprocessed = adaptive_resize(frame, size)
    # Center_crop
    (preprocessed, roi) = center_crop(preprocessed)
    # Transpose frame HWC -> CHW
    preprocessed = preprocessed.transpose((2, 0, 1))[None,]  # HWC -> CHW
    return preprocessed, roi
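
# Shape check with a dummy frame (illustration only): preprocessing() returns
# a 1xCxHxW tensor ready for the encoder, plus the crop ROI.
_demo_frame = np.zeros((300, 400, 3), dtype=np.uint8)
_demo_input, _demo_roi = preprocessing(_demo_frame, 224)
print(_demo_input.shape)  # (1, 3, 224, 224)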


def encoder(
    preprocessed: np.ndarray,
    compiled_model: CompiledModel
) -> List:
    """
    Encoder Inference per frame. This function calls the network previously
    configured for the encoder model (compiled_model), extracts the data
    from the output node, and appends it in an array to be used by the decoder.

    :param: preprocessed: preprocessing frame
    :param: compiled_model: Encoder model network
    :returns: encoder_output: embedding layer that is appended with each arriving frame
    """
    output_key_en = compiled_model.output(0)
    
    # Get results on action-recognition-0001-encoder model
    infer_result_encoder = compiled_model([preprocessed])[output_key_en]
    return infer_result_encoder


def decoder(encoder_output: List, compiled_model_de: CompiledModel) -> List:
    """
    Decoder inference per set of frames. This function concatenates the embedding layer
    froms the encoder output, transpose the array to match with the decoder input size.
    Calls the network previously configured for the decoder model (compiled_model_de), extracts
    the logits and normalize those to get confidence values along specified axis.
    Decodes top probabilities into corresponding label names

    :param: encoder_output: embedding layer for 16 frames
    :param: compiled_model_de: Decoder model network
    :returns: decoded_labels: The k most probable actions from the labels list
              decoded_top_probs: confidence for the k most probable actions
    """
    # Concatenate sample_duration frames in just one array
    decoder_input = np.concatenate(encoder_output, axis=0)
    # Organize input shape vector for the Decoder (shape: [1x16x512])
    decoder_input = decoder_input.transpose((2, 0, 1, 3))
    decoder_input = np.squeeze(decoder_input, axis=3)
    output_key_de = compiled_model_de.output(0)
    # Get results on action-recognition-0001-decoder model
    result_de = compiled_model_de([decoder_input])[output_key_de]
    # Normalize logits to get confidence values along specified axis
    probs = softmax(result_de - np.max(result_de))
    # Decodes top probabilities into corresponding label names
    decoded_labels, decoded_top_probs = decode_output(probs, labels, top_k=3)
    return decoded_labels, decoded_top_probs


def softmax(x: np.ndarray) -> np.ndarray:
    """
    Normalizes logits to get confidence values along specified axis
    x: np.array, axis=None
    """
    exp = np.exp(x)
    return exp / np.sum(exp, axis=None)
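
# Tiny illustration: softmax turns logits into a probability distribution.
# Note that axis=None normalizes over all elements, which is what the decoder
# expects for a single 1 x n_actions logit vector.
print(softmax(np.array([1.0, 2.0, 3.0])))  # ~[0.09 0.24 0.67], sums to 1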

# Main Processing Function
def run_action_recognition(
    source: str = "0",
    flip: bool = True,
    use_popup: bool = False,
    compiled_model_en: CompiledModel = compiled_model_en,
    compiled_model_de: CompiledModel = compiled_model_de,
    skip_first_frames: int = 0,
):
    """
    Use the "source" webcam or video file to run the complete pipeline for action-recognition problem
    1. Create a video player to play with target fps
    2. Prepare a set of frames to be encoded-decoded
    3. Preprocess frame before Encoder
    4. Encoder Inference per frame
    5. Decoder inference per set of frames
    6. Visualize the results

    :param: source: webcam "0" or video path
    :param: flip: to be used by VideoPlayer function for flipping capture image
    :param: use_popup: False for showing encoded frames over this notebook, True for creating a popup window.
    :param: skip_first_frames: Number of frames to skip at the beginning of the video.
    :returns: display video over the notebook or in a popup window

    """
    size = height_en  # Encoder input size - From Cell 5_9
    sample_duration = frames2decode  # Decoder input size - From Cell 5_7
    # Select frames per second of your source
    fps = 30
    player = None
    try:
        # Create a video player
        player = utils.VideoPlayer(source, flip=flip, fps=fps, skip_first_frames=skip_first_frames)
        # Start capturing
        player.start()
        if use_popup:
            title = "Press ESC to Exit"
            cv2.namedWindow(title, cv2.WINDOW_GUI_NORMAL | cv2.WINDOW_AUTOSIZE)

        processing_times = collections.deque()
        processing_time = 0
        encoder_output = []
        decoded_labels = [0, 0, 0]
        decoded_top_probs = [0, 0, 0]
        counter = 0
        # Create a text template to show inference results over video
        text_inference_template = "Infer Time:{Time:.1f}ms,{fps:.1f}FPS"
        text_template = "{label},{conf:.2f}%"

        while True:
            counter = counter + 1

            # read a frame from the video stream
            frame = player.next()
            if frame is None:
                print("Source ended")
                break

            scale = 1280 / max(frame.shape)

            # Adaptive resize for visualization
            if scale < 1:
                frame = cv2.resize(frame, None, fx=scale, fy=scale, interpolation=cv2.INTER_AREA)

            # Select one frame every two for processing through the encoder.
            # After 16 frames are processed, the decoder will find the action,
            # and the label will be printed over the frames.

            if counter % 2 == 0:
                # Preprocess frame before Encoder
                (preprocessed, _) = preprocessing(frame, size)

                # Measure processing time
                start_time = time.time()

                # Encoder Inference per frame
                encoder_output.append(encoder(preprocessed, compiled_model_en))

                # Decoder inference per set of frames
                # Wait for sample duration to work with decoder model
                if len(encoder_output) == sample_duration:
                    decoded_labels, decoded_top_probs = decoder(encoder_output, compiled_model_de)
                    encoder_output = []

                # Inference has finished. Display the results
                stop_time = time.time()

                # Calculate processing time
                processing_times.append(stop_time - start_time)

                # Use processing times from last 200 frames
                if len(processing_times) > 200:
                    processing_times.popleft()

                # Mean processing time [ms]
                processing_time = np.mean(processing_times) * 1000
                fps = 1000 / processing_time

            # Visualize the results
            for i in range(0, 3):
                display_text = text_template.format(
                    label=decoded_labels[i],
                    conf=decoded_top_probs[i] * 100,
                )
                display_text_fnc(frame, display_text, i)

            display_text = text_inference_template.format(Time=processing_time, fps=fps)
            display_text_fnc(frame, display_text, 3)

            # Use this workaround if you experience flickering
            if use_popup:
                cv2.imshow(title, frame)
                key = cv2.waitKey(1)
                # escape = 27
                if key == 27:
                    break
            else:
                # Encode numpy array to jpg
                _, encoded_img = cv2.imencode(".jpg", frame, params=[cv2.IMWRITE_JPEG_QUALITY, 90])
                # Create IPython image
                i = display.Image(data=encoded_img)
                # Display the image in this notebook
                display.clear_output(wait=True)
                display.display(i)

    # ctrl-c
    except KeyboardInterrupt:
        print("Interrupted")
    # Any different error
    except RuntimeError as e:
        print(e)
    finally:
        if player is not None:
            # stop capturing
            player.stop()
        if use_popup:
            cv2.destroyAllWindows()

# Run Action Recognition on a Video File
video_file = "https://archive.org/serve/ISSVideoResourceLifeOnStation720p/ISS%20Video%20Resource_LifeOnStation_720p.mp4"
run_action_recognition(source=video_file, flip=False, use_popup=False, skip_first_frames=600)

# Run Action Recognition using your webcam
run_action_recognition(source=0, flip=False, use_popup=False, skip_first_frames=0)
# Imports
import collections
import os
import sys
import time

import cv2
import numpy as np
from IPython import display
from numpy.lib.stride_tricks import as_strided
from openvino.runtime import Core

from decoder import OpenPoseDecoder

sys.path.append("../utils")
import notebook_utils as utils

# Download the model
# directory where model will be downloaded
base_model_dir = "model"

# model name as named in Open Model Zoo
model_name = "human-pose-estimation-0001"
# selected precision (FP32, FP16, FP16-INT8)
precision = "FP16-INT8"

model_path = f"model/intel/{model_name}/{precision}/{model_name}.xml"
model_weights_path = f"model/intel/{model_name}/{precision}/{model_name}.bin"

if not os.path.exists(model_path):
    download_command = f"omz_downloader " \
                       f"--name {model_name} " \
                       f"--precision {precision} " \
                       f"--output_dir {base_model_dir}"
    ! $download_command

# Load the model
# initialize inference engine
ie_core = Core()
# read the network and corresponding weights from file
model = ie_core.read_model(model=model_path, weights=model_weights_path)
# load the model on the CPU (you can use GPU or MYRIAD as well)
compiled_model = ie_core.compile_model(model=model, device_name="CPU")

# get input and output names of nodes
input_layer = compiled_model.input(0)
output_layers = list(compiled_model.outputs)

# get input size
height, width = list(input_layer.shape)[2:]

# Processing OpenPoseDecoder
decoder = OpenPoseDecoder()

# Process Results
# 2d pooling in numpy (from: https://stackoverflow.com/a/54966908/1624463)
def pool2d(A, kernel_size, stride, padding, pool_mode="max"):
    """
    2D Pooling

    Parameters:
        A: input 2D array
        kernel_size: int, the size of the window
        stride: int, the stride of the window
        padding: int, implicit zero paddings on both sides of the input
        pool_mode: string, 'max' or 'avg'
    """
    # Padding
    A = np.pad(A, padding, mode="constant")

    # Window view of A
    output_shape = (
        (A.shape[0] - kernel_size) // stride + 1,
        (A.shape[1] - kernel_size) // stride + 1,
    )
    kernel_size = (kernel_size, kernel_size)
    A_w = as_strided(
        A,
        shape=output_shape + kernel_size,
        strides=(stride * A.strides[0], stride * A.strides[1]) + A.strides
    )
    A_w = A_w.reshape(-1, *kernel_size)

    # Return the result of pooling
    if pool_mode == "max":
        return A_w.max(axis=(1, 2)).reshape(output_shape)
    elif pool_mode == "avg":
        return A_w.mean(axis=(1, 2)).reshape(output_shape)


# non maximum suppression
def heatmap_nms(heatmaps, pooled_heatmaps):
    return heatmaps * (heatmaps == pooled_heatmaps)
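
# Illustration (toy array, not part of the original demo): max-pooling with
# kernel 3, stride 1, padding 1 replaces each pixel with its neighborhood
# maximum, so heatmap_nms() keeps only local maxima and zeroes the rest.
_demo_heatmap = np.array([[0.1, 0.9, 0.1],
                          [0.2, 0.3, 0.1],
                          [0.8, 0.1, 0.1]], dtype=np.float32)
_demo_pooled = pool2d(_demo_heatmap, kernel_size=3, stride=1, padding=1, pool_mode="max")
print(heatmap_nms(_demo_heatmap, _demo_pooled))  # only 0.9 and 0.8 survive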


# get poses from results
def process_results(img, pafs, heatmaps):
    # this processing comes from
    # https://github.com/openvinotoolkit/open_model_zoo/blob/master/demos/common/python/models/open_pose.py
    pooled_heatmaps = np.array(
        [[pool2d(h, kernel_size=3, stride=1, padding=1, pool_mode="max") for h in heatmaps[0]]]
    )
    nms_heatmaps = heatmap_nms(heatmaps, pooled_heatmaps)

    # decode poses
    poses, scores = decoder(heatmaps, nms_heatmaps, pafs)
    output_shape = list(compiled_model.output(index=0).partial_shape)
    output_scale = img.shape[1] / output_shape[3].get_length(), img.shape[0] / output_shape[2].get_length()
    # multiply coordinates by scaling factor
    poses[:, :, :2] *= output_scale
    return poses, scores

# Draw Pose Overlays
colors = ((255, 0, 0), (255, 0, 255), (170, 0, 255), (255, 0, 85), (255, 0, 170), (85, 255, 0),
          (255, 170, 0), (0, 255, 0), (255, 255, 0), (0, 255, 85), (170, 255, 0), (0, 85, 255),
          (0, 255, 170), (0, 0, 255), (0, 255, 255), (85, 0, 255), (0, 170, 255))

default_skeleton = ((15, 13), (13, 11), (16, 14), (14, 12), (11, 12), (5, 11), (6, 12), (5, 6), (5, 7),
                    (6, 8), (7, 9), (8, 10), (1, 2), (0, 1), (0, 2), (1, 3), (2, 4), (3, 5), (4, 6))


def draw_poses(img, poses, point_score_threshold, skeleton=default_skeleton):
    if poses.size == 0:
        return img

    img_limbs = np.copy(img)
    for pose in poses:
        points = pose[:, :2].astype(np.int32)
        points_scores = pose[:, 2]
        # Draw joints.
        for i, (p, v) in enumerate(zip(points, points_scores)):
            if v > point_score_threshold:
                cv2.circle(img, tuple(p), 1, colors[i], 2)
        # Draw limbs.
        for i, j in skeleton:
            if points_scores[i] > point_score_threshold and points_scores[j] > point_score_threshold:
                cv2.line(img_limbs, tuple(points[i]), tuple(points[j]), color=colors[j], thickness=4)
    cv2.addWeighted(img, 0.4, img_limbs, 0.6, 0, dst=img)
    return img

# Main Processing Function
# main processing function to run pose estimation
def run_pose_estimation(source=0, flip=False, use_popup=False, skip_first_frames=0):
    pafs_output_key = compiled_model.output("Mconv7_stage2_L1")
    heatmaps_output_key = compiled_model.output("Mconv7_stage2_L2")
    player = None
    try:
        # create video player to play with target fps
        player = utils.VideoPlayer(source, flip=flip, fps=30, skip_first_frames=skip_first_frames)
        # start capturing
        player.start()
        if use_popup:
            title = "Press ESC to Exit"
            cv2.namedWindow(title, cv2.WINDOW_GUI_NORMAL | cv2.WINDOW_AUTOSIZE)

        processing_times = collections.deque()

        while True:
            # grab the frame
            frame = player.next()
            if frame is None:
                print("Source ended")
                break
            # if frame larger than full HD, reduce size to improve the performance
            scale = 1280 / max(frame.shape)
            if scale < 1:
                frame = cv2.resize(frame, None, fx=scale, fy=scale, interpolation=cv2.INTER_AREA)

            # resize image and change dims to fit neural network input
            # (see https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/human-pose-estimation-0001)
            input_img = cv2.resize(frame, (width, height), interpolation=cv2.INTER_AREA)
            # create batch of images (size = 1)
            input_img = input_img.transpose((2,0,1))[np.newaxis, ...]

            # measure processing time
            start_time = time.time()
            # get results
            results = compiled_model([input_img])
            stop_time = time.time()

            pafs = results[pafs_output_key]
            heatmaps = results[heatmaps_output_key]
            # get poses from network results
            poses, scores = process_results(frame, pafs, heatmaps)

            # draw poses on a frame
            frame = draw_poses(frame, poses, 0.1)

            processing_times.append(stop_time - start_time)
            # use processing times from last 200 frames
            if len(processing_times) > 200:
                processing_times.popleft()

            _, f_width = frame.shape[:2]
            # mean processing time [ms]
            processing_time = np.mean(processing_times) * 1000
            fps = 1000 / processing_time
            cv2.putText(frame, f"Inference time: {processing_time:.1f}ms ({fps:.1f} FPS)", (20, 40),
                        cv2.FONT_HERSHEY_COMPLEX, f_width / 1000, (0, 0, 255), 1, cv2.LINE_AA)

            # use this workaround if there is flickering
            if use_popup:
                cv2.imshow(title, frame)
                key = cv2.waitKey(1)
                # escape = 27
                if key == 27:
                    break
            else:
                # encode numpy array to jpg
                _, encoded_img = cv2.imencode(".jpg", frame, params=[cv2.IMWRITE_JPEG_QUALITY, 90])
                # create IPython image
                i = display.Image(data=encoded_img)
                # display the image in this notebook
                display.clear_output(wait=True)
                display.display(i)
    # ctrl-c
    except KeyboardInterrupt:
        print("Interrupted")
    # any different error
    except RuntimeError as e:
        print(e)
    finally:
        if player is not None:
            # stop capturing
            player.stop()
        if use_popup:
            cv2.destroyAllWindows()

# Run Live Pose Estimation
run_pose_estimation(source=0, flip=True, use_popup=False)

# Run Pose Estimation on a Video File
video_file = "https://github.com/intel-iot-devkit/sample-videos/blob/master/store-aisle-detection.mp4?raw=true"

run_pose_estimation(video_file, flip=False, use_popup=False, skip_first_frames=500)
# Imports
import collections
import os
import sys
import time

import cv2
import numpy as np
from IPython import display
from openvino.runtime import Core

sys.path.append("../utils")
import notebook_utils as utils

# Download the Model
# directory where model will be downloaded
base_model_dir = "model"

# model name as named in Open Model Zoo
model_name = "ssdlite_mobilenet_v2"

download_command = f"omz_downloader " \
                   f"--name {model_name} " \
                   f"--output_dir {base_model_dir} " \
                   f"--cache_dir {base_model_dir}"
! $download_command

# Convert the Model
precision = "FP16"

# output path for the conversion
converted_model_path = f"model/public/{model_name}/{precision}/{model_name}.xml"

if not os.path.exists(converted_model_path):
    convert_command = f"omz_converter " \
                      f"--name {model_name} " \
                      f"--download_dir {base_model_dir} " \
                      f"--precisions {precision}"
    ! $convert_command

# Load the Model
# initialize inference engine
ie_core = Core()
# read the network and corresponding weights from file
model = ie_core.read_model(model=converted_model_path)
# compile the model for the CPU (you can choose manually CPU, GPU, MYRIAD etc.)
# or let the engine choose the best available device (AUTO)
compiled_model = ie_core.compile_model(model=model, device_name="CPU")

# get input and output nodes
input_layer = compiled_model.input(0)
output_layer = compiled_model.output(0)

# get input size
height, width = list(input_layer.shape)[1:3]

# Process Results
# https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
classes = [
    "background", "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train",
    "truck", "boat", "traffic light", "fire hydrant", "street sign", "stop sign",
    "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant",
    "bear", "zebra", "giraffe", "hat", "backpack", "umbrella", "shoe", "eye glasses",
    "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite",
    "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle",
    "plate", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
    "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair",
    "couch", "potted plant", "bed", "mirror", "dining table", "window", "desk", "toilet",
    "door", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven",
    "toaster", "sink", "refrigerator", "blender", "book", "clock", "vase", "scissors",
    "teddy bear", "hair drier", "toothbrush", "hair brush"
]

# colors for above classes (Rainbow Color Map)
colors = cv2.applyColorMap(
    src=np.arange(0, 255, 255 / len(classes), dtype=np.float32).astype(np.uint8),
    colormap=cv2.COLORMAP_RAINBOW,
).squeeze()


def process_results(frame, results, thresh=0.6):
    # size of the original frame
    h, w = frame.shape[:2]
    # results is a tensor [1, 1, 100, 7]
    results = results.squeeze()
    boxes = []
    labels = []
    scores = []
    for _, label, score, xmin, ymin, xmax, ymax in results:
        # create a box with pixels coordinates from the box with normalized coordinates [0,1]
        boxes.append(
            tuple(map(int, (xmin * w, ymin * h, (xmax - xmin) * w, (ymax - ymin) * h)))
        )
        labels.append(int(label))
        scores.append(float(score))

    # apply non-maximum suppression to get rid of many overlapping entities
    # see https://paperswithcode.com/method/non-maximum-suppression
    # this algorithm returns indices of objects to keep
    indices = cv2.dnn.NMSBoxes(
        bboxes=boxes, scores=scores, score_threshold=thresh, nms_threshold=0.6
    )

    # if there are no boxes
    if len(indices) == 0:
        return []

    # filter detected objects
    return [(labels[idx], scores[idx], boxes[idx]) for idx in indices.flatten()]
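
# Toy aside (made-up boxes, not from the notebook): cv2.dnn.NMSBoxes takes
# boxes as (x, y, width, height) and returns the indices of the boxes to keep,
# suppressing lower-scored boxes that overlap a kept box too much.
_demo_boxes = [[0, 0, 100, 100], [10, 10, 100, 100], [300, 300, 50, 50]]
_demo_scores = [0.9, 0.8, 0.7]
_demo_keep = cv2.dnn.NMSBoxes(bboxes=_demo_boxes, scores=_demo_scores,
                              score_threshold=0.6, nms_threshold=0.6)
print(np.array(_demo_keep).flatten())  # [0 2]: the second box is suppressed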


def draw_boxes(frame, boxes):
    for label, score, box in boxes:
        # choose color for the label
        color = tuple(map(int, colors[label]))
        # draw box
        x2 = box[0] + box[2]
        y2 = box[1] + box[3]
        cv2.rectangle(img=frame, pt1=box[:2], pt2=(x2, y2), color=color, thickness=3)

        # draw label name inside the box
        cv2.putText(
            img=frame,
            text=f"{classes[label]} {score:.2f}",
            org=(box[0] + 10, box[1] + 30),
            fontFace=cv2.FONT_HERSHEY_COMPLEX,
            fontScale=frame.shape[1] / 1000,
            color=color,
            thickness=1,
            lineType=cv2.LINE_AA,
        )

    return frame

# Main Processing Function
# main processing function to run object detection
def run_object_detection(source=0, flip=False, use_popup=False, skip_first_frames=0):
    player = None
    try:
        # create video player to play with target fps
        player = utils.VideoPlayer(
            source=source, flip=flip, fps=30, skip_first_frames=skip_first_frames
        )
        # start capturing
        player.start()
        if use_popup:
            title = "Press ESC to Exit"
            cv2.namedWindow(
                winname=title, flags=cv2.WINDOW_GUI_NORMAL | cv2.WINDOW_AUTOSIZE
            )

        processing_times = collections.deque()
        while True:
            # grab the frame
            frame = player.next()
            if frame is None:
                print("Source ended")
                break
            # if frame larger than full HD, reduce size to improve the performance
            scale = 1280 / max(frame.shape)
            if scale < 1:
                frame = cv2.resize(
                    src=frame,
                    dsize=None,
                    fx=scale,
                    fy=scale,
                    interpolation=cv2.INTER_AREA,
                )

            # resize image and change dims to fit neural network input
            input_img = cv2.resize(
                src=frame, dsize=(width, height), interpolation=cv2.INTER_AREA
            )
            # create batch of images (size = 1)
            input_img = input_img[np.newaxis, ...]

            # measure processing time

            start_time = time.time()
            # get results
            results = compiled_model([input_img])[output_layer]
            stop_time = time.time()
            # get boxes from network results
            boxes = process_results(frame=frame, results=results)

            # draw boxes on a frame
            frame = draw_boxes(frame=frame, boxes=boxes)

            processing_times.append(stop_time - start_time)
            # use processing times from last 200 frames
            if len(processing_times) > 200:
                processing_times.popleft()

            _, f_width = frame.shape[:2]
            # mean processing time [ms]
            processing_time = np.mean(processing_times) * 1000
            fps = 1000 / processing_time
            cv2.putText(
                img=frame,
                text=f"Inference time: {processing_time:.1f}ms ({fps:.1f} FPS)",
                org=(20, 40),
                fontFace=cv2.FONT_HERSHEY_COMPLEX,
                fontScale=f_width / 1000,
                color=(0, 0, 255),
                thickness=1,
                lineType=cv2.LINE_AA,
            )

            # use this workaround if there is flickering
            if use_popup:
                cv2.imshow(winname=title, mat=frame)
                key = cv2.waitKey(1)
                # escape = 27
                if key == 27:
                    break
            else:
                # encode numpy array to jpg
                _, encoded_img = cv2.imencode(
                    ext=".jpg", img=frame, params=[cv2.IMWRITE_JPEG_QUALITY, 100]
                )
                # create IPython image
                i = display.Image(data=encoded_img)
                # display the image in this notebook
                display.clear_output(wait=True)
                display.display(i)
    # ctrl-c
    except KeyboardInterrupt:
        print("Interrupted")
    # any different error
    except RuntimeError as e:
        print(e)
    finally:
        if player is not None:
            # stop capturing
            player.stop()
        if use_popup:
            cv2.destroyAllWindows()

# Run Live Object Detection
run_object_detection(source=0, flip=True, use_popup=False)

# Run Object Detection on a Video File
video_file = "../201-vision-monodepth/data/Coco Walking in Berkeley.mp4"

run_object_detection(source=video_file, flip=False, use_popup=False)
# Imports
import os
import sys
import zipfile
from pathlib import Path

import numpy as np
from monai.transforms import LoadImage
from openvino.inference_engine import IECore

sys.path.append("../utils")
from models.custom_segmentation import SegmentationModel
from notebook_utils import benchmark_model, download_file, show_live_inference

# Settings
# The directory that contains the IR model (xml and bin) files
MODEL_PATH = "pretrained_model/quantized_unet_kits19.xml"
# Uncomment the next line to use the FP16 model instead of the quantized model
# MODEL_PATH = "pretrained_model/unet_kits19.xml"

# Benchmark Model Performance
ie = IECore()
# By default, benchmark on MULTI:CPU,GPU if a GPU is available, otherwise on CPU.
device = "MULTI:CPU,GPU" if "GPU" in ie.available_devices else "CPU"
# Uncomment one of the options below to benchmark on other devices
# device = "GPU"
# device = "CPU"
# device = "AUTO"

# Benchmark model
benchmark_model(model_path=MODEL_PATH, device=device, seconds=15)

# Download and Prepare Data
# Directory that contains the CT scan data. This directory should contain subdirectories
# case_00XXX where XXX is between 000 and 299
BASEDIR = Path("kits19_frames_1")
# The CT scan case number. For example: 16 for data from the case_00016 directory
# Currently only 117 is supported
CASE = 117

case_path = BASEDIR / f"case_{CASE:05d}"

if not case_path.exists():
    filename = download_file(
        f"https://storage.openvinotoolkit.org/data/test_data/openvino_notebooks/kits19/case_{CASE:05d}.zip"
    )
    with zipfile.ZipFile(filename, "r") as zip_ref:
        zip_ref.extractall(path=BASEDIR)
    os.remove(filename)  # remove zipfile
    print(f"Downloaded and extracted data for case_{CASE:05d}")
else:
    print(f"Data for case_{CASE:05d} exists")

# Load Model and List of Image Files
ie = IECore()
segmentation_model = SegmentationModel(
    ie=ie, model_path=Path(MODEL_PATH), sigmoid=True, rotate_and_flip=True
)
image_paths = sorted(case_path.glob("imaging_frames/*jpg"))

print(f"{case_path.name}, {len(image_paths)} images")

# Show Live Inference
# Possible options for device include "CPU", "GPU", "AUTO", "MULTI"
device = "MULTI:CPU,GPU" if "GPU" in ie.available_devices else "CPU"
reader = LoadImage(image_only=True, dtype=np.uint8)

show_live_inference(
    ie=ie, image_paths=image_paths, model=segmentation_model, device=device, reader=reader
)
# Preparation
# Imports
import sys
import time
from pathlib import Path

import cv2
import matplotlib.cm
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import (
    HTML,
    FileLink,
    Pretty,
    ProgressBar,
    Video,
    clear_output,
    display,
)
from openvino.runtime import Core

sys.path.append("../utils")
from notebook_utils import load_image

# Settings
DEVICE = "CPU"
MODEL_FILE = "model/MiDaS_small.xml"

model_xml_path = Path(MODEL_FILE)

# Functions
def normalize_minmax(data):
    """Normalizes the values in `data` between 0 and 1"""
    return (data - data.min()) / (data.max() - data.min())


def convert_result_to_image(result, colormap="viridis"):
    """
    Convert network result of floating point numbers to an RGB image with
    integer values from 0-255 by applying a colormap.

    `result` is expected to be a single network result in 1,H,W shape
    `colormap` is a matplotlib colormap.
    See https://matplotlib.org/stable/tutorials/colors/colormaps.html
    """
    cmap = matplotlib.cm.get_cmap(colormap)
    result = result.squeeze(0)
    result = normalize_minmax(result)
    result = cmap(result)[:, :, :3] * 255
    result = result.astype(np.uint8)
    return result
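
# Quick illustration (dummy data, not part of the original notebook):
# a 1xHxW float result becomes an HxWx3 uint8 color image.
_demo_result = np.random.rand(1, 8, 8).astype(np.float32)
print(convert_result_to_image(_demo_result).shape)  # (8, 8, 3)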


def to_rgb(image_data) -> np.ndarray:
    """
    Convert image_data from BGR to RGB
    """
    return cv2.cvtColor(image_data, cv2.COLOR_BGR2RGB)

# Load the Model
ie = Core()
model = ie.read_model(model=model_xml_path, weights=model_xml_path.with_suffix(".bin"))
compiled_model = ie.compile_model(model=model, device_name=DEVICE)

input_key = compiled_model.input(0)
output_key = compiled_model.output(0)

network_input_shape = list(input_key.shape)
network_image_height, network_image_width = network_input_shape[2:]

# Load, resize and reshape input image
IMAGE_FILE = "data/coco_bike.jpg"
image = load_image(path=IMAGE_FILE)

# resize to input shape for network; cv2.resize expects (width, height)
resized_image = cv2.resize(src=image, dsize=(network_image_width, network_image_height))

# reshape image to network input shape NCHW
input_image = np.expand_dims(np.transpose(resized_image, (2, 0, 1)), 0)

# Do inference on image
result = compiled_model([input_image])[output_key]

# convert network result of disparity map to an image that shows
# distance as colors
result_image = convert_result_to_image(result=result)

# resize back to original image shape. cv2.resize expects shape
# in (width, height), [::-1] reverses the (height, width) shape to match this
result_image = cv2.resize(result_image, image.shape[:2][::-1])

# Display monodepth image
fig, ax = plt.subplots(1, 2, figsize=(20, 15))
ax[0].imshow(to_rgb(image))
ax[1].imshow(result_image);

# Video Settings
# Video source: https://www.youtube.com/watch?v=fu1xcQdJRws (Public Domain)
VIDEO_FILE = "data/Coco Walking in Berkeley.mp4"
# Number of seconds of input video to process. Set to 0 to process
# the full video.
NUM_SECONDS = 4
# Set ADVANCE_FRAMES to 1 to process every frame from the input video
# Set ADVANCE_FRAMES to 2 to process every second frame. This reduces
# the time it takes to process the video
ADVANCE_FRAMES = 2
# Set SCALE_OUTPUT to reduce the size of the result video
# If SCALE_OUTPUT is 0.5, the width and height of the result video
# will be half the width and height of the input video
SCALE_OUTPUT = 0.5
# The format to use for video encoding. vp09 is slow,
# but it works on most systems.
# Try the THEO encoding if you have FFMPEG installed.
# FOURCC = cv2.VideoWriter_fourcc(*"THEO")
FOURCC = cv2.VideoWriter_fourcc(*"vp09")

# Create Path objects for the input video and the resulting video
output_directory = Path("output")
output_directory.mkdir(exist_ok=True)
result_video_path = output_directory / f"{Path(VIDEO_FILE).stem}_monodepth.mp4"

# Load Video
cap = cv2.VideoCapture(str(VIDEO_FILE))
ret, image = cap.read()
if not ret:
    raise ValueError(f"The video at {VIDEO_FILE} cannot be read.")
input_fps = cap.get(cv2.CAP_PROP_FPS)
input_video_frame_height, input_video_frame_width = image.shape[:2]

target_fps = input_fps / ADVANCE_FRAMES
target_frame_height = int(input_video_frame_height * SCALE_OUTPUT)
target_frame_width = int(input_video_frame_width * SCALE_OUTPUT)

cap.release()
print(
    f"The input video has a frame width of {input_video_frame_width}, "
    f"frame height of {input_video_frame_height} and runs at {input_fps:.2f} fps"
)
print(
    "The monodepth video will be scaled with a factor "
    f"{SCALE_OUTPUT}, have width {target_frame_width}, "
    f" height {target_frame_height}, and run at {target_fps:.2f} fps"
)

# Do Inference on a Video and Create Monodepth Video
# Initialize variables
input_video_frame_nr = 0
start_time = time.perf_counter()
total_inference_duration = 0

# Open input video
cap = cv2.VideoCapture(str(VIDEO_FILE))

# Create result video
out_video = cv2.VideoWriter(
    str(result_video_path),
    FOURCC,
    target_fps,
    (target_frame_width * 2, target_frame_height),
)

num_frames = int(NUM_SECONDS * input_fps)
total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT) if num_frames == 0 else num_frames
progress_bar = ProgressBar(total=total_frames)
progress_bar.display()

try:
    while cap.isOpened():
        ret, image = cap.read()
        if not ret:
            cap.release()
            break

        if input_video_frame_nr >= total_frames:
            break

        # Only process frames at the interval set by ADVANCE_FRAMES
        # Prepare frame for inference
        # resize to input shape for network; cv2.resize expects (width, height)
        resized_image = cv2.resize(src=image, dsize=(network_image_width, network_image_height))
        # reshape image to network input shape NCHW
        input_image = np.expand_dims(np.transpose(resized_image, (2, 0, 1)), 0)

        # Do inference
        inference_start_time = time.perf_counter()
        result = compiled_model([input_image])[output_key]
        inference_stop_time = time.perf_counter()
        inference_duration = inference_stop_time - inference_start_time
        total_inference_duration += inference_duration

        if input_video_frame_nr % (10 * ADVANCE_FRAMES) == 0:
            clear_output(wait=True)
            progress_bar.display()
            # input_video_frame_nr // ADVANCE_FRAMES gives the number of
            # frames that have been processed by the network
            display(
                Pretty(
                    f"Processed frame {input_video_frame_nr // ADVANCE_FRAMES}"
                    f"/{total_frames // ADVANCE_FRAMES}. "
                    f"Inference time per frame: {inference_duration:.2f} seconds "
                    f"({1/inference_duration:.2f} FPS)"
                )
            )

        # Transform network result to RGB image
        result_frame = to_rgb(convert_result_to_image(result))
        # Resize image and result to target frame shape
        result_frame = cv2.resize(result_frame, (target_frame_width, target_frame_height))
        image = cv2.resize(image, (target_frame_width, target_frame_height))
        # Put image and result side by side
        stacked_frame = np.hstack((image, result_frame))
        # Save frame to video
        out_video.write(stacked_frame)

        input_video_frame_nr = input_video_frame_nr + ADVANCE_FRAMES
        cap.set(cv2.CAP_PROP_POS_FRAMES, input_video_frame_nr)

        progress_bar.progress = input_video_frame_nr
        progress_bar.update()

except KeyboardInterrupt:
    print("Processing interrupted.")
finally:
    clear_output()
    processed_frames = num_frames // ADVANCE_FRAMES
    out_video.release()
    cap.release()
    end_time = time.perf_counter()
    duration = end_time - start_time

    print(
        f"Processed {processed_frames} frames in {duration:.2f} seconds. "
        f"Total FPS (including video processing): {processed_frames/duration:.2f}."
        f"Inference FPS: {processed_frames/total_inference_duration:.2f} "
    )
    print(f"Monodepth Video saved to '{str(result_video_path)}'.")

# Display Monodepth Video
video = Video(result_video_path, width=800, embed=True)
if not result_video_path.exists():
    plt.imshow(stacked_frame)
    raise ValueError("OpenCV was unable to write the video file. Showing one video frame.")
else:
    print(f"Showing monodepth video saved at\n{result_video_path.resolve()}")
    print(
        "If you cannot see the video in your browser, please click on the "
        "following link to download the video "
    )
    video_link = FileLink(result_video_path)
    video_link.html_link_str = "<a href='%s' download>%s</a>"
    display(HTML(video_link._repr_html_()))
    display(video)
# Upload the required wheel files, models and images in a google drive folder
# Uncomment and run the below command to copy them in your current workspace
#!cp /content/drive/MyDrive/TF-OV/working_dir_files/* . 

import os

# Enable these variables for runtime inference optimizations
os.environ["OPENVINO_TF_CONVERT_VARIABLES_TO_CONSTANTS"] = "1"
os.environ["TF_ENABLE_ONEDNN_OPTS"] = "1"

!python3 -m pip install --upgrade pip
!python3 -m pip -q install pillow
!python3 -m pip -q install -U tensorflow_hub

# Install TensorFlow (v2.8.0) and OpenVINO-TensorFlow (v2.0.0) only if they aren't found
!if python3 -c "import tensorflow"; then echo "Found TensorFlow. Skipping."; else echo "TensorFlow Not Found. Installing."; python3 -m pip -q install tensorflow==2.8.0; fi
!if python3 -c "import openvino_tensorflow"; then echo "Found OpenVINO-TensorFlow. Skipping."; else echo "OpenVINO-TensorFlow Not Found. Installing.";

# Now let's infer
from __future__ import absolute_import, division, print_function, unicode_literals
from IPython.display import HTML

import numpy as np
import tensorflow as tf
import openvino_tensorflow as ovtf
import tensorflow_hub as hub

from PIL import Image
import time

def preprocess_image(file_name,
               input_height=299,
               input_width=299,
               input_mean=0,
               input_std=255):
    """Reads input image file, resizes it to given input height and width
       and returns the pre-processed image
    """  
    image = Image.open(file_name)
    resized_image = image.resize((input_width, input_height))  # PIL expects (width, height)
    resized_image = np.asarray(resized_image, np.float32)
    normalized_image = (resized_image - input_mean) / input_std
    result = np.expand_dims(normalized_image, 0)
    return result
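
# Toy self-check (temporary file, illustration only): the helper returns a
# batched NHWC float array normalized with the given mean/std.
Image.new("RGB", (640, 480)).save("/tmp/_demo.jpg")
print(preprocess_image("/tmp/_demo.jpg").shape)  # (1, 299, 299, 3)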

def load_labels(label_file):
    """Parses the label file, assuming that labels are separated with a newline
       in the file and returns the list of labels.
    """  
    label = []
    proto_as_ascii_lines = tf.io.gfile.GFile(label_file).readlines()
    for l in proto_as_ascii_lines:
        label.append(l.rstrip())
    return label

def infer_openvino_tensorflow(model_file, file_name , input_height, input_width, input_mean, input_std, label_file):
    """Takes the tensorflow model and all other input parameters as arguments. 
       Runs inference with the classification model and prints the predictions.
    """
    print("CREATE MODEL - BEGIN")
    if model_file=="":
        model = hub.load("https://tfhub.dev/google/imagenet/inception_v3/classification/4")
    else:
        model = tf.saved_model.load(model_file)
    print("CREATE MODEL - END")

    print("PREDICTION - BEGIN") 

    img =  tf.convert_to_tensor(preprocess_image(
            file_name, input_height=input_height, input_width=input_width, input_mean=input_mean, input_std=input_std))

    # Warmup
    results = model(img)
    # Run
    
    for num_times in range(10):
        start = time.time()
        results = model(img)
        elapsed = time.time() - start
        print('Inference time in ms: %f' % (elapsed * 1000))
            
    print("PREDICTION - END")
    results = tf.nn.softmax(results).numpy()
    
    if label_file:
        top_5 = tf.argsort(results, axis=-1, direction="DESCENDING")[0][:5].numpy()
        labels = load_labels(label_file)
        for i,item in enumerate(top_5):
            print(labels[item], results[0][top_5][i])
    else:
        print("No label file provided. Cannot print classification results")

# Set all the parameters needed for inference
# Enable OpenVINO™ integration with TensorFlow, and set the backend in just a few simple lines of code to boost performance
# Infer the input image
# Output the top 5 predicted classes, and the inference time with OpenVINO™ integration with TensorFlow enabled

file_name = tf.keras.utils.get_file(
    'grace_hopper.jpg',
    "https://www.tensorflow.org/images/grace_hopper.jpg")
model_file = ""
label_file = tf.keras.utils.get_file(
    'ImageNetLabels.txt',
    'https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt')
input_height = 299
input_width = 299
input_mean = 0
input_std = 255
backend_name = "CPU"
    

# Print the list of available backends
print('Available Backends:')
backends_list = ovtf.list_backends()
for backend in backends_list:
    print(backend)
ovtf.set_backend(backend_name)

print("OpenVINO TensorFlow is enabled")
infer_openvino_tensorflow(model_file, file_name, input_height, input_width, input_mean, input_std, label_file)

# Disable OpenVINO™ integration with TensorFlow to gauge the achieved performance boost
# Infer the input image again
# Output the top 5 predicted classes, and the inference time with OpenVINO™ integration with TensorFlow disabled

ovtf.disable() ## Disabling OVTF
print("OpenVINO TensorFlow is disabled")
infer_openvino_tensorflow(model_file, file_name, input_height, input_width, input_mean, input_std, label_file )
ovtf.enable()
import os
import pandas as pd

# Get the list of JSON files in the folder:
str_address = r"/media/New Volume/a3d/pdb/json_parser/"
lst_files = [i for i in os.listdir(str_address) if i.endswith(".json")]

# Loop through the JSON files and convert each one to CSV
for file_ in lst_files:
    df = pd.read_json(os.path.join(str_address, file_))
    df.to_csv(os.path.join(str_address, file_ + ".txt"), index=False)
from django.db import IntegrityError

# Catch a unique-constraint violation on save (`obj` is a placeholder for any
# model instance; e.message was removed in Python 3 - use str(e), or e.args[0]
# from Django 1.10 onwards)
try:
    obj.save()
except IntegrityError as e:
    if 'unique constraint' in str(e):
        pass  # do something, e.g. report the duplicate to the user
password = "0004"  # keep leading zeros by storing the value as a string;
                   # a bare 0004 literal is a SyntaxError in Python 3
print(password)
1.  abc				- This tiny module delivers the environment you need for creating abstract base classes
2.  argparse		- This module contains tools that make it easier to create user-friendly interfaces from the level of the command line
3.  asyncio			- This is a very large module that delivers the framework and environment for asynchronous programming
4.  base64			- This well-known module delivers a tool for coding and decoding binary code into a format that can be displayed – and 
					  the other way round
5.  collections		- This module offers specialized container data types that work as an alternative to basic contained types for general
					  purposes such as dict, list, set, and tuple.
6.  copy			- Everyone uses this tiny module that contains tools for deep copying of container type data. Its most famous function is `deepcopy` – if not for this function, copying lists and dictionaries would become a torture for developers.
7.  csv				- Delivers functionalities for exporting and importing tabular data in CSV format. The module allows developers to say “load data” or “save data” from to a CSV file.
8.  datetime		- It delivers tools that make it easier to work with dates and times
9.  decimal 		- The module delivers a data type called Decimal. Its main advantage is correct rounding of decimal numbers which is extremely important in billing systems– even
					  a slight distortion with several hundred thousand operations can change the final result significantly
10. functools		- The functools module comes in handy for higher-order functions, ie. the functions that act on or return other functions. You can treat any callable 
					  object as a function for the purposes of this module.
11. hashlib			- his handy module implements a common interface to numerous secure hash and message digest algorithms like the FIPS 
					  secure hash algorithms SHA1, SHA224, SHA256, SHA384, and SHA512
12. http			- his package collects several modules for working with the HyperText Transfer Protocol 
13. importlib 		- The package provides the implementation of the import statement (and the __import__() function) in Python source code
14. itertools		- This useful module implements iterator building blocks inspired by constructs 
					  from APL, Haskell, and SML (all in a form that matches Python programs)
15. inspect 		- The module offers several useful functions that help developers get information about live objects such as 
					  modules, classes, methods, functions, tracebacks, frame objects, and code objects in Python code
16. json			- Helps to work with json data
17. logging			- This module defines functions and classes that provide a flexible event logging system for applications and libraries
18. math			- This module gives developers access to the mathematical functions defined by the C standard
19. multiprocessing	- This handy package supports spawning processes with the help of an API similar to the threading module
20. os				- Offers a portable method of using operating system dependent functionality.
21. pdb				- This module defines an interactive source code debugger
22. random			- pseudo-random number generators for various distributions
23. re				- Provides regular expression matching operations similar to those you get in Perl
24. shutil			- Provides a number of high-level operations on files and collections of files, especially functions that 
					  support file copying and removal
25. sys				- offers access to variables used or maintained by the interpreter and functions that interact strongly 
					  with the interpreter
26. threading		- This useful module builds higher-level threading interfaces on top of the lower level _thread module
27. types			- It defines names for object types used by the standard Python interpreter but not exposed as builtins
28. unittest		- Works similarly to major unit testing frameworks in other programming languages
29. urllib			- This handy package collects several modules for working with URLs
30. uuid			- The module provides immutable UUID objects, as well as functions for generating version 1, 3, 4, and 5 UUIDs
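# A minimal illustrative sketch (not tied to any project above; sample inputs are arbitrary)
# showing a few of the modules listed above in action:
import hashlib
import itertools
import uuid

# hashlib: SHA-256 digest of some sample bytes
print(hashlib.sha256(b"hello world").hexdigest())

# itertools: cartesian product of two iterables without nested loops
for pair in itertools.product([1, 2], ["a", "b"]):
    print(pair)

# uuid: generate a random version-4 UUID
print(uuid.uuid4())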
# Imports
import os
import sys
import zipfile
from pathlib import Path

import numpy as np
from monai.transforms import LoadImage
from openvino.inference_engine import IECore

sys.path.append("../utils")
from models.custom_segmentation import SegmentationModel
from notebook_utils import benchmark_model, download_file, show_live_inference

# Settings
# The directory that contains the IR model (xml and bin) files
MODEL_PATH = "pretrained_model/quantized_unet_kits19.xml"
# Uncomment the next line to use the FP16 model instead of the quantized model
# MODEL_PATH = "pretrained_model/unet_kits19.xml"

# Benchmark Model Performance
ie = IECore()
# By default, benchmark on MULTI:CPU,GPU if a GPU is available, otherwise on CPU.
device = "MULTI:CPU,GPU" if "GPU" in ie.available_devices else "CPU"
# Uncomment one of the options below to benchmark on other devices
# device = "GPU"
# device = "CPU"
# device = "AUTO"

# Benchmark model
benchmark_model(model_path=MODEL_PATH, device=device, seconds=15)

# Download and Prepare Data
# Directory that contains the CT scan data. This directory should contain subdirectories
# case_00XXX where XXX is between 000 and 299
BASEDIR = Path("kits19_frames_1")
# The CT scan case number. For example: 16 for data from the case_00016 directory
# Currently only 117 is supported
CASE = 117

case_path = BASEDIR / f"case_{CASE:05d}"

if not case_path.exists():
    filename = download_file(
        f"https://storage.openvinotoolkit.org/data/test_data/openvino_notebooks/kits19/case_{CASE:05d}.zip"
    )
    with zipfile.ZipFile(filename, "r") as zip_ref:
        zip_ref.extractall(path=BASEDIR)
    os.remove(filename)  # remove zipfile
    print(f"Downloaded and extracted data for case_{CASE:05d}")
else:
    print(f"Data for case_{CASE:05d} exists")

# Load Model and List of Image Files
ie = IECore()
segmentation_model = SegmentationModel(
    ie=ie, model_path=Path(MODEL_PATH), sigmoid=True, rotate_and_flip=True
)
image_paths = sorted(case_path.glob("imaging_frames/*jpg"))

print(f"{case_path.name}, {len(image_paths)} images")

# Show Inference
# Possible options for device include "CPU", "GPU", "AUTO", "MULTI"
device = "MULTI:CPU,GPU" if "GPU" in ie.available_devices else "CPU"
reader = LoadImage(image_only=True, dtype=np.uint8)

show_live_inference(
    ie=ie, image_paths=image_paths, model=segmentation_model, device=device, reader=reader
)
# Preparation
model_name = "mobilenet-v2-pytorch"

# Imports
import json
import sys
from pathlib import Path

from IPython.display import Markdown, display
from openvino.runtime import Core

sys.path.append("../utils")
from notebook_utils import DeviceNotFoundAlert, NotebookAlert

# Settings and Configuration
base_model_dir = Path("~/open_model_zoo_models").expanduser()
omz_cache_dir = Path("~/open_model_zoo_cache").expanduser()
precision = "FP16"

# Check if an iGPU is available on this system to use with Benchmark App
ie = Core()
gpu_available = "GPU" in ie.available_devices

print(
    f"base_model_dir: {base_model_dir}, omz_cache_dir: {omz_cache_dir}, gpu_availble: {gpu_available}"
)

# Download Model from Open Model Zoo
download_command = (
    f"omz_downloader --name {model_name} --output_dir {base_model_dir} --cache_dir {omz_cache_dir}"
)
display(Markdown(f"Download command: `{download_command}`"))
display(Markdown(f"Downloading {model_name}..."))
! $download_command

# Convert Model to OpenVINO IR format
convert_command = f"omz_converter --name {model_name} --precisions {precision} --download_dir {base_model_dir} --output_dir {base_model_dir}"
display(Markdown(f"Convert command: `{convert_command}`"))
display(Markdown(f"Converting {model_name}..."))

! $convert_command

# Get Model Information
model_info_output = %sx omz_info_dumper --name $model_name
model_info = json.loads(model_info_output.get_nlstr())

if len(model_info) > 1:
    NotebookAlert(
        f"There are multiple IR files for the {model_name} model. The first model in the "
        "omz_info_dumper output will be used for benchmarking. Change "
        "`selected_model_info` in the cell below to select a different model from the list.",
        "warning",
    )

model_info

selected_model_info = model_info[0]
model_path = (
    base_model_dir
    / Path(selected_model_info["subdirectory"])
    / Path(f"{precision}/{selected_model_info['name']}.xml")
)
print(model_path, "exists:", model_path.exists())

benchmark_command = f"benchmark_app -m {model_path} -t 15"
display(Markdown(f"Benchmark command: `{benchmark_command}`"))
display(Markdown(f"Benchmarking {model_name} on CPU with async inference for 15 seconds..."))

! $benchmark_command

# Benchmark with Different Settings
def benchmark_model(model_xml, device="CPU", seconds=60, api="async", batch=1):
    ie = Core()
    model_path = Path(model_xml)
    if ("GPU" in device) and ("GPU" not in ie.available_devices):
        DeviceNotFoundAlert("GPU")
    else:
        benchmark_command = f"benchmark_app -m {model_path} -d {device} -t {seconds} -api {api} -b {batch}"
        display(Markdown(f"**Benchmark {model_path.name} with {device} for {seconds} seconds with {api} inference**"))
        display(Markdown(f"Benchmark command: `{benchmark_command}`"))

        benchmark_output = %sx $benchmark_command
        print("command ended")
        benchmark_result = [line for line in benchmark_output
                            if not (line.startswith(r"[") or line.startswith("  ") or line == "")]
        print("\n".join(benchmark_result))

ie = Core()

# Show devices available for OpenVINO Inference Engine
for device in ie.available_devices:
    device_name = ie.get_property(device, "FULL_DEVICE_NAME")
    print(f"{device}: {device_name}")

benchmark_model(model_path, device="CPU", seconds=15, api="async")

benchmark_model(model_path, device="AUTO", seconds=15, api="async")

benchmark_model(model_path, device="GPU", seconds=15, api="async")

benchmark_model(model_path, device="MULTI:CPU,GPU", seconds=15, api="async")
# Imports
import os
import time

import cv2
import matplotlib.pyplot as plt
import numpy as np
import paddlehub as hub
from IPython.display import Markdown, display
from PIL import Image
from openvino.runtime import Core
from paddle.static import InputSpec
from scipy.special import softmax

# Settings
IMAGE_FILENAME = "coco_close.png"

MODEL_NAME = "mobilenet_v3_large_imagenet_ssld"
hub.config.server = "https://paddlepaddle.org.cn/paddlehub"

# Show Inference on PaddlePaddle Model
classifier = hub.Module(name=MODEL_NAME)

# Load image in BGR format, as specified in model documentation
image = cv2.imread(filename=IMAGE_FILENAME)
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
result = classifier.classification(images=[image], top_k=3)
for class_name, softmax_probability in result[0].items():
    print(f"{class_name}, {softmax_probability:.5f}")

from mobilenet_v3_large_imagenet_ssld.data_feed import process_image

pil_image = Image.open(IMAGE_FILENAME)
processed_image = process_image(pil_image)
print(f"Processed image shape: {processed_image.shape}")
# Processed image is in (C,H,W) format, convert to (H,W,C) to show the image
plt.imshow(np.transpose(processed_image, (1, 2, 0)))

# Preparation
input_shape = list(classifier.cpu_predictor.get_input_tensor_shape().values())
print("input shape:", input_shape)
print("mean:", classifier.get_pretrained_images_mean())
print("std:", classifier.get_pretrained_images_std())

# Convert PaddlePaddle Model to ONNX
target_height, target_width = next(iter(input_shape))[2:]
x_spec = InputSpec([1, 3, target_height, target_width], "float32", "x")
print(
    "Exporting PaddlePaddle model to ONNX with target_height "
    f"{target_height} and target_width {target_width}"
)
classifier.export_onnx_model(".", input_spec=[x_spec], opset_version=11)

# Convert ONNX model to OpenVINO IR Format
model_xml = f"{MODEL_NAME}.xml"
if not os.path.exists(model_xml):
    mo_command = f'mo --input_model {MODEL_NAME}.onnx --input_shape "[1,3,{target_height},{target_width}]"'
    display(Markdown(f"Model Optimizer command to convert the ONNX model to IR: `{mo_command}`"))
    display(Markdown("_Converting model to IR. This may take a few minutes..._"))
    ! $mo_command
else:
    print(f"{model_xml} already exists.")

# Show Inference on OpenVINO Model
# Load Inference Engine and IR model
ie = Core()
model = ie.read_model(model=f"{MODEL_NAME}.xml", weights=f"{MODEL_NAME}.bin")
compiled_model = ie.compile_model(model=model, device_name="CPU")

# Get model output
output_layer = compiled_model.output(0)

# Read, show, and preprocess input image
# See the "Show Inference on PaddlePaddle Model" section for source of process_image
image = Image.open(IMAGE_FILENAME)
plt.imshow(image)
input_image = process_image(image)[None,]

# Do inference
ie_result = compiled_model([input_image])[output_layer][0]

# Compute softmax probabilities for the inference result and find the top three values
softmax_result = softmax(ie_result)
top_indices = np.argsort(softmax_result)[-3:][::-1]
top_softmax = softmax_result[top_indices]

# Convert the inference results to class names, using the same labels as the PaddlePaddle classifier
for index, softmax_probability in zip(top_indices, top_softmax):
    print(f"{classifier.label_list[index]}, {softmax_probability:.5f}")

# Timing and Comparison
num_images = 50

# PaddlePaddle's classification method expects a BGR numpy array
image = cv2.imread(filename=IMAGE_FILENAME)

# The process_image function expects a PIL image
pil_image = Image.open(fp=IMAGE_FILENAME)

# Show CPU information
ie = Core()
print(f"CPU: {ie.get_property(device_name='CPU', name='FULL_DEVICE_NAME')}")

# Show inference speed on PaddlePaddle model
start = time.perf_counter()
for _ in range(num_images):
    result = classifier.classification(images=[image], top_k=3)
end = time.perf_counter()
time_ir = end - start
print(
    f"PaddlePaddle model on CPU: {time_ir/num_images:.4f} "
    f"seconds per image, FPS: {num_images/time_ir:.2f}\n"
)
print("PaddlePaddle result:")
for class_name, softmax_probability in result[0].items():
    print(f"{class_name}, {softmax_probability:.5f}")
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB));

# Show inference speed on OpenVINO IR model
compiled_model = ie.compile_model(model=model, device_name="CPU")
output_layer = compiled_model.output(0)


start = time.perf_counter()
input_image = process_image(pil_image)[None,]
for _ in range(num_images):
    ie_result = compiled_model([input_image])[output_layer][0]
    result_index = np.argmax(ie_result)
    class_name = classifier.label_list[np.argmax(ie_result)]
    softmax_result = softmax(ie_result)
    top_indices = np.argsort(softmax_result)[-3:][::-1]
    top_softmax = softmax_result[top_indices]

end = time.perf_counter()
time_ir = end - start

print(
    f"IR model in Inference Engine (CPU): {time_ir/num_images:.4f} "
    f"seconds per image, FPS: {num_images/time_ir:.2f}"
)
print()
print("OpenVINO result:")
for index, softmax_probability in zip(top_indices, top_softmax):
    print(f"{classifier.label_list[index]}, {softmax_probability:.5f}")
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB));
# Imports
import sys
import time
from pathlib import Path

import cv2
import numpy as np
import torch
from IPython.display import Markdown, display
from fastseg import MobileV3Large
from openvino.runtime import Core

sys.path.append("../utils")
from notebook_utils import CityScapesSegmentation, segmentation_map_to_image, viz_result_image

# Settings
IMAGE_WIDTH = 1024  # Suggested values: 2048, 1024 or 512. The minimum width is 512.
# Set IMAGE_HEIGHT manually for custom input sizes. Minimum height is 512
IMAGE_HEIGHT = 1024 if IMAGE_WIDTH == 2048 else 512
DIRECTORY_NAME = "model"
BASE_MODEL_NAME = DIRECTORY_NAME + f"/fastseg{IMAGE_WIDTH}"

# Paths where PyTorch, ONNX and OpenVINO IR models will be stored
model_path = Path(BASE_MODEL_NAME).with_suffix(".pth")
onnx_path = model_path.with_suffix(".onnx")
ir_path = model_path.with_suffix(".xml")

# Download the Fastseg Model
print("Downloading the Fastseg model (if it has not been downloaded before)....")
model = MobileV3Large.from_pretrained().cpu().eval()
print("Loaded PyTorch Fastseg model")

# Save the model
model_path.parent.mkdir(exist_ok=True)
torch.save(model.state_dict(), str(model_path))
print(f"Model saved at {model_path}")

# Convert PyTorch model to ONNX
if not onnx_path.exists():
    dummy_input = torch.randn(1, 3, IMAGE_HEIGHT, IMAGE_WIDTH)

    # For the Fastseg model, setting do_constant_folding to False is required
    # for PyTorch>1.5.1
    torch.onnx.export(
        model,
        dummy_input,
        onnx_path,
        opset_version=11,
        do_constant_folding=False,
    )
    print(f"ONNX model exported to {onnx_path}.")
else:
    print(f"ONNX model {onnx_path} already exists.")

# Convert ONNX Model to OpenVINO IR Format
# Construct the command for Model Optimizer
mo_command = f"""mo
                 --input_model "{onnx_path}"
                 --input_shape "[1,3, {IMAGE_HEIGHT}, {IMAGE_WIDTH}]"
                 --mean_values="[123.675, 116.28 , 103.53]"
                 --scale_values="[58.395, 57.12 , 57.375]"
                 --data_type FP16
                 --output_dir "{model_path.parent}"
                 """
mo_command = " ".join(mo_command.split())
print("Model Optimizer command to convert the ONNX model to OpenVINO:")
display(Markdown(f"`{mo_command}`"))

if not ir_path.exists():
    print("Exporting ONNX model to IR... This may take a few minutes.")
    mo_result = %sx $mo_command
    print("\n".join(mo_result))
else:
    print(f"IR model {ir_path} already exists.")

# Show results: Load and Preprocess an Input Image
def normalize(image: np.ndarray) -> np.ndarray:
    """
    Normalize the image to the given mean and standard deviation
    for CityScapes models.
    """
    image = image.astype(np.float32)
    mean = (0.485, 0.456, 0.406)
    std = (0.229, 0.224, 0.225)
    image /= 255.0
    image -= mean
    image /= std
    return image

image_filename = "data/street.jpg"
image = cv2.cvtColor(cv2.imread(image_filename), cv2.COLOR_BGR2RGB)

resized_image = cv2.resize(image, (IMAGE_WIDTH, IMAGE_HEIGHT))
normalized_image = normalize(resized_image)

# Convert the resized images to network input shape
input_image = np.expand_dims(np.transpose(resized_image, (2, 0, 1)), 0)
normalized_input_image = np.expand_dims(np.transpose(normalized_image, (2, 0, 1)), 0)

# ONNX Model in Inference Engine
# Load network to Inference Engine
ie = Core()
model_onnx = ie.read_model(model=onnx_path)
compiled_model_onnx = ie.compile_model(model=model_onnx, device_name="CPU")

output_layer_onnx = compiled_model_onnx.output(0)

# Run inference on the input image
res_onnx = compiled_model_onnx([normalized_input_image])[output_layer_onnx]

# Convert network result to segmentation map and display the result
result_mask_onnx = np.squeeze(np.argmax(res_onnx, axis=1)).astype(np.uint8)
viz_result_image(
    image,
    segmentation_map_to_image(result_mask_onnx, CityScapesSegmentation.get_colormap()),
    resize=True,
)

# IR Model in Inference Engine
# Load the network in Inference Engine
ie = Core()
model_ir = ie.read_model(model=ir_path)
compiled_model_ir = ie.compile_model(model=model_ir, device_name="CPU")

# Get input and output layers
output_layer_ir = compiled_model_ir.output(0)

# Run inference on the input image
res_ir = compiled_model_ir([input_image])[output_layer_ir]

result_mask_ir = np.squeeze(np.argmax(res_ir, axis=1)).astype(np.uint8)
viz_result_image(
    image,
    segmentation_map_to_image(result=result_mask_ir, colormap=CityScapesSegmentation.get_colormap()),
    resize=True,
)

# PyTorch Comparison
with torch.no_grad():
    result_torch = model(torch.as_tensor(normalized_input_image).float())

result_mask_torch = torch.argmax(result_torch, dim=1).squeeze(0).numpy().astype(np.uint8)
viz_result_image(
    image,
    segmentation_map_to_image(result=result_mask_torch, colormap=CityScapesSegmentation.get_colormap()),
    resize=True,
)

# Performance Comparison
num_images = 20

start = time.perf_counter()
for _ in range(num_images):
    compiled_model_onnx([normalized_input_image])
end = time.perf_counter()
time_onnx = end - start
print(
    f"ONNX model in Inference Engine/CPU: {time_onnx/num_images:.3f} "
    f"seconds per image, FPS: {num_images/time_onnx:.2f}"
)

start = time.perf_counter()
for _ in range(num_images):
    compiled_model_ir([input_image])
end = time.perf_counter()
time_ir = end - start
print(
    f"IR model in Inference Engine/CPU: {time_ir/num_images:.3f} "
    f"seconds per image, FPS: {num_images/time_ir:.2f}"
)

with torch.no_grad():
    start = time.perf_counter()
    for _ in range(num_images):
        model(torch.as_tensor(input_image).float())
    end = time.perf_counter()
    time_torch = end - start
print(
    f"PyTorch model on CPU: {time_torch/num_images:.3f} seconds per image, "
    f"FPS: {num_images/time_torch:.2f}"
)

if "GPU" in ie.available_devices:
    compiled_model_onnx_gpu = ie.compile_model(model=model_onnx, device_name="GPU")
    start = time.perf_counter()
    for _ in range(num_images):
        compiled_model_onnx_gpu([input_image])
    end = time.perf_counter()
    time_onnx_gpu = end - start
    print(
        f"ONNX model in Inference Engine/GPU: {time_onnx_gpu/num_images:.3f} "
        f"seconds per image, FPS: {num_images/time_onnx_gpu:.2f}"
    )

    compiled_model_ir_gpu = ie.compile_model(model=model_ir, device_name="GPU")
    start = time.perf_counter()
    for _ in range(num_images):
        compiled_model_ir_gpu([input_image])
    end = time.perf_counter()
    time_ir_gpu = end - start
    print(
        f"IR model in Inference Engine/GPU: {time_ir_gpu/num_images:.3f} "
        f"seconds per image, FPS: {num_images/time_ir_gpu:.2f}"
    )

# Show Device Information
devices = ie.available_devices
for device in devices:
    device_name = ie.get_property(device_name=device, name="FULL_DEVICE_NAME")
    print(f"{device}: {device_name}")
# Imports
import time
from pathlib import Path

import cv2
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import Markdown, display
from openvino.runtime import Core

# Settings
# The paths of the source and converted models
model_path = Path("model/v3-small_224_1.0_float.pb")
ir_path = Path(model_path).with_suffix(".xml")

# Convert TensorFlow Model to OpenVINO IR Format
# Construct the command for Model Optimizer
mo_command = f"""mo
                 --input_model "{model_path}" 
                 --input_shape "[1,224,224,3]" 
                 --mean_values="[127.5,127.5,127.5]"
                 --scale_values="[127.5]" 
                 --data_type FP16 
                 --output_dir "{model_path.parent}"
                 """
mo_command = " ".join(mo_command.split())
print("Model Optimizer command to convert TensorFlow to OpenVINO:")
display(Markdown(f"`{mo_command}`"))

# Run Model Optimizer if the IR model file does not exist
if not ir_path.exists():
    print("Exporting TensorFlow model to IR... This may take a few minutes.")
    ! $mo_command
else:
    print(f"IR model {ir_path} already exists.")

# Load the Model
ie = Core()
model = ie.read_model(model=ir_path, weights=ir_path.with_suffix(".bin"))
compiled_model = ie.compile_model(model=model, device_name="CPU")

# Get Model Information
input_key = compiled_model.input(0)
output_key = compiled_model.output(0)
network_input_shape = input_key.shape

# Load an Image
# The MobileNet network expects images in RGB format
image = cv2.cvtColor(cv2.imread(filename="data/coco.jpg"), code=cv2.COLOR_BGR2RGB)

# Resize image to network input image shape
resized_image = cv2.resize(src=image, dsize=(224, 224))

# Transpose image to network input shape
input_image = np.expand_dims(resized_image, 0)

plt.imshow(image);

# Do Inference
result = compiled_model([input_image])[output_key]
result_index = np.argmax(result)

# Convert the inference result to a class name.
imagenet_classes = open("utils/imagenet_2012.txt").read().splitlines()

# The model description states that for this model, class 0 is background,
# so we add background at the beginning of imagenet_classes
imagenet_classes = ['background'] + imagenet_classes

imagenet_classes[result_index]

# Timing
num_images = 1000

start = time.perf_counter()

for _ in range(num_images):
    compiled_model([input_image])

end = time.perf_counter()
time_ir = end - start

print(
    f"IR model in Inference Engine/CPU: {time_ir/num_images:.4f} "
    f"seconds per image, FPS: {num_images/time_ir:.2f}"
)
# Imports
import cv2
import matplotlib.pyplot as plt
import numpy as np
from openvino.runtime import Core

# Load the Model
ie = Core()

model = ie.read_model(model="model/horizontal-text-detection-0001.xml")
compiled_model = ie.compile_model(model=model, device_name="CPU")

input_layer_ir = compiled_model.input(0)
output_layer_ir = compiled_model.output("boxes")

# Load an Image
# The text detection model expects images in BGR format
image = cv2.imread("data/intel_rnb.jpg")

# N,C,H,W = batch size, number of channels, height, width
N, C, H, W = input_layer_ir.shape

# Resize image to meet network expected input sizes
resized_image = cv2.resize(image, (W, H))

# Reshape to network input shape
input_image = np.expand_dims(resized_image.transpose(2, 0, 1), 0)

plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB));

# Do Inference
# Create inference request
boxes = compiled_model([input_image])[output_layer_ir]

# Remove zero only boxes
boxes = boxes[~np.all(boxes == 0, axis=1)]

# Visualize Results
# For each detection, the description has the format: [x_min, y_min, x_max, y_max, conf]
# The image passed here is in BGR format with resized width and height. To display it in the colors matplotlib expects, we use the cvtColor function
def convert_result_to_image(bgr_image, resized_image, boxes, threshold=0.3, conf_labels=True):
    # Define colors for boxes and descriptions
    colors = {"red": (255, 0, 0), "green": (0, 255, 0)}

    # Fetch image shapes to calculate ratio
    (real_y, real_x), (resized_y, resized_x) = bgr_image.shape[:2], resized_image.shape[:2]
    ratio_x, ratio_y = real_x / resized_x, real_y / resized_y

    # Convert base image from bgr to rgb format
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)

    # Iterate through non-zero boxes
    for box in boxes:
        # Pick confidence factor from last place in array
        conf = box[-1]
        if conf > threshold:
            # Convert float to int and multiply corner position of each box by x and y ratio
            # If a bounding box is found at the very top of the image,
            # position the upper box edge a little lower so it stays visible
            (x_min, y_min, x_max, y_max) = [
                int(max(corner_position * ratio_y, 10)) if idx % 2 
                else int(corner_position * ratio_x)
                for idx, corner_position in enumerate(box[:-1])
            ]

            # Draw box based on position, parameters in rectangle function are: image, start_point, end_point, color, thickness
            rgb_image = cv2.rectangle(rgb_image, (x_min, y_min), (x_max, y_max), colors["green"], 3)

            # Add text to image based on position and confidence
            # Parameters in text function are: image, text, bottom-left_corner_textfield, font, font_scale, color, thickness, line_type
            if conf_labels:
                rgb_image = cv2.putText(
                    rgb_image,
                    f"{conf:.2f}",
                    (x_min, y_min - 10),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.8,
                    colors["red"],
                    1,
                    cv2.LINE_AA,
                )

    return rgb_image

plt.figure(figsize=(10, 6))
plt.axis("off")
plt.imshow(convert_result_to_image(image, resized_image, boxes, conf_labels=False));
# Imports
import cv2
import matplotlib.pyplot as plt
import numpy as np
import sys
from openvino.runtime import Core

sys.path.append("../utils")
from notebook_utils import segmentation_map_to_image

# Load the Model
ie = Core()

model = ie.read_model(model="model/road-segmentation-adas-0001.xml")
compiled_model = ie.compile_model(model=model, device_name="CPU")

input_layer_ir = compiled_model.input(0)
output_layer_ir = compiled_model.output(0)

# Load an Image
# The segmentation network expects images in BGR format
image = cv2.imread("data/empty_road_mapillary.jpg")

rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image_h, image_w, _ = image.shape

# N,C,H,W = batch size, number of channels, height, width
N, C, H, W = input_layer_ir.shape

# OpenCV resize expects the destination size as (width, height)
resized_image = cv2.resize(image, (W, H))

# reshape to network input shape
input_image = np.expand_dims(
    resized_image.transpose(2, 0, 1), 0
)  
plt.imshow(rgb_image)

# Run the inference
result = compiled_model([input_image])[output_layer_ir]

# Prepare data for visualization
segmentation_mask = np.argmax(result, axis=1)
plt.imshow(segmentation_mask.transpose(1, 2, 0))

# Prepare Data for Visualization
# Define colormap, each color represents a class
colormap = np.array([[68, 1, 84], [48, 103, 141], [53, 183, 120], [199, 216, 52]])

# Define the transparency of the segmentation mask on the photo
alpha = 0.3

# Use function from notebook_utils.py to transform mask to an RGB image
mask = segmentation_map_to_image(segmentation_mask, colormap)
resized_mask = cv2.resize(mask, (image_w, image_h))

# Create image with mask put on
image_with_mask = cv2.addWeighted(resized_mask, alpha, rgb_image, 1 - alpha, 0)

# Visualize data
# Define titles with images
data = {"Base Photo": rgb_image, "Segmentation": mask, "Masked Photo": image_with_mask}

# Create subplot to visualize images
fig, axs = plt.subplots(1, len(data.items()), figsize=(15, 10))

# Fill subplot
for ax, (name, image) in zip(axs, data.items()):
    ax.axis('off')
    ax.set_title(name)
    ax.imshow(image)

# Display image
plt.show(fig)
# Load Inference Engine and Show Info
from openvino.runtime import Core
ie = Core()

devices = ie.available_devices

for device in devices:
    device_name = ie.get_property(device_name=device, name="FULL_DEVICE_NAME")
    print(f"{device}: {device_name}")

# Loading a Model
from openvino.runtime import Core

ie = Core()
classification_model_xml = "model/classification.xml"

model = ie.read_model(model=classification_model_xml)
compiled_model = ie.compile_model(model=model, device_name="CPU")

from openvino.runtime import Core

ie = Core()
onnx_model_path = "model/segmentation.onnx"
model_onnx = ie.read_model(model=onnx_model_path)
compiled_model_onnx = ie.compile_model(model=model_onnx, device_name="CPU")

from openvino.offline_transformations import serialize

serialize(model=model_onnx, model_path="model/exported_onnx_model.xml", weights_path="model/exported_onnx_model.bin")

# Getting Information about a Model
from openvino.runtime import Core

ie = Core()
classification_model_xml = "model/classification.xml"
model = ie.read_model(model=classification_model_xml)
model.input(0).any_name

input_layer = model.input(0)

print(f"input precision: {input_layer.element_type}")
print(f"input shape: {input_layer.shape}")

from openvino.runtime import Core

ie = Core()
classification_model_xml = "model/classification.xml"
model = ie.read_model(model=classification_model_xml)
model.output(0).any_name

output_layer = model.output(0)
output_layer

print(f"output precision: {output_layer.element_type}")
print(f"output shape: {output_layer.shape}")

from openvino.runtime import Core

ie = Core()
classification_model_xml = "model/classification.xml"
model = ie.read_model(model=classification_model_xml)
compiled_model = ie.compile_model(model=model, device_name="CPU")
input_layer = compiled_model.input(0)
output_layer = compiled_model.output(0)

import cv2

image_filename = "data/coco_hollywood.jpg"
image = cv2.imread(image_filename)
image.shape

# N,C,H,W = batch size, number of channels, height, width
N, C, H, W = input_layer.shape
# OpenCV resize expects the destination size as (width, height)
resized_image = cv2.resize(src=image, dsize=(W, H))
resized_image.shape

import numpy as np

input_data = np.expand_dims(np.transpose(resized_image, (2, 0, 1)), 0).astype(np.float32)
input_data.shape

# Do Inference
result = compiled_model([input_data])[output_layer]

request = compiled_model.create_infer_request()
request.infer(inputs={input_layer.any_name: input_data})
result = request.get_output_tensor(output_layer.index).data

from openvino.runtime import Core, PartialShape

ie = Core()
segmentation_model_xml = "model/segmentation.xml"
segmentation_model = ie.read_model(model=segmentation_model_xml)
segmentation_input_layer = segmentation_model.input(0)
segmentation_output_layer = segmentation_model.output(0)

print("~~~~ ORIGINAL MODEL ~~~~")
print(f"input shape: {segmentation_input_layer.shape}")
print(f"output shape: {segmentation_output_layer.shape}")

new_shape = PartialShape([1, 3, 544, 544])
segmentation_model.reshape({segmentation_input_layer.any_name: new_shape})
segmentation_compiled_model = ie.compile_model(model=segmentation_model, device_name="CPU")
# help(segmentation_compiled_model)
print("~~~~ RESHAPED MODEL ~~~~")
print(f"model input shape: {segmentation_input_layer.shape}")
print(
    f"compiled_model input shape: "
    f"{segmentation_compiled_model.input(index=0).shape}"
)
print(f"compiled_model output shape: {segmentation_output_layer.shape}")

# Change Batch Size
from openvino.runtime import Core, PartialShape

ie = Core()
segmentation_model_xml = "model/segmentation.xml"
segmentation_model = ie.read_model(model=segmentation_model_xml)
segmentation_input_layer = segmentation_model.input(0)
segmentation_output_layer = segmentation_model.output(0)
new_shape = PartialShape([2, 3, 544, 544])
segmentation_model.reshape({segmentation_input_layer.any_name: new_shape})
segmentation_compiled_model = ie.compile_model(model=segmentation_model, device_name="CPU")

print(f"input shape: {segmentation_input_layer.shape}")
print(f"output shape: {segmentation_output_layer.shape}")

import numpy as np
from openvino.runtime import Core, PartialShape

ie = Core()
segmentation_model_xml = "model/segmentation.xml"
segmentation_model = ie.read_model(model=segmentation_model_xml)
segmentation_input_layer = segmentation_model.input(0)
segmentation_output_layer = segmentation_model.output(0)
new_shape = PartialShape([2, 3, 544, 544])
segmentation_model.reshape({segmentation_input_layer.any_name: new_shape})
segmentation_compiled_model = ie.compile_model(model=segmentation_model, device_name="CPU")
input_data = np.random.rand(2, 3, 544, 544)

output = segmentation_compiled_model([input_data])

print(f"input data shape: {input_data.shape}")
print(f"result data data shape: {segmentation_output_layer.shape}")

# Caching a Model
import time
from pathlib import Path

from openvino.runtime import Core, PartialShape

ie = Core()

device_name = "GPU"  # Model Caching is not available for CPU

if device_name in ie.available_devices and device_name != "CPU":
    cache_path = Path("model/model_cache")
    cache_path.mkdir(exist_ok=True)
    # Enable caching for Inference Engine. To disable caching set enable_caching = False
    enable_caching = True
    config_dict = {"CACHE_DIR": str(cache_path)} if enable_caching else {}

    classification_model_xml = "model/classification.xml"
    model = ie.read_model(model=classification_model_xml)

    start_time = time.perf_counter()
    compiled_model = ie.compile_model(model=model, device_name=device_name, config=config_dict)
    end_time = time.perf_counter()
    print(f"Loading the network to the {device_name} device took {end_time-start_time:.2f} seconds.")
else:
    print("Model caching is not available on CPU devices.")

if device_name in ie.available_devices and device_name != "CPU":
    del compiled_model
    start_time = time.perf_counter()
    compiled_model = ie.compile_model(model=model, device_name=device_name, config=config_dict)
    end_time = time.perf_counter()
    print(f"Loading the network to the {device_name} device took {end_time-start_time:.2f} seconds.")
# Imports
import cv2
import matplotlib.pyplot as plt
import numpy as np
from openvino.runtime import Core

# Load the model
ie = Core()
model = ie.read_model(model="model/v3-small_224_1.0_float.xml")
compiled_model = ie.compile_model(model=model, device_name="CPU")
output_layer = compiled_model.output(0)

# Load an Image
# The MobileNet model expects images in RGB format
image = cv2.cvtColor(cv2.imread(filename="data/coco.jpg"), code=cv2.COLOR_BGR2RGB)

# resize to MobileNet image shape
input_image = cv2.resize(src=image, dsize=(224, 224))

# reshape to model input shape
input_image = np.expand_dims(input_image.transpose(2, 0, 1), 0)
plt.imshow(image);

# Do Inference
result_infer = compiled_model([input_image])[output_layer]
result_index = np.argmax(result_infer)

# Convert the inference result to a class name.
imagenet_classes = open("utils/imagenet_2012.txt").read().splitlines()

# The model description states that for this model, class 0 is background,
# so we add background at the beginning of imagenet_classes
imagenet_classes = ['background'] + imagenet_classes

imagenet_classes[result_index]
# Upload the required wheel files, models, and images to a Google Drive folder
# Uncomment and run the command below to copy them into your current workspace
#!cp /content/drive/MyDrive/TF-OV/working_dir_files/* . 

import os

# Enable these variables for runtime inference optimizations
os.environ["OPENVINO_TF_CONVERT_VARIABLES_TO_CONSTANTS"] = "1"
os.environ["TF_ENABLE_ONEDNN_OPTS"] = "1"

!python3 -m pip -q install --upgrade pip
!python3 -m pip -q install pillow
!python3 -m pip -q install keras_applications

# Install TensorFlow (v2.8.0) and OpenVINO-TensorFlow (v2.0.0) only if they aren't found
!if python3 -c "import tensorflow"; then echo "Found TensorFlow. Skipping."; else echo "TensorFlow Not Found. Installing."; python3 -m pip -q install tensorflow==2.8.0; fi
!if python3 -c "import openvino_tensorflow"; then echo "Found OpenVINO-TensorFlow. Skipping."; else echo "OpenVINO-TensorFlow Not Found. Installing."; 

OVTF_DIR = "/"
RAW_GITHUB_COMMON = "https://raw.githubusercontent.com/openvinotoolkit/openvino_tensorflow/master/examples/common/"
GITHUB_EXAMPLES = "https://github.com/openvinotoolkit/openvino_tensorflow/raw/master/examples/data/"
RAW_GITHUB_EXAMPLES = "https://raw.githubusercontent.com/openvinotoolkit/openvino_tensorflow/master/examples/"

import os

files = os.listdir('.')
if ('common' not in files or 'examples' not in files) and 'openvino_tensorflow' not in os.listdir(OVTF_DIR):
    !mkdir ./common
    !wget {RAW_GITHUB_COMMON}/post_process.py -O ./common/post_process.py
    !wget {RAW_GITHUB_COMMON}/pre_process.py -O ./common/pre_process.py
    !wget {RAW_GITHUB_COMMON}/utils.py -O ./common/utils.py
    !mkdir -p ./examples/data
    !wget {GITHUB_EXAMPLES}/grace_hopper.jpg -O ./examples/data/grace_hopper.jpg
    !wget {GITHUB_EXAMPLES}/yolov4_anchors.txt -O ./examples/data/yolov4_anchors.txt
    !wget {RAW_GITHUB_EXAMPLES}/convert_yolov4.sh -O ./examples/convert_yolov4.sh
    !wget {RAW_GITHUB_EXAMPLES}/keras_to_tensorflow.patch -O ./examples/keras_to_tensorflow.patch

import sys

if 'openvino_tensorflow' in os.listdir(OVTF_DIR):
    sys_append = os.path.abspath(OVTF_DIR + "/openvino_tensorflow/examples/")
    sys.path.append(sys_append)

from __future__ import absolute_import, division, print_function, unicode_literals

import numpy as np
import tensorflow as tf
import openvino_tensorflow as ovtf
from PIL import Image
import cv2
import matplotlib.pyplot as plt

from common.utils import get_input_mode, get_colors, draw_boxes, get_anchors, rename_file
from common.pre_process import preprocess_image_yolov3 as preprocess_image
from common.post_process import yolo3_postprocess_np

# Download and Convert the YoloV4 model
files = os.listdir('.')
if 'examples' in files:
    path = "examples"
else:
    path = "{0}/openvino_tensorflow/examples/".format(OVTF_DIR)
%cd {path}
!chmod +x convert_yolov4.sh && bash convert_yolov4.sh
# Once the model conversion has completed, move back out of the examples directory
%cd ../
  
def load_coco_names(file_name):
    """Parses the label file with only class names,
      and returns a dictionary mapping the class IDs to class names.
    """
    names = {}
    with open(file_name) as f:
        for id_, name in enumerate(f):
            names[id_] = name
    return names

def load_labels(label_file):
    """Parses the label file, assuming that labels are separated with a newline
       in the file and returns the list of labels.
    """  
    label = []
    proto_as_ascii_lines = tf.io.gfile.GFile(label_file).readlines()
    for l in proto_as_ascii_lines:
        label.append(l.rstrip())
    return label

def infer_openvino_tensorflow(model_file, image_file, input_height, input_width, label_file, anchor_file, conf_threshold, iou_threshold):
    """Takes the tensorflow model and all other input parameters as arguments. 
       Runs inference with the object detection model and prints the predictions.
    """
    print("CREATE MODEL - BEGIN")

    # Load model and process input image
    model = tf.saved_model.load(model_file)
    print("CREATE MODEL - END")

    if label_file:
        classes = load_coco_names(label_file)
        labels = load_labels(label_file)
        colors = get_colors(labels)

    if anchor_file:
        anchors = get_anchors(anchor_file)

    print("PREDICTION - BEGIN")
    
    #Preprocess Image
    image = Image.open(image_file)
    img = np.asarray(image)
    image_width, image_height = image.size
    img_resized = tf.convert_to_tensor(preprocess_image(image, (input_height, input_width)))

    # Warmup
    detected_boxes = model(img_resized)
    # Run
    import time
    start = time.time()
    detected_boxes = model(img_resized)
    elapsed = time.time() - start
    print('Inference time in ms: %f' % (elapsed * 1000))
    print("PREDICTION - END")  
    
    image_shape = tuple((image_height, image_width))  
    # apply non-max suppression, draw boxes, and save the updated image
    out_boxes, out_classes, out_scores = yolo3_postprocess_np(
            detected_boxes,
            image_shape,
            anchors,
            len(labels), (input_height, input_width),
            max_boxes=10,
            confidence=conf_threshold,
            iou_threshold=iou_threshold,
            elim_grid_sense=True)
    img_bbox = draw_boxes(img, out_boxes, out_classes, out_scores,
                        labels, colors)
    if output_dir:
        cv2.imwrite(os.path.join(output_dir, "detections.jpg"), img_bbox)
    else:
        cv2.imwrite("detections.jpg", img_bbox)
    plt.imshow(img)

input_file = "examples/data/grace_hopper.jpg"
model_file = "examples/data/yolo_v4"
label_file = "examples/data/coco.names"
anchor_file = "examples/data/yolov4_anchors.txt"
input_height = 416
input_width = 416
backend_name = "CPU"
output_dir = "."
conf_threshold = 0.6
iou_threshold = 0.5

#Print list of available backends
print('Available Backends:')
backends_list = ovtf.list_backends()
for backend in backends_list:
    print(backend)
ovtf.set_backend(backend_name)

print("OpenVINO TensorFlow is enabled")
infer_openvino_tensorflow(model_file, input_file, input_height, input_width, label_file, anchor_file, conf_threshold, iou_threshold )

ovtf.disable() ## Disabling OVTF
print("OpenVINO TensorFlow is disabled")
infer_openvino_tensorflow(model_file, input_file, input_height, input_width, label_file, anchor_file, conf_threshold, iou_threshold )
ovtf.enable()
import requests

url = 'https://dink.ga/api/criarURL?url=SEU_URL'

response = requests.request('GET', url)
print(response.text)
def add(*vectors): 
	return (sum([v[0] for v in vectors]), sum([v[1] for v in vectors]))
$ pip install -r requirements.txt --no-index --find-links file:///tmp/packages


pip3 install -r requirements.txt #ubuntu
pip install -r requirements.txt #win


py -m pip install --upgrade pip
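# A sketch of how the /tmp/packages directory used above might be populated beforehand
# on a machine with internet access (pip download fetches packages without installing them):
pip download -r requirements.txt -d /tmp/packages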
a = int(input("What's the first number? "))
b = int(input("What's the second number? ")) 
while 1:
    r=a%b
    if not r:
        break
    a=b
    b=r
print('GCD is:', b)
a = int(input("What's the first number? "))
b = int(input("What's the second number? ")) 
r=a%b
while r:
    a=b
    b=r
    r=a%b
print('GCD is:', b)
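# For reference, a minimal sketch of the same computation using the standard library;
# math.gcd returns the same greatest common divisor as the loops above:
import math

a = int(input("What's the first number? "))
b = int(input("What's the second number? "))
print('GCD is:', math.gcd(a, b))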
import numpy as np

Start = -2
End = 2
Increment = 0.25
Array = np.arange(Start, End, Increment)

for i in range(0, Array.size):
    print(Array[i])

df1 = (df.set_index(["location", "name"])
         .stack()
         .reset_index(name='Value')
         .rename(columns={'level_2':'Date'}))
print (df1)
  location  name        Date  Value
0        A  test    Jan-2010     12
1        A  test    Feb-2010     20
2        A  test  March-2010     30
3        B   foo    Jan-2010     18
4        B   foo    Feb-2010     20
5        B   foo  March-2010     25
# Drop first column of dataframe
df = df.iloc[: , 1:]
df_sample['prediction']=prediction
## Now let's save it to the proper submission.csv file. It is important to set the index parameter to False,
## otherwise our row index would be included in the output file, which I wouldn't recommend.
df_sample.to_csv('submission.csv', index=False)
#!/usr/bin/env python3
import random
from time import monotonic


def main():
    aufgabenanzahl = int(input("Wie viele Durchgänge? "))
    punkte = 0
    total_time = 0
    for _ in range(aufgabenanzahl):
        operanden = [random.randint(1, 10) for _ in range(4)]
        start_time = monotonic()
        antwort = int(
            input("Wieviel ist {} * {} + {} - {}? ".format(*operanden))
        )
        elapsed_time = monotonic() - start_time
        total_time += elapsed_time
        loesung = operanden[0] * operanden[1] + operanden[2] - operanden[3]
        if antwort != loesung:
            print(f"Falsch! Richtig ist {loesung}.")
        else:
            print(f"Richtig! {elapsed_time:.2f} Sekunden")
            punkte += 1

    print(
        f"{punkte} Punkte! Du hast {punkte} von {aufgabenanzahl} Aufgaben"
        f" richtig gerechnet!"
    )
    print(f"Das sind {punkte / aufgabenanzahl:.0%}!")
    print(f"Gesamtzeit für {aufgabenanzahl} Aufgaben: {total_time:.2f}s")


if __name__ == "__main__":
    main()
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split

def plot_learning_curves(model, X, y):
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2)
    train_errors, val_errors = [], []
    for m in range(1, len(X_train)):
        model.fit(X_train[:m], y_train[:m])
        y_train_predict = model.predict(X_train[:m])
        y_val_predict = model.predict(X_val)
        train_errors.append(mean_squared_error(y_train[:m], y_train_predict))
        val_errors.append(mean_squared_error(y_val, y_val_predict))
    plt.plot(np.sqrt(train_errors), "r-+", linewidth=2, label="train")
    plt.plot(np.sqrt(val_errors), "b-", linewidth=3, label="val")
import random
#https://gist.github.com/cwil323/9b1bfd25523f75d361879adfed550be2

def display_intro():
    title = "** A Simple Math Quiz **"
    print("*" * len(title))
    print(title)
    print("*" * len(title))


def display_menu():
    menu_list = ["1. Addition", "2. Subtraction", "3. Multiplication", "4. Integer Division", "5. Exit"]
    print(menu_list[0])
    print(menu_list[1])
    print(menu_list[2])
    print(menu_list[3])
    print(menu_list[4])


def display_separator():
    print("-" * 24)


def get_user_input():
    user_input = int(input("Enter your choice: "))
    while user_input > 5 or user_input <= 0:
        print("Invalid menu option.")
        user_input = int(input("Please try again: "))
    else:
        return user_input


def get_user_solution(problem):
    print("Enter your answer")
    print(problem, end="")
    result = int(input(" = "))
    return result


def check_solution(user_solution, solution, count):
    if user_solution == solution:
        count = count + 1
        print("Correct.")
        return count
    else:
        print("Incorrect.")
        return count


def menu_option(index, count):
    number_one = random.randrange(1, 21)
    number_two = random.randrange(1, 21)
    if index == 1:
        problem = str(number_one) + " + " + str(number_two)
        solution = number_one + number_two
        user_solution = get_user_solution(problem)
        count = check_solution(user_solution, solution, count)
        return count
    elif index == 2:
        problem = str(number_one) + " - " + str(number_two)
        solution = number_one - number_two
        user_solution = get_user_solution(problem)
        count = check_solution(user_solution, solution, count)
        return count
    elif index == 3:
        problem = str(number_one) + " * " + str(number_two)
        solution = number_one * number_two
        user_solution = get_user_solution(problem)
        count = check_solution(user_solution, solution, count)
        return count
    else:
        problem = str(number_one) + " // " + str(number_two)
        solution = number_one // number_two
        user_solution = get_user_solution(problem)
        count = check_solution(user_solution, solution, count)
        return count


def display_result(total, correct):
    if total > 0:
        result = correct / total
        percentage = round((result * 100), 2)
    if total == 0:
        percentage = 0
    print("You answered", total, "questions with", correct, "correct.")
    print("Your score is ", percentage, "%. Thank you.", sep = "")


def main():
    display_intro()
    display_menu()
    display_separator()

    option = get_user_input()
    total = 0
    correct = 0
    while option != 5:
        total = total + 1
        correct = menu_option(option, correct)
        option = get_user_input()

    print("Exit the quiz.")
    display_separator()
    display_result(total, correct)

main()
import random
import operator
def randomCalc():
    ops = {'+':operator.add,
           '-':operator.sub,
           '*':operator.mul,
           '/':operator.truediv}
    num1 = random.randint(0,12)
    num2 = random.randint(1,10)   # I don't sample 0's to protect against divide-by-zero
    op = random.choice(list(ops.keys()))
    answer = ops.get(op)(num1,num2)
    print('What is {} {} {}?\n'.format(num1, op, num2))
    return answer

def askQuestion():
    answer = randomCalc()
    guess = float(input())
    return guess == answer

def quiz():
    print('Welcome. This is a 10 question math quiz\n')
    score = 0
    for i in range(10):
        correct = askQuestion()
        if correct:
            score += 1
            print('Correct!\n')
        else:
            print('Incorrect!\n')
    return 'Your score was {}/10'.format(score)

import xarray as xr
import xesmf as xe
import numpy as np

def regrid(dr_in):
    """Convenience function for one-time regridding"""
    ds_out = xr.Dataset(
                {
                "lat": (["lat"], np.arange(-90, 91, 1.0)),
                "lon": (["lon"], np.arange(-180, 182, 2.0)),
                }
            )
    # ds_out
    regridder = xe.Regridder(dr_in, ds_out, 'bilinear', periodic=True)
    dr_out = regridder(dr_in)
    return dr_out

ERAin=xr.open_dataset('ERA5_monthly_SST.nc')

ERAOut=regrid(ERAin)

ERAOut

ERAOut.to_netcdf(path='ERA5_SST.nc',mode='w',format='NETCDF4')
from pyspark.sql import functions as f

df.withColumn('Day', f.date_format('date', 'EEEE')).show()
+----------+--------+
|      date|     Day|
+----------+--------+
|2010-01-07|Thursday|
+----------+--------+

import bpy

collection = bpy.data.collections["collection_name"]
for obj in collection.all_objects:
	# example #1
	obj.data.bevel_depth = 0.0005
	# example #2
	print(obj.name)
	# example #3
	obj.location[2] = 0
import os

currentFile = __file__  # May be 'my_script', or './my_script' or
                        # '/home/user/test/my_script.py' depending on exactly how
                        # the script was run/loaded.
realPath = os.path.realpath(currentFile)  # /home/user/test/my_script.py
dirPath = os.path.dirname(realPath)  # /home/user/test
dirName = os.path.basename(dirPath) # test
def plot_roc_curve(fpr, tpr, label=None):
    plt.plot(fpr, tpr, linewidth=2, label=label)
    plt.plot([0, 1], [0, 1], 'k--')
    plt.axis([0, 1, 0, 1])                                  
    plt.xlabel('False Positive Rate (Fall-Out)', fontsize=16)
    plt.ylabel('True Positive Rate (Recall)', fontsize=16)   
    plt.grid(True)                                           
plt.figure(figsize=(8, 6))                                   
plot_roc_curve(fpr, tpr)
plt.show()
def word_count(text):
    counts = dict()
    words = text.split()

    for word in words:
        if word in counts:
            counts[word] += 1
        else:
            counts[word] = 1

    counts_x = sorted(counts.items(), key=lambda kv: kv[1])
    #print(counts_x)
    return counts_x[-2]
 
print(word_count("Both of these issues are fixed by postponing the evaluation of annotations. Instead of compiling code which executes expressions in annotations at their definition time, the compiler stores the annotation in a string form equivalent to the AST of the expression in question. If needed, annotations can be resolved at runtime using typing.get_type_hints(). In the common case where this is not required, the annotations are cheaper to store (since short strings are interned by the interpreter) and make startup time faster."))


import pathlib

pathlib.Path('/tmp/sub1/sub2').mkdir(parents=True, exist_ok=True)
clc; clear; close all;
% known parameter
c=0.95;
siga=0.02;
sigt=siga/(1-c);
D=1/(3*sigt);
nu_sigf=0.0165;
N=100;
H=100;
h=H/N;
% calculate parameter
sigd=D/h^2;
ave_siga=siga;
ave_nu_sigf=nu_sigf;
alpha_l=10^7; %black
alpha_r=alpha_l; %black
% set matrix for M and F (linear system)
M=zeros(N+1);
F=zeros(N+1);
M(1,1)=sigd+(alpha_l/h)+(0.5*siga);
M(N+1,N+1)=M(1,1);
F(1,1)=0.5*nu_sigf;
F(N+1,N+1)=F(1,1);
for i=2:N
    M(i-1,i)=-sigd;
    M(i,i-1)=-sigd;
    M(i,i)=2*sigd+ave_siga;
    F(i,i)=ave_nu_sigf;
end

%%% solve matrix
%%% using eig and inv
A=inv(M)*F;
A
[flux,k]=eig(A)
flux_eig1=flux(:,1);
k_eig1=k(1,1);
flux_eig2=flux(:,2);
k_eig2=k(2,2);
flux_eig3=flux(:,3);
k_eig3=k(3,3);
k_eig=[k_eig1 k_eig2 k_eig3]
r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)"
In [12]: df.groupby('A', group_keys=False).apply(lambda x: x.loc[x.B.idxmax()])
Out[12]: 
   A   B
A       
1  1  20
2  2  40
3  3  10
def plot_precision_vs_recall(precisions, recalls):
    plt.plot(recalls, precisions, "b-", linewidth=2)
    plt.xlabel("Recall", fontsize=16)
    plt.ylabel("Precision", fontsize=16)
    plt.axis([0, 1, 0, 1])
    plt.grid(True)

plt.figure(figsize=(8, 6))
plot_precision_vs_recall(precisions, recalls)
# plt.plot([recall_90_precision, recall_90_precision], [0., 0.9], "r:")
# plt.plot([0.0, recall_90_precision], [0.9, 0.9], "r:")
# plt.plot([recall_90_precision], [0.9], "ro")
plt.show()
def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):
    plt.plot(thresholds, precisions[:-1], "b--", label="Precision", linewidth=2)
    plt.plot(thresholds, recalls[:-1], "g-", label="Recall", linewidth=2)
    plt.legend(loc="center right", fontsize=16) # Not shown in the book
    plt.xlabel("Threshold", fontsize=16)        # Not shown
    plt.grid(True)                              # Not shown
    plt.axis([-50000, 50000, 0, 1])             # Not shown



recall_90_precision = recalls[np.argmax(precisions >= 0.90)]
threshold_90_precision = thresholds[np.argmax(precisions >= 0.90)]


plt.figure(figsize=(8, 4))                                                                  # Not shown
plot_precision_recall_vs_threshold(precisions, recalls, thresholds)
plt.plot([threshold_90_precision, threshold_90_precision], [0., 0.9], "r:")                 # Not shown
plt.plot([-50000, threshold_90_precision], [0.9, 0.9], "r:")                                # Not shown
plt.plot([-50000, threshold_90_precision], [recall_90_precision, recall_90_precision], "r:")# Not shown
plt.plot([threshold_90_precision], [0.9], "ro")                                             # Not shown
plt.plot([threshold_90_precision], [recall_90_precision], "ro")                             # Not shown
plt.show()
import os 
dir_path = os.path.dirname(os.path.realpath(__file__))
data = open("vesko.txt") #Sa ovom komandom postaje fajl iterable da mozemo liniju po liniju da ga citamo i zato mozemo dolje gdje je vesko da idemo komandom for da iscitamo sve iz njega
print(data.readline(), end = '')
data.seek(0)
print(data.readline(), end = '')


print(data.readline(), end = '')

data.seek(0) # reset the read position to 0

for vesko in data:
    print(vesko, end='')
a = ['GSW', 'LAKERS', 'CLIPPERS']
b = [1, 2, 3]
dict(zip(a, b))
from sklearn.model_selection import StratifiedKFold
from sklearn.base import clone
skfolds = StratifiedKFold(n_splits=3, shuffle=True, random_state=42)  # shuffle=True is required when setting random_state in newer scikit-learn
for train_index, test_index in skfolds.split(X_train, y_train_5):
  clone_clf = clone(sgd_clf)
  X_train_folds = X_train[train_index]
  y_train_folds = y_train_5[train_index]
  X_test_fold = X_train[test_index]
  y_test_fold = y_train_5[test_index]
  clone_clf.fit(X_train_folds, y_train_folds)
  y_pred = clone_clf.predict(X_test_fold)
  n_correct = sum(y_pred == y_test_fold)
  print(n_correct / len(y_pred))



from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

logreg = LogisticRegression()
stratifiedkf = StratifiedKFold(n_splits=5)
score = cross_val_score(logreg, X, Y, cv=stratifiedkf)
import pandas as pd

pd.set_option('display.max_columns', 200)
pd.set_option('display.max_rows', 100)
pd.set_option('display.min_rows', 100)
pd.set_option('display.expand_frame_repr', True)
import pandas as pd

df1 = pd.DataFrame({'Data': ['a', 'b', 'c', 'd']})

df2 = pd.DataFrame({'Data': [1, 2, 3, 4]})

df3 = pd.DataFrame({'Data': [1.1, 1.2, 1.3, 1.4]})

writer = pd.ExcelWriter('multiple.xlsx', engine='xlsxwriter')

df1.to_excel(writer, sheet_name='Sheeta')

df2.to_excel(writer, sheet_name='Sheetb')

df3.to_excel(writer, sheet_name='Sheetc')

writer.close()  # close() writes and closes the file; ExcelWriter.save() was removed in pandas 2.0
from sklearn.model_selection import cross_val_score
scores = cross_val_score(lin_reg, full_df_prepared, label, scoring="neg_mean_squared_error", cv=10)
rmse_scores = np.sqrt(-scores)
def display_scores(scores):
  print("Scores:", scores)
  print("Mean:", scores.mean())
  print("Standard deviation:", scores.std())
display_scores(rmse_scores)
from sklearn.metrics import mean_squared_error
car_predictions = lin_reg.predict(full_df_prepared)
lin_mse = mean_squared_error(car_predictions, label)
lin_rmse = np.sqrt(lin_mse)
lin_rmse
import pygame,sys,os,random,math,cmath
def initierea_n_secta():
    pygame.init()
initierea_n_secta()
lun,lat=1100,800
WIN=pygame.display.set_mode((lun,lat))
FPS=60
WIN.fill((0,255,0))
clock=pygame.time.Clock()
pygame.display.set_caption("INVADERS OF PERDOR")
fundal=pygame.image.load(os.path.join('shits',"themap.jpg"))
pius=pygame.image.load(os.path.join('shits',"bulletsus.png"))
piuj=pygame.image.load(os.path.join('shits',"bulletjos.png"))
Razboinic=pygame.image.load(os.path.join("shits","warrior.png"))
Control_Panel=pygame.image.load(os.path.join("shits","Kontrol.png"))
run=True
### PLAYER STATISTICS ####################
playerX=490
playerY=600
playerbx=490
playerby=575
move=0
statusul='ready'
bullety_change_player=0
# MIN AND MAX MOVEMENT RANGE FOR ANY ENEMY ####################
maximx=1050
maximy=350
minx=0
miny=20
random_enemy_x=random.randint(minx,maximx)
random_enemy_y=random.randint(miny,maximy)
##################### TIME MARKERS ###########################
current_time=0
shittime=0
kronos=0
######## AMOUNT OF DAMAGE TAKEN AND HEALING WITH BUFFS #######################
amount_damage_taken=0
amount_damage_taken_mob=0
heal=0
buff=0
from niqqa import *
#### PLAYER CLASS + SPRITE GROUP FOR THE PLAYER #################
class Player(pygame.sprite.Sprite):
    def __init__(self):
        super().__init__()
        self.x=playerX
        self.y=playerY
        self.image=Razboinic
        self.rect=Razboinic.get_rect(topleft=[playerX,playerY])
        self.glont=pius
        self.health=100
        self.patratglontstang=pius.get_rect()
        self.patratglontdrept=pius.get_rect()  
        self.patratlungimeglont=self.glont.get_width()
        self.patratlatimeglont=self.glont.get_height()
        self.bullet_damage=5
        self.aparitie=True
    def bariere():
        global playerX
        if playerX<0:
            playerX=0
        if playerX>1000:
            playerX=1000 
    def health_bar():    
        global amount_damage_taken
        all_health_bar=pygame.draw.rect(Control_Panel,(0,0,0),(30,30,240,40),3,border_radius=5)
        current_health=200
        target_health=current_health-amount_damage_taken+heal
        if current_health>=target_health:
            transition_health_rect_width=current_health-target_health
        elif current_health<target_health:
            transition_health_rect_width=target_health-current_health   
        red_bar=pygame.draw.rect(Control_Panel,(255,0,0),(32,32,target_health+36,36),0,border_radius=5)
        transition_bar=pygame.draw.rect(Control_Panel,(100,0,200),(target_health+36,32,transition_health_rect_width,38),0,border_radius=5)
        for n in range(1,5):
            slash=pygame.draw.line(Control_Panel,(0,0,0),(59*n+32,30),(32+59*(n-1),68),4)                
    def APARITIE(self):
        if self.aparitie:
            group_Player.add(self)
            cioata_stanga_PL.aparitie=True
            cioata_dreapta_PL.aparitie=True
            group_Player.draw(WIN)
        elif self.aparitie==False:
            group_Player.remove(self)  
    def control_panel():
        global buff
        global amount_damage_taken
        WIN.blit(Control_Panel,(0,580))
        pygame.draw.circle(Control_Panel,(0,0,0),(77,150),17,2)
        pygame.draw.circle(Control_Panel,(0,0,0),(205,150),17,2)
        font=pygame.font.SysFont("chalkduster.ttf",25)
        font_as=pygame.font.SysFont("chalkduster.ttf",23)
        movement_speed_text=font.render("Speeduleanu",True,"green")
        WIN.blit(movement_speed_text,(18,680))
        attack_speed_text1=font_as.render("Mai fumeaza din ele",True,"yellow")
        WIN.blit(attack_speed_text1,(133,662))
        attack_speed_text2=font_as.render("da le drecu",True,"yellow")
        WIN.blit(attack_speed_text2,(165,685))
        if buff==0:
            pygame.draw.circle(Control_Panel,(100,0,150),(77,150),15)
            pygame.draw.circle(Control_Panel,(100,0,150),(205,150),15)
        if buff!=0:
            pygame.draw.circle(Control_Panel,(0,255,0),(205,150),15)
            pygame.draw.circle(Control_Panel,(0,255,0),(77,150),15)   
group_Player=pygame.sprite.Group()
America=Player()
### BULLET CLASS + PLAYER BULLET (CIOTARE) CLASS ###
class Bullet(pygame.sprite.Sprite):
    def __init__(self,image,q):
        super().__init__()
        self.x=random_enemy_x
        self.y=random_enemy_y
        self.image=image
        self.rect=self.image.get_rect()
        self.width=self.rect.width
        self.height=self.rect.height
        self.aparitie=q 
class Ciotarele(Bullet):
    def __init__(self,x):
        super().__init__(pius,False)
        self.stanga_sau_dreapta=x
        if self.stanga_sau_dreapta==1:
            self.rect.topleft=(playerbx-20,1000)
        elif self.stanga_sau_dreapta==2:
            self.rect.topleft=(playerbx+50,1000)
        else:
            self.rect.topleft=(-300,0)
    def cioateletata(self):
        WIN.blit(pius,(playerX,playerY)) 
        WIN.blit(pius,(playerX+70,playerY))  
    def fireb(self):
        global statusul
        global playerby
        global bullety_change_player
        statusul='fire'
        bullety_change_player=-7
        if self.stanga_sau_dreapta==1:
            self.rect.topleft=(playerbx-20,playerby)
        if self.stanga_sau_dreapta==2:
            self.rect.topleft=(playerbx+50,playerby)       
        if playerby<=-65:
            statusul='ready' 
            playerby=565  
            bullety_change_player=0
    def trage(self):
        global playerby
        if statusul == "fire":
            self.fireb()
cioata_stanga_PL=Ciotarele(1)
cioata_dreapta_PL=Ciotarele(2)
group_bullets_player=pygame.sprite.Group()         
def Ciotarele_afisaj():
    global group_bullets_player
    global playerby
    playerby+=bullety_change_player
    if cioata_dreapta_PL.aparitie and cioata_stanga_PL.aparitie: 
        if cioata_stanga_PL.stanga_sau_dreapta==1:   
            group_bullets_player.add(cioata_stanga_PL)
            cioata_stanga_PL.trage()
        if cioata_dreapta_PL.stanga_sau_dreapta==2:      
            cioata_dreapta_PL.trage()
            group_bullets_player.add(cioata_dreapta_PL)
        cioata_dreapta_PL.cioateletata()    
    group_bullets_player.draw(WIN)             
####################################################################
### ENEMY CLASS + PERDOR BULLETS ###
class Enemy(pygame.sprite.Sprite):
    def __init__(self,image,x,y,q):
        super().__init__()
        self.image=pygame.image.load(image)
        self.x=x
        self.y=y
        self.health=100
        self.bulletx=self.x
        self.bullety=self.y
        self.rect=self.image.get_rect(topleft=(self.x,self.y))
        self.width=self.image.get_width()
        self.height=self.image.get_height()
        self.cioata=piuj
        self.cioatapatrat=piuj.get_rect()
        self.cioatalatime=self.cioatapatrat.height
        self.cioatalungime=self.cioatapatrat.width
        self.xglontincur=self.x+15
        self.yglontincur=self.y+30
        self.patratglontincur=piuj.get_rect()
        self.patratglontincur.topleft=(self.xglontincur,self.yglontincur)
        self.bullet_damage=20
        self.aparitie=q
        self.cacare_time=False
    x_change=2
    y_change=2
    def moverestr(self):
        if self.x==maximx and self.y==maximy:
            self.x_change=-2
            self.y_change=-2
        if self.x==minx and self.y==miny:
            self.x_change=2
            self.y_change=2      
        if self.y==maximy and self.x==minx:
            self.y_change=-2
            self.x_change=2
        if self.y==miny and self.x==maximx:
            self.y_change=2
            self.x_change=-2
        if self.x==minx and self.y!=maximy and self.y_change==-2 and self.x_change==-2:
            self.y_change=-2
            self.x_change=2
        if self.x==minx and self.y!=maximy and self.y_change==2 and self.x_change==-2:
            self.y_change=2
            self.x_change=-2
        if self.y==miny and self.x!=maximx and self.x_change==-2 and self.y_change==-2:
            self.y_change=2
            self.x_change=-2
        if self.y==miny and self.x!=maximy and self.x_change==2 and self.y_change==-2:
            self.y_change=2
            self.x_change=2 
        if self.x==minx and self.y!=maximy and self.y_change==-2 and self.x_change==2:
            self.y_change=-2
            self.x_change=2
        if self.x==minx and self.y!=maximy and self.y_change==2 and self.x_change==-2:
            self.y_change=2
            self.x_change=2
        if self.y==miny and self.x!=maximx and self.x_change==-2 and self.y_change==2:
            self.y_change=2
            self.x_change=-2
        if self.y==miny and self.x!=maximy and self.x_change==2 and self.y_change==2:
            self.y_change=2
            self.x_change=2
        if self.x==maximx and self.y!=miny and self.y_change==-2:
            self.y_change=-2
            self.x_change=-2
        if self.x==maximx and self.y!=maximy and self.y_change==2:
            self.y_change=-2
            self.x_change=-2
        if self.y==maximy and self.x!=maximx and self.x_change==-2:
            self.y_change=-2
            self.x_change=-2
        if self.y==maximy and self.x!=maximx and self.x_change==2:
            self.y_change=-2
            self.x_change=2
        if self.x==maximx and self.y!=maximy and self.y_change==2 and self.x_change==2:
            self.x_change=-2
            self.y_change=-2
        if self.x==maximx and self.y!=maximy and self.y_change==-2 and self.x_change==2:
            self.x_change=-2
            self.y_change=2  
        if self.x<minx:
            self.x=minx
        if self.x>maximx:
            self.x=maximx
        if self.y<miny:
            self.y=miny
        if self.y>maximy:
            self.y=maximy
        if self.y==miny:
            self.y_change=2
        if self.y==maximy:
            self.y_change=-2
        if self.x==maximx and (self.y!=miny or self.y==maximy or self.y==miny) and self.x_change==2:
            self.x_change=-2
        if self.x==maximx and (self.y!=maximy or self.y!=miny) and self.x_change==-2 and self.y_change==-2:
            self.y_change=2
            self.x_change=-2              
    def APARITIE(self):
        global Perdor 
        if self.aparitie==False: 
            self.cacare_time=False
            group_enemies.remove(self)
        if self.aparitie:
            self.cacare_time=True
            Perdor(self).aparitie=True
            group_enemies.add(self)   
            group_enemies.draw(WIN) 
group_enemies=pygame.sprite.Group()
Butoi=Enemy(os.path.join("shits","DADA.png"),random_enemy_x,random_enemy_y,True)                
class Perdor(Bullet):
    def __init__(self,al_cui):
        super().__init__(piuj,False)
        self.al_cui=al_cui 
        self.x=al_cui.bulletx
        self.y=al_cui.bullety
        self.al_cui_aparitie=al_cui.aparitie
        self.rect=piuj.get_rect()
        if al_cui.aparitie:
            self.aparitie=True
        else:
            self.aparitie=False
        self.rect.topleft=(self.x,self.y)
        self.rect.bottomright=(self.x+self.rect.width,self.y+self.rect.height)
    def timer(self):
        global kronos
        global current_time
        global shittime
        if current_time-shittime<1500:
            kronos=10
        else:
            kronos=0     
    def lansarea(self):
        global current_time
        global shittime
        if current_time-shittime>1500:
            shittime=pygame.time.get_ticks()
    def Aparitie_perdor(self):
        global group_boss_bullet
        if self.aparitie:
            group_boss_bullet.add(self)
        else:
            group_boss_bullet.remove(self)        
group_boss_bullet=pygame.sprite.Group()
perdor_Butoi=Perdor(Butoi)
def Perdor_afisaj(al_cui):
    global group_boss_bullet
    global kronos 
    al_cui.xglontincur+=al_cui.x_change
    al_cui.bullety+=kronos
    al_cui.yglontincur+=al_cui.y_change
    Perdor(al_cui).timer()
    Perdor(al_cui).lansarea()
    Perdor(al_cui).Aparitie_perdor()
    if al_cui.bullety>=800:
        al_cui.bulletx=al_cui.xglontincur
        al_cui.bullety=al_cui.yglontincur                       
### ATTRIBUTE FUNCTIONS + GAME ATTRIBUTES ###
def Enemy_atributes(ale_cui):
    global amount_damage_taken_mob
    ale_cui.xglontincur+=0
    ale_cui.APARITIE()      
    ale_cui.health-=amount_damage_taken_mob
    ale_cui.rect.topleft=(ale_cui.x,ale_cui.y)
    ale_cui.x+=ale_cui.x_change
    ale_cui.y+=ale_cui.y_change
    if ale_cui.health<=0:
        ale_cui.health=0
        ale_cui.aparitie=False 
    ale_cui.moverestr()
def Bullet_Atributes():
    Ciotarele_afisaj()
    Perdor_afisaj(Butoi)
    group_boss_bullet.draw(WIN)  
def Player_Atributes():
    global playerX
    playerX+=move
    Player.bariere()
    America.APARITIE()
    America.health+=heal
    America.health-=amount_damage_taken
    America.rect.topleft=(playerX,playerY)
    America.patratglontstang.topleft=(playerbx-20,playerby-7)
    America.patratglontdrept.topleft=(playerbx+50,playerby-7)
    America.patratglontstang.bottomright=(playerbx-20+America.patratlungimeglont,playerby-7+America.patratlatimeglont)
    America.patratglontdrept.bottomright=(playerbx+50+America.patratlungimeglont,playerby-7+America.patratlatimeglont)
    if America.health<=0:
        America.health=0
        America.aparitie=False    
### GAME FUNCTIONALITY ###
def eventtypes():
    global move
    global playerbx
    global playerby
    global playerX
    for event in pygame.event.get():
        if event.type==pygame.QUIT:
            global run
            run=False  # was run==False, a comparison with no effect
            pygame.quit() 
            sys.exit()
        if event.type==pygame.KEYDOWN:
            if event.key==pygame.K_LEFT:
                move=-2.5
            if event.key==pygame.K_RIGHT:
                move=2.5
            if event.key==pygame.K_UP:
                if statusul=='ready':
                    playerbx=playerX+20 
                    if cioata_stanga_PL.aparitie==True:
                        Ciotarele.fireb(cioata_dreapta_PL)
                        cioata_stanga_PL.fireb()      
        if event.type==pygame.KEYUP:
            if event.key==pygame.K_LEFT:
                move=0
            if event.key==pygame.K_RIGHT:
                move=0      
def wallpaper():
    WIN.blit(fundal,(0,0))
def currenting_time():
   global current_time
   current_time=pygame.time.get_ticks() 
def GAME_ATRIBUTES():
    currenting_time()
    eventtypes()
    wallpaper()
def control_panel():
    Player.health_bar()
    Player.control_panel()
def flip_the_screen():
    pygame.display.flip()    
while run:
    clock.tick(FPS)
    GAME_ATRIBUTES()
    Player_Atributes() 
    Enemy_atributes(Butoi)
    Bullet_Atributes()
    control_panel()
    flip_the_screen()

some_data = df.iloc[:5]
some_labels = label.iloc[:5]
some_data_prepared = full_pipeline.transform(some_data)
print("Predictions:", lin_reg.predict(some_data_prepared))
print("Labels:", list(some_labels))
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
full_pipeline = ColumnTransformer([("num", num_pipeline, numerical_attribute), ("cat", OneHotEncoder(), categorical_attribute)])
full_df_prepared = full_pipeline.fit_transform(df)
full_df_prepared
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
num_pipeline = Pipeline([('imputer', SimpleImputer(strategy="median")), ('std_scaler', StandardScaler())])
transformed = num_pipeline.fit_transform(numerical_df)
numerical_df_transformed = pd.DataFrame(transformed, columns=numerical_df.columns, index=numerical_df.index)
numerical_df_transformed
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(strategy="median")
imputed = imputer.fit_transform(numerical_df)
numerical_df_imputed = pd.DataFrame(imputed, columns=numerical_df.columns, index=numerical_df.index)
numerical_df_imputed
from pandas.plotting import scatter_matrix
attributes = ["median_house_value", "median_income", "total_rooms",
"housing_median_age"]
scatter_matrix(df[attributes], figsize=(12, 8))
df.reset_index().plot(kind='scatter', x='index', y='column')
df.plot(kind="scatter", x="longitude", y="latitude", alpha=0.4, s=df["population"]/100, label="population", figsize=(10,7), c="median_house_value", cmap=plt.get_cmap("jet"), colorbar=True)
# Open Anaconda Prompt
# Activate the desired environment
pip install package-name
from sklearn.model_selection import StratifiedShuffleSplit
sss=StratifiedShuffleSplit(n_splits=1,test_size=0.25,random_state=0)

for train_index, test_index in sss.split(df, df["km_driven_cat"]):
    train_set = df.iloc[train_index]
    test_set = df.iloc[test_index]
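
A quick sanity check of the stratification, assuming the df, train_set and test_set from above: the km_driven_cat proportions should come out nearly identical across the full set and both splits.

for name, frame in [("full", df), ("train", train_set), ("test", test_set)]:
    print(name, frame["km_driven_cat"].value_counts(normalize=True).sort_index().values)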
import torch
import torch.nn.functional as F

class LabelSmoothingLoss(torch.nn.Module):
    def __init__(self, smoothing: float = 0.1, 
                 reduction="mean", weight=None):
        super(LabelSmoothingLoss, self).__init__()
        self.smoothing   = smoothing
        self.reduction = reduction
        self.weight    = weight

    def reduce_loss(self, loss):
        return loss.mean() if self.reduction == 'mean' else loss.sum() \
         if self.reduction == 'sum' else loss

    def linear_combination(self, x, y):
        return self.smoothing * x + (1 - self.smoothing) * y

    def forward(self, preds, target):
        assert 0 <= self.smoothing < 1

        if self.weight is not None:
            self.weight = self.weight.to(preds.device)

        n = preds.size(-1)
        log_preds = F.log_softmax(preds, dim=-1)
        loss = self.reduce_loss(-log_preds.sum(dim=-1))
        nll = F.nll_loss(
            log_preds, target, reduction=self.reduction, weight=self.weight
        )
        return self.linear_combination(loss / n, nll)
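
A hypothetical smoke test for LabelSmoothingLoss (random logits and integer class targets), assuming the imports added above:

criterion = LabelSmoothingLoss(smoothing=0.1)
preds = torch.randn(4, 10)           # batch of 4 samples, 10 classes
target = torch.randint(0, 10, (4,))  # integer class labels
print(criterion(preds, target))      # scalar tensor with reduction="mean"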
job = {
	'title': title,
	'company': company,
	'salary': salary,
	'summary': summary
}

joblist.append(job)

print(*joblist, sep='\n')
import tkinter as tk
import numpy as np


# COPY HERE SHADOW CLASS


def test_click():
    button1.configure(text='You have clicked me!')


if __name__ == '__main__':
    root = tk.Tk()
    
    # Create dummy buttons
    button1 = tk.Button(root, text="Click me!", width=20, height=2, command=test_click)
    button1.grid(row=0, column=0, padx=50, pady=20)
    button2 = tk.Button(root, text="Hover me!", width=20, height=2)
    button2.bind('<Enter>', lambda e: button2.configure(text='You have hovered me!'))
    button2.grid(row=1, column=0, padx=50, pady=20)
    
    # Add shadow
    Shadow(button1, color='#ff0000', size=1.3, offset_x=-5, onclick={'color':'#00ff00'})
    Shadow(button2, size=10, offset_x=10, offset_y=10, onhover={'size':5, 'offset_x':5, 'offset_y':5})
    
    root.mainloop()
import os
from dotenv import load_dotenv

# Load .env variables
load_dotenv()

DATABASES = {
    'default': {
        'ENGINE': os.environ['ENGINE'],
        'NAME': os.environ['DB'],
        'USER': os.environ['NAME'],
        'PASSWORD': os.environ['PASSWORD'],
        'HOST': os.environ['HOST'],
        'PORT': os.environ['PORT'],
    }
}
DATABASES = {                                                                      
    'default': {                                                                   
        'ENGINE': 'django.db.backends.postgresql',                                 
        'NAME': 'database',                                                            
        'USER': 'user',                                                           
        'PASSWORD': 'password',                                                       
        'HOST': 'host',                                                       
        'PORT': '5432',                                                            
    }                                                                              
} 
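
For reference, a matching .env for the environment-variable version above might look like this (all values are placeholders; note that in the settings dict the DB variable maps to NAME and the NAME variable maps to USER):

ENGINE=django.db.backends.postgresql
DB=database
NAME=user
PASSWORD=password
HOST=localhost
PORT=5432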
import os,json

path_to_json = 'corona/'

for file_name in [file for file in os.listdir(path_to_json) if file.endswith('.json')]:
  with open(path_to_json + file_name) as json_file:
    data = json.load(json_file)
    print(data)
# Import pandas package 
import pandas as pd
  
# create a dictionary with five fields each
data = {
    'A':['A1', 'A2', 'A3', 'A4', 'A5'], 
    'B':['B1', 'B2', 'B3', 'B4', 'B5'], 
    'C':['C1', 'C2', 'C3', 'C4', 'C5'], 
    'D':['D1', 'D2', 'D3', 'D4', 'D5'], 
    'E':['E1', 'E2', 'E3', 'E4', 'E5'] }
  
# Convert the dictionary into DataFrame 
df = pd.DataFrame(data)
  
# Remove the two columns named 'C' and 'D'; drop returns a new DataFrame,
# so assign the result (or pass inplace=True)
df = df.drop(['C', 'D'], axis = 1)
  
# df.drop(columns =['C', 'D'])
cleanedList = [x for x in countries if str(x) != 'nan']

# another way
# x is the list; NaN != NaN, so v == v is False only for NaN values
[v for v in x if v == v]
import itertools

a = [1, 2, 3]
n = 3

perm_iterator = itertools.permutations(a, n)

for item in perm_iterator:
    print(item)
pd.merge(product,customer,on='Product_ID',how='outer')
from collections import Counter
from tkinter import *



GRID_DICT = { 
    1: {"r": 2, "c": 0},
    2: {"r": 2, "c": 1},
    3: {"r": 2, "c": 2},
    4: {"r": 3, "c": 0},
    5: {"r": 3, "c": 1},
    6: {"r": 3, "c": 2},
    7: {"r": 4, "c": 0},
    8: {"r": 4, "c": 1},
    9: {"r": 4, "c": 2},
    0: {"r": 5, "c": 0},
    "+": {"r": 2, "c": 3},
    "-": {"r": 3, "c": 3},
    "*": {"r": 4, "c": 3},
    "/": {"r": 5, "c": 3},
    "=": {"r": 5, "c": 2},
    "c": {"r": 5, "c": 1},
    ".": {"r": 6, "c": 0},
}


expression = ""

def press(num):
    global expression
    expression = expression + str(num)
    equation.set(expression)

def equalpress():
    try:
        global expression
        total = str(eval(expression))
        equation.set(total)
        expression = ""        
    except Exception:
        equation.set("error")
        expression = ""
        
def clear():
        global expression
        expression = ""
        equation.set("")
        
if __name__ == '__main__':
        gui = Tk()
        
        gui.configure(background="dark blue")
        gui.title("Simple calculator")
        gui.geometry("270x150")
        equation = StringVar()
        expression_field = Entry(gui, textvariable=equation)
        expression_field.grid(columnspan=6, ipadx=70)
        
        bn1 = Button(gui, text='1', fg='white', bg='blue', command=lambda: press(1), height=1, width=7)
        bn1.grid(row=GRID_DICT[1]["r"], column=GRID_DICT[1]["c"])  
        
        bn2 = Button(gui, text='2', fg='white', bg='blue', command=lambda: press(2), height=1, width=7)
        bn2.grid(row=GRID_DICT[2]["r"], column=GRID_DICT[2]["c"])
        
        bn3 = Button(gui, text='3', fg='white', bg='blue', command=lambda: press(3), height=1, width=7)
        bn3.grid(row=GRID_DICT[3]["r"], column=GRID_DICT[3]["c"])
        
        bn4 = Button(gui, text='4', fg='white', bg='blue', command=lambda: press(4), height=1, width=7)
        bn4.grid(row=GRID_DICT[4]["r"], column=GRID_DICT[4]["c"])
        
        bn5 = Button(gui, text='5', fg='white', bg='blue', command=lambda: press(5), height=1, width=7)
        bn5.grid(row=GRID_DICT[5]["r"], column=GRID_DICT[5]["c"])
        
        bn6 = Button(gui, text='6', fg='white', bg='blue', command=lambda: press(6), height=1, width=7)
        bn6.grid(row=GRID_DICT[6]["r"], column=GRID_DICT[6]["c"])
        
        bn7 = Button(gui, text='7', fg='white', bg='blue', command=lambda: press(7), height=1, width=7)
        bn7.grid(row=GRID_DICT[7]["r"], column=GRID_DICT[7]["c"])
        
        bn8 = Button(gui, text='8', fg='white', bg='blue', command=lambda: press(8), height=1, width=7)
        bn8.grid(row=GRID_DICT[8]["r"], column=GRID_DICT[8]["c"])
        
        bn9 = Button(gui, text='9', fg='white', bg='blue', command=lambda: press(9), height=1, width=7)
        bn9.grid(row=GRID_DICT[9]["r"], column=GRID_DICT[9]["c"])
        
        bn0 = Button(gui, text='0', fg='white', bg='blue', command=lambda: press(0), height=1, width=7)
        bn0.grid(row=GRID_DICT[0]["r"], column=GRID_DICT[0]["c"])
        
        plus = Button(gui, text='+', fg='white', bg='blue', command=lambda: press("+"), height=1, width=7)
        plus.grid(row=GRID_DICT["+"]["r"], column=GRID_DICT["+"]["c"])

        minus = Button(gui, text='-', fg='white', bg='blue', command=lambda: press("-"), height=1, width=7)
        minus.grid(row=GRID_DICT["-"]["r"], column=GRID_DICT["-"]["c"])        
        
        multiply = Button(gui, text='*', fg='white', bg='blue', command=lambda: press("*"), height=1, width=7)
        multiply.grid(row=GRID_DICT["*"]["r"], column=GRID_DICT["*"]["c"])

        divide = Button(gui, text='/', fg='white', bg='blue', command=lambda: press("/"), height=1, width=7)
        divide.grid(row=GRID_DICT["/"]["r"], column=GRID_DICT["/"]["c"]) 
        
        equals = Button(gui, text='=', fg='white', bg='blue', command=equalpress, height=1, width=7)
        equals.grid(row=GRID_DICT["="]["r"], column=GRID_DICT["="]["c"])
        
        clear_btn = Button(gui, text='C', fg='white', bg='blue', command=clear, height=1, width=7)  # renamed so the button does not shadow the clear() function
        clear_btn.grid(row=GRID_DICT["c"]["r"], column=GRID_DICT["c"]["c"])
        
        dec = Button(gui, text='.', fg='white', bg='blue', command=lambda: press("."), height=1, width=7)
        dec.grid(row=GRID_DICT["."]["r"], column=GRID_DICT["."]["c"])
        
        gui.mainloop()
import numpy as np
import pandas as pd
from functools import partial
from sklearn import ensemble
from sklearn import metrics
from sklearn import model_selection
from skopt import gp_minimize
from skopt import space


def optimize(params, param_names, x, y):
  """
  The main optimization function.
  This function takes all the arguments from the search space
  and training features and targets. It then initializes
  the models by setting the chosen parameters and runs
  cross-validation and returns a negative accuracy score
  :param params: list of params from gp_minimize
  :param param_names: list of param names. order is important!
  :param x: training data
  :param y: labels/targets
  :return: negative accuracy after 5 folds
  """
  # convert params to dictionary
  params = dict(zip(param_names, params))
  # initialize model with current parameters
  model = ensemble.RandomForestClassifier(**params)
  # initialize stratified k-fold
  kf = model_selection.StratifiedKFold(n_splits=5)
  # initialize accuracy list
  accuracies = []
  # loop over all folds
  for idx in kf.split(X=x, y=y):
    train_idx, test_idx = idx[0], idx[1]
    xtrain = x[train_idx]
    ytrain = y[train_idx]
    xtest = x[test_idx]
    ytest = y[test_idx]
    # fit model for current fold
    model.fit(xtrain, ytrain)
    #create predictions
    preds = model.predict(xtest)
    # calculate and append accuracy
    fold_accuracy = metrics.accuracy_score(
    ytest,
    preds
    )
    accuracies.append(fold_accuracy)
  # return negative accuracy
  return -1 * np.mean(accuracies)

if __name__ == "__main__":
  
  # read the training data
  df = pd.read_csv("../input/mobile_train.csv")
  # features are all columns without price_range
  # note that there is no id column in this dataset
  # here we have training features
  X = df.drop("price_range", axis=1).values
  # and the targets
  y = df.price_range.values
  # define a parameter space
  param_space = [
  # max_depth is an integer between 3 and 15
  space.Integer(3, 15, name="max_depth"),
  # n_estimators is an integer between 100 and 1500
  space.Integer(100, 1500, name="n_estimators"),
  # criterion is a category. here we define list of categories
  space.Categorical(["gini", "entropy"], name="criterion"),
  # you can also have Real numbered space and define a
  # distribution you want to pick it from
  space.Real(0.01, 1, prior="uniform", name="max_features")
  ]
  # make a list of param names
  # this has to be same order as the search space
  # inside the main function
  param_names = [
  "max_depth",
  "n_estimators",
  "criterion",
  "max_features"
  ]
  # by using functools partial, i am creating a
  # new function which has same parameters as the
  # optimize function except for the fact that
  # only one param, i.e. the "params" parameter is
  # required. this is how gp_minimize expects the
  # optimization function to be. you can get rid of this
  # by reading data inside the optimize function or by
  # defining the optimize function here.
  optimization_function = partial(
  optimize,
  param_names=param_names,
  x=X,
  y=y
  )

  # now we call gp_minimize from scikit-optimize
  # gp_minimize uses bayesian optimization for
  # minimization of the optimization function.
  # we need a space of parameters, the function itself,
  # the number of calls/iterations we want to have
  result = gp_minimize(
  optimization_function,
  dimensions=param_space,
  n_calls=15,
  n_random_starts=10,
  verbose=10
  )
  # create best params dict and print it
  best_params = dict(
  zip(
  param_names,
  result.x
  )
  )
  print(best_params)
# Python - Generate a 10-char, alphanumeric base36 aims id:

# Server ID (1)
# Year digit (1)
# Month digits (2)
# Date digits (2)
# Hour digits (2)
# Minute digits (2)
# Second digits (2)
# Millisecond digits (3)


from datetime import datetime
import re

# Get current date-time, removing all non numeric chars
current = re.sub(r'\D','',str(datetime.now()))

# remove 3 digits from either end and convert to int
current = int(current[3:17])


def convert_to_base36(int_str):
    # convert current integer 'string' to base36
    base36_alphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    server_id = Watch.GetVariable("global.AIMS_Server_ID")  # Watch is an environment-specific API that supplies the server id

    b36_str = ""

    while int_str != 0:
        int_str, i = divmod(int_str, len(base36_alphabet))
        b36_str = base36_alphabet[i] + b36_str

    # ensure 9 characters in str
    b36_str = ("000000000" + b36_str)[-9:]

    return server_id + b36_str

# min & max values for current
min_test = int("00101000000000")
max_test = 91231235959999

aims_id = convert_to_base36(current)
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt

datagen = keras.preprocessing.image.ImageDataGenerator(
    rescale=1./255,
    rotation_range=180,
    width_shift_range=0.2,
    height_shift_range=0.2,
)

dir_It = datagen.flow_from_directory(
    "data/",
    batch_size=1,
    save_to_dir="output/",
    save_prefix="",
    save_format='png',
)

for _ in range(5):
    img, label = next(dir_It)  # the built-in next() works across Keras versions
    print(img.shape)   #  (1,256,256,3)
    plt.imshow(img[0])
    plt.show()
# Positive & Negative Correlation:
import numpy as np
import seaborn as sn
import matplotlib.pyplot as plt

dataset2 = dataset[['SeniorCitizen', 'Partner', 'Dependents',
       'tenure', 'PhoneService', 'PaperlessBilling',
        'MonthlyCharges', 'TotalCharges']]

correlations = dataset2.corrwith(dataset.Churn)
correlations = correlations[correlations!=1]

positive_correlations = correlations[correlations >0].sort_values(ascending = False)
negative_correlations =correlations[correlations<0].sort_values(ascending = False)

print('Most Positive Correlations: \n', positive_correlations)
print('\nMost Negative Correlations: \n', negative_correlations)


# Correlation Plot among Independent Variables:
#Set and compute the Correlation Matrix:
sn.set(style="white")
corr = dataset2.corr()

#Generate a mask for the upper triangle:
mask = np.zeros_like(corr, dtype=bool)  # np.bool was removed in NumPy 1.24
mask[np.triu_indices_from(mask)] = True

#Set up the matplotlib figure and a diverging colormap:
f, ax = plt.subplots(figsize=(18, 15))
cmap = sn.diverging_palette(220, 10, as_cmap=True)

#Draw the heatmap with the mask and correct aspect ratio:
sn.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
git clone git@github.com:runekaagaard/django-hypergen.git
cd django-hypergen/
virtualenv -p python3.9 venv
source venv/bin/activate
pip install -r requirements.txt
pip install -r examples/requirements.txt
cd examples
python manage.py migrate
python manage.py runserver
import math
from typing import Any, List

import numpy as np
import pandas as pd

def nully(obj: Any) -> bool:
    """Checks for all kinds of NaN, NaT and None
    Args:
        obj (Any): Object to check
    Returns:
        bool: True if obj is nully
    """
    conditions: List[bool] = [
      obj is None, # python None
      obj is np.nan, # numpy NaN
      obj is pd.NA, # pandas NA (pd.nan does not exist)
      obj is pd.NaT # pandas NaT
    ]
    try:
        conditions.append(math.isnan(obj)) # python NaN
    except TypeError: # not a number type
        pass
    if any(conditions):
        return True
    return False
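
A few spot checks for nully, one per branch:

print(nully(None))          # True  - Python None
print(nully(float("nan")))  # True  - caught by math.isnan
print(nully(pd.NaT))        # True  - pandas NaT
print(nully(0))             # False - an ordinary number
print(nully("text"))        # False - math.isnan raises TypeError, which is swallowed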
in main:

import logging
import time

def setup_logging(console, logfile):
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(message)s')
    if console:
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
    if logfile:
        filename = '{}.log'.format(time.strftime('%Y%m%d%H%M%S'))
        file_handler = logging.FileHandler(filename)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    if not logfile and not console:
        logger.addHandler(logging.NullHandler())

    return logger


in module:

...
self._logger = logging.getLogger(__name__)
...
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-c', '--com-port', metavar='N', type=int, required=True, help='HART modem com port')
parser.add_argument('-s', '--slave-address', metavar='N', type=int, default=0, help='HART slave address')
parser.add_argument('-l', '--log', action='store_true', help='log to file')
parser.add_argument('-v', '--verbose', action='store_true', help='print values to console')
args = parser.parse_args()

logger = setup_logging(args.verbose, args.log)
class Dsu:
    
    def __init__(self, n):
        self.parent = list(range(n))
        self.rank = [1] * n
        
    def find(self, x):
        if self.parent[x] != x:
            self.parent[x] = self.find(self.parent[x])
        return self.parent[x]
    
    def union(self, x, y):
        x = self.find(x)
        y = self.find(y)
        
        if x == y:
            return
        
        if self.rank[x] > self.rank[y]:
            self.parent[y] = x
        elif self.rank[x] < self.rank[y]:
            self.parent[x] = y
        else:
            self.parent[y] = x
            self.rank[x] += 1
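
A short usage sketch for the Dsu class above:

dsu = Dsu(5)
dsu.union(0, 1)
dsu.union(1, 2)
print(dsu.find(0) == dsu.find(2))  # True: 0, 1 and 2 were merged into one set
print(dsu.find(0) == dsu.find(3))  # False: 3 was never unioned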
class Node:
    def __init__(self, data):
        self.data = data
        self.next = None


class CircularLinkedList:
    def __init__(self):
        self.last = None

    def addToEmpty(self, data):

        if self.last != None:
            return self.last

        newNode = Node(data)

        self.last = newNode
        self.last.next = self.last
        return self.last
    def addFront(self, data):

        if self.last == None:
            return self.addToEmpty(data)

        newNode = Node(data)

        newNode.next = self.last.next

        self.last.next = newNode

        return self.last
    def addEnd(self, data):
        if self.last == None:
            return self.addToEmpty(data)
        newNode = Node(data)
        newNode.next = self.last.next
        self.last.next = newNode

        self.last = newNode

        return self.last

    def addAfter(self, data, item):

        if self.last == None:
            return None

        newNode = Node(data)
        p = self.last.next
        while p:

            if p.data == item:
                newNode.next = p.next
                p.next = newNode

                if p == self.last:
                    self.last = newNode
                    return self.last
                else:
                    return self.last
            p = p.next
            if p == self.last.next:
                print(item, "The given node is not present in the list")
                break

    def deleteNode(self, last, key):
        if last == None:
            return
"""
My numpydoc description of a kind
of very exhautive numpydoc format docstring.

Parameters
----------
first : array_like
    the 1st param name `first`
second :
    the 2nd param
third : {'value', 'other'}, optional
    the 3rd param, by default 'value'

Returns
-------
string
    a value in a string

Raises
------
KeyError
    when a key error
OtherError
    when an other error
"""
"""
This is an example of Google style.

Args:
    param1: This is the first param.
    param2: This is a second param.

Returns:
    This is a description of what is returned.

Raises:
    KeyError: Raises an exception.
"""
import pandas as pd
data = pd.read_csv('../input/udemy-courses/udemy_courses.csv')

x = pd.Series([int(i[:4]) for i in data.published_timestamp])
xx = pd.DataFrame(x,columns=['published_year'])
z = pd.Series([int(i[5:7]) for i in data.published_timestamp])
zz = pd.DataFrame(z,columns=['published_month'])  # chars 5:7 of an ISO timestamp are the month, not the day

data = pd.concat([data,xx,zz],axis=1)
data
def numToEng(n):
  m =list(str(n))
  a = { '0':'zero','1':'one','2':'two','3':'three','4':'four','5':'five','6':'six','7':'seven','8':'eight','9':'nine'}
  b = {'10':'ten','11':'eleven','12': 'twelve','13': 'thirteen','14': 'fourteen','15': 'fifteen' ,'16':'sixteen' ,'17':'seventeen', '18':'eighteen','19': 'nineteen'}
  c = { '2': 'twenty','3': 'thirty', '4': 'forty' ,'5': 'fifty' ,'6':'sixty' ,'7': 'seventy' ,'8':'eighty','9': 'ninety'}
  
  def anum(m):
    return a.get(''.join(m))
  def bnum(m):
    if n<20 or int(''.join(m))<20:
      return b.get(''.join(m))
    else:
      if m[1] == '0':
        return c.get(m[0])
      else:
        return c.get(m[0])+'-'+anum(m[1])
  def cnum(m):
    rr = int(''.join(m))
    if rr in [100,200,300,400,500,600,700,800,900]:
      return a.get(m[0])+' hundred'
    elif m[1]=='0':
      return str(a.get(m[0])+' hundred '+a.get(m[2]))
    else:
      return str(a.get(m[0])+' hundred '+ bnum(m[1:3]))
  if len(m)==1:
    return anum(m)
  elif len(m)==2:
    return bnum(m)
  elif len(m)==3:
    return cnum(m)
from pandas_profiling import ProfileReport
import pandas as pd

test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')

profile = ProfileReport(train, title="train Pandas Profiling Report")
profile.to_file("train_report.html")
profile = ProfileReport(test, title="test Pandas Profiling Report")
profile.to_file("test_report.html")
>>> def z(n, c):
...     if n == 0:
...         return 0
...     else:
...         return z(n - 1, c) ** 2 + c
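
A quick check at the prompt (with c = 1 this is the Mandelbrot-style iteration z_n = z_{n-1}**2 + c starting from z_0 = 0):
>>> z(3, 1)
5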
class MyClass:
    def __init__(self, name, age):
        self.name = name
        self.age = age

    def __eq__(self, other):
        return self.name == other.name and self.age == other.age

    def __repr__(self):
        return f"MyClass({self.name}, {self.age})"

    def __hash__(self):
        return hash(self.__repr__())


a = MyClass('taha', 30)
b = MyClass('taha', 30)

c = {a, b}
print(c)
import os
from pathlib import Path

def create_dir(path, sub_dirs, label_dirs):
    for sub_dir in sub_dirs: 
        for label_dir in label_dirs:
            new_dir = os.path.join(path, sub_dir, label_dir)
            Path(new_dir).mkdir(parents=True, exist_ok=True)
            print(new_dir)
    
    print('All directories created successfully!')

FOLDER_PATH = 'dataset/'
SUB_DIRS = ['train/', 'test/']
LABEL_DIR = ['dogs/', 'cats/']

create_dir(FOLDER_PATH, SUB_DIRS, LABEL_DIR)
Python 3.4.1 (default, May 21 2014, 12:39:51) 
[GCC 4.2.1 Compatible Apple LLVM 5.0 (clang-500.2.79)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> import pickle
>>> with open('parrot.pkl', 'rb') as f:
...   mynewlist = pickle.load(f)
... 
>>> mynewlist
['I wish to complain about this parrot what I purchased not half an hour ago from this very boutique.', "Oh yes, the, uh, the Norwegian Blue...What's,uh...What's wrong with it?", "I'll tell you what's wrong with it, my lad. 'E's dead, that's what's wrong with it!", "No, no, 'e's uh,...he's resting."]
>>>
from tensorflow.keras.preprocessing import image

train_datagen = image.ImageDataGenerator(
    rescale = 1./255,  # normalize pixel values from the 0-255 range to 0-1
    shear_range = 0.2,
    zoom_range = 0.2,
    horizontal_flip = True
)


# only rescaling done on test dataset
test_datagen = image.ImageDataGenerator(
    rescale = 1./255
)

train_generator = train_datagen.flow_from_directory(
    directory=TRAIN_PATH,
    target_size=(224,224),
    batch_size=32,
    class_mode='binary',
    save_to_dir = SAVE_TRAIN_PATH,
    save_prefix='',
    save_format='png'
)

validation_generator = test_datagen.flow_from_directory(
    VAL_PATH,
    target_size = (224,224),
    batch_size = 32,
    class_mode = 'binary'
)

# check the class index mapping assigned by flow_from_directory
train_generator.class_indices
validation_generator.class_indices

# generate augmented images and save into the directory
for i in range(5):
  next(train_generator)  # the built-in next() works across Keras versions
  
  
import pathlib
from pathlib import Path

def create_folder(path):
  if Path(path).is_dir():
    print ("Folder already exists!")
  else:
    pathlib.Path(path).mkdir(parents=True, exist_ok=True) 
    print ("Folder created!")    


FOLDER_PATH = '/content/drive/MyDrive/detect-covid19-xray/data-preprocessed'
create_folder(FOLDER_PATH)

def int2base(num, base, abc="0123456789abcdefghijklmnopqrstuvwxyz"):
  if num < 0:
    return '-' + int2base(-num, base, abc)

  else:
    output = abc[num % base] # rightmost digit

    while num >= base:
      num //= base # move to next digit to the left
      output = abc[num % base] + output # this digit

    return output
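
Spot checks for int2base:

print(int2base(255, 16))  # 'ff'
print(int2base(-10, 2))   # '-1010'
print(int2base(35, 36))   # 'z'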
from hcsr04 import HCSR04
from time import sleep

sensor = HCSR04(trigger_pin=2, echo_pin=3, echo_timeout_us=10000)

while True:
    distance = sensor.distance_cm()
    print('Distance:', distance, 'cm')
    sleep(1)
import machine, time
from machine import Pin

__version__ = '0.2.0'
__author__ = 'Roberto Sánchez'
__license__ = "Apache License 2.0. https://www.apache.org/licenses/LICENSE-2.0"

class HCSR04:
    """
    Driver to use the untrasonic sensor HC-SR04.
    The sensor range is between 2cm and 4m.
    The timeouts received listening to echo pin are converted to OSError('Out of range')
    """
    # echo_timeout_us is based in chip range limit (400cm)
    def __init__(self, trigger_pin, echo_pin, echo_timeout_us=500*2*30):
        """
        trigger_pin: Output pin to send pulses
        echo_pin: Readonly pin to measure the distance. The pin should be protected with 1k resistor
        echo_timeout_us: Timeout in microseconds to listen to echo pin. 
        By default is based in sensor limit range (4m)
        """
        self.echo_timeout_us = echo_timeout_us
        # Init trigger pin (out)
        self.trigger = Pin(trigger_pin, mode=Pin.OUT, pull=None)
        self.trigger.value(0)

        # Init echo pin (in)
        self.echo = Pin(echo_pin, mode=Pin.IN, pull=None)

    def _send_pulse_and_wait(self):
        """
        Send the pulse to trigger and listen on echo pin.
        We use the method `machine.time_pulse_us()` to get the microseconds until the echo is received.
        """
        self.trigger.value(0) # Stabilize the sensor
        time.sleep_us(5)
        self.trigger.value(1)
        # Send a 10us pulse.
        time.sleep_us(10)
        self.trigger.value(0)
        try:
            pulse_time = machine.time_pulse_us(self.echo, 1, self.echo_timeout_us)
            return pulse_time
        except OSError as ex:
            if ex.args[0] == 110: # 110 = ETIMEDOUT
                raise OSError('Out of range')
            raise ex

    def distance_mm(self):
        """
        Get the distance in milimeters without floating point operations.
        """
        pulse_time = self._send_pulse_and_wait()

        # To calculate the distance we get the pulse_time and divide it by 2 
        # (the pulse walk the distance twice) and by 29.1 becasue
        # the sound speed on air (343.2 m/s), that It's equivalent to
        # 0.34320 mm/us that is 1mm each 2.91us
        # pulse_time // 2 // 2.91 -> pulse_time // 5.82 -> pulse_time * 100 // 582 
        mm = pulse_time * 100 // 582
        return mm

    def distance_cm(self):
        """
        Get the distance in centimeters with floating point operations.
        It returns a float
        """
        pulse_time = self._send_pulse_and_wait()

        # To calculate the distance we take pulse_time and divide it by 2
        # (the pulse travels the distance twice) and by 29.1, because
        # the speed of sound in air (343.2 m/s) is equivalent to
        # 0.034320 cm/us, i.e. 1 cm every 29.1 us.
        cms = (pulse_time / 2) / 29.1
        return cms
from machine import Pin
from mfrc522 import MFRC522
import utime
       
reader = MFRC522(spi_id=0,sck=2,miso=4,mosi=3,cs=1,rst=0)

red = Pin(13, Pin.OUT)
green = Pin(12, Pin.OUT)
blue = Pin(11, Pin.OUT)

print("Bring RFID TAG Closer...")
print("")


while True:
    reader.init()
    (stat, tag_type) = reader.request(reader.REQIDL)
    if stat == reader.OK:
        (stat, uid) = reader.SelectTagSN()
        if stat == reader.OK:
            card = int.from_bytes(bytes(uid),"little",False)
            
            if card == 612121286:
                print("Card ID: "+ str(card)+" PASS: Green Light Activated")
                red.value(0)
                green.value(1)
                blue.value(0)
                
                
            elif card == 1002696326:
                print("Card ID: "+ str(card)+" PASS: Blue Light Activated")
                red.value(0)
                green.value(0)
                blue.value(1)
                
            else:
                print("Card ID: "+ str(card)+" UNKNOWN CARD! Red Light Activated")
                red.value(1)
                green.value(0)
                blue.value(0)           
from mfrc522 import MFRC522
import utime

reader = MFRC522(spi_id=0,sck=2,miso=4,mosi=3,cs=1,rst=0)

print("Bring TAG closer...")
print("")


while True:
    reader.init()
    (stat, tag_type) = reader.request(reader.REQIDL)
    if stat == reader.OK:
        (stat, uid) = reader.SelectTagSN()
        if stat == reader.OK:
            card = int.from_bytes(bytes(uid),"little",False)
            print("CARD ID: "+str(card))
    utime.sleep_ms(500)  # inside the loop; at module level this line was unreachable
from machine import Pin, SPI
from os import uname


class MFRC522:

    DEBUG = False
    OK = 0
    NOTAGERR = 1
    ERR = 2

    REQIDL = 0x26
    REQALL = 0x52
    AUTHENT1A = 0x60
    AUTHENT1B = 0x61
  
    PICC_ANTICOLL1 = 0x93
    PICC_ANTICOLL2 = 0x95
    PICC_ANTICOLL3 = 0x97
  

    def __init__(self, sck, mosi, miso, rst, cs,baudrate=1000000,spi_id=0):

        self.sck = Pin(sck, Pin.OUT)
        self.mosi = Pin(mosi, Pin.OUT)
        self.miso = Pin(miso)
        self.rst = Pin(rst, Pin.OUT)
        self.cs = Pin(cs, Pin.OUT)

        self.rst.value(0)
        self.cs.value(1)
        
        board = uname()[0]

        if board == 'WiPy' or board == 'LoPy' or board == 'FiPy':
            self.spi = SPI(0)
            self.spi.init(SPI.MASTER, baudrate=1000000, pins=(self.sck, self.mosi, self.miso))
        elif (board == 'esp8266') or (board == 'esp32'):
            self.spi = SPI(baudrate=100000, polarity=0, phase=0, sck=self.sck, mosi=self.mosi, miso=self.miso)
            self.spi.init()
        elif board == 'rp2':
            self.spi = SPI(spi_id,baudrate=baudrate,sck=self.sck, mosi= self.mosi, miso= self.miso)
        else:
            raise RuntimeError("Unsupported platform")

        self.rst.value(1)
        self.init()

    def _wreg(self, reg, val):

        self.cs.value(0)
        self.spi.write(b'%c' % int(0xff & ((reg << 1) & 0x7e)))
        self.spi.write(b'%c' % int(0xff & val))
        self.cs.value(1)

    def _rreg(self, reg):

        self.cs.value(0)
        self.spi.write(b'%c' % int(0xff & (((reg << 1) & 0x7e) | 0x80)))
        val = self.spi.read(1)
        self.cs.value(1)

        return val[0]

    def _sflags(self, reg, mask):
        self._wreg(reg, self._rreg(reg) | mask)

    def _cflags(self, reg, mask):
        self._wreg(reg, self._rreg(reg) & (~mask))

    def _tocard(self, cmd, send):

        recv = []
        bits = irq_en = wait_irq = n = 0
        stat = self.ERR

        if cmd == 0x0E:
            irq_en = 0x12
            wait_irq = 0x10
        elif cmd == 0x0C:
            irq_en = 0x77
            wait_irq = 0x30

        self._wreg(0x02, irq_en | 0x80)
        self._cflags(0x04, 0x80)
        self._sflags(0x0A, 0x80)
        self._wreg(0x01, 0x00)

        for c in send:
            self._wreg(0x09, c)
        self._wreg(0x01, cmd)

        if cmd == 0x0C:
            self._sflags(0x0D, 0x80)

        i = 2000
        while True:
            n = self._rreg(0x04)
            i -= 1
            if ~((i != 0) and ~(n & 0x01) and ~(n & wait_irq)):
                break

        self._cflags(0x0D, 0x80)

        if i:
            if (self._rreg(0x06) & 0x1B) == 0x00:
                stat = self.OK

                if n & irq_en & 0x01:
                    stat = self.NOTAGERR
                elif cmd == 0x0C:
                    n = self._rreg(0x0A)
                    lbits = self._rreg(0x0C) & 0x07
                    if lbits != 0:
                        bits = (n - 1) * 8 + lbits
                    else:
                        bits = n * 8

                    if n == 0:
                        n = 1
                    elif n > 16:
                        n = 16

                    for _ in range(n):
                        recv.append(self._rreg(0x09))
            else:
                stat = self.ERR

        return stat, recv, bits

    def _crc(self, data):

        self._cflags(0x05, 0x04)
        self._sflags(0x0A, 0x80)

        for c in data:
            self._wreg(0x09, c)

        self._wreg(0x01, 0x03)

        i = 0xFF
        while True:
            n = self._rreg(0x05)
            i -= 1
            if not ((i != 0) and not (n & 0x04)):
                break

        return [self._rreg(0x22), self._rreg(0x21)]

    def init(self):

        self.reset()
        self._wreg(0x2A, 0x8D)
        self._wreg(0x2B, 0x3E)
        self._wreg(0x2D, 30)
        self._wreg(0x2C, 0)
        self._wreg(0x15, 0x40)
        self._wreg(0x11, 0x3D)
        self.antenna_on()

    def reset(self):
        self._wreg(0x01, 0x0F)

    def antenna_on(self, on=True):

        if on and ~(self._rreg(0x14) & 0x03):
            self._sflags(0x14, 0x03)
        else:
            self._cflags(0x14, 0x03)

    def request(self, mode):

        self._wreg(0x0D, 0x07)
        (stat, recv, bits) = self._tocard(0x0C, [mode])

        if (stat != self.OK) | (bits != 0x10):
            stat = self.ERR

        return stat, bits
  
    def anticoll(self,anticolN):

        ser_chk = 0
        ser = [anticolN, 0x20]

        self._wreg(0x0D, 0x00)
        (stat, recv, bits) = self._tocard(0x0C, ser)

        if stat == self.OK:
            if len(recv) == 5:
                for i in range(4):
                    ser_chk = ser_chk ^ recv[i]
                if ser_chk != recv[4]:
                    stat = self.ERR
            else:
                stat = self.ERR

        return stat, recv

    
    def PcdSelect(self, serNum,anticolN):
        backData = []
        buf = []
        buf.append(anticolN)
        buf.append(0x70)
        for i in serNum:
            buf.append(i)
        pOut = self._crc(buf)
        buf.append(pOut[0])
        buf.append(pOut[1])
        (status, backData, backLen) = self._tocard( 0x0C, buf)
        if (status == self.OK) and (backLen == 0x18):
            return  1
        else:
            return 0
    
    
    def SelectTag(self, uid):
        byte5 = 0
        
        #(status,puid)= self.anticoll(self.PICC_ANTICOLL1)
        #print("uid",uid,"puid",puid)
        for i in uid:
            byte5 = byte5 ^ i
        puid = uid + [byte5]
        
        if self.PcdSelect(puid,self.PICC_ANTICOLL1) == 0:
            return (self.ERR,[])
        return (self.OK , uid)
        
    def tohexstring(self,v):
        s="["
        for i in v:
            if i != v[0]:
                s = s+ ", "
            s=s+ "0x{:02X}".format(i)
        s= s+ "]"
        return s

    def SelectTagSN(self):
        valid_uid=[]
        (status,uid)= self.anticoll(self.PICC_ANTICOLL1)
        #print("Select Tag 1:",self.tohexstring(uid))
        if status != self.OK:
            return  (self.ERR,[])
        
        if self.DEBUG:   print("anticol(1) {}".format(uid))
        if self.PcdSelect(uid,self.PICC_ANTICOLL1) == 0:
            return (self.ERR,[])
        if self.DEBUG:   print("pcdSelect(1) {}".format(uid))
        
        #check if first byte is 0x88
        if uid[0] == 0x88 :
            #ok we have another type of card
            valid_uid.extend(uid[1:4])
            (status,uid)=self.anticoll(self.PICC_ANTICOLL2)
            #print("Select Tag 2:",self.tohexstring(uid))
            if status != self.OK:
                return (self.ERR,[])
            if self.DEBUG: print("Anticol(2) {}".format(uid))
            rtn =  self.PcdSelect(uid,self.PICC_ANTICOLL2)
            if self.DEBUG: print("pcdSelect(2) return={} uid={}".format(rtn,uid))
            if rtn == 0:
                return (self.ERR,[])
            if self.DEBUG: print("PcdSelect2() {}".format(uid))
            #now check again if uid[0] is 0x88
            if uid[0] == 0x88 :
                valid_uid.extend(uid[1:4])
                (status , uid) = self.anticoll(self.PICC_ANTICOLL3)
                #print("Select Tag 3:",self.tohexstring(uid))
                if status != self.OK:
                    return (self.ERR,[])
                if self.DEBUG: print("Anticol(3) {}".format(uid))
                if self.PcdSelect(uid,self.PICC_ANTICOLL3) == 0:  # was MFRC522_PcdSelect, an undefined name
                    return (self.ERR,[])
                if self.DEBUG: print("PcdSelect(3) {}".format(uid))
        valid_uid.extend(uid[0:5])
        # if we get here then the uid is ok
        # remove the last byte, which is the XOR checksum
        
        return (self.OK , valid_uid[:len(valid_uid)-1])

    def auth(self, mode, addr, sect, ser):
        return self._tocard(0x0E, [mode, addr] + sect + ser[:4])[0]
    
    def authKeys(self,uid,addr,keyA=None, keyB=None):
        status = self.ERR
        if keyA is not None:
            status = self.auth(self.AUTHENT1A, addr, keyA, uid)
        elif keyB is not None:
            status = self.auth(self.AUTHENT1B, addr, keyB, uid)
        return status
       

    def stop_crypto1(self):
        self._cflags(0x08, 0x08)

    def read(self, addr):

        data = [0x30, addr]
        data += self._crc(data)
        (stat, recv, _) = self._tocard(0x0C, data)
        return stat, recv

    def write(self, addr, data):

        buf = [0xA0, addr]
        buf += self._crc(buf)
        (stat, recv, bits) = self._tocard(0x0C, buf)

        if not (stat == self.OK) or not (bits == 4) or not ((recv[0] & 0x0F) == 0x0A):
            stat = self.ERR
        else:
            buf = []
            for i in range(16):
                buf.append(data[i])
            buf += self._crc(buf)
            (stat, recv, bits) = self._tocard(0x0C, buf)
            if not (stat == self.OK) or not (bits == 4) or not ((recv[0] & 0x0F) == 0x0A):
                stat = self.ERR
        return stat


    def writeSectorBlock(self,uid, sector, block, data, keyA=None, keyB = None):
        absoluteBlock =  sector * 4 + (block % 4)
        if absoluteBlock > 63 :
            return self.ERR
        if len(data) != 16:
            return self.ERR
        if self.authKeys(uid,absoluteBlock,keyA,keyB) != self.ERR :
            return self.write(absoluteBlock, data)
        return self.ERR

    def readSectorBlock(self,uid ,sector, block, keyA=None, keyB = None):
        absoluteBlock =  sector * 4 + (block % 4)
        if absoluteBlock > 63 :
            return self.ERR, None
        if self.authKeys(uid,absoluteBlock,keyA,keyB) != self.ERR :
            return self.read(absoluteBlock)
        return self.ERR, None

    def MFRC522_DumpClassic1K(self,uid, Start=0, End=64, keyA=None, keyB=None):
        for absoluteBlock in range(Start,End):
            status = self.authKeys(uid,absoluteBlock,keyA,keyB)
            # Check if authenticated
            print("{:02d} S{:02d} B{:1d}: ".format(absoluteBlock, absoluteBlock//4 , absoluteBlock % 4),end="")
            if status == self.OK:                    
                status, block = self.read(absoluteBlock)
                if status == self.ERR:
                    break
                else:
                    for value in block:
                        print("{:02X} ".format(value),end="")
                    print("  ",end="")
                    for value in block:
                        if (value > 0x20) and (value < 0x7f):
                            print(chr(value),end="")
                        else:
                            print('.',end="")
                    print("")
            else:
                break
        if status == self.ERR:
            print("Authentication error")
            return self.ERR
        return self.OK
import io

buffer = io.BytesIO()
# ...
with open("test.dat", "wb") as f:
    f.write(buffer.getvalue())
# requires a DatetimeIndex for the '365D' time-based window
df['mean_1year'] = df.groupby('type')['price'].rolling('365D').mean().reset_index(0,drop=True)
import time
from functools import lru_cache


def calculate_time(func):
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        print(end_time - start_time)
        return result

    return wrapper


def fib(n):
    if n == 0 or n == 1:
        return 1

    return fib(n - 1) + fib(n - 2)


@calculate_time
@lru_cache
def call_fib(n):
    return fib(n)


print(call_fib(36))  # slow: fib(36) is computed recursively
print(call_fib(36))  # near-instant: the result comes straight from lru_cache
from sfgdpr import client

BBC = client.BumblebeeClient()  # assuming BumblebeeClient lives in the same client module as KeyLimePIIClient; the bare name was undefined
KPC = client.KeyLimePIIClient()


def set_table_temporary(namespace, table):
    resource = f"{namespace}.{table}"
    print(f"Marking {resource} temporary...")
    perm_result = KPC.set_table_permanence(resource, is_permanent=False)
    status = KPC.get_compliance_status(resource)
    assert status['success']
    assert status['result'][0] == 'compliant'
    
    
def set_namespace_temporary(namespace):
    ns = BBC.get_database(namespace)
    tables = [t["table_name"] for t in ns["tables"]]
    
    for table in tables:
        set_table_temporary(namespace, table)
        
    print("Done! 🎉")

    
def set_tables_temporary(namespace, tables):
    for table in tables:
        set_table_temporary(namespace, table)
        
set_namespace_temporary('mercury_dev_mzhao')
from time import sleep
import threading


def say_hi():
    while True:
        print('hi')
        sleep(1)


def say_hello():
    while True:
        print('Helloooooooooo')
        sleep(2)


threading.Thread(target=say_hi).start()
threading.Thread(target=say_hello).start()
import asyncio


async def say_hi():
    while True:
        print('hi')
        await asyncio.sleep(1)


async def say_hello():
    while True:
        print('Helloooooooooo')
        await asyncio.sleep(2)


async def main():
    # create_task schedules the coroutines on the already-running loop
    task1 = asyncio.create_task(say_hi())
    task2 = asyncio.create_task(say_hello())
    await asyncio.wait([task1, task2])

    # This is also correct:
    # await asyncio.gather(say_hi(), say_hello())


if __name__ == '__main__':
    loop = asyncio.get_event_loop()

    try:
        loop.run_until_complete(main())
    finally:
        loop.close()
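# The same demo on Python 3.7+, where asyncio.run() creates, runs, and closes
# the event loop for you (a minimal sketch using asyncio.gather):
import asyncio


async def tick(message, delay):
    while True:
        print(message)
        await asyncio.sleep(delay)


async def main():
    # gather() schedules both coroutines concurrently on the running loop
    await asyncio.gather(tick('hi', 1), tick('Helloooooooooo', 2))


asyncio.run(main())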
    ____  __           __        __          _     
   / __ )/ /___  _____/ /_______/ /_  ____ _(_)____
  / __  / / __ \/ ___/ //_/ ___/ __ \/ __ `/ / ___/
 / /_/ / / /_/ / /__/ ,< / /__/ / / / /_/ / / /    
/_____/_/\____/\___/_/|_|\___/_/ /_/\__,_/_/_/     
                                                   
"python.formatting.autopep8Args": [
    "--max-line-length=200"
]
class Node:
    def __init__(self, data):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def push(self, new_data):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def insertAfter(self, prev, new_data):

        if prev is None:
            print("The given previous node must in LinkedList")
            return

        new_node = Node(new_data)
        new_node.next = prev.next
        prev.next = new_node

    def append(self, new_data):
        new_node = Node(new_data)
        if self.head is None:
            self.head = new_node
            return
        last = self.go_to_end(self.head)
        last.next = new_node

    def delete(self, key):
        temp = self.head

        if temp is None:
            return
        if temp.data == key:
            self.head = temp.next
            return
        prev, temp = self.search_recursion(key, temp)

        if temp is None:
            return
        prev.next = temp.next

    def go_to_end(self, head):
        # walk to the last node (the one with no successor) and return it
        if head.next:
            return self.go_to_end(head.next)
        return head

    def search_recursion(self, key, node):
        # look one node ahead so the caller gets back (predecessor, match)
        if node is None or node.next is None:
            return None, None
        if node.next.data == key:
            return node, node.next
        return self.search_recursion(key, node.next)

    def printLinkList(self):
        temp = self.head
        while temp:
            print(temp.data, end=" ")
            temp = temp.next
        print()


if __name__ == '__main__':
    llist = LinkedList()  # don't shadow the built-in list
    llist.push(4)
    llist.push(5)
    llist.push(6)
    llist.push(7)
    llist.printLinkList()
    llist.delete(5)
    llist.printLinkList()
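# Expected output:
# 7 6 5 4
# 7 6 4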
for i in range((len(features) // 100) + 1):
    start = i * 100
    end = min((i + 1) * 100, len(features))
    batch = features[start:end]  # slice the chunk; [start, end] would be a tuple index
import numpy as np
import pandas as pd

# without forcing dtype=object, pandas converts None to NaN!
s_bad = pd.Series([1, None], dtype=object)
s_good = pd.Series([1, np.nan])

In [13]: s_bad.dtype
Out[13]: dtype('O')

In [14]: s_good.dtype
Out[14]: dtype('float64')
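# Either way, pd.isna() recognises both missing-value markers:

In [15]: s_bad.isna().tolist()
Out[15]: [False, True]

In [16]: s_good.isna().tolist()
Out[16]: [False, True]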
import pyinstaller_versionfile

pyinstaller_versionfile.create_versionfile(
    output_file="test_versionfile.txt",
    version="1.0",
    company_name="somecompany pvt ltd",
    file_description="Desktop LMS app from some industry",
    internal_name="App",
    legal_copyright="© some company pvt ltd. All rights reserved.",
    original_filename="Uls.exe",
    product_name="uls_lms"
)
pyinstaller  --noconsole --onefile  --icon=uls-icon.ico --version-file uls_lms_versionfile.txt  lms_app.py
if grouped:
    last_value = None
    for obj in data["datasets"]:
        if obj["stack"] == last_value:
            obj["first_of_stack"] = ""
        else:
            obj["first_of_stack"] = True
        last_value = obj["stack"]
        
        
data["datasets"].append(
                {
                    "data": labels,
                    "label": el.name,
                    "backgroundColor": el.color,
                    "info": ...,
                    "date": f"{report_month:02d}/{str(report_year)[-2:]}",
                    "stack": f"Report {report_pk}",
                    "first_of_stack": "",
                }
            )
last_value = None
for obj in collection:
    if obj.name == last_value:
        pass  # do something
    else:
        pass  # do something different
    last_value = obj.name
df2 = df1.filter(['Col 1', 'Col 2'], axis=1)
df2.to_csv('output.csv', index=False)  # to_csv (not write_csv); index=False skips the index column
import os
import telebot
import main
from dotenv import load_dotenv
load_dotenv()

API_KEY = os.environ.get('TELEGRAM_API_KEY')
bot = telebot.TeleBot(API_KEY, parse_mode=None)


@bot.message_handler(commands=['start', 'help'])
def greet(message):
    bot.reply_to(message, "Hey My G")


@bot.message_handler(commands=['btc', 'BTC', 'bitcoin', 'BITCOIN'])
def btc_price(message):
    bot.reply_to(message, main.get_cmc_data('BTC'))


@bot.message_handler(commands=['eth', 'ETH', 'ethereum', 'ETHEREUM'])
def eth_price(message):
    bot.reply_to(message, main.get_cmc_data('ETH'))


@bot.message_handler(commands=['wagmi', 'WAGMI'])
def wagmi_price(message):
    bot.reply_to(message, main.get_cmc_data('WAGMI'))


print("bot running...")
bot.polling()  # polling() blocks, so log before starting it
sales.groupby('Platform').get_group('X360')

platform_names = {
    'PS3':'Playstation',
    'PS4':'Playstation',
    'X360':'Xbox',
    'XOne':'Xbox'
}

sales.set_index('Platform').groupby(platform_names).sum()
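# Passing a dict to groupby() maps each index label through it: PS3/PS4 rows are
# summed under 'Playstation', X360/XOne under 'Xbox', and platforms missing from
# the dict map to NaN and are dropped from the result.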
# initialize
a = []

# create the table (name, age, job)
a.append(["Nick", 30, "Doctor"])
a.append(["John",  8, "Student"])
a.append(["Paul", 22, "Car Dealer"])
a.append(["Mark", 66, "Retired"])    

# sort the table by age
import operator
a.sort(key=operator.itemgetter(1))    

# print the table
print(a)
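# The same sort without the operator module, using a lambda as the key:
a.sort(key=lambda row: row[1])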
import random

print("Lower or higher game:")
value = list(range(1, 14))
suit = ["clubs", "diamonds", "heats", "slades"]
rank = ["ace", "2", "3", "4", "5", "6", "7", "8", "9", "10", "jack", "queen", "king"]
# value 1 is the ace, 2 is the 2, and so on; rank and value are index-aligned,
# so rank.index(grade) looks up the numeric value of each card
cards = [(role, grade, value[rank.index(grade)]) for role in suit for grade in rank]

eight_cards = random.sample(cards, 8)
first_card = dict(zip(("suit", "rank", "value"), eight_cards[0]))
print(first_card)
user_points = 0

for i in range(1, 8):

    user_input = input("your turn: higher or lower???\n")
    if (eight_cards[i][2] > eight_cards[i - 1][2] and user_input == "h") or (
        eight_cards[i][2] < eight_cards[i - 1][2] and user_input == "l"
    ):
        user_points += 20
    else:
        user_points -= 15

    print(user_points)
# Set the default figure size for all figures in a notebook
import matplotlib.pyplot as plt

plt.rcParams['figure.figsize'] = [11, 8.5]
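# To size a single figure without touching the global default:
# fig = plt.figure(figsize=(11, 8.5))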
def listtostr(lst, strvar=""):
    # concatenate every list item onto strvar and return the result
    for x in lst:
        strvar += x
    return strvar
import time
from datetime import datetime

import requests

URL = "https://min-api.cryptocompare.com/data/price?fsym={}&tsyms={}"

def get_price(coin, currency):
    try:
        return requests.get(URL.format(coin, currency)).json()
    except (requests.RequestException, ValueError):
        return False

while True:
    date_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
    currentPrice = get_price("BTC", "USD")
    if currentPrice:
        print(date_time, "$", currentPrice["USD"])
    time.sleep(10)  # poll every 10 seconds instead of hammering the API
# web3 library that contains functions to generate Ethereum accounts and sign transactions and data
#Importing Account from the eth_account module of Web3.py
from eth_account import Account
# secrets is the standard-library module for cryptographically strong random numbers
import secrets

#Generating a random hexadecimal string of 32 bytes / 64 characters and storing it in priv variable
priv = secrets.token_hex(32)

#Attaching 0x prefix to our 64 character hexadecimal string stored in priv and storing the new string in variable private_key
private_key = "0x" + priv
print("SAVE BUT DO NOT SHARE:", private_key)

#Creating a new account using the private_key and storing it in variable acct
acct = Account.from_key(private_key)
print("Address:", acct.address)
python -m pip install --upgrade pip
import json
import os
import requests
from models import Asset

apikey = os.environ.get('API_KEY')

headers = {
    'X-CMC_PRO_API_KEY': apikey,
    'Accepts': 'application/json'
}

params = {
    'start': '1',
    'limit': '5',
    'convert': 'USD'
}


def getTokenPrice(symbol):
    url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest'

    data = requests.get(url, params=params, headers=headers).json()
    coins = data['data']
    # find the requested symbol in the listing and return its USD price
    for coin in coins:
        if coin['symbol'] == symbol:
            return coin['quote']['USD']['price']
    return None
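# Hypothetical usage (requires a valid CoinMarketCap key in the API_KEY env var):
# print(getTokenPrice('BTC'))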
from google.cloud import bigquery

# Construct a BigQuery client object (assumes `credentials` was created
# beforehand, e.g. with google.oauth2.service_account.Credentials).
client = bigquery.Client(credentials=credentials)

# TODO(developer): Set table_id to the ID of the table to fetch.
table_id = 'raw_engagement.users_landmarks'

table = client.get_table(table_id)  # Make an API request.

table_schema = [{'name':bqfield.name,'type':bqfield.field_type,'mode':bqfield.mode} for bqfield in table.schema]
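# Alternatively, let the client library serialize each field itself;
# SchemaField.to_api_repr() also preserves descriptions and nested fields:
table_schema = [bqfield.to_api_repr() for bqfield in table.schema]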
import json
import requests

apikey = '43979f1b-f353-4842-ad8a-ecb2735f3b71'

headers = {
    'X-CMC_PRO_API_KEY' : apikey,
    'Accepts' : 'application/json'
}

params = {
    'start' : '1',
    'limit' : '5',
    'convert' : 'USD'
}

url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest'

data = requests.get(url, params=params, headers=headers).json()

coins = data['data']

for coin in coins:
    if coin['symbol'] == 'BTC':
        print(coin['symbol'], coin['quote']['USD']['price'])
import json
import requests

apikey = '43979f1b-f353-4842-ad8a-ecb2735f3b71'

headers = {
    'X-CMC_PRO_API_KEY' : apikey,
    'Accepts' : 'application/json'
}

params = {
    'start' : '1',
    'limit' : '5',
    'convert' : 'USD'
}

url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest'

data = requests.get(url, params=params, headers=headers).json()

coins = data['data']

for coin in coins:
    print(coin['symbol'], coin['quote']['USD']['price'])
import json
import requests

eth = requests.get(
    "https://api.coingecko.com/api/v3/simple/price?ids=ethereum&vs_currencies=usd")

print(eth.json()["ethereum"]["usd"])
cv.rectangle(blank,(250,500),(0,0),(0,255,0),thickness=cv.FILLED)
print(json.dumps(books_results, indent=2))
books_results.append({
    "title": title,
    "link": link,
    "displayed_link": displayed_link,
    "snippet": snippet,
    "author": author,
    "author_link": author_link,
    "date_published": date_published,
    "preview_link": preview_link,
    # if URL is present, add "https://www.google.com" to the URL, instead to None: "Nonehttps://www.google.com"
    "more_editions_link": f"https://www.google.com{more_editions_link}" if more_editions_link is not None else None, 
    "thumbnail": bytes(bytes(book_thumbnail, "ascii").decode("unicode-escape"), "ascii").decode("unicode-escape")
})
for book_thumbnail, book_result in zip(book_thumbnails, selector.css(".Yr5TG")):
    title = book_result.css(".DKV0Md::text").get()
    link = book_result.css(".bHexk a::attr(href)").get()
    displayed_link = book_result.css(".tjvcx::text").get()
    snippet = book_result.css(".cmlJmd span::text").get()
    author = book_result.css(".fl span::text").get()
    author_link = f'https://www.google.com/search{book_result.css(".N96wpd .fl::attr(href)").get()}'
    date_published = book_result.css(".fl+ span::text").get()
    preview_link = book_result.css(".R1n8Q a.yKioRe:nth-child(1)::attr(href)").get()
    more_editions_link = book_result.css(".R1n8Q a.yKioRe:nth-child(2)::attr(href)").get()
# https://regex101.com/r/mapBs4/1
book_thumbnails = re.findall(r"s=\\'data:image/jpg;base64,(.*?)\\'", str(selector.css("script").getall()), re.DOTALL)
html = requests.get("https://www.google.com/search", params=params, headers=headers, timeout=30)
selector = Selector(text=html.text)
# https://docs.python-requests.org/en/master/user/quickstart/#passing-parameters-in-urls
params = {
    "q": "richard branson",  # search query
    "tbm": "bks",            # book results
    "gl": "us",              # country to search from
    "hl": "en"               # language
}

# https://requests.readthedocs.io/en/master/user/quickstart/#custom-headers
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.87 Safari/537.36",
}
from parsel import Selector
import requests, json
from parsel import Selector
import requests, json, re

params = {
    "q": "richard branson",
    "tbm": "bks",
    "gl": "us",
    "hl": "en"
}

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.87 Safari/537.36",
}

html = requests.get("https://www.google.com/search", params=params, headers=headers, timeout=30)
selector = Selector(text=html.text)

books_results = []

# https://regex101.com/r/mapBs4/1
book_thumbnails = re.findall(r"s=\\'data:image/jpg;base64,(.*?)\\'", str(selector.css("script").getall()), re.DOTALL)

for book_thumbnail, book_result in zip(book_thumbnails, selector.css(".Yr5TG")):
    title = book_result.css(".DKV0Md::text").get()
    link = book_result.css(".bHexk a::attr(href)").get()
    displayed_link = book_result.css(".tjvcx::text").get()
    snippet = book_result.css(".cmlJmd span::text").get()
    author = book_result.css(".fl span::text").get()
    author_link = f'https://www.google.com/search{book_result.css(".N96wpd .fl::attr(href)").get()}'
    date_published = book_result.css(".fl+ span::text").get()
    preview_link = book_result.css(".R1n8Q a.yKioRe:nth-child(1)::attr(href)").get()
    more_editions_link = book_result.css(".R1n8Q a.yKioRe:nth-child(2)::attr(href)").get()

    books_results.append({
        "title": title,
        "link": link,
        "displayed_link": displayed_link,
        "snippet": snippet,
        "author": author,
        "author_link": author_link,
        "date_published": date_published,
        "preview_link": preview_link,
        "more_editions_link": f"https://www.google.com{more_editions_link}" if more_editions_link is not None else None,
        "thumbnail": bytes(bytes(book_thumbnail, "ascii").decode("unicode-escape"), "ascii").decode("unicode-escape")
    })


print(json.dumps(books_results, indent=2))
import cv2 as cv
import numpy as np

blank = np.zeros((500, 500, 3), dtype="uint8")
cv.rectangle(blank, (0, 0), (250, 250), (0, 255, 0), thickness=2)

img = cv.imread("C:/xy.jpg")
cv.imshow("green", blank)
cv.waitKey(0)
#views.py

from django.shortcuts import get_object_or_404, redirect
from my_project.example.models import Profile
from my_project.example.serializers import ProfileSerializer  # assumed serializer location
from rest_framework.renderers import TemplateHTMLRenderer
from rest_framework.response import Response
from rest_framework.views import APIView


class ProfileDetail(APIView):
    renderer_classes = [TemplateHTMLRenderer]
    template_name = 'profile_detail.html'

    def get(self, request, pk):
        profile = get_object_or_404(Profile, pk=pk)
        serializer = ProfileSerializer(profile)
        return Response({'serializer': serializer, 'profile': profile})

    def post(self, request, pk):
        profile = get_object_or_404(Profile, pk=pk)
        serializer = ProfileSerializer(profile, data=request.data)
        if not serializer.is_valid():
            return Response({'serializer': serializer, 'profile': profile})
        serializer.save()
        return redirect('profile-list')

#profile_detail.html

{% load rest_framework %}

<html><body>

<h1>Profile - {{ profile.name }}</h1>

<form action="{% url 'profile-detail' pk=profile.pk %}" method="POST">
    {% csrf_token %}
    {% render_form serializer %}
    <input type="submit" value="Save">
</form>

</body></html>
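#urls.py (a minimal hook-up sketch; the route name must match the template above,
# and the 'profile-list' route used by the redirect is assumed to exist elsewhere)

from django.urls import path
from .views import ProfileDetail

urlpatterns = [
    path('profile/<int:pk>/', ProfileDetail.as_view(), name='profile-detail'),
]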
# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, val=0, left=None, right=None):
#         self.val = val
#         self.left = left
#         self.right = right
from typing import Optional

class Solution:
    def isSymmetric(self, root: Optional[TreeNode]) -> bool:
        if not root:
            return True

        def check(left, right):
            if not left and not right:
                return True
            if not left or not right:
                return False
            if left.val != right.val:
                return False
            return check(left.left, right.right) and check(left.right, right.left)

        return check(root.left, root.right)
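# A quick self-check (TreeNode as defined in the comment above; the tree
# [1, 2, 2, 3, 4, 4, 3] is mirror-symmetric):
# root = TreeNode(1, TreeNode(2, TreeNode(3), TreeNode(4)),
#                    TreeNode(2, TreeNode(4), TreeNode(3)))
# print(Solution().isSymmetric(root))  # True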
capture = cv.VideoCapture("C:/videoplayback (1).mp4")
while True:
    isTrue, frame = capture.read()
    if not isTrue:  # end of video: read() returns False and an empty frame
        break
    cv.imshow("videoplayback (1)", frame)
    if cv.waitKey(20) & 0xFF == ord("d"):
        break
capture.release()
cv.destroyAllWindows()
# -- Importing Modules
import pickle
import os
clear = lambda: os.system('cls' if os.name in ('nt', 'dos') else 'clear')
clear()

def check(name,Class):
    return True
    
def studentHeader():
    clear()
    print("you are welcome to the student module")
# -- Importing some important modules ...
# from collections import OrderedDict
import os
import json
from datetime import date
import time
import Student_Module as std
def clear(): return os.system('cls' if os.name in ('nt', 'dos') else 'clear')



# >> ....................................... Auxillary functions ....................................... <<

# // Function for spacing 
def tab(n):
    return " " * n

# // Function to return today's date
def today_date():
    dateIs = date.isoformat(date.today())
    return dateIs

# // Function for returning the no. of available records of books
def serial_no():
    ser = 0
    with open("Stationaries.txt") as f:
        for _ in f:
            ser += 1  # one record (JSON object) per line
    return ser + 1

# // Function for printing the list of books/students
def printList(takeList,arg1):
    clear()
    bookName=[]
    Class=[]
    Bool=[]
    Status=[]
    Date=[]
    Sn=[]

    for item in takeList:

        bookName.append(item['Book'])
        Class.append(item['Class'])
        Bool.append(item['Bool'])
        Status.append(item['Status'])
        Date.append(item['Date'])
        Sn.append(item['SerialNo'])

    if arg1 == "C" or arg1 == "D":

        print(" Sn. I  Book   I    Class  I   Type   I      Date     I      Status    I")

        for i in range(len(Sn)):
            stats = "Available" if Status[i] else "Unavailable"
            print(f" {i}.{tab(4)}{bookName[i]}{tab(15-len(bookName[i]))}{Class[i]}{tab(9-len(str(Class[i])))}{Bool[i]}{tab(8-len(str(Bool[i])))}{tab(3)}{Date[i]}        {stats}\n")

    elif arg1=="P":
        pass

# >> ..................................................................................................... <<



# >> ........................................... Main Functions .......................................... <<

# // Returns the list of books [returns list]
def listOfBooks():
    clear()
    f = open("Stationaries.txt")

    def sortDateWise():
        sorted_list1 = []
        while True:
            try:
                a=f.readline()
                b=json.loads(a)
                sorted_list1.append(b)
            except ValueError:  # json.loads raises on the blank line at EOF
                break
        print(sorted_list1)
        time.sleep(2)
        return sorted_list1

    def sortClassWise():
        a = sortDateWise()
        li = {}
        sorted_list2 = []
        for item in a:
            dict1 = item
            sr = dict1['SerialNo']
            cl = dict1['Class']
            li[sr] = cl
        sort_orders = sorted(li.items(), key=lambda x: x[1])
        # print(sort_orders)
        # time.sleep(4)
        for item in sort_orders:
            seri = item[0]
            for item2 in a:
                seri2 = item2['SerialNo']
                if seri2 == seri:
                    sorted_list2.append(item2)
        print(sorted_list2)
        time.sleep(2)
        return sorted_list2
    
    def sortPendingWise():
        pass

    print("""
    Sort according to Date or Class -
    \n 1.Date [D]
    \n 2.Class [C]
    \n 3.Pending Books[P]
    """)
    user_response = input("Choose any one : ").lower()
    if user_response == "d":
        sorted_list = sortDateWise()
        f.close()
        return sorted_list, "D"
    elif user_response == "c":
        sorted_list = sortClassWise()
        f.close()
        return sorted_list, "C"
    elif user_response == "p":
        sorted_list = sortPendingWise()
        f.close()
        return sorted_list, "P"

# // Add new item
def addNewBook():
    f = open("