Snippets Collections
import threading
import queue
import time

def read_kbd_input(inputQueue):
    print('Ready for keyboard input:')
    while True:
        input_str = input()
        inputQueue.put(input_str)

def main():
    EXIT_COMMAND = "exit"
    inputQueue = queue.Queue()

    inputThread = threading.Thread(target=read_kbd_input, args=(inputQueue,), daemon=True)
    inputThread.start()

    while True:
        if inputQueue.qsize() > 0:
            input_str = inputQueue.get()
            print("input_str = {}".format(input_str))

            if input_str == EXIT_COMMAND:
                print("Exiting serial terminal.")
                break
            
            # Insert your code here to do whatever you want with the input_str.

        # The rest of your program goes here.

        time.sleep(0.01) 
    print("End.")

if __name__ == '__main__':
    main()
import serial
import time

# Set up the serial connection to the SIM800 module
ser = serial.Serial('/dev/serial0', baudrate=9600, timeout=1)

def send_at_command(command, delay=1):
    """Send an AT command to the SIM800 module."""
    ser.write((command + '\r').encode())
    time.sleep(delay)
    reply = ser.read(ser.in_waiting).decode()  # in_waiting replaces the deprecated inWaiting()
    return reply

# Enable GPS
send_at_command("AT+CGNSPWR=1")

# Check GPS status (optional)
gps_status = send_at_command("AT+CGNSPWR?")
print("GPS Status: ", gps_status)

# Get the GPS location
location_data = send_at_command("AT+CGNSINF", delay=2)
print("Location Data: ", location_data)

ser.close()
import dataclasses


@dataclasses.dataclass()
class Parent:
    def __post_init__(self):
        for (name, field_type) in self.__annotations__.items():
            if not isinstance(self.__dict__[name], field_type):
                current_type = type(self.__dict__[name])
                raise TypeError(f"The field `{name}` was assigned `{current_type}` instead of `{field_type}`")

        print("Check is passed successfully")


@dataclasses.dataclass()
class MyClass(Parent):
    value: str


obj1 = MyClass(value="1")  # passes the check
obj2 = MyClass(value=1)    # raises TypeError: value is int, not str
import bpy

class MI_PANEL(bpy.types.Panel):
    bl_label = "Mi Panel"
    bl_idname = "OBJECT_PT_mi_panel"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = 'Mi Addon'

    def draw(self, context):
        layout = self.layout

def register():
    bpy.utils.register_class(MI_PANEL)

def unregister():
    bpy.utils.unregister_class(MI_PANEL)

if __name__ == "__main__":
    register()
import time
import pygame
import pendulum

def alarm():
    pygame.mixer.init()
    pygame.mixer.music.load('alarm.mp3')
    pygame.mixer.music.play()

    while pygame.mixer.music.get_busy():
        time.sleep(1)


def set_alarm(target_time):
    while True:
        dt = pendulum.now()
        current_time = dt.strftime('%I:%M:%S %p')
        print(current_time, target_time)
        time.sleep(1)
        if current_time == target_time:
            break


target_time = '07:09:00 PM'
set_alarm(target_time)
alarm()
pip list --outdated --format=freeze | grep -v '^\-e' | cut -d = -f 1  | xargs -n1 pip install -U
import pendulum

current_date = pendulum.now().date()
uploaded_date = pendulum.date(2018, 8, 1)

diff_days = current_date.diff(uploaded_date).in_days()

print(current_date.subtract(days = diff_days).diff_for_humans()) # 6 years ago
import pendulum

try:
    print(pendulum.parse('{:0>2}-{:0>2}-{:0>2}'.format(2024, 9, 21)))
except Exception as e:
    print(e)
from pynput.keyboard import Listener

def on_press(key):
    print(str(key))


with Listener(on_press = on_press) as l:
    l.join()
import random

number = random.randint(1, 6)

guess = 0

while guess != number:
    guess = int(input('Number : '))
    if guess == number:
        print('You won ;)')
import random

def guess_the_number():
    number_to_guess = random.randint(1, 100)
    attempts = 0
    while True:
        guess = int(input("Guess the number (1-100): "))
        attempts += 1
        if guess < number_to_guess:
            print("Too low!")
        elif guess > number_to_guess:
            print("Too high!")
        else:
            print(f"Congratulations! You guessed the number in {attempts} attempts.")
            break

guess_the_number()
#Sort a list without using sort()
def sort_list(nums):
    sorted_list = []
    while nums:
        minimum = min(nums)
        sorted_list.append(minimum)
        nums.remove(minimum)
    return sorted_list

numbers = [5, 3, 8, 2, 7]
sorted_numbers = sort_list(numbers)
print(f"Sorted list: {sorted_numbers}")
numbers = [5, 3, 8, 2, 7]

# Find max and min
max_number = max(numbers)
min_number = min(numbers)

print(f"Max: {max_number}, Min: {min_number}")
grades = {}

# Add student grades
grades["Emily"] = 88
grades["James"] = 76
grades["Olivia"] = 92

# Print all students and their grades
for student, grade in grades.items():
    print(f"{student}: {grade}")
# Create a dictionary - paired values
student_grades = {
    "Alice": 90,
    "Bob": 85,
    "Charlie": 92
}

# Access and modify
print(student_grades["Alice"])
student_grades["Bob"] = 88  # Update Bob's grade

# Add a new entry
student_grades["Dave"] = 95

print(student_grades)
favorite_movies = []

# Add movies
favorite_movies.append("The Godfather")
favorite_movies.append("Pulp Fiction")

# Remove a movie
favorite_movies.remove("Pulp Fiction")

# Print the list
print(f"My favorite movies: {favorite_movies}")
# Lists
movies = ["Inception", "The Matrix", "Interstellar"]
movies.append("The Shawshank Redemption")
movies.remove("The Matrix")

# Access elements
print(movies[0])  # First element

# Tuples (immutable)
dimensions = (1920, 1080)
print(dimensions[1])  # Access element
def multiply_numbers(x, y):
    return x * y

num1 = int(input("Enter first number: "))
num2 = int(input("Enter second number: "))

result = multiply_numbers(num1, num2)
print(f"The product of {num1} and {num2} is {result}.")
def multiply(a, b):
    return a * b

result = multiply(5, 4)
print(f"Multiplication result: {result}")
a = 5
b = 4
def multiply():
    return a * b
print(multiply())
# Using a while loop
sum_numbers = 0
num = 1

while num <= 100:
    sum_numbers += num
    num += 1

print(f"Sum of numbers 1 to 100: {sum_numbers}")
for i in range(1, 10):
    print(i)
for i in range(10):
    print(i)
age = int(input("Enter your age: "))

if age < 13:
    print("You are a child.")
elif 13 <= age < 20:
    print("You are a teenager.")
elif 20 <= age < 60:
    print("You are an adult.")
else:
    print("You are a senior.")
age = int(input("Enter your age: "))

if age < 18:
    print("You are a minor.")
elif 18 <= age < 65:
    print("You are an adult.")
else:
    print("You are a senior citizen.")
#Basic operators: + - * / // % **
a = 10
b = 3

addition = a + b
subtraction = a - b
multiplication = a * b
division = a / b
floor_division = a // b
modulus = a % b
exponentiation = a ** b

print(addition, subtraction, multiplication, division, floor_division, modulus, exponentiation)
name = input("Enter your name: ")
age = int(input("Enter your age: "))
print(f"Hello, {name}! You are {age} years old.")
# Create variables
username = "JohnDoe"
user_age = 25
balance = 150.75
is_member = False

# Manipulate and print variables
new_balance = balance + 100
print (f"Username: {username}, Age: {user_age}")
print (f"Hi {username} you are {user_age} and your balance is {new_balance}")
# Create variables
username = "JohnDoe"
user_age = 25
balance = 150.75
is_member = False

# Manipulate and print variables
new_balance = balance + 100

print("Username:", username)
print("Age:", user_age)
print("New Balance:", new_balance)
print("Is Member:", is_member)
# Create variables
userfirstname = "John"
userlastname = "Doe"
user_age = 25

print(userfirstname, user_age)
print(userfirstname + userlastname) #concatenate
print("Hello " + userfirstname + " " + userlastname) #concatenate with spaces
# Variables
name = "Alice"        # String
age = 30              # Integer
height = 5.7          # Float
is_student = True     # Boolean

# Print variable values
print(name)
print(age)
print(height)
print(is_student)
print("Hello, World!")
import clickhouse_connect
import pandas as pd

client = clickhouse_connect.get_client(host='localhost', username='default', password='atlife789')

def execute_query(client,trans_query):
    try:
        data = client.query(trans_query)
        dataset = pd.DataFrame(data.result_rows,columns=data.column_names)
        return dataset
    except Exception as e:
        print(f'Error executing the query: {e}.')
        return pd.DataFrame()
import ctypes
from ctypes import wintypes
 
# Load the winmm.dll library
winmm = ctypes.WinDLL('winmm')
 
# Define the mciSendString function
mciSendString = winmm.mciSendStringW
mciSendString.argtypes = [wintypes.LPCWSTR, wintypes.LPWSTR, wintypes.UINT, wintypes.HWND]
mciSendString.restype = wintypes.UINT
 
def main():
    while True:
        # Command to open the CD device (alias it as mycd)
        mciSendString("open d: type cdaudio alias mycd", None, 0, None)
        # Command to open (eject) the CD drive tray
        mciSendString("set mycd door open", None, 0, None)
 
if __name__ == "__main__":
    main()
# main.py

from fastapi import FastAPI
from routes.tasks import tasks_route

app = FastAPI()

app.include_router(tasks_route)

-------------------------

# routes > tasks.py

from fastapi import APIRouter

tasks_route = APIRouter()

@tasks_route.get('/tasks')
def get_tasks():
  return {'tasks': []}
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel, Field
from uuid import UUID, uuid4

class Task(BaseModel):
    id : UUID = Field(default_factory = uuid4)
    name : str
    description : str
    done: bool = False

tasks_db = []

def find_task(_id):
    for idx, task in enumerate(tasks_db):
        if task.id == _id:
            return idx, task

app = FastAPI()

@app.get('/')
def index():
    return {'msg': 'hello, world'}

# Add new task to database
@app.put('/add_task')
def add_task(task : Task):
    tasks_db.append(task)

    return {'msg': 'Task created', 'task': task}

# Get all tasks from database
@app.get('/tasks')
def get_tasks():
    return {'tasks': tasks_db}

# Get one task by id
@app.get('/task/{_id}')
def get_task(_id: UUID):
    result = find_task(_id)  # returns (index, task) or None
    if result:
        return {'task': result[1]}

    raise HTTPException(status_code = 404, detail = 'This task is not found :(')

# Update task by id
@app.put('/update_task/{_id}')
def update_task(_id : UUID, updatedtask : Task):
    task = find_task(_id)
    if task:
        task_idx = task[0]
        tasks_db[task_idx] = updatedtask
        return {'msg': 'Task updated', 'task': updatedtask}
    
    raise HTTPException(status_code = 404, detail = 'This task is not found :(')

# Delete task
@app.delete('/delete_task/{_id}')
def delete_task(_id : UUID):
    result = find_task(_id)
    if not result:
        raise HTTPException(status_code = 404, detail = 'This task is not found :(')

    task = result[1]
    tasks_db.remove(task)

    return {'msg': f'task {task.name} deleted ;)'}
import pendulum

items = [
    {'name': 'Milk', 'expire_date': '2024-09-15'},
    {'name': 'Bread', 'expire_date': '2024-09-18'},
    {'name': 'Yogurt', 'expire_date': '2024-09-25'},
]

def check_expire_date(items):
    for item in items:
        expire_date = pendulum.parse(item.get('expire_date'))
        current_date = pendulum.now()

        if expire_date <= current_date:
            print(f"{item.get('name')} expired")


check_expire_date(items=items)
<!-- QuillJS CDN -->
<link href="https://cdn.quilljs.com/1.3.6/quill.snow.css" rel="stylesheet">
<script src="https://cdn.quilljs.com/1.3.6/quill.min.js"></script>

<!-- Quill Image Resize Module CDN -->
<script src="https://cdn.jsdelivr.net/npm/quill-image-resize-module@3.0.0/image-resize.min.js"></script>

<!-- Quill Editor Container -->
<div id="quill-editor-container" style="height: 500px; border: 1px solid #ddd;"></div>

<script>
  // Add the ImageResize module to Quill
  Quill.register('modules/imageResize', ImageResize);

  // Initialize Quill with the image resize module
  var quill = new Quill('#quill-editor-container', {
    theme: 'snow',
    modules: {
      toolbar: {
        container: [
          [{ 'font': [] }, { 'header': [1, 2, 3, 4, 5, 6, false] }],
          ['bold', 'italic', 'underline', 'strike'],
          [{ 'list': 'ordered' }, { 'list': 'bullet' }],
          ['link', 'image', 'video'],
          [{ 'align': [] }],
          ['clean']
        ],
        handlers: {
          // Custom image handler to embed via URL
          image: function() {
            const imageUrl = prompt('Enter the image URL');
            if (imageUrl) {
              const range = this.quill.getSelection();
              this.quill.insertEmbed(range.index, 'image', imageUrl);
            }
          }
        }
      },
      imageResize: {}  // Enable image resizing module
    }
  });
</script>

<!-- Styles for Editor -->
<style>
  #quill-editor-container {
    height: 500px;
    width: 100%;  /* Set to 100% width */
    max-width: 1200px;  /* Optionally, limit the max width */
    padding: 10px;
    border: 1px solid #ddd;
  }
</style>
def lines_that_equal(line_to_match, fp):
    return [line for line in fp if line == line_to_match]

def lines_that_contain(string, fp):
    return [line for line in fp if string in line]

def lines_that_start_with(string, fp):
    return [line for line in fp if line.startswith(string)]

def lines_that_end_with(string, fp):
    # note: lines read from a file keep their trailing '\n', so strip it before comparing
    return [line for line in fp if line.rstrip('\n').endswith(string)]
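
A hypothetical usage sketch (the file name log.txt is an assumption; note that a file object is exhausted after one pass, so rewind with seek(0) before reusing it):

with open('log.txt') as fp:
    errors = lines_that_contain("ERROR", fp)
    fp.seek(0)  #rewind: the previous pass consumed the file iterator
    warnings = lines_that_start_with("WARN", fp)

print(len(errors), len(warnings))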
def uploadData(sheet, email, sheet_name):
    if sheet_name == "2.BaseData":
        data = []
        headers = [cell.value for cell in sheet[1]]

        for row in sheet.iter_rows(min_row=2):
            row_data = {
                headers[i]: cell.value
                for i, cell in enumerate(row)
                if cell.value is not None
            }
            if row_data:  # Only append if row_data is not empty
                row_data = trim_dict_spaces(row_data)
                data.append(row_data)

        print(data)

        # If socio_econ_vulnerability_collection expects each row to be uploaded individually

        for row in data:
            socio_econ_vulnerability_collection.add(row)

        return data
from math import *
  
x = [1,2,3]
import random
names = [
    "黄彦祯",
    "陈子睿",
    "杨芷涵",
    "李若璇",
    "麦家俊",
    "蔡培钧",
    "李泽丰",
    # "陈蓬宇",
    "郭芃泽"
    ]
    
random.shuffle(names)

for idx, name in enumerate(names):
    end_symb = " " if idx%2==0 else " | "
    print(f"{name}", end=end_symb)
    
from faker import Faker
from SmileyDB3 import SmileyDB3
from random import randint

import webbrowser

f = Faker()
db = SmileyDB3('mydb.db')

users = db.table('users')

for i in range(20):
    users.Insert(data={
        'name': f.name(),
        'email': f.email(),
        'age': randint(20, 40)
    })

users.convert().to_html('out.html')

webbrowser.open('out.html')
import sqlite3

db = sqlite3.connect('mydb.db')
cur = db.cursor()

# create users table
cur.execute('CREATE TABLE IF NOT EXISTS users (name TEXT, image BLOB)')

# insert new user in users table
cur.execute(
    'INSERT INTO users (name, image) VALUES (?, ?)', 
    ('USER-123', open('smile.png', 'rb').read())
)

db.commit()
import customtkinter as ctk

root = ctk.CTk()
root.geometry('200x200')

led_on = '#4cff66'
led_off = '#1e3c22'

led = ctk.CTkLabel(root, text='\t', width=50, bg_color=led_off)
led.place(x = 80, y = 80)

def off_on():
    if led._bg_color == led_off:
        led.configure(bg_color = led_on)
        led.update()
        return
    
    if led._bg_color == led_on:
        led.configure(bg_color = led_off)
        led.update()
        return


bt = ctk.CTkButton(root, text = 'OFF/ON', command=off_on, width=70)
bt.place(x = 80, y = 150)

root.mainloop()
from faker import Faker
from random import randint

f = Faker()

data = {'name': 'Barry Long', 'email': 'tammy28@example.com', 'age': 20}

# Generate data from this 
def Generate_like_this(data : dict, f : Faker):
    result = {}

    funcs = {
        'name': f.name,
        'email': f.email
    }

    for k in data:
        if k in funcs:
            result[k] = funcs[k]()

        if isinstance(data[k], int):
            result[k] = randint(1, data[k])

    return result

print(Generate_like_this(data = data, f = Faker()))
print("She said: \"Hello\" and then left.")
import requests
import json
import sys
sys.path.append('/Workspace/Users/khoa.nguyen@houstontexans.com/production_jobs/Data_Ops_Tools')
import EnvVars
from EnvVars import env_vars_dict


apigeeKey = env_vars_dict['tm']['apiKey']
eventName = '23COPAST'
startDateTime = '2023-07-26 12:00'

url = "https://app.ticketmaster.com/sth-buy/ticketing_services.aspx?dsn=texans"

payload = {
  "header":{
    "src_sys_type":2,"ver":1
    ,"src_sys_name":"test"
    ,"archtics_version":"V999"
    },
  "command1":{
    "cmd":"get_attendance_incremental"
    ,"start_datetime":startDateTime
    ,"uid":"texans64"
    ,"dsn":"texans"
    ,"event_name":eventName
    }
  }

headers = {
  'apikey': apigeeKey, 
  'Content-Type': 'application/json',
  'Connection': 'keep-alive'
}

response = requests.post( url, headers=headers , json=payload)
data = response.json()
data
import os
from datetime import datetime as dt , timedelta

print(len(os.listdir(processed_tm_sftp_files_root_dir)))

for f_dir in os.listdir(processed_tm_sftp_files_root_dir):
  folder_path = os.path.join(processed_tm_sftp_files_root_dir, f_dir)
  folder_date = f_dir.replace('texans_texans_texans_','').split('_')[0][:8]

  folder_date = dt.strptime(folder_date,'%Y%m%d')

  if folder_date < dt.today() - timedelta(days=120):

    for f in os.listdir(folder_path):
      file_path = os.path.join(folder_path, f)
      if os.path.isfile(file_path):
        os.remove(file_path)
        print(f"Deleted file: {file_path}")
import sys 
sys.path.append ('/Workspace/Users/khoa.nguyen@houstontexans.com/production_jobs/Data_Ops_Tools/') 

import EnvVars
from EnvVars import *

dev_credentials = env_vars_dict['azure_storage_dev']

from azure.storage.blob import BlobServiceClient , BlobClient

# Replace the following variables with your Azure Storage account and container information
storage_account_name = dev_credentials['accountName']
storage_account_key = dev_credentials['secretValue']
container_name = "raw"
directory_name = "PSL/"  # Leave empty to list files from the root of the container

mount_name = 'PSL'
directory_path = 'PSL'


dbutils.fs.mount(
  source=f"wasbs://{container_name}@{storage_account_name}.blob.core.windows.net/{directory_path}",
  mount_point=f"/mnt/azure_storage/{mount_name}",
  extra_configs={
    f"fs.azure.account.key.{storage_account_name}.blob.core.windows.net": storage_account_key
  }
)

# List all mount points
mounts = dbutils.fs.mounts()
## Date Formatting 

#### Sql
```sql
date_format(dt_obj, 'yyyy-MM-dd hh:mm:ss a') 
```

#### Python
```python
dt_obj.strftime("%-m-%d-%Y %-I:%M%p")
```
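
A runnable sketch of the Python side (note: `%-m` and `%-I` strip leading zeros on Linux/macOS only; Windows uses `%#m` and `%#I`):

```python
from datetime import datetime

dt_obj = datetime(2024, 7, 4, 9, 5)
# "%-m-%d-%Y %-I:%M%p" -> "7-04-2024 9:05AM" with POSIX strftime
print(dt_obj.strftime("%-m-%d-%Y %-I:%M%p"))
```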
---
qry = """
SELECT COUNT(*) CNT , 'eloqua_contact' AS tbl
FROM reporting.eloqua_contact
UNION 
SELECT COUNT(*) CNT , 'eloqua_form' AS tbl
FROM reporting.eloqua_form
UNION 
SELECT COUNT(*) CNT , 'eloqua_form_submission' AS tbl
FROM reporting.eloqua_form_submission
UNION
SELECT COUNT(*) CNT , 'eloqua_system_preference_center' AS tbl
FROM reporting.eloqua_system_preference_center
UNION
SELECT COUNT(*) CNT , 'eloqua_system_sms_preference_center' AS tbl
FROM reporting.eloqua_system_sms_preference_center
UNION 
SELECT COUNT(*) CNT , 'eloqua_system_preference_center_toro_s_kids_club' AS tbl
FROM reporting.eloqua_system_preference_center_toro_s_kids_club
UNION
SELECT COUNT(*) CNT , 'eloqua_system_preference_center_historic' AS tbl
FROM reporting.eloqua_system_preference_center_historic
UNION
SELECT COUNT(*) CNT , 'eloqua_system_bi_prospect_scores' AS tbl
FROM reporting.eloqua_system_bi_prospect_scores

"""

# Get Pre Run Cnts
pre_run_df = spark.read.format("sqlserver")\
      .option("host", "sqls-houstontexans-db-01.database.windows.net")\
      .option("user", prodProperties['user']).option("password", prodProperties['password'])\
      .option("database","sqls-houstontexans")\
      .option("query", qry)\
      .option("fetchSize", 35000)\
      .load()
 
# write data from dbrix to azure
prod_dbrix_to_azure('eloqua.contact','reporting.eloqua_contact')
prod_dbrix_to_azure('eloqua.form','reporting.eloqua_form')
prod_dbrix_to_azure('eloqua.form_submission','reporting.eloqua_form_submission')
prod_dbrix_to_azure('eloqua.system_preference_center','reporting.eloqua_system_preference_center')
prod_dbrix_to_azure('eloqua.system_sms_preference_center','reporting.eloqua_system_sms_preference_center')
prod_dbrix_to_azure('eloqua.system_preference_center_toro_s_kids_club','reporting.eloqua_system_preference_center_toro_s_kids_club')
prod_dbrix_to_azure('eloqua.system_preference_center_historic','reporting.eloqua_system_preference_center_historic')
prod_dbrix_to_azure('eloqua.system_bi_prospect_scores','reporting.eloqua_system_bi_prospect_scores')

# Get Post Run Cnts
post_run_df = spark.read.format("sqlserver")\
      .option("host", "sqls-houstontexans-db-01.database.windows.net")\
      .option("user", prodProperties['user']).option("password", prodProperties['password'])\
      .option("database","sqls-houstontexans")\
      .option("query", qry)\
      .option("fetchSize", 35000)\
      .load()
      
# Compare Pre and Post Run Df 
pre_post_cnts_df = pre_run_df.join(post_run_df, on='tbl', how='inner').select(pre_run_df.tbl ,  pre_run_df.CNT.alias('pre_run_cnt'), post_run_df.CNT.alias('post_run_cnt'))
pre_post_cnts_df.display()
import sys 
sys.path.append ('/Workspace/Users/khoa.nguyen@houstontexans.com/production_jobs/Data_Ops_Tools/')
from DataOpsMainFunctions import prod_azure_to_dbrix, prod_dbrix_to_azure , prodProperties

query_azure_db_df = spark.read.format("sqlserver")\
      .option("host", "sqls-houstontexans-db-01.database.windows.net")\
      .option("user", prodProperties['user']).option("password", prodProperties['password'])\
      .option("database","sqls-houstontexans")\
      .option("query", "SELECT * FROM reporting.db_data_catalogue WHERE table_schema='survey_monkey'")\
      .option("fetchSize", 35000)\
      .load()
query_azure_db_df.display()
import re
import requests
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup

def find_type_of_research(soup):
    if soup.find_all('lbj-title', attrs = {'variant':'eyebrow'}):
        return soup.find_all('lbj-title', attrs = {'variant':'eyebrow'})[0].text.strip()
    else:
        return np.nan

def find_title(soup):
    if soup.find_all('lbj-title', attrs = {'variant':'heading-1'}):
        return soup.find_all('lbj-title', attrs = {'variant':'heading-1'})[0].text.strip()
    if soup.find_all('h1', attrs = {'class':'title heading-1'}):
        return soup.find_all('h1', attrs = {'class':'title heading-1'})[0].text.strip()
    return np.nan

def find_subtitle(soup):
    if soup.find_all('lbj-title', attrs = {'variant':'subtitle'}):
        return soup.find_all('lbj-title', attrs = {'variant':'subtitle'})[0].text.strip()

def find_authors(soup):
    if soup.find_all('lbj-title', attrs = {'variant':'paragraph'}):
        author_code = soup.find_all('lbj-title', attrs = {'variant':'paragraph'})[0]
        # find every lbj-link
        authors = author_code.find_all('lbj-link')
        authors = [author.text.strip() for author in authors]
        authors_str =  ', '.join(authors)
        return authors_str
    return np.nan

def find_date(soup):
    if soup.find_all('lbj-title', attrs = {'variant':'date'}):
        date_code = soup.find_all('lbj-title', attrs = {'variant':'date'})[0]
        date = date_code.find_all('time')[0].text.strip()
        return date
    return np.nan

def find_report_link(soup):
    if soup.find_all('lbj-button'):
        return soup.find_all('lbj-button')[0].get('href')
    return np.nan

def find_tags(soup):
    if soup.find_all('lbj-link', attrs = {'variant':'tag'}):
        tags = soup.find_all('lbj-link', attrs = {'variant':'tag'})
        print(tags)

def find_data_json(soup):
    # get the javascript code
    script = soup.find_all('script', attrs = {'type':'text/javascript'})

    # found the json after dataLayer_tags
    pattern = 'dataLayer_tags = (.+);'
    for s in script:
        if re.search(pattern, str(s)):
            info_json = re.search(pattern, str(s)).group(1)
            # transform it into a dictionary (eval on scraped JS is risky; json.loads is safer when the payload is valid JSON)
            info = eval(info_json)['urban_page']
            publish_date = info.get('publish_date')
            title = info.get('urban_title')
            research_area = info.get('research_area')
            authors = info.get('authors')
            publication_type = info.get('publication_type')
            eyebrow = info.get('eyebrow')
            tags = info.get('tags')
            
            return publish_date, title, research_area, authors, publication_type, eyebrow, tags, info_json
    return np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan

df_links = pd.read_csv('urban_links.csv')
df_links_details = df_links.copy()

for i in range(len(df_links)):
    URL = "https://www.urban.org/" + df_links['link'][i]
    print(URL)
    r = requests.get(URL)
    soup = BeautifulSoup(r.content, 'html5lib')
    report_link = find_report_link(soup)
    publish_date, title, research_area, authors, publication_type, eyebrow, tags, info_json = find_data_json(soup)
    
    df_links_details.loc[i, 'eyebrow'] = eyebrow
    df_links_details.loc[i, 'title'] = title
    df_links_details.loc[i, 'authors'] = authors
    df_links_details.loc[i, 'date'] = publish_date
    df_links_details.loc[i, 'research_area'] = research_area
    df_links_details.loc[i, 'publication_type'] = publication_type
    df_links_details.loc[i, 'tags'] = tags
    df_links_details.loc[i, 'info_json'] = info_json
    df_links_details.loc[i, 'report_link'] = report_link
    
    print(publish_date, title, research_area, authors, publication_type, eyebrow, tags, report_link)
    
    if i % 200 == 0:
        df_links_details.to_csv('urban_links_details.csv', index = False)
import re
import requests
import pandas as pd
from bs4 import BeautifulSoup

df_links = []

PAGE_NUM = 281

for page in range(1, PAGE_NUM):
    URL = f"https://www.urban.org/research?page={page}"
    r = requests.get(URL) 
    soup = BeautifulSoup(r.content, 'html5lib')
    articles = soup.find_all('li', attrs = {'class':'mb-16 md:col-span-2 mb-8 sm:mb-0'})
    for article in articles:
        pattern = 'href="(.+?)"'
        if re.search(pattern, str(article)):
            df_links.append([page, re.search(pattern, str(article)).group(1)])
    print(f'Page {page} done')

df_links = pd.DataFrame(df_links, columns = ['page', 'link'])
df_links.to_csv('urban_links.csv', index = False)
from pydantic import BaseModel, Field
from uuid import UUID, uuid4

class item(BaseModel):
    name : str
    price : float
    item_id : UUID = Field(default_factory = uuid4)


items = []

for i in range(3):
    items.append(item(name = 'test', price = 20))


print(items)
import pandas as pd

# Load the CSV file
input_file = 'input.csv'  # Replace with the name of your CSV file
output_file = 'filtered_output.csv'

# Read the CSV into a DataFrame
df = pd.read_csv(input_file)

# First filter: keep 'orden' groups where any 'item' contains the word "Tip"
filtered_df = df.groupby('orden').filter(lambda x: (x['item'].str.contains('Tip', case=False, na=False).any()))

# Second filter: drop rows where the 'metodo' field is empty
filtered_df = filtered_df[filtered_df['metodo'].notna()]

# Drop the 'item' column
filtered_df = filtered_df.drop(columns=['item'])

# Save the filtered DataFrame to a new CSV file
filtered_df.to_csv(output_file, index=False)

print(f'Filtering complete. File saved as {output_file}')
import os
import requests
import json
from dotenv import load_dotenv

# Load env from .env file.
load_dotenv()

'''
Step 1: Get all env variables.
'''

url_to_get_all_envs = (
    f"https://api.vercel.com/v9/projects/"
    f"{os.getenv('PROJECT_ID')}/env"
)
payload = {
    'decrypt': 'true',
    'slug': os.getenv('SLUG'),
    'source': 'vercel-cli:pull',
    'teamId': os.getenv('TEAM_ID')
}
headers = {
    'Authorization': f"Bearer {os.getenv('VERCEL_TOKEN')}",
    'Content-Type': 'application/json'
}

all_env_vars = requests.get(
    url_to_get_all_envs,
    params=payload,
    headers=headers
)

# Print the response in a safe environment if required.
all_env_vars = all_env_vars.json()['envs']


# Delete items which are not required for update endpoint from payload.
del payload['decrypt']
del payload['source']


for env in all_env_vars:
    # Update only if production is one of the targets of the variable.
    if 'production' in env['target'] and env['type'] != 'sensitive':

        '''
        Check if the target array has development in it,
        because the development target can't have sensitive variables.
        '''

        targets = env['target'][:]
        if 'development' in targets:
            targets.remove('development')

        '''
        Step 2: Get decrypted value of each env variable.
        '''

        url_to_get_or_update_env = (
            f"https://api.vercel.com/v9/projects/"
            f"{os.getenv('PROJECT_ID')}/env/{env['id']}"
        )
        decrypted_env_response = requests.get(
            url_to_get_or_update_env,
            params=payload,
            headers=headers
        )
        decrypted_env = decrypted_env_response.json()
        # Print the response in a safe environment if required.

        '''
        Step 3: Update the variable to be sensitive if it has the production
        target, and also remove the development target if it exists.
        '''

        data = {
            'key': env['key'],
            'target': targets,
            'type': 'sensitive',
            'value': decrypted_env['value']
        }
        data = json.dumps(data)
        response = requests.patch(
            url_to_get_or_update_env,
            params=payload,
            headers=headers,
            data=data
        )
        # Print the response in a safe environment if required.

        '''
        Step 4: Recreate the variable with the development target
        if it existed before the update in the previous step.
        '''

        if 'development' in env['target']:
            data = {
                'key': env['key'],
                'target': ['development'],
                'type': 'encrypted',
                'value': decrypted_env['value']
            }

            data = json.dumps(data)
            url_to_create_env = (
                f"https://api.vercel.com/v9/projects/"
                f"{os.getenv('PROJECT_ID')}/env"
            )
            response = requests.post(
                url_to_create_env,
                params=payload,
                headers=headers,
                data=data
            )

            # Print the response in a safe environment if required.
def factorial(n):
    if n < 1:
        return 1

    return n * factorial(n - 1)

factorial(5)
#cd to the relevant directory
cd /usr/share/applications
#use the nano text editor on the proper file
sudo nano yourapp.desktop
#Make an entry like this in the file and save it
[Desktop Entry]
Name=YourAppName
Exec=/path/to/your/executable
Icon=/path/to/your/icon
Type=Application
Terminal=false
# Analyzing Clicks: A Python Function for Extracting Weekly Stats

users_data = [
    {'day': 0, 'clicks': 100}, {'day': 1, 'clicks': 10},
    {'day': 2, 'clicks': 150}, {'day': 3, 'clicks': 1000},
    {'day': 4, 'clicks': 100}, {'day': 5, 'clicks': 190},
    {'day': 6, 'clicks': 150}, {'day': 7, 'clicks': 1000}
]

def get_stats(day):
    # days are 1-based but list indices are 0-based, so shift by -1 and take 7 entries
    return users_data[day - 1 : day + 6]

print(get_stats(1))
import sqlite3

# Connect to the database
con = sqlite3.connect('database.db')

# Create a cursor object
cursor = con.cursor()

# Define the data to be inserted as a list of tuples
data = [
    ('John Doe', 'johndoe@example.com'),
    ('Jane Smith', 'janesmith@example.com'),
    ('Mike Johnson', 'mikejohnson@example.com')
]

# Use executemany() to insert the data into the "users" table
cursor.executemany("INSERT INTO users (name, email) VALUES (?, ?)", data)

# Commit the changes
con.commit()
import sqlite3

# Connect to the database
con = sqlite3.connect('database.db')

# Create a cursor object
cursor = con.cursor()

# Update user name based on ID
user_id = 2
new_name = "John Doe"

cursor.execute("UPDATE users SET name = ? WHERE id = ?", (new_name, user_id))

# Commit the changes
con.commit()
import sqlite3

# Connect to the database
con = sqlite3.connect('database.db')

# Create a cursor object
cursor = con.cursor()

# Fetch all data from the "users" table
cursor.execute("SELECT * FROM users")
data = cursor.fetchall()

# Print the retrieved data
for row in data:
    print(row)
import sqlite3

# Connect to the database
con = sqlite3.connect('database.db')

# Create a cursor object
cursor = con.cursor()

# Delete user based on ID
user_id = 123

cursor.execute("DELETE FROM users WHERE id = ?", (user_id,))

# Commit the changes
con.commit()
import sqlite3

# Connect to the database
con = sqlite3.connect('database.db')

# Create a cursor object
cursor = con.cursor()

# Fetch data based on the ID filter
cursor.execute("SELECT * FROM users WHERE id = ?", (123,))
data = cursor.fetchall()

# Print the retrieved data
for row in data:
    print(row)

import sqlite3

# Connect to the database
con = sqlite3.connect('database.db')

# Create a cursor object
cursor = con.cursor()

# Insert a new user
cursor.execute("INSERT INTO users (name, email) VALUES (?, ?)", ("John Doe", "johndoe@example.com"))

# Commit the changes
con.commit()

import vertexai
from vertexai.generative_models import GenerativeModel

# TODO(developer): Update and un-comment below line
# project_id = "PROJECT_ID"

vertexai.init(project=project_id, location="us-central1")

model = GenerativeModel(model_name="gemini-1.5-flash-001")

response = model.generate_content(
    "What's a good name for a flower shop that specializes in selling bouquets of dried flowers?"
)

print(response.text)
# Extract email addresses using regexp
import re
email_log = """Email received June 2 from user1@email.com.
Email received June 2 from user2@email.com.
Email rejected June 2 from invalid_email@email.com."""

#\w matches any alphanumeric character (letters, digits, underscore)
print(re.findall(r"\w+@\w+\.\w+", email_log))
from docx import Document
import pandas as pd

df = pd.read_csv('out.csv').to_dict()
word_doc = Document()

table_head = list(df.keys())

table_head_len = len(table_head)
tables_rows_len = len(df['name'])

# create table with an extra row for the header
table = word_doc.add_table(cols = table_head_len, rows = tables_rows_len + 1)

# add the header row
for i in range(table_head_len):
    table.cell(row_idx = 0, col_idx = i).text = table_head[i]


# add rows for name col (data starts at row 1, below the header)
for i in range(tables_rows_len):
    table.cell(row_idx = i + 1, col_idx = 0).text = df['name'][i]


# add rows for age col
for i in range(tables_rows_len):
    table.cell(row_idx = i + 1, col_idx = 1).text = str(df['age'][i])


word_doc.save('word_doc.docx')
D = {num: num ** 2 for num in range(1, 11)}

print(D)
from functools import lru_cache, partial
import timeit

@lru_cache(maxsize = 128)
def fibonacci(n):
    if n == 0:
        return 0
    elif n == 1:
        return 1
    return fibonacci(n - 1) + fibonacci(n - 2)

#the first time
execution_time = timeit.Timer(partial(fibonacci, 50)).timeit(1)
print(execution_time)

#the second time
execution_time = timeit.Timer(partial(fibonacci, 50 )).timeit(1)
print(execution_time)
myData = [
    {'a': 1},
    {'a': 1},
    {'b': 1},
    {'b': 1},
    {'c': 1},
    {'c': 1},
]

# Use a set to track seen tuples of items (key-value pairs as tuples)
seen = set()
unique_data = []

for d in myData:
    # Convert the dictionary's items to a frozenset so it is hashable
    # and can be stored in a set
    dict_items = frozenset(d.items())
    
    # Check if the frozenset of items is already in the set
    if dict_items not in seen:
        # If not seen, add to both set and result list
        seen.add(dict_items)
        unique_data.append(d)

print(unique_data)
import math 

print(math.log10(10))
print(math.log10(50))
print(math.log10(100))
print(math.log10(200))
print(math.log10(1000))
from collections import Counter

data = ['Java', 'Python', 'Java', 'C++', 'Python', 'PHP', 'C++', 'Java', 'PHP', 'Java', 'Python']

c = Counter(data)

print(c.most_common(3))
import tempfile

temp = tempfile.TemporaryFile('w+')
print(temp)

#write to the file
temp.write("Hello world,")
temp.write("\nWelcome to Pynerds.")

#read from the file
temp.seek(0)
for i in temp.read():
    print(i, end = '')

temp.close()
#Example with list
L = [1, 2, 3, 4, 5, 6, 7, 8, 9]
print(ascii(L.__repr__())) #same as print(L)

#Example with tuples
T = ("Pynerds", "Python", "Django", "Flask")
print(T.__repr__()) #same as print(T)

D = {1: "One", 2: "Two", 3: "Three"}

print(D.__repr__())#Same as print(D)
from collections import namedtuple

Point = namedtuple("Point", ['x', 'y'])

p = Point(3, 5)

print(p.x, p.y) #using attributes
print(p[0], p[1]) # using indexes
# Visualize the distribution of sentiment values
import matplotlib.pyplot as plt
import seaborn as sns

# Set the style of the visualization
sns.set(style='whitegrid')

# Create a histogram of the sentiment values
plt.figure(figsize=(10, 6))
sns.histplot(df['sentiment'], bins=30, kde=True, color='blue')
plt.title('Distribution of Sentiment Values')
plt.xlabel('Sentiment')
plt.ylabel('Frequency')
plt.show()
# Perform sentiment analysis on the tweets and plot the results
from textblob import TextBlob
import matplotlib.pyplot as plt
import seaborn as sns

# Function to get the sentiment polarity

def get_sentiment(text):
    return TextBlob(text).sentiment.polarity

# Apply the function to the tweet column

df['sentiment'] = df['tweet'].apply(get_sentiment)

# Plot the sentiment distribution
plt.figure(figsize=(10, 6))
sns.histplot(df['sentiment'], bins=30, kde=True)
plt.title('Sentiment Distribution of Tweets')
plt.xlabel('Sentiment Polarity')
plt.ylabel('Frequency')
plt.show()

print('Sentiment analysis and plot completed.')
# This code will plot the frequency of hate speech over the index of the dataset.

import pandas as pd
import matplotlib.pyplot as plt

# Load the data
df = pd.read_csv('train new.csv')

# Plot the frequency of hate speech over the index of the dataset
plt.figure(figsize=(12, 6))
plt.plot(df.index, df['hate_speech_count'], label='Hate Speech Count')
plt.xlabel('Index (Proxy for Time)')
plt.ylabel('Hate Speech Count')
plt.title('Frequency of Hate Speech Over Time')
plt.legend()
plt.show()
import pandas as pd

dict_data = {
    'users': ['user_123', 'user_abc'],
    'tasks': ['test1', 'test2']
}

df = pd.DataFrame.from_dict(dict_data)
df.to_html('out.html')
import pandas as pd

dict_data = {
    'users': ['user_123', 'user_abc'],
    'tasks': ['test1', 'test2']
}

df = pd.DataFrame.from_dict(dict_data)
df.to_pickle('out.pkl')

# read pkl file
print(pd.read_pickle('out.pkl'))
import pandas as pd

# All arrays must be of the same length
dict_data = {
    'users': [{'name': 'test'}, {'name': '123'}], 
    'tasks': [{'name': 'test123'}, {'name': '45456'}]
}

df = pd.DataFrame.from_dict(dict_data)
df.to_json('out.json', indent = 4)
from random import randint
from faker import Faker
from datetime import date
import pandas as pd

f = Faker()

s_date = date(2018, 5, 1)
e_date = date(2018, 5, 30)

dict_data = {'date': [], 'email': [], 'money': []}

for _date in pd.date_range(start = s_date, end = e_date):
    dict_data['date'].append(_date)
    dict_data['email'].append(f.email())
    dict_data['money'].append(randint(1, 100) * 0.99)

df = pd.DataFrame.from_dict(dict_data)
df.to_csv('out.csv', index = 0)
#import the module
import asyncio

#define asynchronous tasks
async def task1():
    #prints even numbers from 0 to 9
    for i in range(10):
       if i%2 ==0:
           print(i)
           await asyncio.sleep(0.0001)#cause a small delay

async def task2():
     #prints odd numbers from 0 to 9
     for i in range(10):
         if i%2 == 1:
             print(i)
             await asyncio.sleep(0.0001) # cause a small delay

async def main():
    print('Started:')
    await asyncio.gather(task1(), task2())
    print('Finished!')

asyncio.run(main())
#import the sqlite3 module
import sqlite3

#connect to an in-memory database
with sqlite3.connect(':memory:') as conn:
  #create a table
  conn.execute('''
    CREATE TABLE point(
      id INTEGER PRIMARY KEY AUTOINCREMENT,
      x INTEGER,
      y INTEGER
      )
    ''')

  #populate table
  conn.executescript('''
    INSERT INTO point (x, y) VALUES (3, 4);
    INSERT INTO point (x, y) VALUES (2, 3);
    INSERT INTO point (x, y) VALUES (-2, 5);
    ''')

  #retrieve data
  cursor = conn.cursor()
  cursor.execute('SELECT x, y FROM point;')

  print(cursor.fetchall())
  cursor.close()
#import the namedtuple factory method
from collections import namedtuple

#create a namedtuple to represent a Point
Point = namedtuple('Point', ['x', 'y'])

#Create objects from the namedtuple 'Point' 
p = Point(3, 4)

#Access the elements using the numeric indices
print(p[0], p[1])

#Access the elements using their names
print(p.x, p.y)
#a list of strings
mylist = ['Python', 'Java', 'C++', 'Ruby']

#remove all elements from the list
mylist.clear()

#the list is now empty
print(mylist)
 
nums = [1, 2, 3, 4, 5]
print('before: ')
print(nums)

#call the reverse method()
nums.reverse()

print("after: ")
print(nums)
 
class Point:
    __slots__ = ('x', 'y')
    def __init__(self, x, y):
        self.x = x
        self.y = y

p = Point(3, 4)
print(f'x: {p.x}')
print(f'y: {p.y}')
 
countries = ['Japan', 'Russia', 'Canada', 'Russia']

#remove() deletes only the first matching occurrence
countries.remove('Russia')

print(countries)
 
A = {'a', 'b', 'c', 'd', 'e', 'f'}
B = {'b', 'c', 'd'}

#get the difference
diff = A.difference(B)
print(diff)
#Define the sets
A = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
B = {2, 3, 5, 7}

#get the difference using the - operator
diff = A - B
print(diff)
from human_readable.files import file_size
import os

print(file_size(value = os.stat('test.txt').st_size))
def divide(a, b):
    assert b != 0
    print(f"{a} / {b} = {a/ b}")

divide(10, 2)
divide(10, 0)  # b == 0 fails the assert and raises AssertionError
#the break statement, causes the loop to immediately terminate.

seq = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]

for i in seq:
    #define the condition
    if i == 5:
        break
    print(i)

print('after the loop')
#import the random module
import random

#The sequence to choose from
L = ['Python', 'Javascript', 'Ruby', 'Java', 'PHP', 'C++', 'C#', 'HTML', 'CSS']

#Choose random items from the list
print(random.choices(L, k = 3))
#an exception to be raised by an empty stack
class EmptyStack(Exception):
    pass

class Stack:
    
    def __init__(self):
        self._items = [] #non-public list for storing stack elements

    def __len__(self):
        return len(self._items)

    def isEmpty(self):
        return len(self) == 0 

    def push(self, e):
        self._items.append(e) #add the element at the end of the list

    def top(self):
        if self.isEmpty():
            raise EmptyStack("Stack Is Empty.")
        return self._items[-1] #Return the last element in list

    def pop(self):
        if self.isEmpty():
            raise EmptyStack("Stack Is Empty.")
        return self._items.pop() #pop the last item from the list

# test the stack
S = Stack()

S.push("A")
S.push("B")
S.push("C")
print(S.pop())
print(S.pop())
print(S.pop())
print(S.pop())  # the stack is now empty, so this raises EmptyStack
#merge sort

#the merge algorithm
def merge(L1, L2, L):
  i = 0
  j = 0
  while i+j < len(L):
    if j == len(L2) or (i < len(L1) and L1[i] < L2[j]):
       L[i+j] = L1[i]
       i += 1
    else:
       L[i+j] = L2[j]
       j += 1

#main function
def merge_sort(L):

  n = len(L)
  if n < 2:
     return # list is already sorted

  # divide
  mid = n // 2 #midpoint
  L1 = L[0:mid] # the first half
  L2 = L[mid:n] # the second half
 
  # conquer with recursion
  merge_sort(L1) # sort first sub-list
  merge_sort(L2) # sort second sub-list

  # merge result
  merge(L1, L2, L) 

#example
L = [7, 5, 1, 4, 2, 8, 0, 9, 3, 6]
print('Before: ', L)
merge_sort(L)
print('After: ', L)
def insertion_sort(lst):
   for i in range(1, len(lst)):
      j = i
      while (j > 0) and lst[j-1] > lst[j]:
         lst[j-1], lst[j] = lst[j], lst[j-1] #swap the elements
         j -=1

#sort a list
L = [5, 2, 9, 3, 6, 1, 0, 7, 4, 8]
insertion_sort(L)
print("The sorted list is: ", L)
#descending selection sort
def selection_sort(lst):
  for i in range(len(lst)):
    largest = i

    for j in range(i + 1, len(lst)):
      if lst[j] > lst[largest]:
        largest = j

    lst[i], lst[largest] = lst[largest], lst[i] #swap the elements

#sort a list
L = [99, 9, 0, 2, 1, 0, 1, 100, -2, 8, 7, 4, 3, 2]
selection_sort(L)

print("The sorted list is: ", L)
#Optimized bubble sort
def bubble_sort(lst):

  swapped = True
  while swapped == True:
    swapped = False
    for j in range(len(lst)-1):  
      
      if(lst[j]>lst[j+1]):
        lst[j], lst[j + 1] = lst[j + 1], lst[j] #swap the elements
        swapped = True

#sort a list
L = [9, 0, 2, 1, 0, 1, 100, -2, 8, 7, 4, 3, 2]

bubble_sort(L)  
print("The sorted list is: ", L)
#import the random module
import random

#The list to shuffle
my_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]

#shuffle the list in-place
random.shuffle(my_list)

print(my_list)
#import the random module
import random

#The sequence to sample
my_seq = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]

#get a sample of 4 elements
print(random.sample(my_seq, 4))
myset = {1, 3, 5, 7, 9, 11}

#remove an arbitrary element
print(myset.pop())
print(myset.pop())
print(myset.pop())
print(myset.pop())

print(myset)
#unlike set.remove(), set.discard() does not raise a KeyError if the target element does not exist; it simply leaves the set unchanged.

myset = {1, 3, 4, 5, 7, 8, 9}

#remove elements
myset.discard(4)
myset.discard(8)

print(myset)
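
For contrast, a minimal sketch showing that set.remove() does raise a KeyError for a missing element:

myset = {1, 3, 5}

myset.remove(3)      #fine, 3 is present
try:
    myset.remove(8)  #8 is absent, so remove() raises KeyError
except KeyError:
    print('8 is not in the set')

print(myset)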
myset = {'Python', 'Java', 'C++'}

#add elements to the set
myset.add('PHP')
myset.add('HTML')
myset.add('Javascript')

print(myset)
import functools

def power(a, b):
    return a ** b

square = functools.partial(power, b = 2)
cube = functools.partial(power, b = 3)

print(square(5))
print(cube(5))
#finally block always gets executed
#else block is only executed if no exception was raised

import math

try:
   print("Hello, World!")
   print(10/ 5)
   print(math.sqrt(-9))

except ValueError:
   print("a ValueError has occurred")

except IndexError:
    print("IndexError has occurred")

else:
    print("No Exception was raised.")
finally:
    print("This block always gets executed")
#The else block only gets executed if the try block terminates successfully, i.e. no exception was raised inside the block.


import math

try:
   print("Hello, World!")
   print(10/ 2)
   print(math.sqrt(9))

except ValueError:
   print("a valu error has occurred")

except IndexError:
    print("IndexError has occurred")

else:
    print("No Exception was raised.")
import math

try:
   
   #This will raise a TypeError
   print( 3 + 'hello' )

   #This will raise a ValueError
   math.sqrt(-16)

   #This will raise a NameError
   print(a)
   
   #This will raise a ZeroDivisionError
   print(10 / 0)

except NameError:
    print("A NameError occurred")

except ValueError: 
    print("A ValueError occurred")

except ZeroDivisionError:
    print("A ZeroDivisionError occurred")

except TypeError:
    print("A TypeError occurred")
try:
   
    print(1 / 0)

except ZeroDivisionError:
    print("You divided a number by 0.")
#a list of strings
languages = ['Python', 'Java']

#append an element
languages.append('PHP')
print(languages)

languages.append('C++')
print(languages)

languages.append('Javascript')
print(languages)
def add(a, b):
    print(f'{a} + {b} = {a + b}')

a = 10
b = 20
add(a, b)
#itertools.count() creates an iterator that yields an infinite sequence of numbers from a starting point

from itertools import count

seq = count(0, 2) #an infinite sequence of even numbers

print(next(seq))
print(next(seq))
print(next(seq))
print(next(seq))
print(next(seq))
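
Since count() never terminates, a plain for loop over it would run forever; a minimal sketch using itertools.islice to take a bounded slice:

from itertools import count, islice

evens = count(0, 2)

#islice caps the infinite iterator at its first 5 values
print(list(islice(evens, 5)))  # [0, 2, 4, 6, 8]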
data = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]

def is_odd(num):
   return num % 2 == 1

odds = filter(is_odd, data)

print(*odds)
from itertools import repeat

#repeat a string 10 times
r = repeat('Spam', 10)
print(r)

for i in r:
    print(i)
import pause

def test():
    print('done')

def run_after(func, amount_of_time):
    pause.milliseconds(amount_of_time)
    func()

run_after(test, 10000)
from textblob import TextBlob

text = 'how are you'

blob = TextBlob(text)

if blob.polarity > 0:
    print('Positive')
elif blob.polarity < 0:
    print('Negative')
else:
    print('Neutral')
users_db = [
    {'id': '2313', 'coins': 50},
    {'id': '8213', 'coins': 20},
    {'id': '9989', 'coins': 0},
    {'id': '4654', 'coins': 1},
    {'id': '7897', 'coins': 3},
    {'id': '9898', 'coins': 60}
]

users_db.sort(key = lambda user: user['coins'])
print(users_db)
users_db = [
    {'id': 'ax123', 'money': 5},
    {'id': 'z5541', 'money': 0}
]

def get_user_by_id(user_id):
    for user in users_db:
        if user['id'] == user_id:
            return user
        
def send_money(from_id, to_id, amount):
    from_user_a = get_user_by_id(from_id)
    to_user_b = get_user_by_id(to_id)

    if from_user_a and to_user_b:
        if from_user_a['money'] >= amount:
            from_user_a['money'] -= amount
            to_user_b['money'] += amount
        else:
            print('the amount is larger than your balance')
        
    

send_money('ax123', 'z5541', 2)
send_money('ax123', 'z5541', 2)
print(users_db)
from faker import Faker
from random import randint, choice

f = Faker()
payment_methods = ['paypal', 'bitcoin']

for _ in range(20):
    money = randint(1, 100) * 0.99
    payment_method = choice(payment_methods)
    user_email = "*****" + f.email()[6:]
    print(f'{money}$ --- {user_email} --- {payment_method}')
#Make directory in files, then right-click the directory and select "Open in Terminal"
#All of the following "sudo" prefixes can be skipped by obtaining root permission in the opened terminal
sudo apt -y install git
sudo apt -y install tpm2-tools

#Create script and run it in the current directory for software dependencies:
sudo apt -y install \
autoconf-archive \
libcmocka0 \
libcmocka-dev \
procps \
iproute2 \
build-essential \
git \
pkg-config \
gcc \
libtool \
automake \
libssl-dev \
uthash-dev \
autoconf \
doxygen \
libjson-c-dev \
libini-config-dev \
libcurl4-openssl-dev \
uuid-dev \
libltdl-dev \
libusb-1.0-0-dev \
libftdi-dev

#Copy & Paste these next commands in the terminal tab you are currently working in:

#Getting source, compiling and installing tpm2-tss:
git clone https://github.com/tpm2-software/tpm2-tss.git
cd tpm2-tss
./bootstrap
./configure  --with-udevrulesdir=/etc/udev/rules.d/
make -j`nproc`
sudo make install
sudo ldconfig
sudo udevadm control --reload-rules && sudo udevadm trigger
sudo pkill -HUP dbus-daemon
cd ..

#Install prerequisites for tpm2-abrmd:
sudo apt -y install libglib2.0-dev

#Getting source, compiling and installing tpm2-abrmd:
git clone https://github.com/tpm2-software/tpm2-abrmd.git
cd tpm2-abrmd
./bootstrap
./configure --with-dbuspolicydir=/etc/dbus-1/system.d
make -j`nproc`
sudo make install
sudo ldconfig
cd ..

#Source, compiling and installing tpm2-tools:
git clone https://github.com/tpm2-software/tpm2-tools.git
cd tpm2-tools
./bootstrap
./configure
make -j`nproc`
sudo make install
sudo ldconfig

#Should be able to use TPM device to generate a random number:
sudo tpm2_getrandom --hex 8
# Create a virtual environment
python -m venv venv  # Windows
python3 -m venv venv  # MacOS/Linux

# Activate the virtual environment
venv\Scripts\activate  # Windows
source venv/bin/activate  # MacOS/Linux

# Install required packages
pip install kivy

# Create requirements.txt file
pip freeze > requirements.txt

# Run a test script
python test.py

--------------------------
in case of a POLICY error --
PS D:\Coding Stuff, Editing\Visual Studio Python Codings\MyProjects\BurgerBillingSystem> .\burger-billing-system-env\Scripts\activate     
.\burger-billing-system-env\Scripts\activate : File D:\Coding Stuff, Editing\Visual Studio Python 
Codings\MyProjects\BurgerBillingSystem\burger-billing-system-env\Scripts\Activate.ps1 cannot be loaded because running scripts is disabled on this system.     
For more information, see about_Execution_Policies at https:/go.microsoft.com/fwlink/?LinkID=135170.
At line:1 char:1
+ .\burger-billing-system-env\Scripts\activate
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    + CategoryInfo          : SecurityError: (:) [], PSSecurityException
    + FullyQualifiedErrorId : UnauthorizedAccess

The error message you are encountering indicates that running scripts is disabled by your system's PowerShell execution policy. This is a security measure to prevent malicious scripts from running.

Here is how to fix this and activate your virtual environment:

Change the Execution Policy (For Administrators Only):

Open PowerShell as Administrator: right-click the PowerShell icon and select "Run as administrator".

Type the following command and press Enter:
"PowerShell Set-ExecutionPolicy RemoteSigned -Scope CurrentUser"

Activate the Virtual Environment (After Policy Change):
Navigate to your project directory using the cd command in PowerShell.
Run the activation command again:
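.\burger-billing-system-env\Scripts\activate

(Set-ExecutionPolicy RemoteSigned only needs to be set once; RemoteSigned lets locally created scripts such as Activate.ps1 run while still requiring a signature on downloaded scripts.)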

---------------------------------
For Anaconda Jupyter Notebook -
# venv ships with Python 3.3+ -- no pip install is needed
python -m venv env_test
.\env_test\Scripts\activate
pip install ipykernel
python -m ipykernel install --user --name kernel_ai_cnn_1
# Quotes API
from fastapi import FastAPI, Request
from slowapi.util import get_ipaddr
from slowapi.errors import RateLimitExceeded
from slowapi import Limiter, _rate_limit_exceeded_handler

import json

def get_quotes():
    return json.load(open('quotes.json', 'r'))

app = FastAPI()
limiter = Limiter(key_func = get_ipaddr)
app.state.limiter = limiter
app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)

@app.get('/')
@limiter.limit('10/minute')
def home(request: Request):
    return {'msg': 'Hello, World'}


@app.get('/quotes')
@limiter.limit('10/minute')
def quote(request: Request):
    return get_quotes()
import tensorflow as tf

# Define the optimizer
optimizer = tf.keras.optimizers.Adam()

# Define your loss function (e.g., Mean Squared Error)
mse_loss_fn = tf.keras.losses.MeanSquaredError()

# Compile the model
cvae_model.compile(optimizer=optimizer, loss=mse_loss_fn)

# Train the model
cvae_model.fit([data, clusters], data, epochs=10, batch_size=32)
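The snippet above assumes cvae_model, data, and clusters are defined earlier. A minimal stand-in (purely illustrative, not the author's actual CVAE) that makes the compile/fit lines runnable:

import numpy as np

# Toy stand-ins for the names the snippet assumes (shapes are arbitrary)
data = np.random.rand(100, 8).astype('float32')                  # 100 samples, 8 features
clusters = np.random.randint(0, 4, (100, 1)).astype('float32')   # one cluster id per sample

# A trivial two-input model with the same call signature as cvae_model
data_in = tf.keras.Input(shape=(8,))
cluster_in = tf.keras.Input(shape=(1,))
x = tf.keras.layers.Concatenate()([data_in, cluster_in])
x = tf.keras.layers.Dense(16, activation='relu')(x)
out = tf.keras.layers.Dense(8)(x)    # reconstructs the 8 input features
cvae_model = tf.keras.Model([data_in, cluster_in], out)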
from fastapi import FastAPI


db = [
 {'api_key': '437af89f-38ba-44f1-9eda-924f370193d4',
  'tokens': 3,
  'user_id': '6ddf9779-4be7-449c-a70d-51e81c19dd0b'},
 {'api_key': '48762b59-e968-459e-b28d-50e7a1ad37a4', 
  'tokens': 3,
  'user_id': 'daf36850-68ab-40fc-86af-e6093b6797dd'},
 {'api_key': 'dabbd875-acbd-4b98-a47f-a526075d41b4', 
  'tokens': 3,
  'user_id': '5750957f-a246-4290-a35e-2dec83bcfcea'},
 {'api_key': '604d5fe2-f7ce-4560-8137-eeae32091738',
  'tokens': 3,
  'user_id': '2d4ac462-b118-48d7-897e-163c2e8327aa'},
 {'api_key': '8540cbef-de3e-4cbd-83bc-f66297a7f407',
  'tokens': 3,
  'user_id': 'bb435e7e-b5da-4a8c-b860-e43a8e765f67'}
]



app = FastAPI()

def update_tokens_for_user(user_id, api_key):
    for user in db:
        if user['user_id'] == user_id and user['api_key'] == api_key:
            tokens = user['tokens']
            if tokens > 0:
                tokens = tokens - 1
                user['tokens'] = tokens

                return {'tokens': tokens}
            
            else:
                return {'tokens': tokens, 'msg': 'you need to get more tokens'}


@app.get('/test')
def test(user_id : str, api_key: str):
    result = update_tokens_for_user(user_id, api_key)
    if result:
        return result
    else:
        return {'msg': 'api key or user id is not found'}
import pprint

data = {'email': 'test@test.com', 'password': '123'}

print(data, type(data))

# convert dict to str
result = pprint.pformat(data)

print(result, type(result))
from peewee import Model, SqliteDatabase, CharField, IntegerField, UUIDField
from uuid import uuid4
from faker import Faker
from random import randint

fake = Faker()

db = SqliteDatabase('mydb.db')

class User(Model):
    name = CharField(max_length = 25)
    email = CharField(max_length = 25)
    age = IntegerField()
    password = CharField(max_length = 100)
    user_id = UUIDField(primary_key = True, default = uuid4)

    class Meta:
        database = db


db.connect()
db.create_tables([User])

for i in range(20):
    new_user = User.create(**{
        'name': fake.name(),
        'email': fake.email(),
        'age': randint(12, 45),
        'password': fake.password()
    })

    new_user.save()

db.commit()
# Scanning the directory using the os.scandir function
import os

img_dirs = []
for entry in os.scandir(path_to_data):
    if entry.is_dir():
        img_dirs.append(entry.path)
# Evaluating the best model on the test set
best_model = grid_search.best_estimator_
test_score = best_model.score(x_test, y_test)
print('Test set accuracy:', test_score)
pipe = make_pipeline(MinMaxScaler(), PCA(n_components=100), SVC())
param_grid = {
    'svc__kernel': ['rbf', 'linear'],
    'svc__C': [1, 10],
    'svc__gamma': [0.01, 0.1]
}
# Training the initial SVC model
pipe = Pipeline([('scaler', StandardScaler()), ('svc', SVC(kernel = 'rbf', C = 10))])
pipe.fit(x_train, y_train)
pipe.score(x_test, y_test)
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state = 0)
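These scikit-learn fragments are notebook cells shown out of order; they assume imports along these lines (inferred from the names used, e.g. grid_search implies a GridSearchCV fitted elsewhere):

from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
from sklearn.svm import SVC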
# Creating a stacked version of the image - original and distorted one
x, y = [], []
for disease_name, training_files in oral_disease_file_names_dict.items():
    for training_image in training_files:
        img = cv2.imread(training_image)
        scaled_orig_img = cv2.resize(img, (40, 30))
        img_har = w2d(img, 'db1', 5)
        scaled_img_har = cv2.resize(img_har, (40, 30))
        stacked_img = np.vstack((scaled_orig_img.reshape(40*30*3, 1), scaled_img_har.reshape(40*30, 1)))
        x.append(stacked_img)
        y.append(class_dict[disease_name])
# Transforming the image to improve detection (2-D wavelet transform)
import cv2
import numpy as np
import pywt

def w2d(img, mode='haar', level=1):
    imArray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    coeffs = pywt.wavedec2(imArray, mode, level=level)
    coeffs[0] *= 0
    imArray_H = pywt.waverec2(coeffs, mode)
    imArray_H = np.clip(imArray_H, 0, 255)
    imArray_H = np.uint8(imArray_H)
    return imArray_H
# Creating the labels based on directories
class_dict = {}
count = 0
for disease_name in oral_disease_file_names_dict.keys():
    class_dict[disease_name] = count
    count = count + 1
class_dict
# Collecting file directories into a dictionary
oral_disease_file_names_dict = {}

for img_dir in img_dirs:
    disease_name = img_dir.split('/')[-1]
    oral_disease_file_names_dict[disease_name] = []
    for root, dirs, files in os.walk(img_dir):
        for file in files:
            if file.endswith(('.jpg', '.jpeg', '.png', '.bmp', '.gif')): 
                file_path = os.path.join(root, file)
                oral_disease_file_names_dict[disease_name].append(file_path)
# Scanning the directory using the os.scandir function to collect image folders
import os

img_dirs = []
for entry in os.scandir(path_to_data):
    if entry.is_dir():
        img_dirs.append(entry.path)
# Loading the smile cascade classifier
smile_cascade = cv2.CascadeClassifier('kaggle/input/haar-cascades-for-face-detection/haarcascade_smile.xml')
smile = smile_cascade.detectMultiScale(gray, scaleFactor = 1.1, minNeighbors = 5, minSize = (30, 30))
for (x, y, w, h) in smile:
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
output_path = 'kaggle/working/image_with_teeth.jpg'
cv2.imwrite(output_path, img)
print('Image with detected teeth saved to:', output_path)
img = cv2.imread('kaggle/input/oral-diseases/Calculus/Calculus/(1).jpg')
img.shape
plt.imshow(img)
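Note: these cells appear in reverse run order, and detectMultiScale above assumes a grayscale copy of the image named gray, presumably created along these lines:

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # Haar cascades operate on grayscale images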
# Cleaning the index values and aggregating the counts of ingredients

top_ingredient_counts = {}

for ingredient, count in top_ingredient_series_sorted.items():
    top_ingredient_cleaned = ingredient.replace('[', '').replace(']', '').replace("'", "")
    if top_ingredient_cleaned in top_ingredient_counts:
        top_ingredient_counts[top_ingredient_cleaned] += count
    else:
        top_ingredient_counts[top_ingredient_cleaned] = count
top_ingredient_series_cleaned = pd.Series(top_ingredient_counts, dtype = int)
print(top_ingredient_series_cleaned)
# Iterating over rows and counting the occurrences of ingredients listed in the 'TopNotes' column 
top_ingredient_counts = {}

for index, row in df.iterrows():
    top_notes_list = row['TopNotes']
    if isinstance(top_notes_list, list):
        for ingredient in top_notes_list:
            top_ingredient_counts[ingredient] = top_ingredient_counts.get(ingredient, 0) + 1
    elif isinstance(top_notes_list, str):
        ingredients = [x.strip() for x in top_notes_list.split(',')]
        for ingredient in ingredients:
            top_ingredient_counts[ingredient] = top_ingredient_counts.get(ingredient, 0) + 1
top_ingredient_series = pd.Series(top_ingredient_counts, dtype=int)
print(top_ingredient_series)
# Saving the data to the CSV file

with open(csv_file, 'w', newline='', encoding='utf-8') as csvfile:
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    writer.writeheader()
    for data in all_perfume_data:
        writer.writerow(data)

print('Data saved to:', csv_file)
# Collecting the data from URLs we have gathered

all_perfume_data = []

for link in perfume_links:
    print('Scraping link:', link)
    try:
        perfume_data_new = scrape_perfume_data(link)
        all_perfume_data.append(perfume_data_new)
        print(f'Data scraped successfully for {perfume_data_new["PerfumeName"]}')
    except Exception as e:
        print(f'Error scraping data for {link}: {e}')
# Collecting the urls we want to scrape

perfume_links = scrape_all_pages('https://www.parfumo.com/Recently_added?current_page=', 50)
print(perfume_links)
# This method parses through every page within the new release directory (there's a total of 50 of them)

def scrape_all_pages(base_url, total_pages):
    all_links = []
    end_url = '&'

    for page_number in range(1, total_pages + 1):
        page_url = f"{base_url}{page_number}{end_url}"
        try:
            links_on_page = scrape_perfume_links(page_url)
            all_links.extend(links_on_page)
            print(f"Scraped links from page {page_number}")
        except requests.HTTPError as e:
            print(f"Error scraping page {page_number}: {e}")
        time.sleep(1)
    return all_links
# Defining a method for collecting the urls we want to scrape
def scrape_perfume_links(url):
    headers = {
        "User-Agent": "Insert user agent",
        "Referer": "https://www.parfumo.com/",
        "Accept-Language": "en-US,en;q=0.9"
    }
    session = requests.Session()
    page = session.get(url, headers=headers)
    page.raise_for_status()
    soup3 = BeautifulSoup(page.content, 'html.parser')
    
    perfumes = soup3.find_all('div', class_='name')
    perfume_links = []
    for perfume in perfumes:
        link = perfume.find('a')['href']
        perfume_links.append(link)

    return perfume_links
# Defining a method for scraping the web page
# (the scraping cells assume the usual imports)
import csv
import time
import datetime
import requests
import pandas as pd
from bs4 import BeautifulSoup

def scrape_perfume_data(url):
    headers = {
        "User-Agent": "Insert user agent"
    }
    page = requests.get(url, headers=headers)
    page.raise_for_status()
    soup = BeautifulSoup(page.content, 'html.parser')
    
    perfume_name = brand_name = release_year = overall_rating = rating_count = perfumer = None
    top_note_list = heart_note_list = base_note_list = None
    
    try:
        perfume_name = soup.find('h1', class_='p_name_h1', itemprop='name').get_text().strip().split('\n')[0].strip()
    except AttributeError:
        pass
    
    try:
        brand_span = soup.find('span', itemprop='brand')
        brand_name = brand_span.find('span', itemprop='name').get_text().strip()
    except AttributeError:
        pass
    
    try:
        year = soup.find('a', href=lambda href: href and 'Release_Years' in href)
        release_year = year.get_text().strip()
    except AttributeError:
        pass
    
    try:
        overall_rating = soup.find('span', itemprop='ratingValue').get_text().strip()
    except AttributeError:
        pass
    
    try:
        rating_count = soup.find('span', itemprop='ratingCount').get_text().strip()
    except AttributeError:
        pass
    
    try:
        perfumer = soup.find('div', {'class': 'w-100 mt-0-5 mb-3'}).get_text().strip()
    except AttributeError:
        pass
    
    try:
        top_notes = soup.find('div', class_='pyramid_block nb_t w-100 mt-2')
        top_note_list = [span.get_text(strip=True) for span in top_notes.find_all('span', class_='clickable_note_img')]
    except AttributeError:
        pass
    
    try:
        heart_notes = soup.find('div', class_='pyramid_block nb_m w-100 mt-2')
        heart_note_list = [span.get_text(strip=True) for span in heart_notes.find_all('span', class_='clickable_note_img')]
    except AttributeError:
        pass
    
    try:
        base_notes = soup.find('div', class_='pyramid_block nb_b w-100 mt-2')
        base_note_list = [span.get_text(strip=True) for span in base_notes.find_all('span', class_='clickable_note_img')]
    except AttributeError:
        pass
   
    
    scraping_date = datetime.date.today()
    
    return {
        'PerfumeName': perfume_name, 
        'Brand': brand_name, 
        'ReleaseYear': release_year, 
        'OverallRating': overall_rating, 
        'RatingCount': rating_count, 
        'Perfumer': perfumer, 
        'TopNotes': top_note_list,
        'HeartNotes': heart_note_list,
        'BaseNotes': base_note_list,
    }
# Showcasing the data for cluster 0

cluster_0_df = df_trimmed[df_trimmed['ClustersK'] == 0]

variable_names = [col for col in cluster_0_df.columns if col != 'ClustersK']
colors = ['#2e2237']
n_variables = len(variable_names)
n_rows = (n_variables - 1) // 5 + 1
fig, axes = plt.subplots(n_rows, 5, figsize=(15, 3 * n_rows), squeeze=False)

for i, variable in enumerate(variable_names):
    row = i // 5
    col = i % 5
    ax = axes[row, col]
    cluster_0_df[variable].plot.hist(ax=ax, bins=20, color=colors)
    ax.set_title(f'Distribution of {variable}')
    ax.set_xlabel(variable)
    ax.set_ylabel('Frequency')

for i in range(n_variables, n_rows * 5):
    fig.delaxes(axes.flatten()[i])
plt.tight_layout()
plt.show()
# Defining the number of clusters (K)
num_clusters = 4

# Initialising and fitting the KMeans model
kmeans = KMeans(n_clusters = num_clusters, n_init = 10)
cluster_labels = kmeans.fit_predict(dim_ds)
df_trimmed['ClustersK'] = cluster_labels
# Initiating PCA to reduce dimensions aka features to 3 and transforming the data
dim_ds = pd.DataFrame(PCA(n_components = 3).fit_transform(df_scaled), columns = ['column1', 'column2', 'column3'])
dim_ds.describe().T
# Applying z-score normalisation
scaler = StandardScaler()

# Fitting the scaler to the data and transforming it
df_scaled = pd.DataFrame(scaler.fit_transform(df_trimmed), columns = df_trimmed.columns)
# Defining a spend column based on individual categories
df['TotalSpend'] = df['Wines'] + df['Fruits'] + df['Meat'] + df['Fish'] + df['Sweets'] + df['Gold']
# Fleshing out the data around parenthood
df['N_minors_home'] = df['Kidhome'] + df['Teenhome']
df['Parent'] = np.where(df.N_minors_home > 0, 1, 0)
# Cleaning the Education column
df['Education'] = df['Education'].replace({'Basic':'Sec school',
                                           '2n Cycle':'Masters', 
                                           'Graduation':'Bachelors', 
                                           'Master':'Masters', 
                                           'PhD':'Doctorate'})
# Cleaning the Marital status column
df['Marital_Status'] = df['Marital_Status'].replace({'Alone':'Single','Widow':'Widowed', 'Together':'Dating'})
df = df[~df['Marital_Status'].isin(['YOLO', 'Absurd'])]
# Exploring the unique values in the Marital status column
df['Marital_Status'].unique()
# Removing outliers
df = df[df['Income'] <= 200000]
df = df.dropna(subset=['Income'])
# Exploring the outliers for age 
customers_over_100 = df[df['Age'] > 100]
num_customers_over_100 = len(customers_over_100)

print('N of customers over the age of 100:', num_customers_over_100)
# Creating an age column based on the year of birth
current_year = dt.datetime.now().year
df['Age'] = current_year - df['Year_Birth']
myLists = [
    [1, 2, 3],
    [4, 5, 6],
    [7, 8, 9]
]

def to_one_list(myLists):
    myList = []
    for _list in myLists:
        for l in _list:
            myList.append(l)

    return myList

print(to_one_list(myLists = myLists))
# Grouping data by a column and calculating aggregate functions
grouped_df = df.groupBy('column').agg({'column2': 'sum', 'column3': 'avg'})

# Applying aggregate functions to entire DataFrame
agg_df = df.agg({'column1': 'max', 'column2': 'min'})
from random import randint
from PIL import Image
from os import listdir, path, mkdir
from uuid import uuid4
import moviepy.editor as mp

def generate_video(folder):
    if not path.exists(folder):
        mkdir(folder)

    # generate video frames
    for i in range(20):
        img = Image.new(
            mode = 'RGB',
            size = (500, 250),
            color = (0, 0, 0)
        )

        for x in range(img.size[0]):
            for y in range(img.size[1]):
                r, g, b = randint(0, 255), randint(0, 255), randint(0, 255)
                img.putpixel((x, y), value = (r, g, b))

        img.save(f'{folder}/{i}.png')
    
    # create video (sort frames numerically so that 10.png does not come before 2.png)
    images = sorted((f'{folder}/{img}' for img in listdir(folder)), key=lambda p: int(path.basename(p).split('.')[0]))
    clips = [mp.ImageClip(img).set_duration(0.08) for img in images]
    video = mp.concatenate_videoclips(clips, method = 'compose')
    video.write_videofile(f'{str(uuid4())}.mp4', fps = 24)


generate_video('test')
from uuid import uuid4
import random

extensions = ['zip', 'exe', 'txt', 'py', 'mp4', 'mp3', 'png']

def generate_files(files_len, folder):
    for i in range(files_len):
        file_name = f'{folder}/{str(uuid4())}.{random.choice(extensions)}'
        file_size = (1024 * 1024) * random.randint(1, 10)  # 1-10 MB of filler bytes
        with open(file_name, 'wb') as file:
            file.write(b'\b' * file_size)

generate_files(20, 'test')
from fastapi import FastAPI, Request
from slowapi.util import get_remote_address
import uvicorn


app = FastAPI()

@app.get('/')
def home(req : Request):
    return {'ip': get_remote_address(request=req)}
n=int(input('total number of items in list'))
L=[]
for i in range(n):
    item=int(input('enter the item'))
    L.append(item)
n=int(input('total number of items in list'))
L=[]
for i in range(n):
    item=int(input('enter the item'))
    L.append(item)



for i in range(1, n):
    k = L[i]
    j = i - 1
    # shift larger items one position to the right
    while j >= 0 and k < L[j]:
        L[j + 1] = L[j]
        j = j - 1
    # drop the saved item into its sorted position
    L[j + 1] = k
print('sorted list:\n', L)

import vertexai

from vertexai.generative_models import GenerativeModel, Part

# TODO(developer): Update and un-comment below line
# project_id = "PROJECT_ID"

vertexai.init(project=project_id, location="us-central1")

model = GenerativeModel(model_name="gemini-1.0-pro-vision-001")

response = model.generate_content(
    [
        Part.from_uri(
            "gs://cloud-samples-data/generative-ai/image/scones.jpg",
            mime_type="image/jpeg",
        ),
        "What is shown in this image?",
    ]
)

print(response.text)
from peewee import SqliteDatabase, Model, UUIDField, CharField, IntegerField
from uuid import uuid4

db = SqliteDatabase('mydb.db')

# Create user Model
class User(Model):
    userId = UUIDField(primary_key = True, default = uuid4)
    name = CharField(max_length = 10)
    age = IntegerField()

    class Meta:
        database = db

# Connect to db
db.connect()
db.create_tables([User])
db.commit()

# Add new user to database
user = User.create(name = 'user123', age = 20)
user.save()

# Get all users
for user in User.select():
    print(user.name, user.userId)
from selenium import webdriver
from selenium.webdriver.common.by import By
from dotenv import load_dotenv

# https://pypi.org/project/2captcha-python/
from twocaptcha import TwoCaptcha


import time
import sys
import os

# https://github.com/2captcha/2captcha-python

sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))


url = 'https://accounts.hcaptcha.com/demo'

driver = webdriver.Chrome()

driver.get(url=url)

time.sleep(2)

site_key = driver.find_element(
    by = By.XPATH, 
    value = '//*[@id="hcaptcha-demo"]').get_attribute('data-sitekey')




load_dotenv()

# create account in 2captcha from here : https://bit.ly/3MkkuPJ
# make deposit at least 3$
# https://2captcha.com/pay

# create env file or you can put your API key direct in TwoCaptcha function


api_key = os.getenv('APIKEY_2CAPTCHA', 'YOUR_API_KEY')

solver = TwoCaptcha(api_key)

try:
    result = solver.hcaptcha(
        sitekey=site_key,
        url=url,
    )

    code = result['code']
    print(code)

    

    driver.execute_script(f"document.getElementsByName('h-captcha-response')[0].innerHTML = '{code}'")
    
    # submit
    driver.find_element(by = By.ID, value = 'hcaptcha-demo-submit').click()
    

except Exception as e:
    sys.exit(e)



input()
# Create account from here : http://adfoc.us/831311103357835
# Go to https://adfoc.us/tools/api 
# http://adfoc.us/api/?key={}&url={}


import requests, os
from dotenv import load_dotenv

load_dotenv()

url = ''
api_key = os.getenv('api_key')

endpoint = f'http://adfoc.us/api/?key={api_key}&url={url}'

res = requests.get(url = endpoint)
print(res.text)
from PIL import Image, ImageDraw, ImageOps

def convert_image_to_circle(image_path):
    my_image = Image.open(image_path)
    # create mask 
    mask = Image.new('L', my_image.size)
    draw = ImageDraw.Draw(mask)
    # draw white circle
    draw.ellipse((0, 0) + mask.size, fill = 255)

    # create output image
    output = ImageOps.fit(my_image, mask.size, centering=(0.5, 0.5))
    output.putalpha(mask)
    output.save('out.png')



convert_image_to_circle('test.png')
import os

# Define a list of known malware signatures
malware_signatures = [
    "malware_signature_1",
    "malware_signature_2",
    # Add more signatures as needed
]

def scan_file(file_path):
    """
    Scan a file for known malware signatures.
    """
    with open(file_path, "rb") as file:
        content = file.read()
        for signature in malware_signatures:
            if signature.encode() in content:
                return True
    return False

def scan_directory(directory):
    """
    Recursively scan a directory for files containing malware signatures.
    """
    malware_files = []
    for root, dirs, files in os.walk(directory):
        for file_name in files:
            file_path = os.path.join(root, file_name)
            if scan_file(file_path):
                malware_files.append(file_path)
    return malware_files

if __name__ == "__main__":
    # Directory to scan
    target_directory = "/path/to/directory"

    # Scan the directory for malware
    malware_files = scan_directory(target_directory)

    if malware_files:
        print("Malware detected in the following files:")
        for file_path in malware_files:
            print(file_path)
    else:
        print("No malware detected.")
import mido
import random

def generate_notes(notes_len):
    notes = []
    for i in range(notes_len):
        notes.append(random.randint(0, 127))

    return notes



def create_midi_file(notes):
    output_midi = mido.MidiFile()
    # create a track 
    track = mido.MidiTrack()
    # add track
    output_midi.tracks.append(track)

    # write notes
    for note in notes:
        track.append(mido.Message('note_on', note = note, velocity=64, time = 120))
        track.append(mido.Message('note_off', note = note, velocity=64, time = 120))

    # write midi file
    output_midi.save('out.mid')

notes = generate_notes(100)
create_midi_file(notes)
import tkinter as tk


root = tk.Tk()
root.attributes('-fullscreen', True)

# provide a way to leave fullscreen -- without it the window cannot be closed easily
def close_fullscreen():
    root.attributes('-fullscreen', False)

# add a button to exit fullscreen mode
close_bt = tk.Button(root, text='Close full screen mode', command=close_fullscreen)
close_bt.pack(fill=tk.BOTH)

root.mainloop()
# Enable gzip compression for text-based files
http {
   gzip on;
   gzip_types text/plain text/css text/javascript;
}
def factorial(n):
    if n == 0 or n == 1:
        return 1
    else:
        return n * factorial(n-1)
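A quick sanity check of the recursion:

print(factorial(5))  # 5 * 4 * 3 * 2 * 1 = 120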
Write a function named only_ints that takes two parameters. Your function should return True if both parameters are integers, and False otherwise.

For example, calling only_ints(1, 2) should return True, while calling only_ints("a", 1) should return False.
Here's a Python function named only_ints that checks if both parameters are integers:
def only_ints(param1, param2):
    return isinstance(param1, int) and isinstance(param2, int)

# Example usage:
print(only_ints(1, 2))   # Output: True
print(only_ints("a", 1))  # Output: False

This function uses the isinstance() function to check if both parameters are of type int. If both are integers, it returns True; otherwise, it returns False.
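One caveat: bool is a subclass of int in Python, so booleans also pass this check:

print(only_ints(True, 2))  # True, because isinstance(True, int) is True
# Use `type(x) is int` for both parameters if booleans should be rejected.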
import sqlalchemy as db

engine = db.create_engine('sqlite:///datacamp.sqlite')
conn = engine.connect()
metadata = db.MetaData()

Student = db.Table('Student', metadata,
              db.Column('Id', db.Integer(),primary_key=True),
              db.Column('Name', db.String(255), nullable=False),
              db.Column('Major', db.String(255), default="Math"),
              db.Column('Pass', db.Boolean(), default=True)
              )

metadata.create_all(engine)
from sqlalchemy import create_engine
engine = create_engine("sqlite+pysqlite:///:memory:", echo=True)

connection = engine.connect()
print("Connected to the database successfully")
connection.close()
pip install SQLAlchemy
0 0.961847 0.151807 0.945783 0.151807 0.929719 0.166265 0.923695 0.180723 0.923695 0.207229 0.903614 0.243373 0.853414 0.26506 0.833333 0.279518 0.799197 0.289157 0.787149 0.289157 0.783133 0.284337 0.781124 0.255422 0.7751 0.243373 0.773092 0.166265 0.76506 0.151807 0.746988 0.159036 0.692771 0.207229 0.668675 0.209639 0.654618 0.216867 0.620482 0.216867 0.592369 0.207229 0.574297 0.207229 0.568273 0.214458 0.568273 0.240964 0.580321 0.26747 0.580321 0.279518 0.568273 0.296386 0.554217 0.344578 0.53012 0.383133 0.502008 0.414458 0.447791 0.513253 0.413655 0.595181 0.385542 0.689157 0.373494 0.780723 0.383534 0.807229 0.381526 0.850602 0.395582 0.872289 0.437751 0.891566 0.475904 0.901205 0.516064 0.903614 0.556225 0.915663 0.588353 0.908434 0.594378 0.889157 0.586345 0.860241 0.616466 0.824096 0.630522 0.79759 0.648594 0.742169 0.648594 0.689157 0.65261 0.684337 0.662651 0.693976 0.668675 0.739759 0.676707 0.761446 0.686747 0.773494 0.712851 0.778313 0.722892 0.766265 0.728916 0.744578 0.722892 0.66747 0.708835 0.631325 0.680723 0.60241 0.688755 0.566265 0.710843 0.53494 0.730924 0.46506 0.746988 0.440964 0.787149 0.404819 0.821285 0.392771 0.85743 0.366265 0.893574 0.325301 0.953815 0.26988 0.96988 0.209639 0.96988 0.168675
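# The long line above appears to be a YOLO segmentation label: a class index followed by normalized x,y polygon coordinate pairs (one object per line).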
coco/
└── annotations/
    └── coco_annotations.json
coco_to_yolo.py
coco_converted/
├── images/
└── labels/
    └── coco_annotations/
        └── image1.txt
        └── image2.txt
python coco_to_yolo.py
.\venv\Scripts\activate.ps1
.\venv\Scripts\activate
python -m venv venv
cd path\to\your\project\directory
coco/
└── annotations/
    └── coco_annotations.json
coco_to_yolo.py
from ultralytics.data.converter import convert_coco
convert_coco(labels_dir='annotations', use_segments=True)
import serial
import struct
import time  # Added import for time module

# Define communication constants and machine information
BAUD_RATE = 38400
DATA_BITS = 8
STOP_BITS = 1
PARITY = serial.PARITY_NONE
NODE_ID = 0x01

# Define commands for motor control
SET_CONTROLWORD_CMD = 0x2B
SET_OPERATION_MODE_CMD = 0x2F
SET_TARGET_POSITION_CMD = 0x41
READ_STATUSWORD_CMD = 0x4B
SET_PROFILE_SPEED_CMD = 0x23

# Define functions to calculate checksum and send commands
def calculate_checksum(data):
    return (-sum(data) % 256) & 0xFF

def send_command(serial_port, cmd, index, sub_index, data=None):
    if data is None:
        data = b''
    packet = bytes([NODE_ID, cmd, index & 0xFF, index >> 8, sub_index, *data])
    checksum = calculate_checksum(packet)
    packet += bytes([checksum])
    serial_port.write(packet)
    response = serial_port.read(8)
    return response

# Motor control functions
def move_to_position(serial_port, position):
    response = send_command(serial_port, SET_TARGET_POSITION_CMD, 0x607A, 0x00, data=list(struct.pack('<i', position)))
    print(f"Response (Move to Position {position}):", response.hex())

def set_speed(serial_port, speed):
    speed_bytes = struct.pack('<i', speed)
    response = send_command(serial_port, SET_PROFILE_SPEED_CMD, 0x6081, 0x00, data=list(speed_bytes))
    print(f"Response (Set Speed {speed}):", response.hex())

def start_motion(serial_port):
    response = send_command(serial_port, SET_CONTROLWORD_CMD, 0x6040, 0x00, data=[0x0F, 0x00, 0x00, 0x00])  # Start motion
    print("Response (Start motion):", response.hex())

def stop_motion(serial_port):
    response = send_command(serial_port, SET_CONTROLWORD_CMD, 0x6040, 0x00, data=[0x06, 0x00, 0x00, 0x00])  # Stop motion
    print("Response (Stop motion):", response.hex())

def move_to_positions(serial_port, positions):
    for pos in positions:
        move_to_position(serial_port, pos)
        time.sleep(1)  # Added delay to allow time for motion
        set_speed(serial_port, 200)  # Set speed to 200 RPM
        start_motion(serial_port)
        time.sleep(5)  # Added delay to simulate motion time
        stop_motion(serial_port)

# Main function
def main():
    serial_port = serial.Serial('COM3', baudrate=BAUD_RATE, bytesize=DATA_BITS, stopbits=STOP_BITS, parity=PARITY, timeout=1)

    try:
        positions = [5000001, 6000001, 7000001]
        move_to_positions(serial_port, positions)

    finally:
        serial_port.close()

if __name__ == "__main__":
    main()
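A quick sanity check of the checksum convention used above (every packet byte plus the checksum sums to 0 mod 256), with an arbitrary example packet:

example = bytes([NODE_ID, SET_CONTROLWORD_CMD, 0x40, 0x60, 0x00])
assert (sum(example) + calculate_checksum(example)) % 256 == 0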
class Solution(object):
  def lengthOfLongestSubstring(self, s):
    max_sub_length = 0
    start = 0

    for end in range(len(s)):
      # if the new character already appears in the current window,
      # move the window start just past its previous occurrence
      if s[end] in s[start:end]:
        start = s[start:end].index(s[end]) + 1 + start
      max_sub_length = max(max_sub_length, end - start + 1)
    return max_sub_length
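For example:

print(Solution().lengthOfLongestSubstring('abcabcbb'))  # 3 ('abc')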
c3.Genai.UnstructuredQuery.Engine.REA.RetrieverConfig.make('materiality_retriever_config').getConfig().setConfigValue("numRetrievedPassages", 10)
import math


class Circle:
    def __init__(self, radius):
        self._radius = radius
        self._area = None

    @property
    def radius(self):
        return self._radius

    @radius.setter
    def radius(self, value):
        if value < 0:
            raise ValueError('Radius must be positive')

        if value != self._radius:
            self._radius = value
            self._area = None

    @property
    def area(self):
        if self._area is None:
            self._area = math.pi * self.radius ** 2

        return self._area
numbers = (*odd_numbers, *even_numbers)
print(numbers)
new_stocks = {symbol: price * 1.02 for (symbol, price) in stocks.items()}

cities = ['New York', 'Beijing', 'Cairo', 'Mumbai', 'Mexico']

for item in enumerate(cities):
    print(item)
list_of_dicts = [
    {'name': 'Alice', 'age': 30},
    {'name': 'Bob', 'age': 25},
    {'name': 'Charlie', 'age': 35}
]
person = next((item for item in list_of_dicts if item.get('name') == 'Bob'),  "person not found." )
print(f"Found: {person}")
class Celsius:
    def __init__(self, temperature=0):
        # This calls the setter method `temperature(self, value)` below 
        self.temperature = temperature

    def to_fahrenheit(self):
        # This calls the getter method (`temperature(self)`) below
        return (self.temperature * 1.8) + 32

    @property
    def temperature(self):
        """
        Getter method for the `temperature` property.
        If you create an instance of this class and then refer to the
            `temperature` property of that instance then this method will be called.

        Example:
            t = Celsius(37)
            t.temperature  <-- this is where this getter method is called
        """
        print("Getting value...")
        return self._temperature

    @temperature.setter
    def temperature(self, value):
        """
        Setter method for the `temperature` property.
        If you create an instance of this class and then assign a value
            to the `temperature` property of that instance then this method will be called.

        Example:
            t = Celsius(37)  <-- this is where this setter method is called
            t.temperature  
        """
        print("Setting value...")
        if value < -273.15:
            raise ValueError("Temperature below -273 is not possible")
        self._temperature = value


human = Celsius(37)  # Setting value...
print(human.temperature)  # Getting value...
print(human.to_fahrenheit())  # Getting value... then prints 98.6
coldest_thing = Celsius(-300)  # ValueError
class Dates:
    def __init__(self, date):
        self.date = date

    def get_date(self):
        return self.date

    @staticmethod
    def format_date_with_dashes(date):
        # Replace / with -
        return date.replace("/", "-")


slash_date = "10/20/2024"
# Calling format_date_with_dashes() like any other function, but preceded by its class name
dash_date = Dates.format_date_with_dashes(slash_date)

print(dash_date)  # 10-20-2024
from datetime import date


# random Person
class Person:
    def __init__(self, name, age):
        self.name = name
        self.age = age

    @classmethod
    def from_birth_year(cls, name, birth_year):
        # Since `cls` refers to the class itself (`Person`), calling this
        #   `classmethod` creates an instance of `Person`.
        return cls(name, date.today().year - birth_year)

    def display(self):
        print(f"{self.name}'s age is: {self.age}")


bob = Person("Bob", 25)
bob.display()  # Bob's age is: 25

alice = Person.from_birth_year("Alice", 1985)
alice.display()  # Alice's age is: 39
class Rectangle:
    def __init__(self, length, width):
        self.length = length
        self.width = width

    def area(self):
        return self.length * self.width


class Square(Rectangle):
    def __init__(self, length):
        # Call the __init__ method of the parent class (Rectangle)
        # super() refers to the parent class of this class
        # Rectangle's __init__ needs 2 parameters, `length` and `width`;
        #   since a square has length == width, we pass `length` twice.
        super().__init__(length, length)


my_square = Square(5)
my_square.area()  # 25
from abc import ABC, abstractmethod


class Shape(ABC):
    # Shape is an abstract base class

    @abstractmethod
    def area(self):
        pass


class Circle(Shape):
    # Circle is a class that inherits from the abstract Shape class

    def __init__(self, radius):
        self.radius = radius

    def area(self):
        # As a child of an abstract class, this child class MUST define
        #   its own implementation of this abstractmethod
        return 3.14 * self.radius ** 2


class Square(Shape):
    # Square is a class that inherits from the abstract Shape class

    def __init__(self, side):
        self.side = side

    def area(self):
        # As a child of an abstract class, this child class MUST define
        #   its own implementation of this abstractmethod
        return self.side ** 2
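Example usage, with each subclass supplying its own area():

print(Circle(2).area())   # 12.56
print(Square(3).area())   # 9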
Test automation code involves creating test cases, handling different locators, interacting with various elements, and implementing assertions to validate expected outcomes. I am sharing a general example using Python with the popular testing framework, Selenium, for web automation.

from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By

# Set up the WebDriver (assuming Chrome; Selenium 4 passes the driver path via a Service object)
service = Service(executable_path='path/to/chromedriver.exe')
driver = webdriver.Chrome(service=service)

# Navigate to a website
driver.get('https://www.example.com')

# Find an element using its ID and perform an action (e.g., click)
element = driver.find_element(By.ID, 'example-button')
element.click()

# Perform an assertion to verify the expected result
assert "Example Domain" in driver.title

# Close the browser window
driver.quit()

# Demonstration Python program
# to read a file character by character
with open('file.txt', 'r') as file:
    while True:
        # read one character at a time
        char = file.read(1)
        if not char:
            break

        print(char)
d = {}
with open("file.txt") as f:
    for line in f:
       (key, val) = line.split()
       d[int(key)] = val
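# Note: this assumes every line holds exactly two whitespace-separated fields,
# an integer key then a value, e.g. a file of lines like "1 apple".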
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
import pandas as pd
import time

web = 'https://twitter.com/'
path = r"C:\Drivers\chromedriver-win64\chromedriver.exe"
options = webdriver.ChromeOptions()
service = Service(executable_path=path)
options.add_experimental_option("detach", True)
options.add_argument('window-size=1920x1080')

driver = webdriver.Chrome(service=service, options=options)
driver.get(web)  # Open the webpage

# Wait for the login button to be clickable
login_button = WebDriverWait(driver, 10).until(
    EC.element_to_be_clickable((By.XPATH, '//a[contains(@href, "/login")]'))
)
login_button.click()
time.sleep(2)

# Wait for the username input field to be visible and then enter username
user_name = WebDriverWait(driver, 10).until(
    EC.visibility_of_element_located((By.XPATH, '//input[contains(@autocomplete, "username")]'))
)
user_name.send_keys("your_email@example.com")  # replace with your own account email

# Wait for the next button to be clickable and then click
next_button = WebDriverWait(driver, 10).until(
    EC.element_to_be_clickable((By.XPATH, '//div[contains(@role, "button")]//span[text()="Next"]'))
)
next_button.click()

time.sleep(2)

# Wait for the password input field to be visible and then enter password
password = WebDriverWait(driver, 10).until(
    EC.visibility_of_element_located((By.XPATH, '//input[contains(@autocomplete, "current-password")]'))
)
password.send_keys("your_password")  # replace with your own password

# Wait for the login button to be clickable and then click
login_button = WebDriverWait(driver, 10).until(
    EC.element_to_be_clickable((By.XPATH, '//div[contains(@role, "button")]//span[text()="Log in"]'))
)
login_button.click()

# closing driver
# driver.quit()
def retry(operation, attempts):
  for n in range(attempts):
    if operation():
      print("Attempt " + str(n) + " succeeded")
      break
    else:
      print("Attempt " + str(n) + " failed")

retry(create_user, 3)
retry(stop_service, 5)
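create_user and stop_service are not defined in this snippet; a throwaway stand-in (purely illustrative) shows retry in action:

import random

def flaky_operation():
    return random.random() > 0.5  # succeeds roughly half the time

retry(flaky_operation, 3)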
def factorial(n):
    result = 1
    for x in range(1, n + 1):
        result = result * x
    return result

for n in range(0,10):
  print(n, n*factorial(n+1))
# Handle pagination with Selenium
# Scrape Website (www.audible.com)

from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
import pandas as pd
import time

path = r"C:\Drivers\chromedriver-win64\chromedriver.exe"
website = "https://www.audible.com/search"

# Use the Service class to specify the path to chromedriver.exe
service = Service(executable_path=path)

# Use ChromeOptions for additional configurations
options = webdriver.ChromeOptions()
options.add_experimental_option("detach", True)

# Initialize the WebDriver with the specified service and options
driver = webdriver.Chrome(service=service, options=options)

# Navigate to the specific website
driver.get(website)

# Wait for some time to ensure the page is loaded
time.sleep(5)

try:
    # Wait for the container to be present
    container = WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.CLASS_NAME, 'adbl-impression-container'))
    )

    # Wait for the products to be present within the container
    products = WebDriverWait(container, 10).until(
        EC.presence_of_all_elements_located((By.XPATH, './/li[contains(@class, "productListItem")]'))
    )

    book_title = []
    author_name = []
    run_time = []
    release_date = []

    for product in products:
        try:
            # Wait for the book title element to be present within each product
            book_title_elem = WebDriverWait(product, 5).until(
                EC.presence_of_element_located((By.XPATH, './/h3[contains(@class, "bc-heading")]'))
            )

            # Append book title
            book_title.append(book_title_elem.text)
            
            # Append author name
            author_name_elem = product.find_element(By.XPATH, './/li[contains(@class, "authorLabel")]')
            author_name.append(author_name_elem.text)

            # Append run time
            run_time_elem = product.find_element(By.XPATH, './/li[contains(@class, "runtimeLabel")]')
            run_time.append(run_time_elem.text)

            # Append release date
            release_date_elem = product.find_element(By.XPATH, './/li[contains(@class, "releaseDateLabel")]')
            release_date.append(release_date_elem.text)

        except TimeoutException:
            print("Timeout occurred while waiting for element within product.")
            # Handle the timeout situation here (e.g., skip this product or log the issue)

    # Create DataFrame and save to CSV
    df = pd.DataFrame({'book_title': book_title,
                       'author_name': author_name,
                       'run_time': run_time,
                       'release_date': release_date})

    df.to_csv('amazon_audible.csv', index=False)
    print(df)

except TimeoutException:
    print("Timeout occurred while waiting for container element.")
    # Handle the timeout situation here (e.g., retry navigating to the page or log the issue)

finally:
    # Quit the driver
    driver.quit()
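# The heading above mentions pagination, but the snippet only scrapes the first
# results page. A rough sketch of a pagination loop (the 'nextButton' class
# name is a hypothetical selector; check the site's actual markup):
from selenium.common.exceptions import NoSuchElementException

def scrape_current_page():
    pass  # placeholder for the per-product extraction shown above

while True:
    scrape_current_page()
    try:
        driver.find_element(By.CLASS_NAME, 'nextButton').click()
        time.sleep(2)
    except NoSuchElementException:
        break  # last page reached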

# Import necessary libraries
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import pandas as pd
from selenium.common.exceptions import NoSuchElementException, TimeoutException

# Set the path to chromedriver.exe
path = r"C:\Drivers\chromedriver-win64\chromedriver.exe"
website = "https://www.adamchoi.co.uk/overs/detailed"

# Use the Service class to specify the path to chromedriver.exe
service = Service(executable_path=path)

# Use ChromeOptions for additional configurations
options = webdriver.ChromeOptions()

# Add the --headless option to run Chrome in headless mode (optional)
# options.add_argument("--headless")

# Add the --detach option to keep the browser open after the script finishes
options.add_experimental_option("detach", True)

# Initialize the WebDriver with the specified service and options
driver = webdriver.Chrome(service=service, options=options)

# Navigate to the specified website
driver.get(website)

date = []
home_team = []
score = []
away_team = []

try:
    # Wait for the "All matches" button to be clickable
    all_matches_button = WebDriverWait(driver, 10).until(
        EC.element_to_be_clickable((By.XPATH, '//label[@analytics-event="All matches"]'))
    )

    # Click on the "All matches" button
    all_matches_button.click()

    # Wait for the matches to load (adjust the timeout as needed)
    WebDriverWait(driver, 10).until(
        EC.presence_of_all_elements_located((By.TAG_NAME, "tr"))
    )

    # Get all match elements
    matches = driver.find_elements(By.TAG_NAME, "tr")

    # Extract data from each match
    for match in matches:
        date.append(match.find_element("xpath", "./td[1]").text)
        home_team.append(match.find_element("xpath", "./td[2]").text)
        score.append(match.find_element("xpath", "./td[3]").text)
        away_team.append(match.find_element("xpath", "./td[4]").text)

except (NoSuchElementException, TimeoutException) as e:
    print(f"Error: {e}")

finally:
    # Close the WebDriver when you're done
    driver.quit()

# Create a DataFrame from the scraped data
df = pd.DataFrame({'date': date,
                   'home_team': home_team,
                   'score': score,
                   'away_team': away_team})

# Save the DataFrame to a CSV file
df.to_csv('football_data.csv', index=False)

# Print the DataFrame
print(df)
# ---------------------------    Chrome   ---------------------------------

from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.by import By
import pandas as pd
import time

path = r"C:\Drivers\chromedriver-win64\chromedriver.exe"
website = "https://www.adamchoi.co.uk/overs/detailed"

# Use the Service class to specify the path to chromedriver.exe
service = Service(executable_path=path)

# Use ChromeOptions for additional configurations
options = webdriver.ChromeOptions()
options.add_experimental_option("detach", True)

# Initialize the WebDriver with the specified service and options
driver = webdriver.Chrome(service=service, options=options)

# Navigate to the specified website
driver.get(website)

all_matches_button = driver.find_element("xpath", '//label[@analytics-event="All matches"]')
all_matches_button.click()

dropdown = Select(driver.find_element(By.ID, "country"))
dropdown.select_by_visible_text('Spain')

time.sleep(3)

matches = driver.find_elements(By.TAG_NAME, "tr")

date = []
home_team = []
score = []
away_team = []

for match in matches:
    date.append(match.find_element("xpath", "./td[1]").text)
    home_team.append(match.find_element("xpath", "./td[2]").text)
    score.append(match.find_element("xpath", "./td[3]").text)
    away_team.append(match.find_element("xpath", "./td[4]").text)

# Close the WebDriver when you're done
driver.quit()

df = pd.DataFrame({'date': date,
                   'home_team': home_team,
                   'score': score,
                   'away_team': away_team})
df.to_csv('football_data.csv', index=False)
print(df)
# ---------------------------    Chrome   ---------------------------------

from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By

path = r"C:\Drivers\chromedriver-win64\chromedriver.exe"
website = "https://www.adamchoi.co.uk/overs/detailed"

# Use the Service class to specify the path to chromedriver.exe
service = Service(executable_path=path)

# Use ChromeOptions for additional configurations
options = webdriver.ChromeOptions()
options.add_experimental_option("detach", True)

# Initialize the WebDriver with the specified service and options
driver = webdriver.Chrome(service=service, options=options)

# Navigate to the specified website
driver.get(website)

all_matches_button = driver.find_element("xpath", '//label[@analytics-event="All matches"]')
all_matches_button.click()

matches = driver.find_elements(By.TAG_NAME, "tr")

date = []
home_team = []
score = []
away_team = []

for match in matches:
    date.append(match.find_element("xpath", "./td[1]").text)
    home_team.append(match.find_element("xpath", "./td[2]").text)
    print(home_team[-1])
    score.append(match.find_element("xpath", "./td[3]").text)
    away_team.append(match.find_element("xpath", "./td[4]").text)

# Close the WebDriver when you're done
# driver.quit()

import fiftyone as fo
from fiftyone.zoo import load_zoo_dataset

def download_vehicle_images():
    # Specify the classes of interest (e.g., vehicles, cars)
    classes_of_interest = ["Car", "Truck", "Motorcycle", "Bus", "Van"]

    # Specify the maximum number of samples you want to download
    max_samples = 100  # You can adjust this number as needed

    # Load the Open Images V7 dataset using FiftyOne
    dataset = load_zoo_dataset(
        "open-images-v7",
        split="validation",
        label_types=["detections", "segmentations", "points"],
        classes=classes_of_interest,
        max_samples=max_samples,
    )

    # Save the downloaded dataset
    dataset.export(export_dir="/path/to/save/vehicle/images")

if __name__ == "__main__":
    # Execute the download function
    download_vehicle_images()
import cProfile

# Use `profile` if `cProfile` isn't available on your OS
# import profile


def adder(x, y):
	return x + y


cProfile.run('adder(10, 20)')
import timeit


def adder(x, y):
	return x + y


t = timeit.Timer(setup='from __main__ import adder', stmt='adder(10, 20)')
t.timeit()
while True:
    try:
        marks = float(input('Enter your marks to calculate your grade: \n'))
    except ValueError:
        print('Invalid input, try again')
        continue
    if marks > 100:
        print('your input must not be greater than 100')
    else:
        break

if 85 <= marks <= 100:
    print('You have grade A')
elif 65 <= marks < 85:
    print('You have grade B')
elif 41 <= marks < 65:
    print('You have grade C')
elif 0 <= marks < 41:
    print('You failed')
else:
    print('Invalid Marks')
How to write great documentation (in 5 points)

1. Use a question-and-answer format (more like FAQs)
2. Use bullet points wherever possible
    a. maximum 5, preferably 3
    b. if it goes beyond 5, try making sub-points instead
3. Diagrams wherever possible. A 🖼 is worth a 1000 words
4. Be concise, but accurate. And have  w h i t e  s p a c e s , they increase readability.
5. Avoid near-synonyms, generous adjectives, and spelling and grammatical mistakes
    a. repeat the same words already used to refer to something
    b. have links to go to something previously explained
    c. Avoid external links; instead provide a brief summary. Add a link only for confidence (reduces distraction)
"""
Exercise 1.10: Evaluate a Gaussian Function
"""

from math import pi, exp, sqrt

m = 0
s = 2
x = 1

y = 1 / (sqrt(2 * pi) * s) * exp(-0.5 * ((x - m) / s) ** 2)
print(y)

'''
Sample run:
    y = 0.176032663
'''
    
def is_prime(n):
    # Deal with special cases
    if n == 0 or n == 1:
        return False
    # Loop through all numbers between 2 and n - 1, and check if n is NOT prime
    i = 2
    while i < n:
        if n % i == 0:
            return False
        i = i + 1
    # If the program reached this point, that means the number is prime
    return True
from flask import Flask, render_template
from flask_wtf import FlaskForm
from wtforms import SubmitField, Field
from wtforms.widgets import TextInput

app = Flask(__name__)
app.config['SECRET_KEY'] = 'Shh!'


class CustomField(Field):
    widget = TextInput()
    
    def _value(self):
        if self.data:
            return u', '.join(self.data)
        else:
            return u''

    def process_formdata(self, valuelist):
        if valuelist:
            self.data = [x.strip() for x in valuelist[0].split(',')]
        else:
            self.data = []


class ExampleForm(FlaskForm):
    status = CustomField()
    submit = SubmitField('POST')
from sys import path
import os
path.append(os.path.realpath('../'))
# split()
# A method that splits a string into a list of words, using whitespace
# or any other text you pass as the separator
# Syntax: string.split(sep=None, maxsplit=-1) -> list[LiteralString]
# sep => Separator    NOTE: By default any (whitespace) is a separator
# maxsplit => how many splits do you want?
# maxsplit => e.g. passing 2 returns 3 elements, i.e. the result has maxsplit+1
#             items: 2 splits are made and the rest goes into the last element

## Its only sibling ##########################
# rsplit()  # Right Split
################################################################

txt1 = "welcome to the jungle"
txt2 = "welcome#to#the#jungle"

print("============(split)===================================")
x1 = txt1.split()           
x2 = txt1.split(" ",2)
x3 = txt2.split("#")
print(x1)                   # ['welcome', 'to', 'the', 'jungle']
print(x2)                   # ['welcome', 'to', 'the jungle']
print(x3)                   # ['welcome', 'to', 'the', 'jungle']

print("============(rsplit)===================================")
y1 = txt1.rsplit(None,2)
y2 = txt1.rsplit(" ",2)
print(y1)                    # ['welcome to', 'the', 'jungle']
print(y2)                    # ['welcome to', 'the', 'jungle']
person_info = {
    'Name': 'Ahmed',
    'Age': 25,
    'City': 'Cairo'
}
FROM python:3.9-slim-bullseye

ENV VIRTUAL_ENV=/opt/venv
RUN python3 -m venv $VIRTUAL_ENV
ENV PATH="$VIRTUAL_ENV/bin:$PATH"

# Install dependencies:
COPY requirements.txt .
RUN pip install -r requirements.txt

# Run the application:
COPY myapp.py .
CMD ["python", "myapp.py"]
Page segmentation modes:
  0    Orientation and script detection (OSD) only.
  1    Automatic page segmentation with OSD.
  2    Automatic page segmentation, but no OSD, or OCR.
  3    Fully automatic page segmentation, but no OSD. (Default)
  4    Assume a single column of text of variable sizes.
  5    Assume a single uniform block of vertically aligned text.
  6    Assume a single uniform block of text.
  7    Treat the image as a single text line.
  8    Treat the image as a single word.
  9    Treat the image as a single word in a circle.
 10    Treat the image as a single character.
 11    Sparse text. Find as much text as possible in no particular order.
 12    Sparse text with OSD.
 13    Raw line. Treat the image as a single text line,
                        bypassing hacks that are Tesseract-specific.
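From Python, the mode is passed via Tesseract's config string; a minimal sketch with pytesseract (assumes pytesseract and Pillow are installed; 'sample.png' is a stand-in path):

import pytesseract
from PIL import Image

img = Image.open('sample.png')  # stand-in image path
# --psm 6: assume a single uniform block of text (see table above)
text = pytesseract.image_to_string(img, config='--psm 6')
print(text)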
from fastapi import FastAPI, HTTPException, Security, status
from fastapi.security import APIKeyHeader


api_keys = [
    "my_api_key"
]

app = FastAPI()

api_key_header = APIKeyHeader(name="X-API-Key")

def get_api_key(api_key_header: str = Security(api_key_header)) -> str:
    if api_key_header in api_keys:
        return api_key_header
    raise HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Invalid or missing API Key",
    )

@app.get("/protected")
def protected_route(api_key: str = Security(get_api_key)):
    # Process the request for authenticated users
    return {"message": "Access granted!"}

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)


wget https://blah/blah/stats --header="X-API-Key: my_api_key"
from fastapi import FastAPI
from fastapi.responses import StreamingResponse
import io
import pandas as pd

app = FastAPI()

@app.get("/get_csv")
async def get_csv():
    df = pd.DataFrame(dict(col1 = 1, col2 = 2), index=[0])
    stream = io.StringIO()
    df.to_csv(stream, index = False)
    response = StreamingResponse(iter([stream.getvalue()]),
                                 media_type="text/csv"
                                )
    response.headers["Content-Disposition"] = "attachment; filename=export.csv"
    return response
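A quick client-side check of the endpoint (assuming the app is served on localhost:8000, the uvicorn default):

import requests

r = requests.get('http://localhost:8000/get_csv')
with open('export.csv', 'wb') as f:
    f.write(r.content)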
Alternate command for 1 GPU:

sockeye-train \
    --prepared-data prepared --validation-source dev.en.bpe \
    --validation-target dev.de.bpe --output model --num-layers 6 \
    --transformer-model-size 1024 --transformer-attention-heads 16 \
    --transformer-feed-forward-num-hidden 4096 --amp --batch-type max-word \
    --batch-size 5000 --update-interval 80 --checkpoint-interval 500 \
    --max-updates 15000 --optimizer-betas 0.9:0.98 \
    --initial-learning-rate 0.06325 \
    --learning-rate-scheduler-type inv-sqrt-decay --learning-rate-warmup 4000 \
    --seed 1
# Best min size, in kB.
best_min_size = (32000 + 100000) * (1.073741824 * duration) / (8 * 1024)
# Download the helper library from https://www.twilio.com/docs/python/install
import os
from twilio.rest import Client


# Find your Account SID and Auth Token at twilio.com/console
# and set the environment variables. See http://twil.io/secure
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)

verification = client.verify \
                     .v2 \
                     .services('VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
                     .verifications \
                     .create(to='+15017122661', channel='sms')

print(verification.sid)
1. Create a file "Dockerfile" in the same directory as "docker-compose.yaml"
2. Write:
FROM apache/airflow:2.0.2
RUN pip install --no-cache-dir geopandas another-package
3. In a shell, move to that directory and build the new image:
docker build . --tag projectname-airflow:2.0.2
4. In "docker-compose.yaml" replace
image: ${AIRFLOW_IMAGE_NAME:-apache/airflow:2.0.2}
with
image: ${AIRFLOW_IMAGE_NAME:-projectname-airflow:2.0.2}
5. Epic win
JupyterHub.ensureService({waitForReady:true})
import holidays
from datetime import datetime, timedelta


# Get current date/time
tday = datetime.now()
testday = datetime(2023,12,1,13,13,0)    # for debug only

tday = testday    # for debug only
aims_rec_date = tday.strftime('%Y-%m-%d')
aims_time = tday.strftime('%H:%M:%S')
same_day_despatch_cut_off_time = datetime.strptime(aims_rec_date + " 13:00:00", "%Y-%m-%d %H:%M:%S")
add_days = 0

print(f"*******************************\nReceipt: {aims_rec_date} @ {aims_time}")

# Early enough for same-day despatch?
wk_day = int(tday.strftime('%w'))
if wk_day in range(1,5+1):
    despatch_today = tday < same_day_despatch_cut_off_time
    print(f"Despatch today: {despatch_today}")
    if not despatch_today:
        add_days = 1
else:
    print("Weekend...")

# Set provisional despatch date
aims_despatch_date = (tday + timedelta(days=add_days))

# Only interested in these public holidays
occasions = [
    "New Year's Day",
    "Good Friday",
    "Easter Monday [England/Wales/Northern Ireland]",
    "May Day",
    "Spring Bank Holiday",
    "Late Summer Bank Holiday [England/Wales/Northern Ireland]",
    "Christmas Day",
    "Boxing Day",
]
uk_holidays = holidays.UnitedKingdom()

print(f"UK Holiday: {aims_despatch_date in uk_holidays}")

# Amend provisional despatch date if not working day
while aims_despatch_date in uk_holidays or int(aims_despatch_date.strftime('%w')) in [0,6]:
    print("****skip-a-day****")
    try:    # amend for public holiday
        occasion = uk_holidays[aims_despatch_date]
        if occasion in occasions:
            aims_despatch_date = (aims_despatch_date + timedelta(days=1))
            wk_day = aims_despatch_date.strftime('%w')
        else:
            break
    except Exception as e:    # amend for weekend
        aims_despatch_date = (aims_despatch_date + timedelta(days=1))
        wk_day = aims_despatch_date.strftime('%w')


print(f"Despatch: {aims_despatch_date.strftime('%Y-%m-%d')}\n*******************************\n")
# Assume -9999 is a missing data flag (replace returns a new frame, so assign it back)
df = df.replace(-9999, np.nan)
# Alternatively, perform this when creating the dataframe:
pd.read_csv("https://www.atmos.albany.edu/products/metarCSV/world_metar_latest.csv", sep=r'\s+', na_values=['9999.00','-9999.0'])
import xarray as xr
import numpy as np
from xarray.core.duck_array_ops import isnull


array = xr.DataArray([1, np.nan, 3], dims="x")
array

%time np.sum(isnull(array.data), axis=()).sum()
# core/settings.py 

# other settings....

CACHES = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": "redis://redis:6379/",
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient"
        },
    }
}
t = int(input(""))
p = 0
mylist = []
if t >= 0 and t < 101:
    while p<t:
        a = int(input("a "))
        b = int(input("b "))
        c = int(input("c "))
        if a <= 20000 and a >= 1:
            if b <= 20000 and b >= 1:
                if c <= 20000 and c >= 1:
                    if a+b<=c or a+c<=b or b+c<=a:
                        txt = ("nelze sestrojit")  # cannot be constructed
                    else:
                        if a == b and b == c and c == a:
                            txt = ("rovnostranny")  # equilateral
                        elif a == b or b == c or c == a:
                            txt = ("rovnoramenny")  # isosceles
                        else:
                            txt = ("obecny")  # scalene
                else:
                    txt = ("moc velké nebo malé číslo")  # number too large or too small
            else:
                txt = ("moc velké nebo malé číslo")  # number too large or too small
        else:
            txt = ("moc velké nebo malé číslo")  # number too large or too small
        p = p+1
        mylist.append(txt)
        print(p)
    print(mylist)
else:
    print("syntax")
def res(vys):
    for x in vys:
        print(x)
try:
    s = int(input("s "))
    p = 0
    lst = []
    if s >=1 and s <=100:
        while p<s:
            z = input("z ")
            if len(z) >= 1 and len(z) <= 50:
                try:
                    num = int(z)
                    kys = type(num) == int
                except ValueError:
                    kys = z.isupper()     
                if kys == True:
                    pal = z[::-1]
                    if z == pal:
                        txt = "yes"
                    else:
                        txt = "no"
                else:
                    txt = "syntax"
            else:
                txt = "syntax"
            p = p+1
            lst.append(txt)
            print("")
        res(lst)
    else:
        print("syntax")
except:
    print("syntax")
import cv2
import numpy as np

img = cv2.imread('blackdots.jpg')
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

ret, thresh = cv2.threshold(gray_img, 170, 255, cv2.THRESH_BINARY) # <--- Try different values here

accum_size = 1
# Minimum distance between the centers of the detected circles.
minDist = 30
#First method-specific parameter. In case of CV_HOUGH_GRADIENT , it is the higher threshold of the two passed to the Canny() edge detector (the lower one is twice smaller).
param1 = 50
# Second method-specific parameter. In case of CV_HOUGH_GRADIENT , it is the accumulator threshold for the circle centers at the detection stage. The smaller it is, the more false circles may be detected. Circles, corresponding to the larger accumulator values, will be returned first.
param2 = 5
# Minimum circle radius.
minRadius = 1
# Maximum circle radius.
maxRadius = 10
circles = cv2.HoughCircles(thresh, cv2.HOUGH_GRADIENT, accum_size, minDist,
                           param1=param1, param2=param2,
                           minRadius=minRadius, maxRadius=maxRadius)
if circles is not None:
    circles = np.uint16(np.around(circles))
    for i in circles[0, :]:
        center = (i[0], i[1])
        radius = 15
        cv2.circle(img, center, radius, (255, 0, 255), 3)

thresh = cv2.resize(thresh, (1280, 720)) # <---- This is just for easier display
img = cv2.resize(img, (1280, 720)) # <---- This is just for easier display
cv2.imwrite('circles_black_dot.png', img)
cv2.imwrite('threshold_black_dots.png', thresh)
cdo -expr,'var = ((var > 0.005)) ? var : 0.05' infile outfile
# Create a virtual environment
python -m venv env

# Activate virtual environment
source env/bin/activate
print(c3.Pkg.file('meta://esg/src/model/mentionTracking/EsgMentionTrackingMl.MentionTrackingPipeline.py').readString())
ls = [1, 2, 9, 4, 5, 2, 3, 9, 1]
result = max(ls, key=ls.count)

# or
# result = max(set(ls), key=ls.count)
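An equivalent with collections.Counter, which counts in a single pass instead of calling ls.count once per element:

from collections import Counter

ls = [1, 2, 9, 4, 5, 2, 3, 9, 1]
result = Counter(ls).most_common(1)[0][0]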
# STEP 1
# import libraries
import fitz
import io
from PIL import Image
  
# STEP 2
# file path you want to extract images from
file = "/content/pdf_file.pdf"
  
# open the file
pdf_file = fitz.open(file)
  
# STEP 3
# iterate over PDF pages
for page_index in range(len(pdf_file)):
    
    # get the page itself
    page = pdf_file[page_index]
    image_list = page.get_images()
      
    # printing number of images found in this page
    if image_list:
        print(f"[+] Found a total of {len(image_list)} images in page {page_index}")
    else:
        print("[!] No images found on page", page_index)
    for image_index, img in enumerate(image_list, start=1):
        
        # get the XREF of the image
        xref = img[0]
          
        # extract the image bytes
        base_image = pdf_file.extract_image(xref)
        image_bytes = base_image["image"]
          
        # get the image extension
        image_ext = base_image["ext"]

        # save the image to disk using the PIL objects imported above
        image = Image.open(io.BytesIO(image_bytes))
        image.save(f"image{page_index+1}_{image_index}.{image_ext}")
# extract images from pdf file
import fitz
doc = fitz.open("file.pdf")
for i in range(len(doc)):
    for img in doc.get_page_images(i):
        xref = img[0]
        pix = fitz.Pixmap(doc, xref)
        if pix.n < 5:       # this is GRAY or RGB
            pix.save("p%s-%s.png" % (i, xref))
        else:               # CMYK: convert to RGB first
            pix1 = fitz.Pixmap(fitz.csRGB, pix)
            pix1.save("p%s-%s.png" % (i, xref))
            pix1 = None
        pix = None
import subprocess as sp
import json
import os
import sys


for year in range(1980,2015):
    print(year)
    cmd=f' mv {year}/pl_*.nc plFiles/'
    #print(cmd)
    sp.run(cmd, shell=True)
#libraries
import datetime as dt
import xarray as xr
import fsspec
import s3fs
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd


#### AWS repository link - https://registry.opendata.aws/mur/

%%time
file_location = 's3://mur-sst/zarr'

ikey = fsspec.get_mapper(file_location, anon=True)

ds_sst = xr.open_zarr(ikey,consolidated=True)

ds_sst

data = ds_sst.sel(lat=slice(10,15), lon=slice(-68,-67))

# define the mask before using it below
dataMask = ds_sst.mask.sel(lat=slice(10,15), lon=slice(-68,-67))
dataMask

data.analysed_sst[0,:,:].where(dataMask[0,:,:]==1).plot()

data.analysed_sst[0,:,:].where(dataMask[0,:,:]==1).to_netcdf('testMUR.nc')

sst=xr.open_dataset('testMUR.nc')
sst.analysed_sst.plot()


#!/usr/bin/env python3
"""
License: MIT License
Copyright (c) 2023 Miel Donkers

Very simple HTTP server in python for logging requests
Usage::
    ./server.py [<port>]
"""
from http.server import BaseHTTPRequestHandler, HTTPServer
import logging

class S(BaseHTTPRequestHandler):
    def _set_response(self):
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()

    def do_GET(self):
        logging.info("GET request,\nPath: %s\nHeaders:\n%s\n", str(self.path), str(self.headers))
        self._set_response()
        self.wfile.write("GET request for {}".format(self.path).encode('utf-8'))

    def do_POST(self):
        content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
        post_data = self.rfile.read(content_length) # <--- Gets the data itself
        logging.info("POST request,\nPath: %s\nHeaders:\n%s\n\nBody:\n%s\n",
                str(self.path), str(self.headers), post_data.decode('utf-8'))

        self._set_response()
        self.wfile.write("POST request for {}".format(self.path).encode('utf-8'))

def run(server_class=HTTPServer, handler_class=S, port=8080):
    logging.basicConfig(level=logging.INFO)
    server_address = ('', port)
    httpd = server_class(server_address, handler_class)
    logging.info('Starting httpd...\n')
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        pass
    httpd.server_close()
    logging.info('Stopping httpd...\n')

if __name__ == '__main__':
    from sys import argv

    if len(argv) == 2:
        run(port=int(argv[1]))
    else:
        run()
import re
s = "string. With. Punctuation?"
s = re.sub(r'[^\w\s]','',s)
# \w matches any word character (equivalent to [a-zA-Z0-9_])
# \s matches any whitespace character (equivalent to [\r\n\t\f\v ])
from serpapi import GoogleSearch

params = {
  "q": "Coffee",
  "location": "Austin, Texas, United States",
  "hl": "en",
  "gl": "us",
  "google_domain": "google.com",
  "api_key": "381f56d7198076f0606f9d563798774bd6f56899511e8b4770216e5facd8668f"
}

search = GoogleSearch(params)
results = search.get_dict()
from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.document_loaders import TextLoader

# load the document and split it into chunks
loader = TextLoader("")
documents = loader.load()
len(documents)
print(documents[0])

# split it into chunks
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
len(docs)
print(docs[0])


# create the open-source embedding function (imported above)
embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")

# load it into Chroma
db = Chroma.from_documents(docs, embedding_function, persist_directory="./chroma_db")

# query it
query = "Question"
docs = db.similarity_search(query)

# print results
print(docs[0].page_content)
# Route to blacklist a creator
@app.route('/blacklist_creator/<int:creator_id>', methods=['POST'])
def blacklist_creator(creator_id):
    admin_id = session['user_id'] 

    
    creator_blacklist = CreatorBlacklist.query.filter_by(admin_id=admin_id, creator_id=creator_id).first()
    if creator_blacklist:
        flash('Creator is already blacklisted.', 'danger')
    else:
        creator_blacklist = CreatorBlacklist(admin_id=admin_id, creator_id=creator_id)
        db.session.add(creator_blacklist)
        db.session.commit()
        flash('Creator has been blacklisted.', 'success')

    return redirect(url_for('review_creator'))

# Route to whitelist a creator
@app.route('/whitelist_creator/<int:creator_id>', methods=['POST'])
def whitelist_creator(creator_id):
    admin_id = session['user_id']  
    creator_blacklist = CreatorBlacklist.query.filter_by(admin_id=admin_id, creator_id=creator_id).first()
    if creator_blacklist:
        db.session.delete(creator_blacklist)
        db.session.commit()
        flash('Creator has been whitelisted.','success')
    else:
        flash('Creator was not blacklisted.','danger')
    return redirect(url_for('review_creator'))  
docsearch.as_retriever(search_type="mmr")

# Retrieve more documents with higher diversity- useful if your dataset has many similar documents
docsearch.as_retriever(search_type="mmr", search_kwargs={'k': 6, 'lambda_mult': 0.25})

# Fetch more documents for the MMR algorithm to consider, but only return the top 5
docsearch.as_retriever(search_type="mmr", search_kwargs={'k': 5, 'fetch_k': 50})

# Only retrieve documents that have a relevance score above a certain threshold
docsearch.as_retriever(search_type="similarity_score_threshold", search_kwargs={'score_threshold': 0.8})

# Only get the single most similar document from the dataset
docsearch.as_retriever(search_kwargs={'k': 1})

# Use a filter to only retrieve documents from a specific paper
docsearch.as_retriever(search_kwargs={'filter': {'paper_title':'GPT-4 Technical Report'}})
from sqlalchemy import create_engine
from trino.auth import BasicAuthentication
import pandas as pd

engine = create_engine(
    f"trino://{user}:{user_key}@{host}:{port}/{catalog}", 
    connect_args={
        "auth": BasicAuthentication(user, user_key),
        "http_scheme": "https",
    }
)
connection = engine.connect()
query = f"""

"""

pd.read_sql(query, connection)
def download_data(urls):
    filenames = []
    
    for i, link in enumerate(urls):
        try:
            result = requests.get(link, cookies={'urs_user': 'liviaalvzs', 'urs_pass': 'Ss1234567890?'})
            result.raise_for_status()
            
            basename = get_filenames_from_urls(link)
            filenames.append(basename)
            
            with open(basename, 'wb') as f:
                f.write(result.content)
            
            print(f'File from {link} downloaded successfully and saved as {basename}.')
            
        except requests.exceptions.HTTPError as errh:
            print ("HTTP Error:",errh)
        except requests.exceptions.ConnectionError as errc:
            print ("Error Connecting:",errc)
        except requests.exceptions.Timeout as errt:
            print ("Timeout Error:",errt)
        except requests.exceptions.RequestException as err:
            print ("Something went wrong:",err)
        
    return filenames
python3 manage.py makemigrations  --settings=CheckInPlatform.settings.localpkt 

python3 manage.py migrate  --settings=CheckInPlatform.settings.localpkt
import pandas as pd
import sqlite3

# Create database
conn = sqlite3.connect('guests.db')
c = conn.cursor()

# Create table
c.execute('''CREATE TABLE guests
             (id INTEGER PRIMARY KEY AUTOINCREMENT,
              name TEXT,
              phone TEXT,
              email TEXT,
              visits INTEGER)''')

# Insert data
df = pd.read_csv('guests.csv')
df.to_sql('guests', conn, if_exists='append', index=False)

# Close connection
conn.close()
import os
import zipfile

def zipdir(path, ziph):
    # ziph is zipfile handle
    for root, dirs, files in os.walk(path):
        for file in files:
            ziph.write(os.path.join(root, file),
                       os.path.relpath(os.path.join(root, file),
                                       os.path.join(path, '..')))

with zipfile.ZipFile('/content/m1.Zip', 'w', zipfile.ZIP_DEFLATED) as zipf:
    zipdir('/content/m1', zipf)
import numpy as np
import matplotlib.pyplot as plt

# Create a sample dataset
data = np.random.randn(1000)

# Calculate the number of bins
num_bins = 10

# Create a histogram
hist, bins = np.histogram(data, bins=num_bins)

# Plot the histogram
plt.bar(bins[:-1], hist, width=bins[1] - bins[0])
plt.xlabel("Value")
plt.ylabel("Frequency")
plt.title("Histogram of Sample Data")
plt.show()
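The same chart can be produced in one call with plt.hist, which bins and draws in a single step:

plt.hist(data, bins=num_bins)
plt.xlabel("Value")
plt.ylabel("Frequency")
plt.title("Histogram of Sample Data")
plt.show()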
pip install -U pip setuptools
pip install wheel

!pip install eeconvert

pip install --upgrade urllib3

import pandas as pd
import geopandas as gpd
import datetime
import ee
import eeconvert

ee.Authenticate()
ee.Initialize()

from google.colab import drive
drive.mount('/content/drive')

Maiz_str = '/content/drive/MyDrive/Puntos/SHP/Col_2021_Maiz.shp'
Soja_str = '/content/drive/MyDrive/Puntos/SHP/Col_2021_Soja.shp'

df = gpd.read_file(Maiz_str, parse_dates=['Fecha_Siembra', 'Rango_Floor','Rango_Top'])
#df = gpd.read_file(Soja_str, parse_dates=['Fecha_Cosecha', 'Date_R3_Fr', 'Date_R6_Fr'])  # in record 315 I changed the year 0201 to 2016 in the Fecha_Cosecha column
#df = df.head(10)
display(df.columns)

df['Start_Date'] = pd.to_datetime(df['Rango_Flor']).dt.strftime('%Y-%m-%d')
df['End_Date'] = pd.to_datetime(df['Rango_Top_']).dt.strftime('%Y-%m-%d')


#df['Start_Date'] = pd.to_datetime(df['Date_R3_Fr']).dt.strftime('%Y-%m-%d')
#df['End_Date'] = pd.to_datetime(df['Date_R6_Fr']).dt.strftime('%Y-%m-%d')

df = df[df['Start_Date'].notna()]
df = df[df['End_Date'].notna()]
df = df[df['Longitud'].notna()]
df = df[df['Latitud'].notna()]

df = df[['Start_Date','End_Date', 'geometry', 'Parcela']]

new_df = pd.DataFrame([],columns=['id', 'longitude', 'latitude', 'time', 'NDVI', 'Parcela'])

for index, row in df.iterrows():
  newGDF = df.filter(items = [index], axis=0)
  fc = eeconvert.gdfToFc(newGDF)
  feature = fc.geometry().buffer(-125)
  print(row.Parcela)
  Start_Date = ee.Date(row.Start_Date)
  End_Date = ee.Date(row.End_Date)

  dataset = ee.ImageCollection("MODIS/061/MOD13Q1").select('NDVI').filter(ee.Filter.date(Start_Date,End_Date))
  NDVIvalues = dataset.getRegion(feature, 250).getInfo()
  NDVI_df = pd.DataFrame(NDVIvalues)
  NDVI_df.columns = NDVI_df.iloc[0]
  NDVI_df = NDVI_df.iloc[1:].reset_index(drop=True)
  NDVI_df.insert(1, "Parcela", row.Parcela)
  new_df = pd.concat([new_df, NDVI_df])  # DataFrame.append was removed in pandas 2.x

new_df.to_csv('/content/drive/MyDrive/Puntos/NDVI_Poligonos_Maiz.csv',header=True, index=False)
import os
import glob
import geopandas as gpd
import pandas as pd
import re

json_dir_name = "/Users/agroclimate/Documents/PriceFrobes/Uruguay/Poligonos/GeoJson/2023/"
json_pattern = os.path.join(json_dir_name,'*.geojson')
file_list = glob.glob(json_pattern)

collection = gpd.GeoDataFrame([])

for file in file_list:
    print(file)
    newGpd = gpd.read_file(file)
    newGpd['NAME'] = re.sub(r"[^a-zA-Z0-9]","_",os.path.basename(file)[:-8])
    collection = pd.concat([collection,newGpd])

collection = collection.loc[collection.geometry.geom_type=='Polygon']
collection.to_file('/Users/agroclimate/Documents/PriceFrobes/Uruguay/Poligonos/GeoJson/Col_2023_name.shp')
from shapely.geometry import *
import glob
import os
import fiona
from zipfile import ZipFile
import pandas as pd
import io
import geopandas as gpd
from fastkml import kml
import shapely

def remove_third_dimension(geom):
    if geom.is_empty:
        return geom

    if isinstance(geom, Polygon):
        exterior = geom.exterior
        new_exterior = remove_third_dimension(exterior)

        interiors = geom.interiors
        new_interiors = []
        for int in interiors:
            new_interiors.append(remove_third_dimension(int))

        return Polygon(new_exterior, new_interiors)

    elif isinstance(geom, LinearRing):
        return LinearRing([xy[0:2] for xy in list(geom.coords)])

    elif isinstance(geom, LineString):
        return LineString([xy[0:2] for xy in list(geom.coords)])

    elif isinstance(geom, Point):
        return Point([xy[0:2] for xy in list(geom.coords)])

    elif isinstance(geom, MultiPoint):
        points = list(geom.geoms)
        new_points = []
        for point in points:
            new_points.append(remove_third_dimension(point))

        return MultiPoint(new_points)

    elif isinstance(geom, MultiLineString):
        lines = list(geom.geoms)
        new_lines = []
        for line in lines:
            new_lines.append(remove_third_dimension(line))

        return MultiLineString(new_lines)

    elif isinstance(geom, MultiPolygon):
        pols = list(geom.geoms)

        new_pols = []
        for pol in pols:
            new_pols.append(remove_third_dimension(pol))

        return MultiPolygon(new_pols)

    elif isinstance(geom, GeometryCollection):
        geoms = list(geom.geoms)

        new_geoms = []
        for geom in geoms:
            new_geoms.append(remove_third_dimension(geom))

        return GeometryCollection(new_geoms)

    else:
        raise RuntimeError("Currently this type of geometry is not supported: {}".format(type(geom)))
        
fiona.drvsupport.supported_drivers['kml'] = 'rw'
fiona.drvsupport.supported_drivers['KML'] = 'rw'
fiona.drvsupport.supported_drivers['libkml'] = 'rw'
fiona.drvsupport.supported_drivers['LIBKML'] = 'rw'

outDir = '/Users/agroclimate/Documents/PriceFrobes/Uruguay/Poligonos/GeoJson/2023/'
files = glob.glob('/Users/agroclimate/Documents/PriceFrobes/Uruguay/Poligonos/KMZ 2023/**/*.kml', recursive=True)

for file in files:
    print(file)
    polys = gpd.read_file(file, driver='KML')

    names = polys.Name
    geoms = polys.geometry
    file_name = os.path.basename(file)[:-4]
    
    for (geom, name) in zip(geoms, names):
        geom = remove_third_dimension(geom)
        gdf = gpd.GeoDataFrame(index=[0], crs='epsg:4326', geometry=[geom])
        
        gdf.to_file(outDir+file_name+ "_"+name+'.geojson', driver='GeoJSON')
from shapely.geometry import *
import glob
import os
from zipfile import ZipFile
import pandas as pd
import io
import geopandas as gpd
from fastkml import kml
import shapely

def remove_third_dimension(geom):
    if geom.is_empty:
        return geom

    if isinstance(geom, Polygon):
        exterior = geom.exterior
        new_exterior = remove_third_dimension(exterior)

        interiors = geom.interiors
        new_interiors = []
        for int in interiors:
            new_interiors.append(remove_third_dimension(int))

        return Polygon(new_exterior, new_interiors)

    elif isinstance(geom, LinearRing):
        return LinearRing([xy[0:2] for xy in list(geom.coords)])

    elif isinstance(geom, LineString):
        return LineString([xy[0:2] for xy in list(geom.coords)])

    elif isinstance(geom, Point):
        return Point([xy[0:2] for xy in list(geom.coords)])

    elif isinstance(geom, MultiPoint):
        points = list(geom.geoms)
        new_points = []
        for point in points:
            new_points.append(remove_third_dimension(point))

        return MultiPoint(new_points)

    elif isinstance(geom, MultiLineString):
        lines = list(geom.geoms)
        new_lines = []
        for line in lines:
            new_lines.append(remove_third_dimension(line))

        return MultiLineString(new_lines)

    elif isinstance(geom, MultiPolygon):
        pols = list(geom.geoms)

        new_pols = []
        for pol in pols:
            new_pols.append(remove_third_dimension(pol))

        return MultiPolygon(new_pols)

    elif isinstance(geom, GeometryCollection):
        geoms = list(geom.geoms)

        new_geoms = []
        for geom in geoms:
            new_geoms.append(remove_third_dimension(geom))

        return GeometryCollection(new_geoms)

    else:
        raise RuntimeError("Currently this type of geometry is not supported: {}".format(type(geom)))
        
outDir = '/Users/agroclimate/Documents/PriceFrobes/Uruguay/Poligonos/GeoJson/2023/'
files = glob.glob('/Users/agroclimate/Documents/PriceFrobes/Uruguay/Poligonos/KMZ 2023/**/*.kmz', recursive=True)

for file in files:
    print(file)
    
    kmz = ZipFile(file, 'r')
    kml_content = kmz.open('doc.kml', 'r').read()

    k = kml.KML()
    k.from_string(kml_content)
    
    docs = list(k.features())
    
    folders = []
    for d in docs:
        folders.extend(list(d.features()))
    
    records = []
    
    while type(folders[0]) is not type(kml.Placemark('{http://www.opengis.net/kml/2.2}', 'id', 'name', 'description')):
        records = []
        for f in folders:
            records.extend(list(f.features()))
        folders = records
    
    names = [element.name for element in folders]

    geoms = [element.geometry for element in folders]
    
    file_name = os.path.basename(file)[:-4]
    
    for (geom, name) in zip(geoms, names):
        geom = remove_third_dimension(geom)
        gdf = gpd.GeoDataFrame(index=[0], crs='epsg:4326', geometry=[geom])
        
        gdf.to_file(outDir+file_name+ "_"+name+'.geojson', driver='GeoJSON')
from google.colab import drive
import os

drive.mount('/content/drive')
base_dir = "/content/drive/MyDrive/vision_computador"
os.chdir(base_dir)
print('current working directory:', base_dir)

# enter the path of the working folder
WORKPLACE_DIR = None
assert WORKPLACE_DIR is not None, "[!] Enter the path of the working folder."
# This function chooses at random which action to be performed within the range 
# of all the available actions.
def ActionChoice(available_actions_range):
    if(sum(available_actions_range)>0):
        next_action = int(ql.random.choice(available_actions_range,1))
    if(sum(available_actions_range)<=0):
        next_action = int(ql.random.choice(5,1))
    return next_action

# Sample next action to be performed
action = ActionChoice(PossibleAction)
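# Neither ql nor PossibleAction is defined in the snippet; a minimal,
# hypothetical setup that makes it runnable (treating "ql" as numpy under an
# alias and PossibleAction as the indices of currently available actions):
import numpy as ql

PossibleAction = ql.array([1, 3])  # hypothetical available-action indices
print(ActionChoice(PossibleAction))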
I highly recommend tf.data.Dataset for creating the dataset:

Do all the processing you want (like resizing and normalizing) on all images with dataset.map.
Instead of using train_test_split, use dataset.take and dataset.skip for splitting the dataset.
Code for generating random images and labels:

# !pip install autokeras
import tensorflow as tf
import autokeras as ak
import numpy as np

data = np.random.randint(0, 255, (45_000,32,32,3))
label = np.random.randint(0, 10, 45_000)
label = tf.keras.utils.to_categorical(label) 
Convert data & label to a tf.data.Dataset and process them (only 55 ms for 45_000; benchmarked on Colab):

dataset = tf.data.Dataset.from_tensor_slices((data, label))
def resize_normalize_preprocess(image, label):
    image = tf.image.resize(image, (16, 16))
    image = image / 255.0
    return image, label

# %%timeit 
dataset = dataset.map(resize_normalize_preprocess, num_parallel_calls=tf.data.AUTOTUNE)
# 1 loop, best of 5: 54.9 ms per loop
Split the dataset into 80% for train and 20% for test, then train and evaluate AutoKeras.ImageClassifier:
dataset_size = len(dataset)
train_size = int(0.8 * dataset_size)
test_size = int(0.2 * dataset_size)

dataset = dataset.shuffle(32)
train_dataset = dataset.take(train_size)
test_dataset = dataset.skip(train_size)

print(f'Size dataset : {len(dataset)}')
print(f'Size train_dataset : {len(train_dataset)}')
print(f'Size test_dataset : {len(test_dataset)}')

clf = ak.ImageClassifier(overwrite=True, max_trials=1)
clf.fit(train_dataset, epochs=1)
print(clf.evaluate(test_dataset))
>>> import shutil, os

>>> os.chdir('C:\\')
>>> shutil.copy('C:\\spam.txt', 'C:\\delicious')
# 'C:\\delicious\\spam.txt'

>>> shutil.copy('eggs.txt', 'C:\\delicious\\eggs2.txt')
# 'C:\\delicious\\eggs2.txt'
>>> import os
>>> os.makedirs('C:\\delicious\\walnut\\waffles')
>>> import os

>>> os.getcwd()
# 'C:\\Python34'
>>> os.chdir('C:\\Windows\\System32')

>>> os.getcwd()
# 'C:\\Windows\\System32'
>>> my_files = ['accounts.txt', 'details.csv', 'invite.docx']

>>> for filename in my_files:
...     print(os.path.join('C:\\Users\\asweigart', filename))
...
# C:\Users\asweigart\accounts.txt
# C:\Users\asweigart\details.csv
# C:\Users\asweigart\invite.docx
import csv
import json

# Open the JSON file
with open('data.json') as json_file:
    data = json.load(json_file)

# Create a new CSV file and write the data
with open('data.csv', 'w', newline='') as csv_file:
    writer = csv.DictWriter(csv_file, fieldnames=data[0].keys())
    writer.writeheader()
    for row in data:
        writer.writerow(row)
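The script assumes the JSON file holds a list of flat objects with identical keys; a hypothetical data.json it would accept:

[
    {"name": "Ada", "age": 36},
    {"name": "Grace", "age": 45}
]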
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.15.3/css/all.min.css">

<ul class="nav">
    <li><i class="fas fa-home"></i><a href="#">Home</a></li>
    <li><i class="fas fa-user"></i><a href="#">Profile</a></li>
    <li><i class="fas fa-envelope"></i><a href="#">Messages</a></li>
    <li><i class="fas fa-cog"></i><a href="#">Settings</a></li>
</ul>
import requests

def pull_file(URL, savepath):
    r = requests.get(URL)
    with open(savepath, 'wb') as f:
        f.write(r.content)   
    # Use the print method for logging
    print(f"File pulled from {URL} and saved to {savepath}")

from airflow.operators.python_operator import PythonOperator

# Create the task
pull_file_task = PythonOperator(
    task_id='pull_file',
    # Add the callable
    python_callable=pull_file,
    # Define the arguments
    op_kwargs={'URL':'http://dataserver/sales.json', 'savepath':'latestsales.json'},
    dag=process_sales_dag
)
import pandas as pd
import geopandas as gps
import datetime
import ee

ee.Authenticate()
ee.Initialize()

from google.colab import drive
drive.mount('/content/drive')

"""Selccionar Cultivo"""
#Maiz_str = '/content/drive/MyDrive/Puntos/Maiz.csv'
#df = pd.read_csv(Maiz_str, parse_dates=['Fecha_Siembra', 'Rango_Floor','Rango_Top'])

Soja_str = '/content/drive/MyDrive/Puntos/Soja.csv'
df = pd.read_csv(Soja_str, parse_dates=['Fecha_Cosecha', 'R3', 'R6'])
# In the soybean file, row 315, column Fecha_Cosecha: changed year 0201 to 2016

df['Start_Date'] = df['R3']
df['End_Date'] = df['R6']

df = df[df['Start_Date'].notna()]
df = df[df['End_Date'].notna()]
df = df[df['Longitud'].notna()]
df = df[df['Latitud'].notna()]

new_df = pd.DataFrame([],columns=['id', 'longitude', 'latitude', 'time', 'NDVI', 'Parcela','Codigo_Lote'])

for index, row in df.iterrows():

    feature = ee.Algorithms.GeometryConstructors.Point([row.Longitud,row.Latitud])

    Start_Date = ee.Date(row.Start_Date)
    End_Date = ee.Date(row.End_Date)

    dataset = ee.ImageCollection("MODIS/061/MOD13Q1").select('NDVI').filter(ee.Filter.date(Start_Date,End_Date))
    NDVIvalues = dataset.getRegion(feature, 250).getInfo()
    NDVI_df = pd.DataFrame(NDVIvalues)
    NDVI_df.columns = NDVI_df.iloc[0]
    NDVI_df = NDVI_df.iloc[1:].reset_index(drop=True)
    NDVI_df.insert(1, "Parcela", row.Parcela)
    NDVI_df.insert(1, "Codigo_Lote", row.Codigo_Lote)
    new_df = pd.concat([new_df, NDVI_df])  # DataFrame.append was removed in pandas 2.x

new_df.to_csv('/content/drive/MyDrive/Puntos/NDVI_Puntos.csv',header=True, index=False)
# This is on a topic we did in physics; only part of the class took it, and I'm not sure it's in the curriculum, but if so, this can serve as a check in case there's a test on it
# Here is the whole code
# I coded it myself, it's not stolen
# It should work; I tried computing it once by hand and once with the program. By hand it took about 100x longer and still wasn't accurate. And it works
# It's probably nothing special, but it serves its purpose




# More projects, if you like, at kubasobr.blogspot.com
import random
import time
import math

import os

def Zacatek():
    ascii_art1 = """ .----------------.  .----------------.  .----------------.  .----------------. 
| .--------------. || .--------------. || .--------------. || .--------------. |
| |    _______   | || |     ____     | || | ____    ____ | || | ____   ____  | |
| |   /  ___  |  | || |   .'    `.   | || ||_   \  /   _|| || ||_  _| |_  _| | |
| |  |  (__ \_|  | || |  /  .--.  \  | || |  |   \/   |  | || |  \ \   / /   | |
| |   '.___`-.   | || |  | |    | |  | || |  | |\  /| |  | || |   \ \ / /    | |
| |  |`\____) |  | || |  \  `--'  /  | || | _| |_\/_| |_ | || |    \ ' /     | |
| |  |_______.'  | || |   `.____.'   | || ||_____||_____|| || |     \_/      | |
| |              | || |              | || |              | || |              | |
| '--------------' || '--------------' || '--------------' || '--------------' |
 '----------------'  '----------------'  '----------------'  '----------------' """
    print(ascii_art1)
    ascii_art2 = """      _       _          _       ____        _          
     | | __ _| | ___   _| |__   / ___|  ___ | |__  _ __ 
  _  | |/ _` | |/ / | | | '_ \  \___ \ / _ \| '_ \| '__|
 | |_| | (_| |   <| |_| | |_) |  ___) | (_) | |_) | |   
  \___/ \__,_|_|\_\\__,_|_.__/  |____/ \___/|_.__/|_|   
                                                        """
    print(ascii_art2)

    print("Vítejte u programu SOMV, který jsem vytvořil jako malý projekt do fyziky když jsem se nudil. SOMV stojí pro Směrodatná odchylka měření veličiny"+"\n"+"Program je určen jako pomůcka k měření. Program vám vypočítá SOMV a i Relativní odchylku  aby jste věděli jak přesně jste měřili"+"\n"+"Program vytvoří i graf s vámi danými hodnotami"+"\n"+"Je to vše naprogramované přes python, jelikož to je jediný co umím vcelku, max základy html css a javascript."+"\n"+"\n"+"NÁVOD k použití:"+"\n"+"Program se vás bude ptát na měřené hodnoty a vy je bude zadávat ve STEJNÝCH jednotkách(v nové verzi možná vylepším)"+"\n"+"Jakmile zadáte všechny hodnoty co chcete tak program sám řekne co dál."+"\n"+"Autorem je Jakub Šobr Videnska 1.B, update ohlášeny předem na kubasobr.blogspot.cz")


    print("")
    print("START PROGRAMU")
    random_cislo = random.randint(0,100)
    start_otazka = int(input("Napište číslo "+str(random_cislo) +" pro start programu: "))
    if start_otazka == random_cislo:
        Informace_funkce()
    else:
        print("")
        print("OZNÁMENÍ")
        print("Bud jste zadali špatné číslo a nebo nastalo v programu k chybě")
        print("Za pět sekund bude zapnuta začáteční funkce znovu.")
        print("")
        time.sleep(5)
        Zacatek()

def Informace_funkce():
    print("")
    print("PROGRAM STARTUJE")
    otazka_pocet_hodnot = int(input("INFORMACE 1 - Kolikrát jste provedli měření?: "))
    otazka_jednotka = input("INFORMACE 2 - Zadejte jednotku v níž jste prováděli měření (cm,dm,m,litr apod): ")
    tts = input("INFORMACE 3 - Chcete aby program výsledek zazněl jako text to speech? ano / ne: ")


    def Hlavni_funkce(pocet_hodnot,jednotka):
        print("")
        aktualni_cislo = 1
        hodnoty_list = []
        index = 0
        celkove = 0

        for _ in range(pocet_hodnot):
            hodnota = float(input("Zadejte hodnotu č."+str(aktualni_cislo)+" : "))
            celkove += hodnota
            hodnoty_list.insert(index, hodnota)
            index +=1
            aktualni_cislo +=1


        ar_prumer = celkove/pocet_hodnot
        def Casti(aritmeticky_prumer):
            index_cteni  = 0
            soucet_casti = 0
            for hodnota in hodnoty_list:
                cast = (hodnota - aritmeticky_prumer) ** 2
                soucet_casti += cast
                index_cteni += 1
            # Standard deviation of the mean: sqrt(sum of squared deviations / (n*(n-1)))
            jmenovatel = pocet_hodnot * (pocet_hodnot - 1)
            odchylka = (soucet_casti / jmenovatel) ** 0.5
            minus_interval = aritmeticky_prumer-odchylka
            plus_interval = aritmeticky_prumer+odchylka
            relativni_odchylka = odchylka/aritmeticky_prumer

            # Rounding
            round_odchylka = round(odchylka,3)
            round_minus_interval = round(minus_interval,3)
            round_plus_interval = round(plus_interval, 3)
            round_relativni_odchylka = round(relativni_odchylka, 3)

            # Formatting for TTS (decimal comma)
            uprava_round_odchylka = str(round_odchylka).replace('.', ',')
            uprava_round_minus_interval = str(round_minus_interval).replace('.', ',')
            uprava_round_plus_interval = str(round_plus_interval).replace('.', ',')
            uprava_round_relativni_odchylka = str(round_relativni_odchylka).replace('.', ',')
            if odchylka > 0.5:
                text = ("Systém odhalil nepřesné měření! Odchylka je +- " + str(round_odchylka) + str(
                    jednotka) + " Reálná hodnota je někde mezi " + str(round_minus_interval) + " a " + str(
                    round_plus_interval) + ". Relativní odchylka je " + str(
                    round_relativni_odchylka) + " Děkujeme za použití programu.")
                presne_hodnoty = ("Odchylka = "+str(odchylka)+"\n"+"Interval = od "+str(minus_interval)+" po "+str(plus_interval)+"\n"+"Relativní odchylka = "+str(relativni_odchylka))
                print(text)
                presne_ot = int(input("Chcete vidět přesné hodnoty(1) nebo stačí obecnější?(2): "))
                if presne_ot == 1:
                    print(presne_hodnoty)

            else:
                text = ("Měření je v pořádku! Odchylka je +- " + str(round_odchylka) + str(
                    jednotka) + " Reálná hodnota je někde mezi " + str(round_minus_interval) + " a " + str(
                    round_plus_interval) + ". Relativní odchylka je " + str(
                    round_relativni_odchylka) + " Děkujeme za použití programu.")
                presne_hodnoty = ("Odchylka = "+str(odchylka)+"\n"+"Interval = od "+str(minus_interval)+" po "+str(plus_interval)+"\n"+"Relativní odchylka = "+str(relativni_odchylka))
                print(text)
                presne_ot = int(input("Chcete vidět přesné hodnoty(1) nebo stačí obecnější?(2): "))
                if presne_ot == 1:
                    print(presne_hodnoty)


            def Tts():
                # User interface
                if odchylka > 1:
                    text = ("Systém odhalil nepřesné měření! Odchylka je +- " + str(uprava_round_odchylka) + str(
                        jednotka) + " Reálná hodnota je někde mezi " + str(uprava_round_minus_interval) + " a " + str(
                        uprava_round_plus_interval) + ". Relativní odchylka je " + str(
                        uprava_round_relativni_odchylka) + " Děkujeme za použití programu.")
                    print(text)
                else:
                    text = ("Měření je v pořádku! Odchylka je +- " + str(uprava_round_odchylka) + str(
                        jednotka) + " Reálná hodnota je někde mezi " + str(uprava_round_minus_interval) + " a " + str(
                        uprava_round_plus_interval) + ". Relativní odchylka je " + str(
                        uprava_round_relativni_odchylka) + " Děkujeme za použití programu.")
                    print(text)

                from gtts import gTTS
                import os

                # Text, který chcete převést na hlas
                if odchylka > 1:
                    text = "Systém odhalil nepřesné měření! Odchylka je +-" + str(round_odchylka) + " " + str(
                        jednotka) + "Reálná hodnota je někde mezi " + str(round_minus_interval) + " a " + str(
                        round_plus_interval) + ". Relativní odchylka je " + str(
                        round_relativni_odchylka) + "Děkujeme za použití programu."
                else:
                    text = "Měření je v pořádku! Odchylka je +-" + str(round_odchylka) + " " + str(
                        jednotka) + "Reálná hodnota je někde mezi " + str(round_minus_interval) + " a " + str(
                        round_plus_interval) + ". Relativní odchylka je " + str(
                        round_relativni_odchylka) + "Děkujeme za použití programu."

                # Vytvoření instance gTTS s textem a nastavením jazyka na češtinu
                tts = gTTS(text, lang='cs')

                # Uložení hlasového souboru na disk
                tts.save("nahravka.mp3")

                # Přehrání hlasového souboru (vyžaduje externí přehrávač)
                os.system("nahravka.mp3")
            def graf_menu():
                text = """                   __ 
   __ _ _ __ __ _ / _|
  / _` | '__/ _` | |_ 
 | (_| | | | (_| |  _|
  \__, |_|  \__,_|_|  
  |___/               """
                print(text)
                otazka = input("Chcete zobrazit hodnoty ve grafu? ano / ne: ")
                if otazka == "ano":
                    graf()
                else:
                    print("Děkujeme za použití programu. Program se vypne za 10 sekund")
                    time.sleep(10)
            def graf():
                import matplotlib.pyplot as plt

                poradi = list(range(1, pocet_hodnot + 1))  # positions 1 .. pocet_hodnot

                # Data for the plot
                x = poradi
                y = hodnoty_list

                # Line plot of the measured values
                plt.plot(x, y, label='Vámi změřené hodnoty', color='b')

                # Horizontal lines at the lower and upper bounds of the interval
                plt.axhline(y=minus_interval, color='r', linestyle='-.', label='Mínusový interval')
                plt.axhline(y=plus_interval, color='r', linestyle='-.', label='Plusový interval')

                # Bold green dashed line at the arithmetic mean
                plt.axhline(y=aritmeticky_prumer, color='g', linestyle='--', linewidth=3, label='Aritmetický průměr')

                # Shade the area between the interval bounds
                plt.fill_between(x, minus_interval, plus_interval, color='yellow', alpha=0.5, label='Interval odchylky')

                # Axis labels, legend, display
                plt.ylabel('Hodnoty v ' + jednotka)
                plt.xlabel('Pořadí')
                plt.legend()
                plt.show()

            if tts == "ano":
                Tts()
            graf_menu()

        Casti(ar_prumer)

    Hlavni_funkce(otazka_pocet_hodnot, otazka_jednotka)

Zacatek()
# made by jkob sobr 23 original
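For reference, a minimal standalone sketch of the same uncertainty calculation (standard error of the mean) on a plain list of measurements; the names and sample values here are illustrative only:

# Standard error of the mean: sqrt(sum of squared deviations / (n * (n - 1)))
def standard_error(values):
    n = len(values)
    mean = sum(values) / n
    squares = sum((v - mean) ** 2 for v in values)
    return (squares / (n * (n - 1))) ** 0.5

values = [15.2, 15.4, 15.1, 15.3]  # hypothetical measurements
se = standard_error(values)
print(f"mean = {sum(values) / len(values):.3f}, deviation = +- {se:.3f}")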
!pip install rasterstats
!pip install rasterio
!pip install leafmap

import glob
import pandas as pd
import rasterio
from rasterio.mask import mask
import geopandas as gps
from rasterstats import zonal_stats

from google.colab import drive
drive.mount('/content/drive')

files = glob.glob("/content/drive/MyDrive/Sura/Datos TMax/2011_15/SAMeT_CPTEC_TMAX_2013*.nc")
print(len(files))

grid_str = '/content/drive/MyDrive/Sura/Seccionales_Policiales_WGS84_Selected.shp'
poligonos = gps.read_file(grid_str)

df = pd.DataFrame([], columns=['fecha', 'median', 'CODIGOSECC'])

for file in files:

    # Read the first band and the affine transform while the dataset is open
    with rasterio.open('netcdf:' + file + ':tmax') as msla_nc:
        msla_nc_data = msla_nc.read(1)
        affine = msla_nc.transform

    print(file)
    for index, row in poligonos.iterrows():
        features = row.geometry

        zs = zonal_stats(features, msla_nc_data, affine=affine, all_touched=False,
                         stats="median")

        # DataFrame.append was removed in pandas 2.0; concatenate new rows instead
        df = pd.concat([df, pd.DataFrame([[zs[0].get('median'), str(file)[-11:-3], row['CODIGOSECC']]],
                                         columns=['median', 'fecha', 'CODIGOSECC'])])

        print(row['CODIGOSECC'])

df.to_csv('/content/drive/MyDrive/Sura/Tmax2013.csv',header=True, index=False)


import glob
import pandas as pd

df = pd.DataFrame([], columns=['fecha', 'median', 'CODIGOSECC'])

files = glob.glob("/content/drive/MyDrive/Sura/Datos TMax/Tmax2*.csv")
print(len(files))
for file in files:
    # DataFrame.append was removed in pandas 2.0; concatenate instead
    df = pd.concat([df, pd.read_csv(file)])
#print(df)
df['fecha'] = df['fecha'].astype(str)
df['year'] = df.fecha.str[:4]
df['month'] = df.fecha.str[4:6]
df['day'] = df.fecha.str[-2:]
#display(df.groupby('year').count())

df.to_csv('/content/drive/MyDrive/Sura/TmaxCompleto.csv',header=True, index=False)
import os
import requests
from bs4 import BeautifulSoup
import urllib.request
from urllib.parse import urljoin

base_url = 'http://ftp.cptec.inpe.br/'

baseDir = 'modelos/tempo/SAMeT/DAILY/TMAX/'

outputDir = '/Users/agroclimate/Documents/PriceFrobes/Uruguay/Sura/CPTEC/Faltantes/'

years = list(range(2022, 2023))
months = list(range(1,4))+list(range(10, 13))
#months = list(range(10, 13))
for year in years:

    for month in months:
        # Zero-pad the month to match the server's folder naming (01, 02, ...)
        ftp_path = base_url + baseDir + str(year) + '/' + str(month).zfill(2) + '/'
        
        # List folder content
        response = requests.get(ftp_path)
        
        soup = BeautifulSoup(response.content, 'html.parser')
        # Find all the 'a' tags (hyperlinks) on the page
        links = soup.find_all('a')

        # Extract filenames from the href attribute of 'a' tags
        files = [link['href'] for link in links if link.has_attr('href')]

        # Filter out directories and other unwanted links
        files = [filename for filename in files if filename.endswith('.nc')]
    
        for file in files:
            try:
                file_url = urljoin(ftp_path, file)

                to_download = (outputDir + file)

                # Download the file
                urllib.request.urlretrieve(file_url, to_download)

                print(f"Downloaded: {file}")

            except Exception as e:
                print(f"Error downloading {file}: {e}")
# Wrap longitudes from [0, 360) into [-180, 180) and re-sort so the coordinate is monotonic
SLP = SLP.assign_coords(longitude=(((SLP.longitude + 180) % 360) - 180))
Z = Z.assign_coords(longitude=(((Z.longitude + 180) % 360) - 180))
SLP = SLP.sortby(SLP['longitude'])
Z = Z.sortby(Z['longitude'])
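A minimal sketch of how SLP and Z might be loaded before this step, assuming xarray and ERA5-style NetCDF inputs (the file and variable names are hypothetical):

import xarray as xr

# Hypothetical file and variable names; adjust to the actual dataset
SLP = xr.open_dataset("era5_slp.nc")["msl"]
Z = xr.open_dataset("era5_z500.nc")["z"]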
.\env\Scripts\activate
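This activation line (Windows PowerShell/cmd) assumes the virtual environment already exists; it would be created first with:

python -m venv env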
find_item = input("Enter the item to be searched here:- ").lower()

items = {'wand': 5, 'telescope': 2, 'feather pen': 7}

for thing in items:
    if thing == find_item:
        print(f"{items[thing]} units of {thing} are present.")
        break
else:
    print('No stock left! You need to go and buy!!')
numbers = [51, 32, 63, 74, 45, 36, 47]

for i in numbers:
   pass
numbers = [51, 32, 63, 74, 45, 36, 47]

for num in numbers:
    if num % 2 == 0:
        continue
    else:
        print(num)
numbers = [51, 32, 63, 74, 45, 36, 47]

for num in numbers:
    if num % 5 == 0:
        print(num)
        break
wizard_list = ["Wand", "Cauldron", "crystal phials",
               "telescope", "brass scales", "Owl",
               "Cat", "Toad",
               "Feather"]

for items in wizard_list:
    print(items)
wizard_list = ["Wand", "Cauldron", "crystal phials",
               "telescope", "brass scales", "Owl",
               "Cat", "Toad",
               "Feather pen"]

print(wizard_list[0])
print(wizard_list[1])
print(wizard_list[2])
print(wizard_list[3])
print(wizard_list[4])
print(wizard_list[5])
print(wizard_list[6])
print(wizard_list[7])
print(wizard_list[8])
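Printing each element by hand, as above, does not scale; when the position is also needed, a small enumerate sketch does the same job:

# Prints "1 Wand", "2 Cauldron", ... for the list above
for i, item in enumerate(wizard_list, start=1):
    print(i, item)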
 _____________________
|  _________________  |
| | Pythonista   0. | |  .----------------.  .----------------.  .----------------.  .----------------. 
| |_________________| | | .--------------. || .--------------. || .--------------. || .--------------. |
|  ___ ___ ___   ___  | | |     ______   | || |      __      | || |   _____      | || |     ______   | |
| | 7 | 8 | 9 | | + | | | |   .' ___  |  | || |     /  \     | || |  |_   _|     | || |   .' ___  |  | |
| |___|___|___| |___| | | |  / .'   \_|  | || |    / /\ \    | || |    | |       | || |  / .'   \_|  | |
| | 4 | 5 | 6 | | - | | | |  | |         | || |   / ____ \   | || |    | |   _   | || |  | |         | |
| |___|___|___| |___| | | |  \ `.___.'\  | || | _/ /    \ \_ | || |   _| |__/ |  | || |  \ `.___.'\  | |
| | 1 | 2 | 3 | | x | | | |   `._____.'  | || ||____|  |____|| || |  |________|  | || |   `._____.'  | |
| |___|___|___| |___| | | |              | || |              | || |              | || |              | |
| | . | 0 | = | | / | | | '--------------' || '--------------' || '--------------' || '--------------' |
| |___|___|___| |___| |  '----------------'  '----------------'  '----------------'  '----------------' 
|_____________________|
                           Welcome to pythonista Calculator
                  We support the following operations on two numbers:
                  - subtraction
                  + addition
                  * multiplication
                  / division
                  ** exponentiation
                  % modular division

Enter 1st number:- 13
Enter 2nd number:- 2 
Enter the operator here:- *
13 * 2 = 26
calc = """
 _____________________
|  _________________  |
| | Pythonista   0. | |  .----------------.  .----------------.  .----------------.  .----------------. 
| |_________________| | | .--------------. || .--------------. || .--------------. || .--------------. |
|  ___ ___ ___   ___  | | |     ______   | || |      __      | || |   _____      | || |     ______   | |
| | 7 | 8 | 9 | | + | | | |   .' ___  |  | || |     /  \     | || |  |_   _|     | || |   .' ___  |  | |
| |___|___|___| |___| | | |  / .'   \_|  | || |    / /\ \    | || |    | |       | || |  / .'   \_|  | |
| | 4 | 5 | 6 | | - | | | |  | |         | || |   / ____ \   | || |    | |   _   | || |  | |         | |
| |___|___|___| |___| | | |  \ `.___.'\  | || | _/ /    \ \_ | || |   _| |__/ |  | || |  \ `.___.'\  | |
| | 1 | 2 | 3 | | x | | | |   `._____.'  | || ||____|  |____|| || |  |________|  | || |   `._____.'  | |
| |___|___|___| |___| | | |              | || |              | || |              | || |              | |
| | . | 0 | = | | / | | | '--------------' || '--------------' || '--------------' || '--------------' |
| |___|___|___| |___| |  '----------------'  '----------------'  '----------------'  '----------------' 
|_____________________|
                           Welcome to pythonista Calculator
                  We support the following operations on two numbers:
                  - subtraction
                  + addition
                  * multiplication
                  / division
                  ** exponentiation
                  % modular division
"""
print(calc)
first_number = int(input("Enter 1st number:- "))
second_number = int(input("Enter 2nd number:- "))
operator_ = input("Enter the operator here:- ")

if operator_ == "-":
    result = first_number - second_number
    print(f"{first_number} {operator_} {second_number} = {result}")

elif operator_ == "+":
    result = first_number + second_number
    print(f"{first_number} {operator_} {second_number} = {result}")

elif operator_ == "*":
    result = first_number * second_number
    print(f"{first_number} {operator_} {second_number} = {result}")

elif operator_ == "/":
    result = first_number / second_number
    print(f"{first_number} {operator_} {second_number} = {result}")

elif operator_ == "**":
    result = first_number ** second_number
    print(f"{first_number} {operator_} {second_number} = {result}")

elif operator_ == "%":
    result = first_number % second_number
    print(f"{first_number} {operator_} {second_number} = {result}")

else:
    print("You entered an invalid operator!!")
a = int(input("Enter value of a:-\n"))
b = int(input("Enter value of b:-\n"))
print("a is greater than b") if a > b else print("b is greater than a")
num = -19

if num >= 0:
    if isinstance(num, float):
        print(f"{num} is a positive floating-point number.")
    elif isinstance(num, int):
        print(f"{num} is a positive integer.")

else:
    if isinstance(num, float):
        print(f"{num} is a negative floating-point number.")
    elif isinstance(num, int):
        print(f"{num} is a negative integer.")
age = 19

# With two separate ifs, an 18-year-old hits the first branch and then
# the second if's else fires as well; compare the elif version below
if age == 18:
    print("You're 18 go through the next check to know whether you qualify for license or not.")
if age > 18:
    print("You are above 18 you do qualify age criteria for license.")
else:
    print(f"You are not yet 18 come after {18-age} years to qualify the age criteria.")
age = 19

if age==18:
    print("You're 18 go through the next check to know whether you qualify for license or not.")
elif age > 18:
    print("You are above 18 you do qualify age criteria for license.")
else:
    print(f"You are not yet 18 come after {18-age} years to qualify the age criteria.")
num = -19

if num>=0:
    print(f"{num} is a positive number.")
else:
    print(f"{num} is a negative number.")

print("This will always be printed")
Welcome to the fun world ladies and gentleman


⠀⠀⠀⠀⠀⠀⠀⣠⡔⠚⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠑⠢⢄⠀⢀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⢀⠔⠋⠻⢿⣷⣦⣤⣀⠀⠀⠀⠀⠀⠀⠀⠀⣀⣤⣽⣧⣷⠀⠀⠀⠀
⠀⠀⢀⠴⠁⠀⠀⠀⠀⢈⣹⣿⣿⣷⠀⠀⠀⠀⠀⢾⣿⣿⣟⠋⠁⠈⠳⡀⠀⠀
⠀⢠⠊⣀⣤⠴⠶⠖⢛⠻⣿⣿⣭⣀⣀⠀⠀⠀⠀⠀⠉⠻⡿⠳⠖⠶⠶⠟⠓⠲
⣴⢣⡞⡉⢄⠒⡈⠜⡀⢾⣏⠉⠙⠛⠻⠿⢿⣿⣶⣶⣿⠿⢷⡡⢘⡐⢢⠘⡄⢂
⣿⡏⡐⢌⠢⠑⡌⠂⢥⣿⣿⣦⣤⣀⣀⠀⠀⠀⠀⠀⠀⣀⣼⣷⠠⠘⡄⠣⡐⠂..
⡟⣷⠀⢆⠢⡑⣈⢡⡾⢻⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡷⣅⠰⢁...⡡
⡇⠈⠻⣦⣴⣤⠶⠋⠀⢸⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡇⠈⠉⠓⠒⢪
⡇⠀⠀⠀⠀⠀⠀⠀⠀⠈⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡃⠀⠀⠀⠀⢸
⣇⠀⠀⠀⠀⠀⠀⠀⠀⠀⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠀⠀⠀⠀⠀⢸
⠘⡄⠀⠀⠀⠀⠀⠀⠀⠀⠘⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⠀⠀⠀⠀⢀⠏
⠀⠙⣄⠀⠀⠀⠀⠀⠀⠀⠀⢻⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠇⠀⠀⠀⣠⠏⠀
⠀⠀⠈⠢⡀⠀⠀⠀⠀⠀⠀⠀⠻⣿⣿⣿⣿⣿⣿⣿⣿⣿⠏⠀⠀⢀⡴⠁⠀⠀
⠀⠀⠀⠀⠈⠲⢄⠀⠀⠀⠀⠀⠀⠉⠿⣿⣿⣿⣿⣿⠿⠋⠀⣠⠔⠋⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠙⠒⠤⣤⣀⣀⣀⣀⣀⣈⣉⣉⣠⠤⠖⠏⠁

Enter any of the words below and press enter to know its meaning in the fun world
['Bumfuzzle', 'Everywhen', 'Erf', 'Obelus', 'Bumbershoot']

Enter the word here:- Obelus
searching...

The meaning of 'Obelus' in our world is:- the symbol used for division in a math problem
print("\nWelcome to the fun world ladies and gentleman\n")
print("""
⠀⠀⠀⠀⠀⠀⠀⣠⡔⠚⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠑⠢⢄⠀⢀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⢀⠔⠋⠻⢿⣷⣦⣤⣀⠀⠀⠀⠀⠀⠀⠀⠀⣀⣤⣽⣧⣷⠀⠀⠀⠀
⠀⠀⢀⠴⠁⠀⠀⠀⠀⢈⣹⣿⣿⣷⠀⠀⠀⠀⠀⢾⣿⣿⣟⠋⠁⠈⠳⡀⠀⠀
⠀⢠⠊⣀⣤⠴⠶⠖⢛⠻⣿⣿⣭⣀⣀⠀⠀⠀⠀⠀⠉⠻⡿⠳⠖⠶⠶⠟⠓⠲
⣴⢣⡞⡉⢄⠒⡈⠜⡀⢾⣏⠉⠙⠛⠻⠿⢿⣿⣶⣶⣿⠿⢷⡡⢘⡐⢢⠘⡄⢂
⣿⡏⡐⢌⠢⠑⡌⠂⢥⣿⣿⣦⣤⣀⣀⠀⠀⠀⠀⠀⠀⣀⣼⣷⠠⠘⡄⠣⡐⠂..
⡟⣷⠀⢆⠢⡑⣈⢡⡾⢻⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡷⣅⠰⢁...⡡
⡇⠈⠻⣦⣴⣤⠶⠋⠀⢸⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡇⠈⠉⠓⠒⢪
⡇⠀⠀⠀⠀⠀⠀⠀⠀⠈⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡃⠀⠀⠀⠀⢸
⣇⠀⠀⠀⠀⠀⠀⠀⠀⠀⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠀⠀⠀⠀⠀⢸
⠘⡄⠀⠀⠀⠀⠀⠀⠀⠀⠘⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⠀⠀⠀⠀⢀⠏
⠀⠙⣄⠀⠀⠀⠀⠀⠀⠀⠀⢻⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠇⠀⠀⠀⣠⠏⠀
⠀⠀⠈⠢⡀⠀⠀⠀⠀⠀⠀⠀⠻⣿⣿⣿⣿⣿⣿⣿⣿⣿⠏⠀⠀⢀⡴⠁⠀⠀
⠀⠀⠀⠀⠈⠲⢄⠀⠀⠀⠀⠀⠀⠉⠿⣿⣿⣿⣿⣿⠿⠋⠀⣠⠔⠋⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠙⠒⠤⣤⣀⣀⣀⣀⣀⣈⣉⣉⣠⠤⠖⠏⠁
""")

fun_world_dictionary = {'Bumfuzzle': "confused",
                        'Everywhen': "all the time",
                        'Erf': "plot of land",
                        'Obelus': "the symbol used for division in a math problem",
                        'Bumbershoot': "umbrella"}

# I know it's not quite funny (ok, not at all funny), but please forgive me for that

print("Enter any of the words below and press enter to know its meaning in the fun world")
list_of_words = list(fun_world_dictionary.keys())
print(list_of_words, "\n")


word_to_be_searched = input("Enter the word here:- ")
print("searching...\n")
meaning = ""

if word_to_be_searched == "Bumfuzzle":
    meaning = fun_world_dictionary['Bumfuzzle']

if word_to_be_searched == "Everywhen":
    meaning = fun_world_dictionary['Everywhen']

if word_to_be_searched == "Erf":
    meaning = fun_world_dictionary['Erf']

if word_to_be_searched == "Obelus":
    meaning = fun_world_dictionary['Obelus']

if word_to_be_searched == "Bumbershoot":
    meaning = fun_world_dictionary['Bumbershoot']


if meaning:
    print(f"The meaning of '{word_to_be_searched}' in our world is:- {meaning}\n")
else:
    print(f"Sorry, '{word_to_be_searched}' is not in the fun world dictionary.\n")
$ uvicorn main:app --reload --port 8000
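The command above expects a main.py that exposes an ASGI application named app; a minimal FastAPI sketch:

# main.py - minimal app for the uvicorn command above (illustrative)
from fastapi import FastAPI

app = FastAPI()

@app.get("/")
def read_root():
    return {"message": "Hello, world"}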