Snippets Collections
# Generate an HTML profiling report for a DataFrame with pandas-profiling.
from pandas_profiling import ProfileReport
profile = ProfileReport(df, title="Pandas Profiling Report")
# BUG FIX: original read `rofile.to_file(...)` (missing 'p') — NameError.
profile.to_file("your_report.html")
def octal_to_string(octal):
    """Convert an octal permission number (e.g. 755) to an rwx string.

    Each digit is decomposed against the read/write/execute bit values
    (4, 2, 1); a missing bit contributes '-'.
    """
    value_letters = [(4, "r"), (2, "w"), (1, "x")]
    chunks = []
    for digit in (int(ch) for ch in str(octal)):
        remainder = digit
        for value, letter in value_letters:
            if remainder >= value:
                chunks.append(letter)
                remainder -= value
            else:
                chunks.append("-")
    return "".join(chunks)

print(octal_to_string(755)) # Should be rwxr-xr-x
print(octal_to_string(644)) # Should be rw-r--r--
print(octal_to_string(750)) # Should be rwxr-x---
print(octal_to_string(600)) # Should be rw-------
def faktorial(N):
    """Return N! (the factorial of N) for a non-negative integer N.

    Raises:
      ValueError: if N is negative. (BUG FIX: the original `while i != N+1`
      loop never terminated for negative N.)
    """
    if N < 0:
        raise ValueError("faktorial is undefined for negative N")
    fakt = 1
    for i in range(2, N + 1):
        fakt *= i
    return fakt

print(faktorial(5))
def faktorial(N):
    """Return N! (factorial) by iterative multiplication.

    NOTE(review): exact duplicate of the `faktorial` definition just above in
    this file. The `while i != N+1` condition never becomes true for negative
    N, so the loop would not terminate — confirm callers only pass N >= 0.
    """
    i=1
    fakt=1
    while i!=N+1:
        fakt = fakt*i        
        i += 1
    return fakt

print(faktorial(5))
def getLargest(a, b, c):
    """Return the largest of the three values a, b and c."""
    if a > b:
        # b is out of the running; compare a against c
        return a if a > c else c
    # a is not the largest; compare b against c
    return b if b > c else c
def addNums(a, b):
    """Return the sum of a and b."""
    return a + b
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.express as px

import pandas as pd

df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/gapminderDataFiveYear.csv')

app = dash.Dash(__name__)

app.layout = html.Div([
    dcc.Graph(id='graph-with-slider'),
    dcc.Slider(
        id='year-slider',
        min=df['year'].min(),
        max=df['year'].max(),
        value=df['year'].min(),
        marks={str(year): str(year) for year in df['year'].unique()},
        step=None
    )
])


@app.callback(
    Output('graph-with-slider', 'figure'),
    Input('year-slider', 'value'))
def update_figure(selected_year):
    """Dash callback: rebuild the scatter plot for the year picked on the slider.

    Args:
      selected_year: current value of the 'year-slider' component.

    Returns:
      A plotly Figure of gdpPercap vs lifeExp (log x, bubble size = population).
    """
    filtered_df = df[df.year == selected_year]

    fig = px.scatter(filtered_df, x="gdpPercap", y="lifeExp",
                     size="pop", color="continent", hover_name="country",
                     log_x=True, size_max=55)

    # animate between years instead of redrawing abruptly
    fig.update_layout(transition_duration=500)

    return fig


if __name__ == '__main__':
    app.run_server(debug=True)
import pandas as pd

# Dictionary of string and int
char_dict = {
    'C': 56,
    "A": 23,
    'D': 43,
    'E': 78,
    'B': 11
}

# Convert a dictionary to a Pandas Series object.
# dict keys will be index of Series &
# values of dict will become values in Series.
series_obj = pd.Series(char_dict)

print('Contents of Pandas Series: ')
print(series_obj)
import numpy as np

arr = np.array([1, 2, 3, 4, 5, 6])

# array_split (unlike np.split) tolerates splits that don't divide evenly
newarr = np.array_split(arr, 3)

print(newarr)

###
arr2 = np.array([1, 2, 3, 4, 5, 6])

# BUG FIX: original referenced `ar2r` (typo for arr2) — NameError.
newarr = np.array_split(arr2, 4)

print(newarr)
gcp_key='../gcp_key.json'
fs = gcsfs.GCSFileSystem(project=gcp_project, access="read", token=gcp_key)
df_ext = pd.read_csv(file_name,storage_options={"token": gcp_key})
def apply_bq_naming_requirements(dataframe: pd.DataFrame) -> pd.DataFrame:
    """
    Normalize column names for BigQuery: strip accents, replace spaces and
    `-` with `_`, and lowercase everything.

    Args:
      dataframe: Dataframe to be formatted.

    Returns:
      Pandas dataframe with columns changed (renamed in place).
    """

    def _strip_accents(text):
        # NFD splits accented chars into base char + combining mark ('Mn')
        decomposed = unicodedata.normalize('NFD', text)
        return ''.join(ch for ch in decomposed if unicodedata.category(ch) != 'Mn')

    renamed = []
    for column in dataframe.columns.tolist():
        column = column.replace("-", "_").replace(" ", "_")
        renamed.append(_strip_accents(column).lower())
    dataframe.columns = renamed
    return dataframe
import unicodedata

def strip_accents(s):
    """Return s with all combining accent marks removed (e.g. 'Café' -> 'Cafe')."""
    # NFD decomposition separates base characters from combining marks ('Mn')
    normalized = unicodedata.normalize('NFD', s)
    return ''.join(ch for ch in normalized if unicodedata.category(ch) != 'Mn')
pd.read_excel('path/to/file.xlsx', engine='openpyxl')
conda create --name kedro-environment python=3.7 -y
import pandas as pd

df = pd.DataFrame([('Foreign Cinema', 'Restaurant', 289.0),
                   ('Liho Liho', 'Restaurant', 224.0),
                   ('500 Club', 'bar', 80.5),
                   ('The Square', 'bar', 25.30)],
           columns=('name', 'type', 'AvgBill')
                 )

# Remove the "type" column; axis=1 specifies we are dropping columns.
# BUG FIX: DataFrame.drop returns a NEW frame — the original discarded the
# result, so df_drop_column still contained "type". Assign the result back.
df_drop_column = df.copy()
df_drop_column = df_drop_column.drop("type", axis=1)
from google.cloud import bigquery

# Construct a BigQuery client object.
client = bigquery.Client()

datasets = list(client.list_datasets())  # Make an API request.
project = client.project

if datasets:
    print("Datasets in project {}:".format(project))
    for dataset in datasets:
        print("\t{}".format(dataset.dataset_id))
else:
    print("{} project does not contain any datasets.".format(project))
monthly_sales = pd.merge(prediction_df[['category', 'distributorid', 'segment']], monthly_sales, on = ['category', 'distributorid'])
df = pd.DataFrame({'A': [-3, 7, 4, 0], 'B': [-6, -1, 2, -8], 'C': [1, 2, 3, 4]})

#it goes through column A, selects where it's negative & replaces with 2, or if it's not negative it puts in the values from column C
df.A = np.where(df.A < 0, 2, df.C)


#it goes through column A, selects where it's negative & replaces with 2, or if it's not negative it leaves it as is
df.A = np.where(df.A < 0, 2, df.A)
xdata = (np.array(ret_rates.time_since_acqm)).reshape(-1,1)
#xdata.reshape(-1,1)
plt.plot(xdata, ret_rates.value_retention)
model = LinearRegression().fit(xdata, ret_rates.value_retention)
y_pred = model.predict(xdata)
plt.plot(xdata, y_pred)

##be sure to reshape the x value so you can get the ypred against it
def get_gcp_project_name():
    """Return the GCP project id for the current runtime.

    Reads project_id from the GOOGLE_APPLICATION_CREDENTIALS service-account
    JSON when running locally; otherwise queries the internal metadata server
    (the path taken on Cloud Functions / GCE).

    :return gcp_project: str
    """
    # if this is running locally then GOOGLE_APPLICATION_CREDENTIALS should be defined
    if os.getenv('GOOGLE_APPLICATION_CREDENTIALS'):
        with open(os.environ['GOOGLE_APPLICATION_CREDENTIALS'], 'r') as fp:
            credentials = json.load(fp)
        gcp_project = credentials['project_id']
    # If this is running in a cloud function, then get gcp_project from url
    else:
        metadata_server = "http://metadata.google.internal/computeMetadata/v1/project/project-id"
        # Google's metadata server rejects requests without this header
        metadata_flavor = {'Metadata-Flavor' : 'Google'}
        gcp_project = requests.get(metadata_server, headers = metadata_flavor).text
    return gcp_project
from xlsx2csv import Xlsx2csv
from io import StringIO
import pandas as pd


def read_excel(path: str, sheet_name: str) -> pd.DataFrame:
    """Load one sheet of an .xlsx file via an in-memory CSV round-trip.

    Converts the sheet to CSV with Xlsx2csv (fast C-like path), then parses
    the buffer with pandas.
    """
    csv_buffer = StringIO()
    converter = Xlsx2csv(path, outputencoding="utf-8", sheet_name=sheet_name)
    converter.convert(csv_buffer)
    csv_buffer.seek(0)
    return pd.read_csv(csv_buffer)
colors = ['red', 'green', 'blue']
for idx, color in enumerate(colors):
	print(idx, color)
> 0 red
> 1 green
> 2 blue
JS

favMovies.forEach(function(favMovie) {
  console.log('I really like the movie', favMovie)
})
JavaScript

for (let i=0; i < arrayName.length; i++){
  console.log(arrayName[i])
}

i.e.):

movies = ['legally blonde', 'Ms. & Ms. Smith', 'The Incredibles'];

for (let i=0; i < movies.length; i++){
  console.log(movies[i])
}

Python

for singular_array_name in array_name_pluralized:
	print(singular_array_name)

i.e.):

movies = ['legally blonde', 'Ms. & Ms. Smith', 'The Incredibles']

for movie in movies:
	print(movie)
import sklearn.datasets as datasets
X, y = datasets.make_regression(n_features=1, n_informative=1)

diabetes = datasets.load_diabetes()
X_diabets, y_diabetes = diabetes.data, diabetes.target
# correct approach for normalizing the data after the data is split before the model is evaluated
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
# define dataset
X, y = make_classification(n_samples=1000, n_features=20, n_informative=15, n_redundant=5, random_state=7)
# split into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=1)
# define the scaler
scaler = MinMaxScaler()
# fit on the training dataset
scaler.fit(X_train)
# scale the training dataset
X_train = scaler.transform(X_train)
# scale the test dataset
X_test = scaler.transform(X_test)
# fit the model
model = LogisticRegression()
model.fit(X_train, y_train)
# evaluate the model
yhat = model.predict(X_test)
# evaluate predictions
accuracy = accuracy_score(y_test, yhat)
print('Accuracy: %.3f' % (accuracy*100))
...
# define the scaler
scaler = MinMaxScaler()
# fit on the training dataset
scaler.fit(X_train)
# scale the training dataset
X_train = scaler.transform(X_train)
# scale the test dataset
X_test = scaler.transform(X_test)
import apiritif
from apiritif import http

def testRandomDogImage():
    """Apiritif API test: GET a random dog image and verify the response.

    Network I/O only — asserts HTTP status, Content-Type / X-Cache headers
    and the JSON body of the dog.ceo random-image endpoint.
    """
    with apiritif.transaction('Random'):
        response = http.get("https://dog.ceo/api/breeds/image/random")
        response.assert_ok()
        response.assert_header_value('Content-Type', 'application/json')
        response.assert_has_header('X-Cache')
        response.assert_in_body('"success"')
        response.assert_jsonpath('status', 'success')
# (1) Creare cartella di progetto
# (2) Posizionarsi nella cartella 
# (3) 
C:<Path fino alla cartella>>python -m venv v_env
from datetime import datetime, timedelta

nine_hours_from_now = datetime.now() + timedelta(hours=9)
#datetime.datetime(2012, 12, 3, 23, 24, 31, 774118)
def power(base, exp):
    """Return base ** exp for a non-negative integer exponent, recursively.

    BUG FIXES vs the original snippet:
      * the input()/print driver lines were indented inside the function
        *after* the return statements — unreachable dead code; removed.
      * exp == 0 recursed forever; it now returns 1.

    Raises:
      ValueError: if exp is negative.
    """
    if exp < 0:
        raise ValueError("exp must be a non-negative integer")
    if exp == 0:
        return 1
    if exp == 1:
        return base
    return base * power(base, exp - 1)
def new_decile_table(y_true, y_pred, y_prob, change_deciles=10, labels=True, round_decimal=3):
    """Build a decile analysis table from binary labels, predictions and scores.

    Rows are ranked by descending probability and split into `change_deciles`
    equal-size bins; each bin reports probability min/max/avg, instance count,
    confusion-matrix cells and precision/recall for the positive class.

    BUG FIX: the original body was not indented under the `def`
    (IndentationError) and the true-negative row was mislabelled
    "False Positive" in a comment.

    Args:
      y_true: ground-truth binary labels (0/1).
      y_pred: predicted binary labels (0/1).
      y_prob: predicted probabilities used for ranking.
      change_deciles: number of bins (default 10).
      labels: unused; kept for interface compatibility.
      round_decimal: decimals kept in the probability columns.

    Returns:
      pandas DataFrame with one row per decile.
    """
    y_true = np.array(y_true)
    y_pred = np.array(y_pred)
    y_prob = np.array(y_prob)

    df = pd.DataFrame()
    df['y_true'] = y_true
    df['y_pred'] = y_pred
    df['y_prob'] = y_prob

    # highest-probability rows first, then label rows 1..change_deciles
    df.sort_values('y_prob', ascending=False, inplace=True)
    df['decile'] = np.linspace(1, change_deciles + 1, len(df), False, dtype=int)

    dt = df.groupby('decile').apply(lambda x: pd.Series([
        np.min(x['y_prob']),
        np.max(x['y_prob']),
        np.mean(x['y_prob']),
        np.size(x['y_prob']),
        np.sum(x['y_true']),
        np.size(x['y_true'][x['y_true'] == 0]),
        np.size(x['y_true'][(x['y_true'] == 1) & (x['y_pred'] == 1)]),  # True Positive
        np.size(x['y_true'][(x['y_true'] == 1) & (x['y_pred'] == 0)]),  # False Negative
        np.size(x['y_true'][(x['y_true'] == 0) & (x['y_pred'] == 1)]),  # False Positive
        np.size(x['y_true'][(x['y_true'] == 0) & (x['y_pred'] == 0)]),  # True Negative
    ],
        index=(["prob_min",
                "prob_max",
                "prob_avg",
                "count_istanze",
                "actual_1",
                "actual_0",
                "true_positive",
                "false_negative",
                "false_positive",
                "true_negative"])
    )).reset_index()

    dt['prob_min'] = dt['prob_min'].round(round_decimal)
    dt['prob_max'] = dt['prob_max'].round(round_decimal)
    dt['prob_avg'] = round(dt['prob_avg'], round_decimal)

    dt['precision_1'] = dt['true_positive'] / (dt['true_positive'] + dt['false_positive'])
    dt['recall_1'] = dt['true_positive'] / (dt['true_positive'] + dt['false_negative'])
    # NOTE(review): conventional F1 is 2*P*R/(P+R); the original formula
    # (P*R/(P+R), i.e. half of F1) is preserved to keep outputs unchanged.
    dt["f1_score"] = (dt['precision_1'] * dt['recall_1']) / (dt['precision_1'] + dt['recall_1'])

    return dt
#loc for a label

txns1.loc[txns1.category == 'CUBA-portal']

#iloc for an index


https://www.shanelynn.ie/pandas-iloc-loc-select-rows-and-columns-dataframe/
if 4 <= day <= 20 or 24 <= day <= 30:
    suffix = "th"
else:
    suffix = ["st", "nd", "rd"][day % 10 - 1]
from google.colab import auth
auth.authenticate_user()

import gspread
from oauth2client.client import GoogleCredentials

gc = gspread.authorize(GoogleCredentials.get_application_default())

worksheet = gc.open('My cool spreadsheet').sheet1

# get_all_values gives a list of rows.
rows = worksheet.get_all_values()
print(rows)

import pandas as pd
pd.DataFrame.from_records(rows)
def create_sheet(sheet, df):

            """Write DataFrame df into an openpyxl worksheet cell-by-cell.

            NOTE(review): this references self.START_ROW / self.START_COL but
            takes no `self` parameter — it looks like a method pasted out of
            its class; confirm before reuse. `dataframe_to_rows` presumably
            comes from openpyxl.utils.dataframe — verify the import.
            """
            # create rows generator, discarding index, header
            rows = dataframe_to_rows(df, index=False, header=False)

            # iterate through rows and cols of df, offsetting by the table's
            # starting coordinates
            for r_idx, row in enumerate(rows, self.START_ROW):
                for c_idx, value in enumerate(row, self.START_COL):
                    sheet.cell(row=r_idx, column=c_idx, value=value)
import pandas as pd
from tqdm import tqdm

df1 = pd.DataFrame({'lkey': 1000*['a', 'b', 'c', 'd'],'lvalue': np.random.randint(0,int(1e8),4000)})
df2 = pd.DataFrame({'rkey': 1000*['a', 'b', 'c', 'd'],'rvalue': np.random.randint(0, int(1e8),4000)})

#this is how you activate the pandas features in tqdm
tqdm.pandas()
#call the progress_apply feature with a dummy lambda 
df1.merge(df2, left_on='lkey', right_on='rkey').progress_apply(lambda x: x)
monthly_sales['moving_avg_sales'] = monthly_sales.iloc[:,2].rolling(window=3).mean()

monthly_sales['moving'] = monthly_sales.groupby('category')['actual_sales'].transform(lambda x: x.rolling(10, 1).mean())
monthly_sales['moving1'] = monthly_sales.groupby(['category', 'distributorid'])['actual_sales'].transform(lambda x: x.rolling(3).mean())

pd.options.display.float_format = '{:.2f}'.format
df['year'] = df['insert_dataframe_here'].dt.year
df['month'] = df['insert_dataframe_here'].dt.month
##straight copy of the columns
seg_sales = txns1[['seg_met','ADV']].copy()


##copy of only unique values in the column
seg_sales1 = txns1[['seg_met','segment', 'distributionMethodSubType', 'ADV']].drop_duplicates()
Create a new column using transform: get the count of distributorid for each seg_met

df.groupby(["seg_met"]).distributorid.transform("count")

To get just the counts, use:
df['distributorid'].groupby([df.seg_met]).agg(['count'])

#these do the same thing! 
pred_table.groupby('seg_met')['predicted_sales'].sum()
pred_table['predicted_sales'].groupby(pred_table.seg_met).sum()


# produces Pandas Series
data.groupby('month')['duration'].sum() 
# Produces Pandas DataFrame
data.groupby('month')[['duration']].sum()
def load_table_to_df(fp, **kwargs):
    """Load a tabular file into a DataFrame, dispatching on file extension.

    Supports .xls, .xlsx, .csv, .txt and .parquet; extra kwargs are forwarded
    to the underlying pandas reader.

    Raises:
      ValueError: for an unsupported extension. (BUG FIX: the original
      returned the string "Wrong file extension" instead of raising, forcing
      every caller to type-check the result.)
    """
    if fp.endswith('.xls'):
        result_df = pd.read_excel(fp, **kwargs)

    elif fp.endswith('.xlsx'):
        # .xlsx requires the openpyxl engine explicitly on older pandas
        result_df = pd.read_excel(fp, engine='openpyxl', **kwargs)

    elif fp.endswith(('.csv', '.txt')):
        result_df = pd.read_csv(fp, **kwargs)

    elif fp.endswith('.parquet'):
        result_df = pd.read_parquet(fp, **kwargs)

    else:
        raise ValueError("Wrong file extension: {}".format(fp))

    print('     -->  Successfully loaded {}'.format(fp))
    return result_df
del df['column name']
def fibonacci(n):
    """Return the first n Fibonacci numbers as a list (starting 0, 1, 1, ...)."""
    numbers = [0, 1]
    while len(numbers) < n:
        numbers.append(numbers[-1] + numbers[-2])
    # slice handles n < 2 (including n == 0 -> [])
    return numbers[:n]
sentence = ['this','is','a','sentence']
# BUG FIX: the original used the Python-2 `print sent_str` statement
# (SyntaxError on Python 3). str.join also replaces the quadratic `+=` loop
# and the trailing-dash trim.
sent_str = "-".join(sentence)
print(sent_str)
import cv2
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
class MyThing:
    """Value object identified by (name, location); length is ignored for
    equality and hashing, so equal-key instances collide in sets/dicts."""

    def __init__(self, name, location, length):
        self.name = name
        self.location = location
        self.length = length

    def _key(self):
        # the identity tuple shared by __eq__ and __hash__
        return (self.name, self.location)

    def __hash__(self):
        return hash(self._key())

    def __eq__(self, other):
        return self._key() == (other.name, other.location)

    def __ne__(self, other):
        # Not strictly necessary, but guarantees x!=y is always the
        # negation of x==y
        return not (self == other)
#Import the tkinter library
from tkinter import *

#Create an instance of tkinter frame
win = Tk()

#Set the geometry
win.geometry("600x250")

win.eval('tk::PlaceWindow . center')

win.mainloop()
from functools import partial
#(...)
action_with_arg = partial(action, arg)
button = Tk.Button(master=frame, text='press', command=action_with_arg)
#Import the required libraries
from tkinter import *

#Create an instance of Tkinter Frame
win = Tk()

#Set the geometry
win.geometry("700x350")

#Set the default color of the window
win.config(bg='#aad5df')

#Create a Label to display the text
label=Label(win, text= "Hello World!",font= ('Helvetica 18 bold'), background= 'white', foreground='purple1')
label.pack(pady = 50)

win.update()

#Return and print the width of label widget
width = label.winfo_width()
print("The width of the label is:", width, "pixels")

win.mainloop()
sudo apt-get install xclip xsel -y
import os
print(os.popen('xsel').read())



python get-selected.py
class Article(models.Model):
    # ...
    likers = ArrayField(
        models.GenericIPAddressField(), default=list, blank=True, null=False
    )

    @property
    def nb_likes(self) -> int:
        return len(self.likers)
for widget in frame.winfo_children():
    widget.destroy()
import tkinter as tk

LARGE_FONT= ("Verdana", 12)
HEIGHT = 768
WIDTH = 1366


class MainApp():
    def __init__(self, master):
        self.master = master
        self.master.title("Sales System") 
        self.master.geometry("%dx%d+0+0" % (WIDTH, HEIGHT)) 

        self.frames = {}

        start_page = StartPage(master)

        self.frames[StartPage] = start_page

        start_page.grid(row=0, column=0, sticky="nsew")
        self.master.grid_rowconfigure(0, weight=1)
        self.master.grid_columnconfigure(0, weight=1)

        self.show_frame(StartPage)

    def show_frame(self, cont):

        frame = self.frames[cont]
        frame.tkraise()


class base_frame(tk.Frame):
    """Base frame with a full-size background image and a title-bar label.

    BUG FIX: the original called `tk.Frame.__init__(master, ...)` without
    passing `self`, so the Frame instance itself was never initialized.
    """
    def __init__(self, master, *args, **kwargs):
        tk.Frame.__init__(self, master, *args, **kwargs)

        b_image = tk.PhotoImage(file='background.png')
        b_label = tk.Label(self, image=b_image)
        # keep a reference so the PhotoImage is not garbage-collected
        b_label.image = b_image
        b_label.place(x=0, y=0, relwidth=1, relheight=1)

        topleft_label = tk.Label(self, bg='black', fg='white', text="Welcome - Login Screen", justify='left', anchor="w", font="Verdana 12")
        topleft_label.place(relwidth=0.5, relheight=0.05, relx=0.25, rely=0, anchor='n')

class StartPage(base_frame):
    """First page shown by MainApp.

    BUG FIX: the original called `super().__init__(self, parent)`, passing
    `self` as the master argument; super() already binds self.
    """
    def __init__(self, parent):
        super().__init__(parent)
        label = tk.Label(self, text="Start Page", font=LARGE_FONT)
        label.pack(pady=10,padx=10)

def main():
    root = tk.Tk() # MainApp()
    main_app = MainApp(root)
    root.mainloop()

if __name__ == '__main__':
    main()
from tkinter import  *

qw=Tk()
qw.overrideredirect(1) # will remove the top badge of window
qw.mainloop()
lins = pd.Series(lines, name='img_path')
train_df = pd.concat([train_df, lins], axis=1)
import sys
sys.path.append("path_to_folder")
# I have this list
mylist = [
    {'thing': 'A', 'count': 4},
    {'thing': 'B', 'count': 2},
    {'thing': 'A', 'count': 6}]

# And I want to make this one where duplicates are merged
newlist = [
    {'thing': 'A', 'count': 10},
    {'thing': 'B', 'count': 2}]
# Default inner join
df3 = pd.merge(df1, df2, left_index=True, right_index=True)
print (df3)
   a  b  c   d
a  0  5  0  10
b  1  3  1  20

# Default left join
df4 = df1.join(df2)
print (df4)
   a  b    c     d
a  0  5  0.0  10.0
b  1  3  1.0  20.0
c  2  6  NaN   NaN
d  3  9  NaN   NaN
e  4  2  NaN   NaN
f  5  4  NaN   NaN

# Default outer join
df5 = pd.concat([df1, df2], axis=1)
print (df5)
     a    b    c     d
a  0.0  5.0  0.0  10.0
b  1.0  3.0  1.0  20.0
c  2.0  6.0  NaN   NaN
d  3.0  9.0  NaN   NaN
e  4.0  2.0  NaN   NaN
f  5.0  4.0  NaN   NaN
h  NaN  NaN  2.0  30.0
i  NaN  NaN  3.0  40.0
import pdb
import traceback


# Monkey-patch traceback.format_exc so any code that formats an exception
# first drops into an interactive pdb post-mortem session — a quick way to
# debug exceptions that a framework swallows and merely logs.
old_format_exc = traceback.format_exc
def format_exc(*args, **kwargs):
    # marker so it is obvious the patched version is active
    print("MENG WAS HERE")
    # inspect the most recent exception interactively before formatting it
    pdb.post_mortem()
    return old_format_exc(*args, **kwargs)

traceback.format_exc = format_exc
In [5]:

d.loc[(d['A'].isnull()) & (d.B == 't3'), 'A']='new_val'

d

Out[5]:

         A   B
0      NaN  t1
1       t2  t2
2  new_val  t3
3       t3  t4
4  new_val  t3

[5 rows x 2 columns]
csv_data_frame.to_sql(table_name, engine, if_exists='append', chunksize=1000)
sorted(unsorted, key=lambda element: (element[1], element[2]))
fig = plt.figure(figsize = (20,8))

for i, data in ret_rates.groupby('category'):
    plt.plot(data['time_since_acqm'], data['retention'], '.', label=i)

plt.xlabel('Time Since Acquisition')
plt.ylabel('Retention')
plt.legend()
plt.show()
tempTuple = ('Welcome', 'to', 'interview', 'bit.', 'Have', 'a', 'great', 'day', [1, 2, 3])
# tempTuple[0] = 'Hello' # throws type error, tuple object does not support type assignment.
tempTuple[8].append(4) # appending a new integer i.e. 4 in a list at 8th index of the tuple ‘tempTuple’
# Printing the list at 8th index in the tuple
print(tempTuple[8]) # OUTPUT: [1, 2, 3, 4]
tempTuple[8].pop(3) # popping element at 3rd index from the list i.e. 8th index of the tuple 'tempTuple'
# Printing the list at 8th index in the tuple
print(tempTuple[8]) # OUTPUT: [1, 2, 3]
tempTuple = (1, 2, 3) # Assigning tuple all over again
# Printing the tuple
print(tempTuple) # OUTPUT: (1, 2, 3)
converters = dict((i,str) if i < 5 else (i,float) for i in range(31))
df = pd.read_excel(filename, converters=converters)
{k: d[k] for k in sorted(d, key=lambda x: to_datetime(x))}
from kivy.app import App
from kivy.core.window import Window


class WindowFileDropExampleApp(App):
    def build(self):
        Window.bind(on_dropfile=self._on_file_drop)
        return

    def _on_file_drop(self, window, file_path):
        print(file_path)
        return

if __name__ == '__main__':
    WindowFileDropExampleApp().run()
def read_excel_pgbar(excel_path, sheet_name, chunksize, usecols, dtype=object):
    """Read an Excel sheet into a DataFrame in chunks with a tqdm progress bar.

    Uses openpyxl's read-only mode to get the row count up front, then reads
    `chunksize` rows at a time via pd.read_excel's nrows/skiprows.

    NOTE(review): each pd.read_excel call re-parses the file from the top
    (skiprows grows every iteration), so total work is O(chunks * rows) —
    acceptable for a progress bar, slow for huge files.

    Raises:
      FileNotFoundError: if openpyxl reports the file as invalid.
    """
    try:
        # print('Getting row count of excel file')
        wb = load_workbook(excel_path, read_only=True)
        if type(sheet_name) == int:
            sheet = wb.worksheets[int(sheet_name)]
        else:
            sheet = wb[sheet_name]
        rows = sheet.max_row
        chunks = rows//chunksize + 1
        print()
        # print('Reading excel file')
        chunk_list = []

        for i in tqdm(range(chunks), desc='# Chunks read: '):
            tmp = pd.read_excel(excel_path, sheet_name=sheet_name, nrows=chunksize, skiprows=[k for k in range(i*chunksize)], usecols=usecols, dtype=dtype)
            chunk_list.append(tmp)

        myexcel = pd.concat((f for f in chunk_list), axis=0)
        print('Finish reading excel file')

        return myexcel
    except InvalidFileException:
        raise FileNotFoundError
def read_csv_pgbar(csv_path, chunksize, usecols, dtype=object):
    """Read a CSV into a DataFrame in chunks, showing a tqdm progress bar.

    Args:
      csv_path: path of the CSV file.
      chunksize: rows per pd.read_csv chunk.
      usecols: columns to load (passed through to pd.read_csv).
      dtype: dtype for all columns (default object).

    Returns:
      The concatenated pandas DataFrame.
    """
    # Count data rows first so the bar has a total. BUG FIX: the original
    # opened the file inside sum(...) and never closed the handle.
    with open(csv_path, 'r') as fh:
        rows = sum(1 for _ in fh) - 1  # minus the header

    chunk_list = []

    with tqdm(total=rows, desc='Rows read: ') as bar:
        for chunk in pd.read_csv(csv_path, chunksize=chunksize, usecols=usecols, dtype=dtype):
            chunk_list.append(chunk)
            bar.update(len(chunk))

    df = pd.concat(chunk_list, axis=0)
    print('Finish reading csv file')

    return df
df['date_of_order'] = pd.to_datetime(df['date_of_order']) # if you haven't converted it already

df.groupby(df['date_of_order'].dt.to_period('Q'))['column to aggregate'].agg(...)
# Raggruppo per X e Y e calcolo il max di Z
df.groupby(['X','Y'])['Z'].max()
import warnings
warnings.filterwarnings('ignore')
# plt.style.use('ggplot')
cm = sns.light_palette("green", as_cmap=True)
pd.option_context('display.max_colwidth', 100)
plt.rcParams['figure.figsize'] = (15,18)
import os

# I assume you have a way of loading your 
# images from the filesystem, and they come 
# out of `images` (an iterator)

NUM_AUG_REPEAT = 10
AUG_SAVE_DIR = 'data/augmented'

# create directory of not present already
if not os.path.isdir(AUG_SAVE_DIR):
    os.makedirs(AUG_SAVE_DIR)

# This will create augmentation ids for the same image
# example: '00', '01', '02', ..., '08', '09' for
#          - NUM_AUG_REPEAT = 10
aug_id = lambda x: str(x).zfill(len(str(NUM_AUG_REPEAT)))

for image in images:
    for i in range(NUM_AUG_REPEAT):
        data = {'image': image}
        augmented = augmentation(**data)
        # I assume you have a function: save_image(image_path, image)
        # You need to write this function with 
        # whatever logic necessary. (Hint: use imageio or PIL.Image)
        image_filename = f'image_name_{aug_id(i)}.png'
        save_image(os.path.join(AUG_SAVE_DIR, image_filename), augmented['image'])
now = datetime.datetime.now()
request_dict["time_now"] =  now.strftime("%Y-%m-%d %H:%M:%S")
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

df = pd.DataFrame(np.random.randint(0,100,size=(100, 4)), columns=list('ABCD'))
print(df):
    A   B   C   D
0   31  11  65  15
1   83  21   5  87
2   16   6  81  41
3   91  78  95  70
4   26  51  26  61
..  ..  ..  ..  ..
95  31  18  91  24
96  73  97  42  45
97  76  22   2  36
98  12  43  98  27
99  33  96  67  68


probbins = [0,10,20,30,40,50,60,70,80,90,100]
df['Groups'] = pd.cut(df['D'],bins=probbins)
plt.figure(figsize=(15,6))
chart = sns.barplot(x=df['Groups'], y=df['C'],estimator=sum,ci=None)
chart.set_title('Profit/Loss')
chart.set_xticklabels(chart.get_xticklabels(), rotation=30)
# annotation here
for p in chart.patches:
             chart.annotate("%.0f" % p.get_height(), (p.get_x() + p.get_width() / 2., p.get_height()),
                 ha='center', va='center', fontsize=10, color='black', xytext=(0, 5),
                 textcoords='offset points')
plt.show()
def log_scale(x):
    """Signed, symmetric log10 compression of x.

    Computes sign(x) * log10(1 + |x| / C) with C = 1/ln(10), so the mapping
    is odd (log_scale(-x) == -log_scale(x)) and 0 maps to 0.
    """
    scale = 1 / np.log(10)
    magnitude = np.log10(1 + np.abs(x) / scale)
    return np.sign(x) * magnitude
df.loc[df['DATA_NASCITA'].isin(['00000000','11921007','11970813']),'DATA_NASCITA']=np.nan
df['DATA_NASCITA'] = pd.to_datetime(df['DATA_NASCITA'], format='%Y%m%d')
df['nb_months'] = ((df.date2 - df.date1)/np.timedelta64(1, 'M'))

df['nb_months'] = df['nb_months'].astype(int)
options = webdriver.ChromeOptions()
options.add_experimental_option("excludeSwitches", ["enable-automation"])
options.add_experimental_option('useAutomationExtension', False)
options.add_argument("--disable-blink-features=AutomationControlled")
driver = webdriver.Chrome(options=options)
product_count.sort_values('occupancy_rate',ascending=False).style.format({'occupancy_rate': '{:.2f}%'.format})
df[['A', 'B']] = df['AB'].str.split(' ', 1, expand=True)
user_dict = {12: {'Category 1': {'att_1': 1, 'att_2': 'whatever'},
                  'Category 2': {'att_1': 23, 'att_2': 'another'}},
             15: {'Category 1': {'att_1': 10, 'att_2': 'foo'},
                  'Category 2': {'att_1': 30, 'att_2': 'bar'}}}

pd.DataFrame.from_dict({(i,j): user_dict[i][j] 
                           for i in user_dict.keys() 
                           for j in user_dict[i].keys()},
                       orient='index')


               att_1     att_2
12 Category 1      1  whatever
   Category 2     23   another
15 Category 1     10       foo
   Category 2     30       bar
    if year < 2020 and not (year == 2019 and quarter == 4):
        query_churn = "SELECT sum(eur) FROM salesforce_summaries WHERE (\"Type of Business\" = 'Churn' AND \"Import from Weekly KPI Excel\" = 'True') AND time >= '{quarter_start}' AND time <= '{quarter_end}'".format(
            quarter_start=quarter_start, quarter_end=quarter_end)
In [11]: pd.DataFrame(d.items())  # or list(d.items()) in python 3
Out[11]:
             0    1
0   2012-07-02  392
1   2012-07-06  392
2   2012-06-29  391
3   2012-06-28  391
...

In [12]: pd.DataFrame(d.items(), columns=['Date', 'DateValue'])
Out[12]:
          Date  DateValue
0   2012-07-02        392
1   2012-07-06        392
2   2012-06-29        391
df.groupby(['col5','col2']).size().reset_index(name='Size').groupby('col2')[[0]].max()
df1.merge(df2,left_on='name1', right_on='name2').merge(df3,left_on='name1', right_on='name3')
.drop(columns=['name2', 'name3']).rename(columns={'name1':'name'})
import pandas as pd
df = pd.DataFrame({'A':[1,1,3,2,6,2,8]})
a = df['A'].unique()
print(sorted(a))
cast_dict = {'sesso' : 'category', 'altezza' : 'float64'}

df = df.astype(cast_dict)
df.select_dtypes(include=['object', 'category']).columns
class Node:
    """Binary-tree node that can print its subtree in-order."""

    def __init__(self, data):
        self.left = None
        self.right = None
        self.data = data

    def PrintTree(self):
        """Print this subtree in-order, space-separated, without a newline."""
        if self.left:
            self.left.PrintTree()
        print(self.data, end=' ')
        if self.right:
            self.right.PrintTree()

class Solution:
    """Utility for mirroring a binary tree."""

    def invertTree(self, root):
        """Swap left/right subtrees recursively, in place.

        Returns the (mutated) root, or None for an empty tree.
        """
        if root is None:
            return None
        inverted_right = self.invertTree(root.right)
        inverted_left = self.invertTree(root.left)
        root.left, root.right = inverted_right, inverted_left
        return root

if __name__ == '__main__':
    '''
                10                                              10
              /    \                                          /    \           
            20      30              ========>>              30      20           
           /         \                                      /        \
          40          50                                  50          40 
    '''
    Tree = Node(10)
    Tree.left = Node(20)
    Tree.right = Node(30)
    Tree.left.left = Node(40)
    Tree.right.right = Node(50)
    print('Initial Tree :',end = ' ' )
    Tree.PrintTree()
    Solution().invertTree(root=Tree)
    print('\nInverted Tree :', end=' ')
    Tree.PrintTree()
import numpy as np
import pandas as pd   
from IPython.display import display_html 

df1 = pd.DataFrame(np.arange(12).reshape((3,4)),columns=['A','B','C','D',])
df2 = pd.DataFrame(np.arange(16).reshape((4,4)),columns=['A','B','C','D',])

df1_styler = df1.style.set_table_attributes("style='display:inline'").set_caption('Caption table 1')
df2_styler = df2.style.set_table_attributes("style='display:inline'").set_caption('Caption table 2')

display_html(df1_styler._repr_html_()+df2_styler._repr_html_(), raw=True)
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from fake_useragent import UserAgent

options = Options()
ua = UserAgent()
userAgent = ua.random
print(userAgent)
options.add_argument(f'user-agent={userAgent}')
driver = webdriver.Chrome(chrome_options=options, executable_path=r'C:\WebDrivers\ChromeDriver\chromedriver_win32\chromedriver.exe')
driver.get("https://www.google.co.in")
driver.quit()
python3 -m venv tutorial-env


tutorial-env\Scripts\activate.bat
import time
from time import sleep

# Wait in 60-second intervals until the user confirms the captcha is solved.
# BUG FIXES vs the original snippet:
#   * `continue` appeared outside any loop — SyntaxError.
#   * a prompt string literal was split across two source lines — SyntaxError.
#   * input() returns a str, so `dus == 1` could never be True.
while True:
    time.sleep(60)
    answer = input('Did you solve the captcha? 1 for yes 2 for no :')
    if answer.strip() == '1':
        break
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from fake_useragent import UserAgent

options = Options()
ua = UserAgent()
userAgent = ua.random
print(userAgent)
options.add_argument(f'user-agent={userAgent}')
driver = webdriver.Chrome(chrome_options=options, executable_path=r'C:\WebDrivers\ChromeDriver\chromedriver_win32\chromedriver.exe')
driver.get("https://www.google.co.in")
driver.quit()
const puppeteer = require('puppeteer');

const chromeOptions = {

 headless:false,

 defaultViewport: null};

(async function main() {

 const browser = await puppeteer.launch(chromeOptions);

 const page = await browser.newPage();

 await page.goto('https://old.reddit.com/login');

})()

await page.type('#user_reg', 'some_username');

await page.type('#passwd_reg', 'SuperStrongP@ssw0rd');

await page.type('#passwd2_reg', 'SuperStrongP@ssw0rd');

await page.click('#register-form button[type=submit]');

const chromeOptions = {

 headless:false,

 defaultViewport: null,

 slowMo:15,

};
from pathlib import Path

class DisplayablePath(object):
    """Render a directory tree with box-drawing connectors (like `tree`).

    Build nodes with `make_tree` and print each node's `displayable()` line.
    Fix: the `displayname` property was defined twice; the duplicate (dead
    code) has been removed.
    """

    display_filename_prefix_middle = '├──'
    display_filename_prefix_last = '└──'
    display_parent_prefix_middle = '    '
    display_parent_prefix_last = '│   '

    def __init__(self, path, parent_path, is_last):
        self.path = Path(str(path))
        self.parent = parent_path
        self.is_last = is_last
        # Depth is derived from the parent chain; the root sits at depth 0.
        if self.parent:
            self.depth = self.parent.depth + 1
        else:
            self.depth = 0

    @property
    def displayname(self):
        """Entry name; directories get a trailing slash."""
        if self.path.is_dir():
            return self.path.name + '/'
        return self.path.name

    @classmethod
    def make_tree(cls, root, parent=None, is_last=False, criteria=None):
        """Yield DisplayablePath nodes for `root` and all its descendants.

        `criteria` is an optional predicate used to filter child paths;
        by default every path is accepted.
        """
        root = Path(str(root))
        criteria = criteria or cls._default_criteria

        displayable_root = cls(root, parent, is_last)
        yield displayable_root

        # Children sorted case-insensitively so ordering is stable.
        children = sorted(list(path
                               for path in root.iterdir()
                               if criteria(path)),
                          key=lambda s: str(s).lower())
        count = 1
        for path in children:
            is_last = count == len(children)
            if path.is_dir():
                yield from cls.make_tree(path,
                                         parent=displayable_root,
                                         is_last=is_last,
                                         criteria=criteria)
            else:
                yield cls(path, displayable_root, is_last)
            count += 1

    @classmethod
    def _default_criteria(cls, path):
        # Default filter: accept everything.
        return True

    def displayable(self):
        """Return this node's fully prefixed display line."""
        if self.parent is None:
            return self.displayname

        _filename_prefix = (self.display_filename_prefix_last
                            if self.is_last
                            else self.display_filename_prefix_middle)

        parts = ['{!s} {!s}'.format(_filename_prefix,
                                    self.displayname)]

        # Walk up the ancestry choosing a blank spacer (last sibling) or a
        # vertical bar for each level.
        parent = self.parent
        while parent and parent.parent is not None:
            parts.append(self.display_parent_prefix_middle
                         if parent.is_last
                         else self.display_parent_prefix_last)
            parent = parent.parent

        return ''.join(reversed(parts))

if __name__ == "__main__":
    # Demo: render the tree rooted at ./doc
    for node in DisplayablePath.make_tree(Path('doc')):
        print(node.displayable())
import tkinter as tk
from tkinter import ttk

# Shared font presets for the pop-up helpers below.
LARGE_FONT= ("Verdana", 12)
NORM_FONT= ("Verdana", 10)
SMALL_FONT= ("Verdana", 8)

def popup_msg(msg):
    '''Show `msg` in a 640x480 window centered on screen, with a Dismiss button.'''
    popup = tk.Tk()
    popup.wm_title("! message")

    window_width = 640
    window_height = 480

    # Center the window: offset is half the screen minus half the window.
    screen_width = popup.winfo_screenwidth()
    screen_height = popup.winfo_screenheight()
    center_x = int(screen_width/2 - window_width / 2)
    center_y = int(screen_height/2 - window_height / 2)
    popup.geometry(f'{window_width}x{window_height}+{center_x}+{center_y}')

    # Message label on top, dismiss button underneath.
    ttk.Label(popup, text=msg, font=NORM_FONT).pack(side="top", fill="x", pady=10)
    ttk.Button(popup, text="Dismiss", command=popup.destroy).pack()
    popup.mainloop()

def popup_prompt(msg, func, btn_title="click", dismiss_after_click=True):
    '''Show `msg` with one action button; clicking closes the pop-up, then runs `func`.'''
    popup = tk.Tk()
    popup.wm_title("? prompt")

    window_width = 640
    window_height = 480

    # Center the window: offset is half the screen minus half the window.
    screen_width = popup.winfo_screenwidth()
    screen_height = popup.winfo_screenheight()
    center_x = int(screen_width/2 - window_width / 2)
    center_y = int(screen_height/2 - window_height / 2)
    popup.geometry(f'{window_width}x{window_height}+{center_x}+{center_y}')

    ttk.Label(popup, text=msg, font=NORM_FONT).pack(side="top", fill="x", pady=10)

    def on_click():
        # NOTE(review): `dismiss_after_click` is accepted but never consulted
        # (matching the original); the pop-up always closes before `func` runs.
        popup.destroy()
        func()

    ttk.Button(popup, text=btn_title, command=on_click).pack()
    popup.mainloop()
# Frequency of each value in 'column', largest first.
# NOTE(review): this rebinds `df` to a Series, discarding the DataFrame —
# these two lines are alternative one-liners, not meant to run back-to-back.
df = df['column'].value_counts(sort=True)

# Same counts as relative frequencies (proportions that sum to 1).
df = df['column'].value_counts(normalize=True)
from zipfile import ZipFile

# List member names of the archive.  The context manager guarantees the file
# handle is closed (the original leaked it), and printing makes the result
# visible when run as a script rather than in a REPL.
with ZipFile("files.zip") as zf:
    print(zf.namelist())
import pandas as pd

# Two score tables; df_2 has a 'science' column that df_1 lacks.
df_1 = pd.DataFrame(
    [['Somu', 68, 84, 78, 96],
     ['Kiku', 74, 56, 88, 85],
     ['Ajit', 77, 73, 82, 87]],
    columns=['name', 'physics', 'chemistry','algebra','calculus'])

df_2 = pd.DataFrame(
    [['Amol', 72, 67, 91, 83],
     ['Lini', 78, 69, 87, 92]],
    columns=['name', 'physics', 'chemistry','science','calculus'])

frames = [df_1, df_2]

# Stack the frames row-wise.  DataFrame.append was deprecated in pandas 1.4
# and removed in 2.0; pd.concat is the supported equivalent.  Columns missing
# from one frame are filled with NaN; sort=False preserves column order.
df = pd.concat(frames, ignore_index=True, sort=False)

#print dataframe
print("df_1\n------\n",df_1)
print("\ndf_2\n------\n",df_2)
print("\ndf\n--------\n",df)
# QGIS console snippet: iterate the features of the currently active layer.
layer = iface.activeLayer()

for feat in layer.getFeatures():
    # NOTE(review): "index" is compared as a *string* ("10" < "9" lexically);
    # if the field is numeric this should probably be feat["index"] > 0.
    if feat["index"] > "0":
        print(feat["index"])
# Frequency table of a column's values.
variable = dataframe['column_name'].value_counts()
# The lines below are snippet *templates* — substitute the {placeholders}
# before running; as written they are not valid Python.
{variable} = {dataframe}['{column}'].unique()
{dataframe}['{column}'] = {dataframe}['{column}'].astype({datatype})
np.random.seed({your_seed})
np.random.randint(range)
options = ["Go to bed", "Game all night", "Have a good cry", "Die"]

print("Please choose what you would like to do tonight: \n1. Go to bed "
      "\n2. Game all night \n3. Have a good cry\n4. Die")

# The original discarded int(input()) and then looped over `index(options)`
# with an empty body — `index` is undefined and the empty loop is a
# SyntaxError.  Read the choice, validate it, and echo the chosen option.
choice = int(input())
if 1 <= choice <= len(options):
    print(options[choice - 1])
else:
    print("Invalid choice")
    
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import plotly.io as pio
import plotly.express as px
pio.templates.default = "seaborn"
import pandas as pd
import re

# Traces named here start hidden (toggled to "legendonly" at the bottom).
curvas_a_ocultar = ['Depreciación Implicita','Inflación Implicita','Depreciación Real Implicta']

# One scatter subplot.  NOTE(review): expects Curva_CUI, Curva_CUD,
# Curva_ITLUP, Curvas and Fecha_Reporte to be defined elsewhere.
Implicita_Fig = make_subplots(
    rows=1, cols=1,
    shared_xaxes=True,
    vertical_spacing=0.03,
    specs=[[{"type": "scatter"}]]
)

# One curve per instrument, restricted to the report date and tenors <= 20;
# values arrive in percent, hence the /100.
Implicita_Fig.add_trace(go.Scatter(x=Curva_CUI[(Curva_CUI.fecha == Fecha_Reporte) & (Curva_CUI.Plazo <= 20) ].Plazo, 
                                              y=Curva_CUI[(Curva_CUI.fecha == Fecha_Reporte) & (Curva_CUI.Plazo <= 20)].valor/100,
                    line = dict(color='red', width=4, dash='solid'),
                    name='CUI'),row=1, col=1)
Implicita_Fig.add_trace(go.Scatter(x=Curva_CUD[(Curva_CUD.fecha == Fecha_Reporte) & (Curva_CUD.Plazo <= 20) ].Plazo, 
                                              y=Curva_CUD[(Curva_CUD.fecha == Fecha_Reporte) & (Curva_CUD.Plazo <= 20)].valor/100,
                   line = dict(color='green', width=4, dash='solid'),
                    name='CUD'),row=1, col=1)
Implicita_Fig.add_trace(go.Scatter(x=Curva_ITLUP[(Curva_ITLUP.fecha == Fecha_Reporte) & (Curva_ITLUP.Plazo <= 20) ].Plazo, 
                                              y=Curva_ITLUP[(Curva_ITLUP.fecha == Fecha_Reporte) & (Curva_ITLUP.Plazo <= 20)].valor/100,
                     line = dict(color='blue', width=4, dash='solid'),
                    name='ITLUP'),row=1, col=1)
# Derived (implied) curves, drawn with dashed styles to stand apart.
Implicita_Fig.add_trace(go.Scatter(x=Curvas[(Curvas.fecha_CUI == Fecha_Reporte) & (Curvas.Plazo_CUI <= 20) ].Plazo_CUI, 
                                              y=Curvas[(Curvas.fecha_CUI == Fecha_Reporte) & (Curvas.Plazo_CUI <= 20)].Depreciacion_Implicta/100,
                    line = dict(color='purple', width=4, dash='longdashdot'),
                                   name='Depreciación Implicita'),row=1, col=1)
Implicita_Fig.add_trace(go.Scatter(x=Curvas[(Curvas.fecha_CUI == Fecha_Reporte) & (Curvas.Plazo_CUI <= 20) ].Plazo_CUI, 
                                              y=Curvas[(Curvas.fecha_CUI == Fecha_Reporte) & (Curvas.Plazo_CUI <= 20)].Inflacion_Implicta/100,
                     line = dict(color='orange', width=4, dash='dashdot'),
                                   name='Inflación Implicita'),row=1, col=1)
Implicita_Fig.add_trace(go.Scatter(x=Curvas[(Curvas.fecha_CUI == Fecha_Reporte) & (Curvas.Plazo_CUI <= 20) ].Plazo_CUI, 
                                              y=Curvas[(Curvas.fecha_CUI == Fecha_Reporte) & (Curvas.Plazo_CUI <= 20)].Depreciación_real_Implicta/100,
                     line = dict(color='grey', width=4, dash='longdash'),
                                   name='Depreciación Real Implicta',text=Curvas.Depreciación_real_Implicta),row=1, col=1)
Implicita_Fig.update_layout(
    height=600,width=1500,
    showlegend=True,
    title_text="Curvas",
)
# Percent-formatted y axis; point labels use the per-trace `text` values.
Implicita_Fig.layout.yaxis.tickformat = ',.1%'
Implicita_Fig.update_traces(texttemplate='%{text:,.2%}',textposition='top center')
Implicita_Fig.update_xaxes(showline=True, linewidth=0.1, linecolor='grey', mirror=True)
Implicita_Fig.update_yaxes(showline=True, linewidth=0.1, linecolor='grey', mirror=True)
# Start the configured curves hidden; they stay clickable in the legend.
Implicita_Fig.for_each_trace(lambda trace: trace.update(visible="legendonly") 
                   if trace.name in curvas_a_ocultar else ())


Implicita_Fig.show()
# Canonicalise near-duplicate strings: rows in `column` of `df` whose fuzzy
# match against `string_to_match` scores at least `min_ratio` are replaced by
# `string_to_match` itself.  Mutates `df` in place; returns None.
def replace_matches_in_column(df, column, string_to_match, min_ratio = 47):
    # get a list of unique strings
    strings = df[column].unique()

    # get the top 10 closest matches to our input string
    matches = fuzzywuzzy.process.extract(string_to_match, strings,
                                         limit=10, scorer=fuzzywuzzy.fuzz.token_sort_ratio)

    # keep only candidates scoring at least min_ratio.  (Fixed: the loop
    # variable used to shadow the `matches` list itself, and the old comment
    # claimed "> 90" while the default threshold is 47.)
    close_matches = [match[0] for match in matches if match[1] >= min_ratio]

    # get the rows of all the close matches in our dataframe
    rows_with_matches = df[column].isin(close_matches)

    # replace all rows with close matches with the input matches
    df.loc[rows_with_matches, column] = string_to_match

    # let us know the function's done
    print("All done!")
'''

Ethan Anderson
Balance of Nature
VtT v1.0.1
Oct. 20 2021

'''

# importing libraries 
import speech_recognition as sr 
import os
from pydub import AudioSegment
from pydub.silence import split_on_silence


#create a speech recognition object (shared by the transcription function below)
r = sr.Recognizer()
#change this to the folder you have your wav files
# NOTE(review): a relative path — resolved against the current working directory.
VM_directory = r'vm temp wav'

# Routing stubs: each prints where a voicemail would be transferred.
# Replace the print with the real transfer mechanism when wiring this up.
def send_to_rr():
    print("Sending to R&R...")
    #put actual transfer process here
def send_to_customer_care():
    print("Sending to CC...")
    #put actual transfer process here
def send_to_new_sales():
    print("Sending to NS...")
    #put actual transfer process here
def send_to_customer_sales():
    print("Sending to CS...")
    #put actual transfer process here
def send_to_returns():
    print("Sending to Returns...")
    #put actual transfer process here
def send_to_declines():
    print("Sending to Declines...")
    #put actual transfer process here
def send_to_scheduling():
    print("Sending to Scheduling...")
    #put actual transfer process here
def no_matches():
    # Fallback used when no keyword list matches a transcription.
    print("No Matches")
    #put actual transfer process here

# Keyword -> department routing table.  main() scans it in order and the
# first entry whose word list hits the transcription wins, so more specific
# phrases come before catch-alls like "ship" and the final ' '/'-' fallback.
all_departments_words = [
    {
        'words' : ["tracking", "shipment", "pending", "shipped", "shipping", "where my order is", "where is my order", "delay", "delayed", "not received", "not recieve"],
        'target' : send_to_rr,
        'message' : "Sent to Reception and Routing"
    },

    {
        'words' : ["stop", "discontinue", "backlog", "surplus", "postpone", "no shipments"],
        'target' : send_to_customer_care,
        'message' : "Sent to Customer Care"
    },

    {
        'words' : ["fox","new customer", "cost", "discount code", "commercial", "do not have a computer", "don't have a computer", "35%", "35 percent", "Fox"],
        'target' : send_to_new_sales,
        'message' : "Sent to New Sales"
    },

    {
        'words' : ["reinstate", "old account", "ordered before", "place an order", "place another order", "renew", "change in my order", "reactivate", "reorder", "make an order", "apple", "preferred customer"],
        'target' : send_to_customer_sales,
        'message' : "Sent to Customer Sales"
    },

    {
        'words' : ["refund", "money back", "charged", "RMA", "R M A", "refunded", "refunding", "refused", "refuse"],
        'target' : send_to_returns,
        'message' : "Sent to Returns"
    },

    {
        'words' : ["fraud", "update", "declined", "decline", "account is disabled", "account has been disabled", "declines", "updated"],
        'target' : send_to_declines,
        'message' : "Sent to Declines"
    },

    {
        'words' : ["appointment", "scheduled", "schedule", "reschedule", "coach"],
        'target' : send_to_scheduling,
        'message' : "Sent to Scheduling"
    },

    {
        'words' : ["return", "returns", "returning"],
        'target' : send_to_returns,
        'message' : "Sent to Returns"
    },

    {
        'words' : ["cancel", "cancelled", "cancelling", "canceling"],
        'target' : send_to_customer_care,
        'message' : "Sent to Customer Care"
    },

    {
        'words' : ["ship"],
        'target' : send_to_rr,
        'message' : "Sent to Reception and Routing"
    },

    # Catch-all: ' ' or '-' appears in any non-empty transcription, so this
    # entry matches whenever nothing above did.
    {
        'words' : [' ', '-'],
        'target' : no_matches,
        'message' : "No Matches"
    }

]



####################################### FUNCTIONS ##########################################

# a function that splits the audio file into chunks
# and applies speech recognition - edited to make it one giant chunk instead of multiple small ones (would want to be split into small ones for conversations)
def get_large_audio_transcription(path):
    """
    Splitting the large audio file into chunks
    and apply speech recognition on each of these chunks

    Returns the concatenated text of every chunk Google recognised
    (capitalised, '. '-separated).  Uses the module-level recognizer `r`;
    recognize_google requires network access.
    """
    # open the audio file using pydub
    sound = AudioSegment.from_wav(path)  
    # split audio sound where silence is 700 miliseconds or more and get chunks
    chunks = split_on_silence(sound,
        # experiment with this value for your target audio file
        min_silence_len = 1500, #change this if you want to modify for conversations, detects the silence length to find breaks
        # adjust this per requirement
        silence_thresh = sound.dBFS-14,
        # keep the silence for 1 second, adjustable as well
        keep_silence=500,
    )
    folder_name = "audio-chunks"
    # create a directory to store the audio chunks
    if not os.path.isdir(folder_name):
        os.mkdir(folder_name)
    whole_text = ""
    # process each chunk 
    for i, audio_chunk in enumerate(chunks, start=1):
        # export audio chunk and save it in
        # the `folder_name` directory.
        chunk_filename = os.path.join(folder_name, f"chunk{i}.wav")
        audio_chunk.export(chunk_filename, format="wav")
        # recognize the chunk
        with sr.AudioFile(chunk_filename) as source:
            audio_listened = r.record(source)
            # try converting it to text
            try:
                text = r.recognize_google(audio_listened)
            except sr.UnknownValueError as e:
                # Unrecognisable chunk: log and skip it (nothing is appended).
                # NOTE(review): network failures raise sr.RequestError, which
                # is not caught here and would abort the whole file.
                print("Error:", str(e))
            else:
                text = f"{text.capitalize()}. "
                print(chunk_filename, ":", text)
                whole_text += text
    # return the text for all chunks detected
    return whole_text


def main():
    # Transcribe every .wav voicemail in VM_directory and route it to the
    # first department whose keyword list matches the transcription.
    #checklist = []
    for filename in os.scandir(VM_directory):
        #only running through .wav files
        if filename.path.endswith(".wav"):
            # NOTE(review): `filename` is an os.DirEntry, not a str path —
            # presumably AudioSegment.from_wav accepts it via __fspath__;
            # confirm, or pass filename.path explicitly.
            vm_text_string = get_large_audio_transcription(filename)
            #flags for if there are no keywords
            flag = 0
            #loops through dictionaries
            for department in all_departments_words:
                if flag == 1:
                    break
                #loops through keywords
                for word in department['words']:
                    #loops words through VM and splits string into words
                    # NOTE(review): substring match is case-sensitive, and the
                    # transcription capitalises each chunk's first word — a
                    # keyword at the start of a sentence will not match.
                    if word in vm_text_string:
                        #calls the function that will actually send the vm
                        department['target']()
                        #prints the "sent to ..." from dict
                        print(department['message'])
                        #checklist.append(department['message'])
                        flag += 1
                        #ends loop to prevent repeats
                        break
            #if no keywords are detected flag will stay 0
            if flag == 0:
                #runs function for no matches
                no_matches()
                #checklist.append('No Matches')
    '''print(checklist)
    answers = ['Sent to Customer Care', 'Sent to Customer Care', 'Sent to Customer Care', 'Sent to Customer Care', 'Sent to Customer Sales', 'Sent to Customer Sales', 'No Matches', 'No Matches', 'No Matches', 'Sent to New Sales', 'Sent to New Sales', 'Sent to Returns', 'Sent to Returns', 'Sent to Reception and Routing', 'Sent to Reception and Routing', 'Sent to Reception and Routing', 'Sent to Reception and Routing', 'Sent to Scheduling', 'Sent to Scheduling']
    i = -1
    while i < 19:
        i += 1
        if checklist[i] == answers[i]:
            print('correct')   
        else:
            print('incorrect')'''

main()
#Code by Leon Wolber https://www.kaggle.com/leonwolber/reddit-nlp-topic-modeling-prediction

# The wordcloud 
# NOTE(review): expects `ls` (an iterable of word tokens), WordCloud and plt
# to be defined/imported elsewhere.
plt.figure(figsize=(16,13))
wc = WordCloud(background_color="black", max_words=1000, max_font_size= 200,  width=1600, height=800)
wc.generate(" ".join(ls))
plt.title("Most discussed terms", fontsize=20)
plt.imshow(wc.recolor( colormap= 'viridis' , random_state=17), alpha=0.98, interpolation="bilinear", )
plt.axis('off')
import os

# Recursively list every file under /kaggle/input (Kaggle's dataset mount).
for current_dir, _, file_names in os.walk('/kaggle/input'):
    for file_name in file_names:
        print(os.path.join(current_dir, file_name))
df['SettlementDate'] = pd.TimedeltaIndex(df['SettlementDate'], unit='d') + dt.datetime(1900,1,1)
>>> lis = []
>>> lis = lis + [1]
>>> lis
[1]
>>> lis = lis + [2]
>>> lis
[1, 2]
>>> lis += [3]  # += acts like list.extend, i.e changes the list in-place
>>> lis
[1, 2, 3]
def weighted_median(df, median_col, weight_col):
    """Return the weighted median of `median_col`, weighting rows by `weight_col`.

    The weighted median is the first value (in sorted order) at which the
    running weight reaches half of the total weight.
    """
    ordered = df.sort_values(median_col)
    half_total = ordered[weight_col].sum()/2
    running = ordered[weight_col].cumsum()
    return ordered[running >= half_total][median_col].iloc[0]
pd.pivot(df, index='time', columns= 'name)'.plot(subplots=True)
# Bar chart of GP per sender, one bar per row at positions 0..n-1.
y_pos = np.arange(len(df_cuba.GP_per_sender))

plt.bar(y_pos,df_cuba.GP_per_sender)
# Side-by-side time series: Cuba (left) vs rest-of-world (right).  Each panel
# shows the series (blue starred), its average (green dashed) and what appear
# to be upper/lower band columns in red — presumably mean±2σ; confirm the
# std_value2p/std_value2m definitions upstream.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize= (30,8))
ax1.plot(df_cuba.wk_year, df_cuba.GP_per_sender,'-b*')
ax1.plot(df_cuba.wk_year, df_cuba.avg_value, '--g')
ax1.plot(df_cuba.wk_year, df_cuba.std_value2p,'-r')
ax1.plot(df_cuba.wk_year, df_cuba.std_value2m,'-r')
ax1.set_title('Cuba')
ax2.plot(df_row.wk_year, df_row.GP_per_sender,'-b*')
ax2.plot(df_row.wk_year, df_row.avg_value, '--g')
ax2.plot(df_row.wk_year, df_row.std_value2p,'-r')
ax2.plot(df_row.wk_year, df_row.std_value2m,'-r')
ax2.set_title('ROW')
# Overlay the two series on a shared x-axis with independent y-scales
# (twinx): Cuba in magenta on the left axis, ROW in black on the right.
fig, ax1 = plt.subplots( figsize= (20,8))

ax2 = ax1.twinx()
ax1.plot(df_cuba.wk_year, df_cuba.GP_per_sender, '-m')
ax2.plot(df_row.wk_year, df_row.GP_per_sender, '-k')


ax1.set_xlabel('X data')
ax1.set_ylabel('Y1 data', color='m')
ax2.set_ylabel('Y2 data', color='k')

plt.show()
fig = plt.figure(figsize = (20,8)) # Create matplotlib figure

ax = fig.add_subplot(111) # Create matplotlib axes
ax2 = ax.twinx() # Create another axes that shares the same x-axis as ax.

width = 0.4

# position=1 / position=0 offset the two bar sets by one bar-width so the
# magenta (left axis) and blue (right axis) bars sit side by side.
df_cuba.GP_per_sender.plot(kind='bar', color='m', ax=ax, width=width, position=1)
df_row.GP_per_sender.plot(kind='bar', color='blue', ax=ax2, width=width, position=0)

ax.set_ylabel('cuba', color='m')
ax2.set_ylabel('row', color='b')

plt.show()
# Requires an elasticsearch client; Elasticsearch() connects to the default
# local node.
es = Elasticsearch()
# get all index names
indices_names = []
for elem in es.cat.indices(format="json"):
    indices_names.append( elem['index'] )
# get all fields of one index
dict_index_fields = {}
index = 'factiva.snapshot'
mapping = es.indices.get_mapping(index)
dict_index_fields[index] = []
# Top-level mapping properties only; nested sub-fields are not walked.
for field in mapping[index]['mappings']['properties']:
    dict_index_fields[index].append(field)
day30.pivot(index='acq_date', columns='cuba_split', values=['senders','orders_per_sender','GP_per_sender'])
1
# Option 1: transform('mean') broadcasts each group's mean back onto the
# original rows, so the assignment aligns row-for-row.  This is the correct form.
df['avg_value'] = df.groupby("x").y.transform('mean')
2
# Option 2: .mean() returns one value per group (indexed by "x"), so assigning
# it to a column generally misaligns / produces NaN.  Prefer option 1.
df['avg_value'] = df.groupby("x").y.mean()
def selection_sort(array):
    """Sort `array` in place (ascending) using selection sort.

    Each pass finds the smallest element of the unsorted tail and swaps it
    into position i.  O(n^2) comparisons, O(1) extra space; returns None.
    """
    n = len(array)

    for i in range(n):

        # Index of smallest element in array[i:]
        min_index = i

        for j in range(i+1, n):
            if array[j] < array[min_index]:
                min_index = j

        array[min_index], array[i] = array[i], array[min_index]

my_array = [5, 3, 7, 1, 4, 8, 2, 6]
selection_sort(my_array)
# Fixed: the original used the Python 2 print statement (`print my_array`),
# a SyntaxError under Python 3.
print(my_array)
def sort(nums):
    """Selection-sort `nums` in place (ascending); returns None.

    Fixes two defects in the original: the swap happened *inside* the
    comparison (so the list could finish unsorted — e.g. [5,3,8,6,7,2]
    came out as [2,3,5,7,6,8]), and the loop bounds were hard-coded for a
    list of exactly six elements.
    """
    n = len(nums)
    for i in range(n):
        minpos = i
        for j in range(i, n):
            if nums[j] < nums[minpos]:
                minpos = j
        # Swap once per pass, after the minimum has been located.
        nums[i], nums[minpos] = nums[minpos], nums[i]

nums = [5,3,8,6,7,2]
sort(nums)
print(nums)
# define strings
listOfPlaces = ["Berlin", "Paris", "Lausanne"]
currentCity = "Lausanne"

# Report, for every place, whether it equals the current city.
for candidate in listOfPlaces:
    print ("comparing %s with %s: %s" % (candidate, currentCity, candidate == currentCity))
df.groupby('cluster', group_keys=False).apply(lambda df: df.sample(1))
# Read the coordinates of two points from the user as floats.
x1 = float(input("x1: "))
y1 = float(input("y1: "))
x2 = float(input("x2: "))
y2 = float(input("y2: "))
bins = 7

# Evenly spaced integer edges spanning the data range, used only to build
# human-readable labels.
edges = np.linspace(df.value.min(), df.value.max(), bins+1).astype(int)
labels = [f'({edges[i]}, {edges[i+1]}]' for i in range(len(edges)-1)]

# NOTE(review): pd.cut computes its own edges (it widens the range slightly
# and does not truncate to int), so these labels only approximate the actual
# bin boundaries; the retbins=True variant elsewhere in this file is exact.
df['bin'] = pd.cut(df.value, bins=bins, labels=labels)

#     value         bin
# 1       8     (0, 53]
# 2      16     (0, 53]
# ..    ...         ...
# 45    360  (322, 376]
# 46    368  (322, 376]
bins = 7

# Let pd.cut choose the edges and hand them back (retbins=True) so the labels
# match the actual bin boundaries; abs() tidies the "-0" on the widened lower edge.
_, edges = pd.cut(df.value, bins=bins, retbins=True)
labels = [f'({abs(edges[i]):.0f}, {edges[i+1]:.0f}]' for i in range(len(edges)-1)]

df['bin'] = pd.cut(df.value, bins=bins, labels=labels)

#     value         bin
# 1       8     (0, 53]
# 2      16     (0, 53]
# ..    ...         ...
# 45    360  (322, 376]
# 46    368  (322, 376]
# How to extract the mac address of a computer
import uuid

node_id = uuid.getnode()
# Slice the 48-bit node id into bytes, most-significant first, as 2-digit hex.
octets = ['{:02x}'.format((node_id >> shift) & 0xff)
          for shift in range(40, -1, -8)]

print("The MAC address in formatted way is : ", end="")

print(':'.join(octets))

import os

# Provisioning script: installs a pentesting toolset via apt/snap/pip/git.
# Assumes a Debian/Ubuntu host with sudo rights.

#Install necessary dependencies before installing the numerous applications
os.system("sudo apt install -y python3-pip")
# Fixed: pip has no -y flag (it never prompts); the original
# "pip3 install -y ..." made pip try to install a package named "y".
os.system("pip3 install minidump minikerberos aiowinreg msldap winacl")
os.system("sudo apt install -y git")
os.system("sudo apt install -y snapd")

#Install TigerVNC (Remote Access Tool) plus the xfce desktop it serves
os.system("sudo apt install -y xfce4 xfce4-goodies")
os.system("sudo apt install -y tigervnc-standalone-server tigervnc-common")

#Install Nmap (Vulnerability Scanner)
os.system("sudo apt install -y nmap")

#Install Hashcat (Password Recovery)
os.system("sudo apt install -y hashcat")

#Install FruityWifi (Wireless Audit) — fixed "sudi" typo
os.system("sudo apt install -y fruitywifi")

#Install 32-bit GTK libraries required by IDA Pro (Reverse Engineering)
os.system("sudo apt install -y libgtk2.0-0:i386 gtk2-engines-murrine:i386 gtk2-engines-pixbuf:i386")

#Install DNSRecon [or use Nmap] (DNS Recon)
os.system("sudo apt install -y dnsrecon")

#SUBDOMAIN BRUTEFORCING: use the previously-installed Nmap

#Install SQLMap (SQL Injection Scanner) — fixed: snap install has no -y flag
os.system("sudo snap install sqlmap")

#Install Netcat (Local Host Enumeration)
os.system("sudo apt install -y netcat")

#Install imPacket (Kerberos Ticket Manipulation)
os.system("sudo apt install -y python3-impacket")

#Install ExifTool (Metadata Extractor)
os.system("sudo apt install -y libimage-exiftool-perl")

#Install MMG (Macro Payload Generator)
os.system("git clone https://github.com/Mr-Un1k0d3r/MaliciousMacroGenerator")

#Install BeRoot (Privilege Escalation Tool #1)
os.system("git clone https://github.com/AlessandroZ/BeRoot")

#Install LinEnum (Privilege Escalation Tool #2)
os.system("git clone https://github.com/rebootuser/LinEnum")

#Install pypykatz (Privilege Escalation Tool #3)
# Fixed: each os.system call runs in its own shell, so a standalone
# 'cd pypykatz' did not persist for the next call; chain the cd with the
# install in one command.  Also added the missing "install" subcommand —
# bare "python3 setup.py" only prints usage.
os.system("git clone https://github.com/skelsec/pypykatz.git")
os.system("cd pypykatz && sudo python3 setup.py install")
def bubbleSort(arr):
    """Sort `arr` in place (ascending) via bubble sort; returns None."""
    length = len(arr)
    # After each pass the largest remaining item has bubbled to the end,
    # so every subsequent pass can stop one slot earlier.
    for done in range(1, length):
        for k in range(length - done):
            if arr[k] > arr[k + 1]:
                arr[k], arr[k + 1] = arr[k + 1], arr[k]

#test with this
arr = [64, 34, 25, 12, 22, 11, 90]

bubbleSort(arr)

print ("Sorted array is:")
for value in arr:
    print ("% d" % value)

# OR — an alternative implementation:

def bubbleSort(arr):
    """Bubble sort that keeps sweeping until a full pass makes no swap.

    Sorts in place, logs each swap, and returns the (sorted) list.
    """
    dirty = True
    while dirty:
        dirty = False
        for idx in range(len(arr) - 1):
            if arr[idx] > arr[idx + 1]:
                arr[idx], arr[idx + 1] = arr[idx + 1], arr[idx]
                print("Swapped: {} with {}".format(arr[idx], arr[idx + 1]))
                dirty = True

    return arr

my_list = [8,2,1,3,5,4]

print(bubbleSort(my_list))
import os
# Point Google SDKs at the service-account key.
# NOTE(review): derived by string-replacing 'notebooks' in the CWD — this
# only works when run from a directory whose path contains 'notebooks'.
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = os.getcwd().replace('notebooks','keys/gcp_key.json')
# Load first — the original snippet used `movie_df` before the read_csv
# that defines it.
movie_df = pd.read_csv('./data/movie_metadata.csv')

# Column names and basic shape summary.
movie_col_name = list(movie_df.columns)
print(movie_col_name)
movie_shape = movie_df.shape
num_row = movie_shape[0]
num_column = movie_shape[1]
print('{} rows and {} columns'.format(num_row, num_column))
# Build a small demo roster as a DataFrame — one dict key per column.
names = ["Jason", "Peter", "Mary", "Apple", "Banana", "Orange"]
ages = [18, 20, 17, 20, 20, 23]
grades = ["C", "A", "B+", "B-", "B", "A"]
students = {"name": names, "age": ages, "grade": grades}

df_students = pd.DataFrame(students)
print(df_students)
You can use pipreqs to automatically generate a requirements.txt file based on the import statements that the Python script(s) contain. To use pipreqs, assuming that you are in the directory where example.py is located:
pip install pipreqs
pipreqs .
It will generate the following requirements.txt file:
requests==2.23.0
beautifulsoup4==4.9.1
which you can install with:
pip install -r requirements.txt
{
    "python.defaultInterpreterPath": "${env:PYTHON_INSTALL_LOC}"
}

PYTHON_INSTALL_LOC="C:/Users/Juno Wong/miniconda3/python.exe"
# Fixed: the module was imported as `ts` but used as `tf`, so the print
# raised NameError.  The alias now matches the usage.
import tensorflow as tf

print(f"tf version is: {tf.__version__}")
# Create a conda environment named tf_python pinned to Python 3.9, then activate it.
conda create --name tf_python python=3.9
conda activate tf_python
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import numpy as np

# --- Read the gender-equality survey (local CSV) ---
df_gender = pd.read_csv('data/prepared/Survey_on_Gender_Equality_At_Home/Survey_on_Gender_Equality_At_Home_2_cleaned.csv')

# --- Explore and clean ---
df = df_gender.copy()
df.head()

df = df[['Country','Gender','Internet_Penetration','a1_agree','b7_full']]
df.rename(columns={'a1_agree':'Equal_Rights','b7_full':'Access_Money'},inplace=True)

print(df['Internet_Penetration'].unique())

# Parse the penetration strings into floats: take the leading two digits if
# the second character is alphanumeric, otherwise just the first.
# NOTE(review): this presumably strips a trailing '%' from 1–2-digit values;
# 3-digit values (e.g. '100%') would be truncated — verify against the data.
df = df[df['Internet_Penetration']!='.']
df['Internet_Penetration'] = [float(df.loc[i,'Internet_Penetration'][:2]) 
                              if df.loc[i,'Internet_Penetration'][1].isalnum() 
                              else float(df.loc[i,'Internet_Penetration'][:1]) for i in df.index]
                              
print(df['Equal_Rights'].unique())

df[df['Equal_Rights']==19]['Country']

df = df[df['Equal_Rights']!=19] #Remove outlier

# --- Regression plots: internet penetration vs perceptions ---
figure, axes = plt.subplots(3,1,figsize=(10,15))
sns.regplot(x=df['Internet_Penetration'],y=df['Equal_Rights'],ax=axes[0]).set(
    title='Internet penetration vs Equal rights perception',xlabel='',ylabel='')
df_female = df[df['Gender']=='Female']
sns.regplot(x=df_female['Internet_Penetration'],y=df_female['Access_Money'],ax=axes[1]).set(
    title='Internet penetration vs self access of money',xlabel='',ylabel='')
sns.regplot(x=df_female['Equal_Rights'],y=df_female['Access_Money'],ax=axes[2]).set(
    title='Equal rights perception vs Self access of money',xlabel='',ylabel='')
plt.show()

# Correlation heatmap for the female subset.
fig = plt.figure(figsize=(7,6)) 
sns.heatmap(df_female.corr(),annot=True).set_title('Correlation')
plt.show()

df_gender = df

# --- Survey on the future of business (second local CSV) ---
df_business = pd.read_csv('data/prepared/Survey_on_future_business/Survey_on_future_business_good_clean.csv')
df = df_business.copy()
df.head()
# Convert raw counts into percentages of respondents.
df['statistic'] = round(df['statistic']/df['total_asked']*100,2)

# Country-code lookup to join full country names onto ISO alpha-2 codes.
df_codes = pd.read_csv('data/prepared/Country_Code.csv')
df_codes.head()

df_codes = df_codes[[ 'Country','Alpha-2 code']]

df = df_codes.merge(df,left_on='Alpha-2 code',right_on='country')

df.drop(columns=['country','who_was_asked','total_asked'],inplace=True)
# Keep only the two questions of interest.
df = df[(df['variable'] == "gen_opn_1_text") | (df['variable'] == "own_fem_text")]

df.replace(['gen_opn_1_text','own_fem_text'],['Self_perp_equal_rights','Owner_female'],inplace=True)

print(df['value'].unique())

df.dropna(inplace=True)
df.reset_index(drop=True,inplace=True)
df

# Pivot-by-hand: one column per (variable, answer) pair, then collapse to
# one row per country.  NaNs vanish in the groupby sum.
for var in df['variable'].unique():
    for val in df[df['variable']==var]['value'].unique():
        df[var+'_'+val] = [df.loc[i,'statistic'] if (df.loc[i,'variable']==var) 
                           & (df.loc[i,'value']==val) else np.nan for i in df.index]
df = df.groupby(['Country']).sum().drop(columns=['statistic'])
df

# Aggregate answer buckets into agree/disagree and ownership splits.
df['Equal_rights_agree'] = df['Self_perp_equal_rights_Agree'] + df['Self_perp_equal_rights_Strongly agree']
df['Equal_rights_disagree'] = df['Self_perp_equal_rights_Disagree'] + df[
                                'Self_perp_equal_rights_Strongly disagree']
df['Female_owner_Half_or_more'] = df['Owner_female_All owners are female'] + df[
                                'Owner_female_Exactly half'] + df['Owner_female_More than half']
df['Female_owner_less_than_half'] = df['Owner_female_Less than half'] + df['Owner_female_None']
df = df[['Equal_rights_agree','Equal_rights_disagree','Female_owner_Half_or_more',
         'Female_owner_less_than_half']]
df

figure, axes = plt.subplots(1,1,figsize=(9,5))
sns.regplot(x=df['Equal_rights_agree'],y=df['Female_owner_Half_or_more']).set(
    title='Equal rights perception vs proportion of female owner of business',xlabel='',ylabel='')
plt.show()

# --- Join both surveys by country and plot the female subset ---
df_business = df.reset_index()
df = df_gender.merge(df_business,on='Country')
df_female = df[df['Gender']=='Female'] 
df_female

figure, axes = plt.subplots(2,1,figsize=(15,12))
sns.regplot(x=df_female['Access_Money'],y=df_female['Female_owner_Half_or_more'],ax=axes[0]).set(
    title='Self access of money vs Female_owner_Half_or_more',xlabel='',ylabel='')
sns.regplot(x=df_female['Internet_Penetration'],y=df_female['Female_owner_Half_or_more'],ax=axes[1]).set(
    title='Internet penetration vs Female_owner_Half_or_more',xlabel='',ylabel='')

plt.show()
# Import necessary modules

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import numpy as np

# Read the cleaned "Survey on Gender Equality At Home" data

df_gender = pd.read_csv('data/prepared/Survey_on_Gender_Equality_At_Home/Survey_on_Gender_Equality_At_Home_2_cleaned.csv')

# Explore and clean

df = df_gender.copy()
df.head()

# Keep the columns of interest and give the survey codes readable names.
df = df[['Country','Gender','Internet_Penetration','a1_agree','b7_full']]
df.rename(columns={'a1_agree':'Equal_Rights','b7_full':'Access_Money'},inplace=True)

print(df['Internet_Penetration'].unique())

# Drop the '.' placeholder rows, then parse the leading one or two digits of
# each Internet_Penetration string into a float.
# NOTE(review): assumes values look like 'NN%' / 'N%' — confirm against the
# unique() output above.
df = df[df['Internet_Penetration']!='.']
df['Internet_Penetration'] = [float(df.loc[i,'Internet_Penetration'][:2]) 
                              if df.loc[i,'Internet_Penetration'][1].isalnum() 
                              else float(df.loc[i,'Internet_Penetration'][:1]) for i in df.index]
                              
print(df['Equal_Rights'].unique())

# Which country carries the suspicious Equal_Rights value of 19?
df[df['Equal_Rights']==19]['Country']

# Remove outlier

df = df[df['Equal_Rights']!=19] 


# Analysis: three pairwise regressions.

figure, axes = plt.subplots(3,1,figsize=(10,15))
sns.regplot(x=df['Internet_Penetration'],y=df['Equal_Rights'],ax=axes[0]).set(
    title='Internet penetration vs Equal rights perception',xlabel='',ylabel='')
df_female = df[df['Gender']=='Female']
sns.regplot(x=df_female['Internet_Penetration'],y=df_female['Access_Money'],ax=axes[1]).set(
    title='Internet penetration vs self access of money',xlabel='',ylabel='')
sns.regplot(x=df_female['Equal_Rights'],y=df_female['Access_Money'],ax=axes[2]).set(
    title='Equal rights perception vs Self access of money',xlabel='',ylabel='')
plt.show()

# Correlation matrix for the female subset.
fig = plt.figure(figsize=(7,6)) 
sns.heatmap(df_female.corr(),annot=True).set_title('Correlation')
plt.show()

df_gender = df

# Survey on Future of Business

df_business = pd.read_csv('data/prepared/Survey_on_future_business/Survey_on_future_business_good_clean.csv')
df = df_business.copy()
df.head()
# Convert raw counts to percentages of respondents.
df['statistic'] = round(df['statistic']/df['total_asked']*100,2)

# Map ISO alpha-2 codes to country names.
df_codes = pd.read_csv('data/prepared/Country_Code.csv')
df_codes.head()

df_codes = df_codes[[ 'Country','Alpha-2 code']]

df = df_codes.merge(df,left_on='Alpha-2 code',right_on='country')

# Keep only the two questions of interest and rename them.
df.drop(columns=['country','who_was_asked','total_asked'],inplace=True)
df = df[(df['variable'] == "gen_opn_1_text") | (df['variable'] == "own_fem_text")]

df.replace(['gen_opn_1_text','own_fem_text'],['Self_perp_equal_rights','Owner_female'],inplace=True)

print(df['value'].unique())

df.dropna(inplace=True)
df.reset_index(drop=True,inplace=True)

# Pivot long -> wide: one column per (variable, answer) pair holding the
# statistic for matching rows and NaN elsewhere.  The parenthesised `&`
# keeps operator precedence correct for the boolean test.
for var in df['variable'].unique():
    for val in df[df['variable']==var]['value'].unique():
        df[var+'_'+val] = [df.loc[i,'statistic'] if (df.loc[i,'variable']==var) 
                           & (df.loc[i,'value']==val) else np.nan for i in df.index]
# Collapse to one row per country (sum ignores the NaNs).
df = df.groupby(['Country']).sum().drop(columns=['statistic'])
df

# Derived aggregates of the answer categories.
df['Equal_rights_agree'] = df['Self_perp_equal_rights_Agree'] + df['Self_perp_equal_rights_Strongly agree']
df['Equal_rights_disagree'] = df['Self_perp_equal_rights_Disagree'] + df[
                                'Self_perp_equal_rights_Strongly disagree']
df['Female_owner_Half_or_more'] = df['Owner_female_All owners are female'] + df[
                                'Owner_female_Exactly half'] + df['Owner_female_More than half']
df['Female_owner_less_than_half'] = df['Owner_female_Less than half'] + df['Owner_female_None']
df = df[['Equal_rights_agree','Equal_rights_disagree','Female_owner_Half_or_more',
         'Female_owner_less_than_half']]

figure, axes = plt.subplots(1,1,figsize=(9,5))
sns.regplot(x=df['Equal_rights_agree'],y=df['Female_owner_Half_or_more']).set(
    title='Equal rights perception vs proportion of female owner of business',xlabel='',ylabel='')
plt.show()

# Merge both surveys by country and restrict to female respondents.
df_business = df.reset_index()
df = df_gender.merge(df_business,on='Country')
df_female = df[df['Gender']=='Female'] 
df_female

figure, axes = plt.subplots(2,1,figsize=(15,12))
sns.regplot(x=df_female['Access_Money'],y=df_female['Female_owner_Half_or_more'],ax=axes[0]).set(
    title='Self access of money vs Female_owner_Half_or_more',xlabel='',ylabel='')
sns.regplot(x=df_female['Internet_Penetration'],y=df_female['Female_owner_Half_or_more'],ax=axes[1]).set(
    title='Internet penetration vs Female_owner_Half_or_more',xlabel='',ylabel='')

plt.show()

# NOTE(review): leftover fragment referencing an undefined BASE_DIR (and
# using os before it was imported); disabled because it raised NameError.
# os.path.join(BASE_DIR, 'templates')
full_path_for_helm_values_china="/Users/julianaferreira/repos/helm-charts/deploy-global/values/services/live-china"

import os


# Walk every file under the helm values directory and classify it by its
# base name (file name without directory or extension).
for root, dirs, files in os.walk(full_path_for_helm_values_china, topdown=False):

  for name in files:
    file_path=os.path.join(root, name)

    # Base name only: strip the directory part and the extension.
    file_name=os.path.join(root, name).split("/")[-1].split(".")[0]

    # `==` compares the whole string.  The original used
    # `file_name in "public-pages"`, which is a substring test and (as the
    # author's own comments note) also matches "public" or "pages" alone.
    if file_name == "public-pages":
       print("1")
       print (file_name)
    elif file_name == "nginx-gateway":
       print("2")
       print (file_name)
    else:
       print("3")
       print (file_name)
import tkinter as tk
from tkinter import ttk

root = tk.Tk()

# Pack a big frame so, it behaves like the window background
big_frame = ttk.Frame(root)
big_frame.pack(fill="both", expand=True)

# Set the initial theme
# NOTE(review): requires sun-valley.tcl to be present next to the script.
root.tk.call("source", "sun-valley.tcl")
root.tk.call("set_theme", "light")

def change_theme():
    """Toggle the Sun Valley ttk theme between light and dark mode."""
    # NOTE: The theme's real name is sun-valley-<mode>
    if root.tk.call("ttk::style", "theme", "use") == "sun-valley-dark":
        # Set light theme
        root.tk.call("set_theme", "light")
    else:
        # Set dark theme
        root.tk.call("set_theme", "dark")

# Remember, you have to use ttk widgets
button = ttk.Button(big_frame, text="Change theme!", command=change_theme)
button.pack()

root.mainloop()
# Widen pandas display limits for notebook inspection.
pd.set_option('display.max_rows', 200)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
# True when the given UTC timestamp lies more than 3 days in the past.
(pd.Timestamp.today("UTC") - pd.Timestamp("2021-08-20T20:01:05+00:00")) > pd.Timedelta(
    3, "D"
)
class Band:
    """A band member with a name and an instrument.

    A class-level counter keeps track of how many members exist.
    """

    bandcount = 0  # total Band instances created so far

    def __init__(self, name, instrument):
        """Store the member's name/instrument and bump the shared counter."""
        self.name = name
        self.instrument = instrument
        Band.bandcount += 1

    def displayInstrumnet(self):
        """Print which instrument this member plays."""
        print(self.name, "plays", self.instrument)

    def bandCount(self):
        """Print the running count of band members."""
        print("# of Bandmembers = ", Band.bandcount)
## Using snowflake connector 
import snowflake.connector
from configparser import ConfigParser
import pandas as pd

# Read credentials from config.ini, [SNOWFLAKE] section.
config = ConfigParser()
config.read("config.ini")

user=config["SNOWFLAKE"]["USER"]
password=config["SNOWFLAKE"]["PASSWORD"]
database=config["SNOWFLAKE"]["DATABASE"]
schema=config["SNOWFLAKE"]["SCHEMA"]
warehouse=config["SNOWFLAKE"]["WAREHOUSE"]
role=config["SNOWFLAKE"]["ROLE"]



## snowflake connector method
# NOTE(review): `schema` is read from the config but never passed to
# connect() — confirm whether it should be included.
connection = snowflake.connector.connect(
    user=user,
    password=password,
    account='hostelworld.eu-west-1',
    database = database,
    warehouse = warehouse,
    role = role)
print("snowflake connector connected...")

# Pull a sample of the campaign performance table into a DataFrame.
query = '''select * from production.ppc.campaign_performance_daily limit 1000 ;'''
df = pd.read_sql_query(sql=query, con=connection)
print('DF shape = ', df.shape)

connection.close()
print("connection closed")



### Using sql alchemy
from sqlalchemy import create_engine
from configparser import ConfigParser
import pandas as pd

# Same credentials as the connector variant above, read from config.ini.
config = ConfigParser()
config.read("config.ini")

user=config["SNOWFLAKE"]["USER"]
password=config["SNOWFLAKE"]["PASSWORD"]
database=config["SNOWFLAKE"]["DATABASE"]
schema=config["SNOWFLAKE"]["SCHEMA"]
warehouse=config["SNOWFLAKE"]["WAREHOUSE"]
role=config["SNOWFLAKE"]["ROLE"]

# sql alchemy method
# Connection URL: snowflake://user:password@account/database/schema?...
engine = create_engine(
            f"""snowflake://{user}:{password}@hostelworld.eu-west-1/{database}/{schema}?warehouse={warehouse}&role={role}"""
)

connection = engine.connect()
print('sql alchemy connected...')
            
query = '''select * from production.ppc.campaign_performance_daily limit 1000 ;'''
df = pd.read_sql_query(sql=query, con=connection)
print('DF shape = ', df.shape)

connection.close()
print("connection closed")
# Daily index from 2021-08-16 up to (but not including) today.
# NOTE(review): `closed` was deprecated in favour of `inclusive` in
# pandas 1.4 — confirm the pandas version in use.
pd.date_range(start="2021-08-16", end=pd.Timestamp.today().strftime("%Y-%m-%d"), name="action_date", closed="left")
pd.Timestamp.today().strftime("%Y-%m-%d")
import pandas as pd
from google.oauth2 import service_account
import pandas_gbq
import logging

# Surface pandas-gbq's progress messages on stderr.
logger = logging.getLogger("pandas_gbq")
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())

project_id = "analytics-dev-308300"

# Service-account credentials for BigQuery.
credentials = service_account.Credentials.from_service_account_file(
    "../keys/gcp_key.json",
)

# NOTE(review): `query` is not defined in this snippet — it must be set
# before this line runs.
pd.read_gbq(query=query, credentials=credentials, project_id=project_id)
# dataclasses.InitVar example: `database` is an init-only argument — it is
# handed to __post_init__ but never stored as a field.
# NOTE(review): relies on `from dataclasses import dataclass, InitVar`;
# DatabaseType and my_database are defined elsewhere — confirm the imports.
@dataclass
class C:
    i: int
    j: int = None  # filled from the database when not supplied
    database: InitVar[DatabaseType] = None  # init-only, not a field

    def __post_init__(self, database):
        # Resolve a missing `j` by looking it up in the database.
        if self.j is None and database is not None:
            self.j = database.lookup('j')

c = C(10, database=my_database)
import PySimpleGUI as sg

# Define the window's contents
layout = [[sg.Text("What's your name?")],
          [sg.Input(key='-INPUT-')],
          [sg.Text(size=(40,1), key='-OUTPUT-')],
          [sg.Button('Ok'), sg.Button('Quit')]]

# Create the window
window = sg.Window('Window Title', layout)

# Display and interact with the Window using an Event Loop
while True:
    # Blocks until a button press or window event arrives.
    event, values = window.read()
    # See if user wants to quit or window was closed
    if event == sg.WINDOW_CLOSED or event == 'Quit':
        break
    # Output a message to the window
    window['-OUTPUT-'].update('Hello ' + values['-INPUT-'] + "! Thanks for trying PySimpleGUI")

# Finish up by removing from the screen
window.close()
# Clone the 'default' sheet into a new sheet, copying values and styles
# cell by cell (openpyxl does not carry styles over automatically).
# NOTE(review): `workbook` and `sheetName` are defined elsewhere.
new_sheet = workbook.create_sheet(sheetName)
default_sheet = workbook['default']

from copy import copy

for row in default_sheet.rows:
    for cell in row:
        new_cell = new_sheet.cell(row=cell.row, column=cell.col_idx,
                value= cell.value)
        # Style objects are shared between cells; copy() each one so edits
        # to the new sheet do not mutate the template's styles.
        if cell.has_style:
            new_cell.font = copy(cell.font)
            new_cell.border = copy(cell.border)
            new_cell.fill = copy(cell.fill)
            new_cell.number_format = copy(cell.number_format)
            new_cell.protection = copy(cell.protection)
            new_cell.alignment = copy(cell.alignment)
import logging
from .ReportLogger import FunctionLogger
from .ReportLogger import LoggerString

def log_all_methods(cls):
    """
    Logs all the methods of a class

    Returns a proxy class that owns a real `cls` instance; construction is
    logged via logging.debug, and attribute access on the proxy is forwarded
    to the wrapped instance through FunctionLogger.
    """
    logger_string = LoggerString()

    class WrappedClass:

        def __init__(self, *args, **kwargs):
            # Log entry, build the wrapped instance, then log exit.
            logging.debug(logger_string.generate_entering_class_string(
                cls, *args, **kwargs))

            self.__instance = cls(*args, **kwargs)

            logging.debug(logger_string.generate_exiting_class_string(
                cls, *args, **kwargs))

        def __getitem__(self, key: str):
            # Subscription is forwarded untouched to the wrapped instance.
            return self.__instance[key]

        def __getattribute__(self, s):
            # Attributes of the wrapper itself (including the name-mangled
            # __instance) are served directly; anything else is fetched from
            # the wrapped instance and returned wrapped in FunctionLogger.
            try:
                x = super(WrappedClass, self).__getattribute__(s)
            except AttributeError:
                pass
            else:
                return x
            x = self.__instance.__getattribute__(s)
            # NOTE(review): non-callable attributes fetched here are also
            # wrapped in FunctionLogger — confirm that is intended.
            return FunctionLogger(x)


    return WrappedClass
# Count occurrences of each (col1, col2) pair, NaNs dropped.
df_sub = df[["col1", "col2"]]
df_sub = df_sub.dropna()
df_sub.groupby(['col1','col2']).size().reset_index().rename(columns={0:'count'})
# 60-character cryptographically random token (uppercase letters + digits).
''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(60))
# First line of the key file.  NOTE(review): the file handle is never closed.
open("./keys/talentcards.txt", mode="r").readline()
'''
某比赛已经进入了淘汰赛阶段,已知共有n名选手参与了此阶段比赛,他们的得分分别是a_1,a_2….a_n,小美作为比赛的裁判希望设定一个分数线m,使得所有分数大于m的选手晋级,其他人淘汰。

但是为了保护粉丝脆弱的心脏,小美希望晋级和淘汰的人数均在[x,y]之间。

显然这个m有可能是不存在的,也有可能存在多个m,如果不存在,请你输出-1,如果存在多个,请你输出符合条件的最低的分数线。


输入描述:
输入第一行仅包含三个正整数n,x,y,分别表示参赛的人数和晋级淘汰人数区间。(1<=n<=50000,1<=x,y<=n)

输入第二行包含n个整数,中间用空格隔开,表示从1号选手到n号选手的成绩。(1<=|a_i|<=1000)


输出描述:
输出仅包含一个整数,如果不存在这样的m,则输出-1,否则输出符合条件的最小的值。


输入例子1:
6 2 3
1 2 3 4 5 6

输出例子1:
3
Python3(3.9) 
'''

# Problem (summary): choose a cutoff m so that players scoring > m advance;
# the number advancing and the number eliminated must both lie in [x, y].
# Print the smallest valid m, or -1 if none exists.

# Read test cases until EOF — the bare except ends the loop on EOF/bad input.
while True:
    try:
        n,x,y=map(int,input().strip().split())
        a=list(map(int,input().strip().split()))
        a.sort()
        flag=0
        # After sorting, cutting below index i eliminates i players and
        # promotes v = n - i.  The cut is valid only when the scores on each
        # side differ (a[i] != a[i-1]), so m = a[i-1] separates them
        # strictly.  i increases, so the first valid cut yields minimal m.
        for i in range(x,y+1):
            v=n-i
            if a[i] != a[i-1] and x<=v<=y :
                flag=1
                print(a[i-1])
                break
        if flag==0:
            print(-1)
    except:
        break
# Remove punctuation, then segment with jieba.
def punc_jieba(text, sep = ' '):
    """Strip punctuation from *text*, segment it with jieba (precise mode)
    and return the lower-cased tokens joined by *sep*.

    Fix: the original appended a pointless ``.encode().decode("utf8")``
    round-trip to the pattern literal — a no-op in Python 3, removed.
    """
    # One regex pass removes whitespace runs plus ASCII and full-width/CJK
    # punctuation characters.
    text_punc = re.sub("[\s+\>\<\:\?\.\!\/_,$%^*(+\"\']+|[+——!,。?、~@#¥%……&*()!,❤。~《》:()【】「」?”“;:、【】╮╯▽╰╭★→「」]+",
                        "",text)
    # cut_all=False: precise (non-overlapping) segmentation.
    text_cut = sep.join(jieba.cut(text_punc, cut_all=False)).lower()

    return text_cut
# Method 1: filter stopwords element by element.
def stop_word(text):
    """Remove Chinese stopwords from *text* and return the survivors.

    NOTE(review): `stopwords` comes from an external package (e.g.
    stopwordsiso) — confirm the import.  If *text* is a plain string this
    comprehension iterates characters, not words; it expects a token list.
    """
    stopword = stopwords(['zh'])
    remove_stw = [word for word in text if not word in stopword]
    return remove_stw
df['text'] = df['text'].apply(stop_word)
# Method 2: split on whitespace, drop stopwords, rejoin with spaces.
stopword = stopwords(['zh'])
df['text'] = df['text'].apply(lambda x: ' '.join([word for word in x.split() if word not in (stopword)]))
# Shell command, not Python — run it in a terminal first:
#   sudo pip install opencc
# (if that does not work, clone the OpenCC project first)
# Fix: the bare `sudo pip install opencc` line was a SyntaxError in this file.

import pandas as pd
import numpy as np
# -*- coding: utf-8 -*-
import opencc
from opencc import OpenCC

df = pd.read_csv('training.csv').astype(str)

def tra_sim(text):
    """Convert Traditional (Taiwan) Chinese *text* to Simplified Chinese."""
    cc = OpenCC('tw2s')  # tw2s = Traditional (TW) -> Simplified
    sim = cc.convert(text)
    return sim
# Add simplified-Chinese versions of the three text columns.
df['sim_label'] = df['label'].apply(tra_sim)
df['sim_detail_label'] = df['detail_label'].apply(tra_sim)
df['sim_text'] = df['text'].apply(tra_sim)
def deleteEncodingLayers(model, num_layers_to_keep):  # must pass in the full bert model
    """Return a deep copy of *model* with only the first
    *num_layers_to_keep* BERT encoder layers retained.

    Parameters
    ----------
    model : module exposing ``model.bert.encoder.layer`` (an nn.ModuleList)
    num_layers_to_keep : int
        Number of leading encoder layers to keep.

    Fix: the original iterated ``range(0, len(num_layers_to_keep))`` — but
    the argument is an int, so ``len()`` raised TypeError.
    """
    oldModuleList = model.bert.encoder.layer
    newModuleList = nn.ModuleList()

    # Keep only the first `num_layers_to_keep` layers.
    for i in range(0, num_layers_to_keep):
        newModuleList.append(oldModuleList[i])

    # create a copy of the model, modify it with the new list, and return
    copyOfModel = copy.deepcopy(model)
    copyOfModel.bert.encoder.layer = newModuleList

    return copyOfModel
# Identify categorical (object-dtype) columns of the training frame.
s = (X_train.dtypes == 'object')
object_cols = list(s[s].index)

print("Categorical variables:")
print(object_cols)
# Parse a fixed-format timestamp string into a datetime object.
from datetime import datetime
FORMAT='%b %d %Y %I:%M%p'
datetime_object = datetime.strptime('Jun 1 2005  1:33PM', FORMAT)
# Create a virtual environment named "env", activate it (per-OS), leave it.
virtualenv env
 
# linux
source env/bin/activate
 
# windows
env\Scripts\activate.bat
 
deactivate
  def _use_cached_session(self):
    """ Attempt to set the session id from a memcache entry and return success status """
    # No cache configured -> nothing to reuse.
    if not self.cache:
      return False
    expiration = self.cache.get("KEY", namespace="QUEUE_NAME")
    # NOTE(review): the same cache key is read twice — once as an expiry
    # timestamp and once as the session id.  Presumably two different keys
    # were intended; confirm against the code that populates the cache.
    if expiration and expiration > datetime.datetime.utcnow():
      self.session_id = self.cache.get("KEY", namespace="QUEUE_NAME")
      if self.session_id:
        return True
    return False
import logging
import time
from functools import partial, wraps


def retry(func=None, exception=Exception, n_tries=5, delay=5, backoff=1, logger=False):
    """Decorate *func* so that failing calls are retried with backoff.

    Parameters
    ----------
    func : typing.Callable, optional
        The callable being decorated; ``None`` when the decorator is used
        with arguments, e.g. ``@retry(n_tries=3)``.
    exception : Exception or tuple of Exceptions, optional
        Exception type(s) that trigger a retry, by default Exception.
    n_tries : int, optional
        Total number of attempts before giving up, by default 5.
    delay : int, optional
        Seconds to wait before the first retry, by default 5.
    backoff : int, optional
        Multiplier applied to the delay after each failed attempt,
        by default 1 (constant delay).
    logger : bool, optional
        When true, report failures via ``logging.warning`` instead of
        ``print``, by default False.

    Returns
    -------
    typing.Callable
        A wrapper that re-invokes the callable until it succeeds or the
        attempts run out; the final attempt propagates any exception.

    Examples
    --------
    >>> import random
    >>> @retry(exception=Exception, n_tries=4)
    ... def test_random(text):
    ...    x = random.random()
    ...    if x < 0.5:
    ...        raise Exception("Fail")
    ...    else:
    ...        print("Success: ", text)
    >>> test_random("It works!")
    """

    # Bare usage (`@retry` without parentheses) passes the function in
    # directly; parameterised usage returns a partially-applied decorator.
    if func is None:
        return partial(
            retry,
            exception=exception,
            n_tries=n_tries,
            delay=delay,
            backoff=backoff,
            logger=logger,
        )

    @wraps(func)
    def wrapper(*args, **kwargs):
        attempts_left, wait = n_tries, delay

        # Every attempt but the last happens here; each failure is
        # reported and followed by a (possibly growing) pause.
        while attempts_left > 1:
            try:
                return func(*args, **kwargs)
            except exception as err:
                message = f"{str(err)}, Retrying in {wait} seconds..."
                if logger:
                    logging.warning(message)
                else:
                    print(message)
                time.sleep(wait)
                attempts_left -= 1
                wait *= backoff

        # Final attempt: any exception now propagates to the caller.
        return func(*args, **kwargs)

    return wrapper
# Build a dict from parallel key/value lists.
# Fix: the pasted original had a stray leading fragment and a `return`
# statement at module level (SyntaxError); reconstructed as valid code.
def to_dictionary(keys, values):
    """Return a dict mapping each element of *keys* to the element of
    *values* at the same position."""
    return dict(zip(keys, values))


# keys and values are the lists:
keys = ["a", "b", "c"]
values = [2, 3, 4]

print(to_dictionary(keys, values))  # {'a': 2, 'b': 3, 'c': 4}


def merge_two_dicts(a, b):
    """Return a new dict holding *a*'s entries updated by *b*'s
    (b wins on duplicate keys); neither input is modified."""
    c = a.copy()   # make a copy of a
    c.update(b)    # overlay b's keys and values onto the copy
    return c


a = { 'x': 1, 'y': 2}
b = { 'y': 3, 'z': 4}

print(merge_two_dicts(a, b)) # {'y': 3, 'x': 1, 'z': 4}
 
def getFactorialit(n):
    """Return n! for n >= 0; return -1 for negative input (the original
    contract).

    Fix: the pasted original mixed invalid syntax (`if n < 0, return -1`,
    `else fact = 1`) and a Python-2 print statement.
    """
    if n < 0:
        return -1
    fact = 1
    for i in range(1, n + 1):
        fact *= i
    return fact


print(getFactorialit(10))
# Create a virtual environment "myenv" (Windows activation), then leave it.
virtualenv myenv

myenv\Scripts\activate

deactivate
{
    "python.defaultInterpreterPath": "E:\\WebDevDjango\\Django_Projects\\PARASenv\\Scripts\\Python.exe"
}
# Load the background image only when Evil is set.
# Fix: `===` is JavaScript — Python's equality operator is `==`.
# NOTE(review): `Evil` and tkinter's `PhotoImage` must be defined earlier.
if Evil == True:
    bg = PhotoImage(file = "Nme.png")
# Build a per-date drawdown table for the fund value series in `vdf`.
dddf = pd.DataFrame(
  index=vdf.index,
  columns=['Drawdown', 'Start', 'End'])

# Drawdown = value relative to the running maximum, minus 1 (<= 0).
dddf['Drawdown'] = ((vdf['Fund Total'].dropna() / np.maximum.accumulate(vdf['Fund Total'].dropna(), axis=0)) - 1)

is_zero = dddf['Drawdown'] == 0

# A drawdown starts where the series leaves zero...
dddf['Start'] = ~is_zero & is_zero.shift(1)
start = list(dddf[dddf['Start']].index)

# ...and ends where it returns to zero.
dddf['End'] = is_zero & (~is_zero).shift(1)
end = list(dddf[dddf['End']].index)

# If the series opens mid-drawdown, treat the first date as its start.
if start[0] > end[0]:
	start.insert(0, dddf.index[0])

# If the last drawdown never recovered, close it at the final date.
if start[-1] > end[-1]:
	end.append(dddf.index[-1])

# One row per drawdown episode: start/end dates, length in days, depth.
dd_vdf = pd.DataFrame(
  index=range(0, len(start)),
  columns=('Start', 'End', 'Length', 'Drawdown'))

for i in range(0, len(start)):
  dd = dddf[start[i]:end[i]]['Drawdown'].min()
  dd_vdf.iloc[i] = (start[i].strftime('%Y-%m-%d'), end[i].strftime('%Y-%m-%d'), (end[i] -start[i]).days, dd)
{
  "python.formatting.provider": "black",
  "python.linting.enabled": false,
  "python.linting.pylintEnabled": true,
  "python.formatting.blackPath": "black",
  "python.pythonPath": ".env/bin/python",
  "editor.formatOnSave": true
}

# Fit an ARIMA(7,1,7) on the log series and plot fit vs the differenced data.
plt.figure(figsize=(16,8))
model_ARIMA = ARIMA(df_log, order=(7,1,7)) #Using p=7, d=1, q=7
results_ARIMA = model_ARIMA.fit()
plt.plot(df_shift)
plt.plot(results_ARIMA.fittedvalues, color='red')
# squared=False -> RMSE rather than MSE.
plt.title('ARIMA Model - RMSE: %.4f'% mean_squared_error(results_ARIMA.fittedvalues,df_shift['Close'], squared=False))
plt.show()
#Importing AutoReg function to apply AR model
from statsmodels.tsa.ar_model import AutoReg

# Fit a pure autoregressive model with 7 lags on the differenced series.
plt.figure(figsize=(16,8))
model_AR = AutoReg(df_shift, lags=7) #Using number of lags as 7
results_AR = model_AR.fit()
plt.plot(df_shift)
predict = results_AR.predict(start=0,end=len(df_shift)-1)
predict = predict.fillna(0) #Converting NaN values to 0
plt.plot(predict, color='red')
plt.title('AR Model - RMSE: %.4f'% mean_squared_error(predict,df_shift['Close'], squared=False))  #Calculating rmse
plt.show()
#Importing the seasonal_decompose to decompose the time series
from statsmodels.tsa.seasonal import seasonal_decompose
decomp = seasonal_decompose(df_train)

# Split into the three classical components.
trend = decomp.trend
seasonal = decomp.seasonal
residual = decomp.resid

# Plot actual series plus each component in a 4-row figure.
plt.figure(figsize=(15,10))
plt.subplot(411)
plt.plot(df_train, label='Actual', marker='.')
plt.legend(loc='upper left')
plt.subplot(412)
plt.plot(trend, label='Trend', marker='.')
plt.legend(loc='upper left')
plt.subplot(413)
plt.plot(seasonal, label='Seasonality', marker='.')
plt.legend(loc='upper left')
plt.subplot(414)
plt.plot(residual, label='Residuals', marker='.')
plt.legend(loc='upper left')
plt.tight_layout()
# Augmented Dickey-Fuller test for stationarity of the differenced series.
adfuller(df_shift)
# First-order differencing of the log series, with 12-period rolling stats.
plt.figure(figsize=(16,8))
df_shift = df_log - df_log.shift(periods = 1)
MAvg_shift = df_shift.rolling(window=12).mean()
MStd_shift = df_shift.rolling(window=12).std()
plt.plot(df_shift, color='c')
plt.plot(MAvg_shift, color='red', label = 'Moving Average')
plt.plot(MStd_shift, color='green', label = 'Standard Deviation')
plt.legend()
plt.show()

#Dropping the null values that we get after applying differencing method
df_shift = df_shift.dropna()
from os import walk

# Method 1: only the top-level file names — break after the first os.walk
# yield, which corresponds to the root directory itself.
# NOTE(review): `mypath` must be defined before this runs.
f = []
for (dirpath, dirnames, filenames) in walk(mypath):
    f.extend(filenames)
    break
from os import listdir
from os.path import isfile, join
# Method 2: list the directory and keep regular files only.
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
%sql SELECT name FROM sqlite_master WHERE type ='table' AND name NOT LIKE 'sqlite_%';
# reference: https://vimsky.com/zh-tw/examples/detail/python-method-regex.sub.html
# Collect the full paths of every .txt file in the indictments folder.
# NOTE(review): relies on `glob` being imported earlier in the notebook.
all_filenames = glob.glob("/home/lynaza/Desktop/Quinn/lda/檢察官起訴書/*.txt")

# Return only the file names (the extension may still be part of the name).
# Fix: these three lines carried one stray leading space in the paste,
# which is an IndentationError at module level.
import os
arr = os.listdir("/home/lynaza/Desktop/Quinn/lda/檢察官起訴書")
print(arr)


import cv2
import os
import glob

def load_images_name(path, max_depth=4):
    """Return the paths of every ``.tif`` image found under *path*.

    Directories are scanned up to *max_depth* levels deep (default 4 —
    the original hard-coded globs for depths 1..4); matches are returned
    shallowest level first, mirroring the original concatenation order.

    Parameters
    ----------
    path : str
        Root directory to search (without a trailing slash).
    max_depth : int, optional
        Maximum folder depth to descend, by default 4.
    """
    images_path = []
    # depth 1 -> path/*.tif, depth 2 -> path/*/*.tif, and so on.
    for depth in range(1, max_depth + 1):
        images_path.extend(glob.glob(path + '/*' * depth + '.tif'))

    return images_path

images = load_images_name("/home/lynaza/Desktop/traindata/test")
class Solution:
    def maxTip(self, a, b, n, x, y):
        """Return the maximum total tip when each of the *n* orders goes to
        waiter A (tip ``a[i]``, at most *x* orders) or waiter B (tip
        ``b[i]``, at most *y* orders).

        Greedy: serve the orders with the largest tip gap ``|a[i]-b[i]|``
        first, giving each to the better-paying waiter with capacity left.
        """
        gap = [abs(a[k] - b[k]) for k in range(n)]
        # Stable ascending sort, then walk it backwards — this reproduces
        # the exact processing order (ties included) of the original.
        ranked = sorted(enumerate(gap), key=lambda pair: pair[1])
        total = 0
        used_a = used_b = 0
        for idx, _ in reversed(ranked):
            if a[idx] > b[idx] and used_a < x:
                total += a[idx]
                used_a += 1
            elif a[idx] < b[idx] and used_b < y:
                total += b[idx]
                used_b += 1
            elif a[idx] > b[idx] and used_a == x:
                # A is full -> fall back to B.
                total += b[idx]
                used_b += 1
            elif a[idx] < b[idx] and used_b == y:
                # B is full -> fall back to A.
                total += a[idx]
                used_a += 1
            elif a[idx] == b[idx]:
                # Equal tips: either waiter gives the same amount.
                total += a[idx]
        return total


if __name__ == '__main__':
    # One test case per iteration: n, x, y then the two tip arrays.
    tc = int(input())
    while tc > 0:
        n, x, y = list(map(int, input().strip().split()))
        a = list(map(int, input().strip().split()))
        b = list(map(int, input().strip().split()))
        ans = Solution().maxTip(a, b, n, x, y)
        print(ans)
        tc -= 1
class Solution:
    def maxTip(self, a, b, n, x, y):
        """Return the maximum total tip when each of the *n* orders goes to
        waiter A (tip ``a[i]``, at most *x* orders) or waiter B (tip
        ``b[i]``, at most *y* orders).

        Greedy: serve the orders with the largest tip gap ``|a[i]-b[i]|``
        first, giving each to the better-paying waiter with capacity left.
        """
        gap = [abs(a[k] - b[k]) for k in range(n)]
        # Stable ascending sort, then walk it backwards — this reproduces
        # the exact processing order (ties included) of the original.
        ranked = sorted(enumerate(gap), key=lambda pair: pair[1])
        total = 0
        used_a = used_b = 0
        for idx, _ in reversed(ranked):
            if a[idx] > b[idx] and used_a < x:
                total += a[idx]
                used_a += 1
            elif a[idx] < b[idx] and used_b < y:
                total += b[idx]
                used_b += 1
            elif a[idx] > b[idx] and used_a == x:
                # A is full -> fall back to B.
                total += b[idx]
                used_b += 1
            elif a[idx] < b[idx] and used_b == y:
                # B is full -> fall back to A.
                total += a[idx]
                used_a += 1
            elif a[idx] == b[idx]:
                # Equal tips: either waiter gives the same amount.
                total += a[idx]
        return total


if __name__ == '__main__':
    # One test case per iteration: n, x, y then the two tip arrays.
    tc = int(input())
    while tc > 0:
        n, x, y = list(map(int, input().strip().split()))
        a = list(map(int, input().strip().split()))
        b = list(map(int, input().strip().split()))
        ans = Solution().maxTip(a, b, n, x, y)
        print(ans)
        tc -= 1
class Solution:
    def maxTip(self, a, b, n, x, y):
        """Return the maximum total tip when each of the *n* orders goes to
        waiter A (tip ``a[i]``, at most *x* orders) or waiter B (tip
        ``b[i]``, at most *y* orders).

        Greedy: serve the orders with the largest tip gap ``|a[i]-b[i]|``
        first, giving each to the better-paying waiter with capacity left.
        """
        gap = [abs(a[k] - b[k]) for k in range(n)]
        # Stable ascending sort, then walk it backwards — this reproduces
        # the exact processing order (ties included) of the original.
        ranked = sorted(enumerate(gap), key=lambda pair: pair[1])
        total = 0
        used_a = used_b = 0
        for idx, _ in reversed(ranked):
            if a[idx] > b[idx] and used_a < x:
                total += a[idx]
                used_a += 1
            elif a[idx] < b[idx] and used_b < y:
                total += b[idx]
                used_b += 1
            elif a[idx] > b[idx] and used_a == x:
                # A is full -> fall back to B.
                total += b[idx]
                used_b += 1
            elif a[idx] < b[idx] and used_b == y:
                # B is full -> fall back to A.
                total += a[idx]
                used_a += 1
            elif a[idx] == b[idx]:
                # Equal tips: either waiter gives the same amount.
                total += a[idx]
        return total


if __name__ == '__main__':
    # One test case per iteration: n, x, y then the two tip arrays.
    tc = int(input())
    while tc > 0:
        n, x, y = list(map(int, input().strip().split()))
        a = list(map(int, input().strip().split()))
        b = list(map(int, input().strip().split()))
        ans = Solution().maxTip(a, b, n, x, y)
        print(ans)
        tc -= 1
# Create and activate a Python 3.7 conda env, then install TF Model Garden.
conda create -n p37env python=3.7
conda activate p37env
pip install tf-models-official
import pandas as pd

data = {'Product': ['Desktop Computer','Tablet','Printer','Laptop'],
        'Price': [850,200,150,1300]
        }

df = pd.DataFrame(data, columns= ['Product', 'Price'])

# NOTE(review): the first argument is a placeholder — substitute a real path.
df.to_csv(r'Path where you want to store the exported CSV file\File Name.csv')
# df.to_csv('file_name.csv', encoding='utf-8', index=False)
print (df)

# NOTE(review): `data` is a plain dict here, so this line would raise —
# presumably `df` (a DataFrame) was meant; confirm.
data[['column1','column2','column3',...]].to_csv('fileNameWhereYouwantToWrite.csv')
     
# Build a DataFrame inside a loop.
# Fixes: `range()` needs an argument (TypeError) and `df.appen` is a typo;
# repeated DataFrame.append is also quadratic and was removed in pandas 2.x,
# so collect the pieces and concatenate once at the end.
df = pd.DataFrame()
pieces = []
for i in range(0):  # TODO: replace 0 with the real iteration count
    # ... build `text` (a DataFrame, Series or dict) for this step ...
    pieces.append(text)
if pieces:
    df = pd.concat(pieces, ignore_index=True)
# Recipes for concatenating several DataFrame columns into one string column.
# best way
data['resume'] = data[['Resume_title', 'City', 'State', 'Description', 'work_experiences', 'Educations', 'Skills', 'Certificates', 'Additional Information']].agg(' '.join, axis=1)


# other way
df["period"] = df["Year"] + df["quarter"]
df['Period'] = df['Year'] + ' ' + df['Quarter']
df["period"] = df["Year"].astype(str) + df["quarter"] #If one (or both) of the columns are not string typed
#Beware of NaNs when doing this!
df['period'] = df[['Year', 'quarter', ...]].agg('-'.join, axis=1) #for multiple string columns
df['period'] = df[['Year', 'quarter']].apply(lambda x: ''.join(x), axis=1)
#method cat() of the .str accessor 
df['Period'] = df.Year.str.cat(df.Quarter)
df['Period'] = df.Year.astype(str).str.cat(df.Quarter.astype(str), sep='q')
df['AllTogether'] = df['Country'].str.cat(df[['State', 'City']], sep=' - ') #add parameter na_rep to replace the NaN values with a string if have nan
# Numeric columns: cast everything to str and sum row-wise.
columns = ['whatever', 'columns', 'you', 'choose']
df['period'] = df[columns].astype(str).sum(axis=1)

# A reusable helper: join several DataFrame columns into one string Series.
# Fix: the pasted original carried IPython `...:` prompt artifacts, which
# made it a SyntaxError.
def str_join(df, sep, *cols):
    """Concatenate the given columns of *df* as strings separated by *sep*
    and return the resulting Series."""
    from functools import reduce
    return reduce(lambda x, y: x.astype(str).str.cat(y.astype(str), sep=sep),
                  [df[col] for col in cols])


# Example usage:
# df['cat'] = str_join(df, '-', 'c0', 'c1', 'c2', 'c3')
import re

text = 'this is a text'

# Grab whatever sits between the first "is" and "text".  re.search returns
# None on no match, so .group raises AttributeError — caught to supply a
# fallback value.
try:
    found = re.search('is(.+?)text', text).group(1)
except AttributeError:
    # AAA, ZZZ not found in the original string
    found = '0 wtitle' # apply your error handling
found

# => ' is a '  (the first "is" occurs inside "this", so the lazy group
#               captures " is a ").
# Fix: the bare `=> a` line in the paste was a SyntaxError.

# To get more than 1 search
# Extract every job title between the "wtitle" and "wcompany" markers of
# each resume row.  NOTE(review): `data` is a DataFrame defined elsewhere;
# 9282 is presumably its row count — confirm, or use len(data).
job_title = []
for i in range(0,9282):
    text = data.work_experiences.iloc[i]
    try:
        title = re.findall(r"wtitle (.*?) wcompany",text)
    except :
        # Bare except keeps the loop alive on non-string rows.
        title = 'onejob'
    job_title.append(title)
    
data['job_title'] = job_title
# picking up piece of string between separators
# function using partition, like partition, but drops the separators
def between(left, right, s):
    """Split *s* around the first *left*...*right* pair.

    Returns (text before *left*, text between the separators, text after
    *right*).  Fix: the example calls used Python-2 print statements.
    """
    before, _, a = s.partition(left)
    a, _, after = a.partition(right)
    return before, a, after

s = "bla bla blaa <a>data</a> lsdjfasdjöf (important notice) 'Daniweb forum' tcha tcha tchaa"
print(between('<a>', '</a>', s))
print(between('(', ')', s))
print(between("'", "'", s))

""" Output:
('bla bla blaa ', 'data', " lsdjfasdj\xc3\xb6f (important notice) 'Daniweb forum' tcha tcha tchaa")
('bla bla blaa <a>data</a> lsdjfasdj\xc3\xb6f ', 'important notice', " 'Daniweb forum' tcha tcha tchaa")
('bla bla blaa <a>data</a> lsdjfasdj\xc3\xb6f (important notice) ', 'Daniweb forum', ' tcha tcha tchaa')
"""
# Make copy to avoid changing original data (when imputing)
X_train_plus = X_train.copy()
X_valid_plus = X_valid.copy()

# Make new columns indicating what will be imputed
for col in cols_with_missing:
    X_train_plus[col + '_was_missing'] = X_train_plus[col].isnull()
    X_valid_plus[col + '_was_missing'] = X_valid_plus[col].isnull()

# Imputation: fit on train, reuse the fitted statistics on validation.
my_imputer = SimpleImputer()
imputed_X_train_plus = pd.DataFrame(my_imputer.fit_transform(X_train_plus))
imputed_X_valid_plus = pd.DataFrame(my_imputer.transform(X_valid_plus))

# Imputation removed column names; put them back
imputed_X_train_plus.columns = X_train_plus.columns
imputed_X_valid_plus.columns = X_valid_plus.columns

print("MAE from Approach 3 (An Extension to Imputation):")
print(score_dataset(imputed_X_train_plus, imputed_X_valid_plus, y_train, y_valid))
from sklearn.impute import SimpleImputer

# Imputation (plain variant, without the _was_missing indicator columns).
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))

# Imputation removed column names; put them back
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns

print("MAE from Approach 2 (Imputation):")
print(score_dataset(imputed_X_train, imputed_X_valid, y_train, y_valid))
from sklearn.metrics import mean_absolute_error
from sklearn.tree import DecisionTreeRegressor

def get_mae(max_leaf_nodes, train_X, val_X, train_y, val_y):
    """Fit a decision tree capped at *max_leaf_nodes* leaves and return its
    mean absolute error on the validation split.

    A fixed random_state keeps the fit reproducible across calls.
    """
    tree = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=0)
    tree.fit(train_X, train_y)
    validation_predictions = tree.predict(val_X)
    return mean_absolute_error(val_y, validation_predictions)


# Sweep tree sizes and report validation MAE to pick max_leaf_nodes.
for max_leaf_nodes in [5, 50, 500, 5000]:
    my_mae = get_mae(max_leaf_nodes, train_X, val_X, train_y, val_y)
    print("Max leaf nodes: %d  \t\t Mean Absolute Error:  %d" %(max_leaf_nodes, my_mae))
import pandas as pd
import matplotlib.pyplot as plt
  
# Each author paired with the number of articles they wrote.
author = ['Jitender', 'Purnima', 'Arpit', 'Jyoti']
article = [210, 211, 114, 178]

# Lift both lists into Series...
auth_series, article_series = map(pd.Series, (author, article))

# ...and assemble a two-column frame from a column mapping.
frame = dict(Author=auth_series, Article=article_series)

result = pd.DataFrame(frame)

print(result)
#define the target
y = home_data.SalePrice

#Create the list of features below
feature_names = ['LotArea','YearBuilt','1stFlrSF','2ndFlrSF','FullBath','BedroomAbvGr','TotRmsAbvGrd']

# Select data corresponding to features in feature_names
X = home_data[feature_names]

from sklearn.model_selection import train_test_split

# split data into training and validation data, for both features and target
# The split is based on a random number generator. Supplying a numeric value to
# the random_state argument guarantees we get the same split every time we
# run this script.
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state = 1)

from sklearn.tree import DecisionTreeRegressor
#specify the model
#For model reproducibility, set a numeric value for random_state when specifying the model
iowa_model = DecisionTreeRegressor(random_state=1)

# Fit the model
iowa_model.fit(train_X, train_y)

# get predicted prices on validation data
val_predictions = iowa_model.predict(val_X)

from sklearn.metrics import mean_absolute_error

# Validation mean absolute error of the fitted tree.
print(mean_absolute_error(val_y, val_predictions))

# Column-wise totals (axis=0) and row-wise totals (axis=1).
# NOTE(review): skipna=None falls back to pandas' default (True); passing
# None explicitly is probably unintended — confirm.
df.sum(axis=0, skipna=None)

df.sum(axis=1, skipna=None)
# Quick type-inspection helpers.
x = 'body'
type(x)


df.dtypes

type(txns) 

# this returns the container type (list, array, etc.)
# Compare the current time with today at 10:30 AM.
# NOTE(review): relies on `from datetime import date, datetime` earlier.
today = str(date.today()) 
now = datetime.now()
print(now)
my_datetime = datetime.strptime(today+ ' 10:30AM', '%Y-%m-%d %I:%M%p')
print(my_datetime)
if (now > my_datetime):
    print("Hello")
# Pivot: count per (id, group) with one column per term, zeros filled in.
df.groupby(['id', 'group', 'term']).size().unstack(fill_value=0)
# Append two empty columns to an existing frame.
mydf = mydf.reindex(columns = mydf.columns.tolist() + ['newcol1','newcol2'])
# Binary flag: 1 when Mid-Point <= 30, else 0.
[1 if x<=30 else 0 for x in df_w['Mid-Point']]
import requests

# ESPN's public scoreboard API for MLB.
api = 'http://site.api.espn.com/apis/site/v2/sports/baseball/mlb/scoreboard'

jsonData = requests.get(api).json()
events = jsonData['events']

# Collect the "Gamecast" link of every event on today's scoreboard.
links = []
for event in events:
    event_links = event['links']
    for each in event_links:
        if each['text'] == 'Gamecast':
            links.append(each['href'])
# Unrelated snippet: label rows red when Set == 'Z', green otherwise.
df['color'] = ['red' if x == 'Z' else 'green' for x in df['Set']]

export FLASK_ENV=development                         

export FLASK_ENV=production 

FLASK_APP=hello.py flask run






If you have installed many dependencies in your system and you need requirements.txt for a specific project, you can install first pipreqs:

$ pip install pipreqs

and execute the below command under the project folder.

$ pipreqs

This command will generate requirements.txt file for the particular project.
from flask import Flask

app = Flask(__name__)


@app.route("/")
def hello():
    """Root endpoint: respond with a static greeting."""
    return "Hello World!"
text = "This is \n some text"
y0, dy = 50, 4
for i, line in enumerate(text.split('\n')):
    y = y0 + i*dy
    cv2.putText(img, line, (50, y ), cv2.FONT_HERSHEY_SIMPLEX, 1, 2)
# check if the event is the X button 
        if event.type==pygame.QUIT:
            # if it is quit the game
            pygame.quit() 
            exit(0) 

if event.type == pygame.KEYUP:
            if event.key==pygame.K_w:
                keys[0]=False
            elif event.key==pygame.K_a:
                keys[1]=False
            elif event.key==pygame.K_s:
                keys[2]=False
            elif event.key==pygame.K_d:
                keys[3]=False
grouped_df = df.groupby("segment")

max_cust = grouped_df.customers.max()
grouped_df = df.groupby("segment")
max_cust = df.groupby("segment").customers.transform(max)
class MLP(torch.nn.Module):
    """Two-layer perceptron: Linear -> Sigmoid -> Linear.

    Args:
        D_in: input feature dimension.
        H: hidden layer width.
        D_out: output dimension.
    """

    def __init__(self, D_in, H, D_out):
        """Instantiate the two nn.Linear modules and assign them as members."""
        # BUG FIX: the original called super(MLPModel, self) -- MLPModel is
        # undefined (the class is named MLP), and both docstrings were
        # mis-indented, which made the class a SyntaxError.
        super(MLP, self).__init__()
        self.hidden1 = torch.nn.Linear(D_in, H)
        self.hidden2 = torch.nn.Linear(H, D_out)
        self.sig = torch.nn.Sigmoid()

    def forward(self, x):
        """Accept an input Tensor and return hidden2(sigmoid(hidden1(x)))."""
        out = self.sig(self.hidden1(x))
        out = self.hidden2(out)
        return out
df.drop(['results', 'cum_sum_pred', 'cus_sum_pred'], axis=1)
// python
def isbright(image, dim=10, thresh=0.5):
    """Return True when the image's mean normalized lightness exceeds thresh.

    The image is shrunk to dim x dim, converted BGR -> LAB, and the L channel
    is scaled by its own maximum before averaging.
    """
    small = cv2.resize(image, (dim, dim))
    lightness, _a, _b = cv2.split(cv2.cvtColor(small, cv2.COLOR_BGR2LAB))
    normalized = lightness / np.max(lightness)
    return np.mean(normalized) > thresh

// c++
bool rockface_image_is_bright(cv::Mat img_mat, int dim, float threshold)
{
	// Resize image to 10x10
	cv::resize(img_mat, img_mat, { dim, dim });

	// Convert color space to LAB format and extract L channel
	cv::cvtColor(img_mat, img_mat, cv::COLOR_RGB2Lab);
	cv::Mat labchannel[3];
	cv::split(img_mat, labchannel);

	cv::imshow("L", labchannel[0]);
	cv::waitKey(0);

	// Normalize L channel by dividing all pixel values with maximum pixel value
	cv::Mat L;
	cv::normalize(labchannel[0], L, 0, 1, cv::NORM_MINMAX);

	// Return True if mean is greater than thresh else False
	float brightness = cv::mean(L).val[0];
	std::cout << "brightness: " << brightness << std::endl;
	return brightness > threshold;
}
>>> 'Coordinates: {latitude}, {longitude}'.format(latitude='37.24N', longitude='-115.81W')
'Coordinates: 37.24N, -115.81W'
>>> coord = {'latitude': '37.24N', 'longitude': '-115.81W'}
>>> 'Coordinates: {latitude}, {longitude}'.format(**coord)
'Coordinates: 37.24N, -115.81W'
import shutil
import operator
import os
from PIL import Image, ImageChops
from operator import itemgetter

def process(file_name):
	im = Image.open(file_name,"r")
	# Get the size of the picture
	width, height = im.size

	#convert to RGB
	pixels = im.load()

	d = {}

	for x in range(width):
		for y in range(height):
			if pixels[x,y] not in d:
				d[pixels[x,y]]=1
			else:
				d[pixels[x,y]]+=1
	print d
	sorted_d = sorted(d.items(), key=operator.itemgetter(0))
	background = sorted_d[0][0]
	captcha = sorted_d[1][0]
	print background, captcha

	for x in range(width):
		for y in range(height):
			if pixels[x,y] != captcha:
				pixels[x,y]=0
			else:
				pixels[x,y]=1
	im.putpalette([0, 0, 0,255,255,255])
	#pattern fix
	for x in range(1,width-1,1):
		for y in range(1,height-1,1):
			if (pixels[x,y] != pixels[x-1,y-1]) and (pixels[x,y] != pixels[x+1,y-1]) and (pixels[x,y] != pixels[x-1,y+1]) and (pixels[x,y] != pixels[x+1,y+1]):
				pixels[x,y]=1

	im.save("tmp.png")

def main(file_name):
	print "[?] Input file:", file_name
	process(file_name)
	captcha_filtered = Image.open('tmp.png')
	captcha_filtered = captcha_filtered.convert("P")
	inletter = False
	foundletter = False
	start = 0
	end = 0

	letters = []

	for y in range(captcha_filtered.size[0]): # slice across
		for x in range(captcha_filtered.size[1]): # slice down
			pix = captcha_filtered.getpixel((y,x))
			if pix != 0:
				inletter = True

		if foundletter == False and inletter == True:
			foundletter = True
			start = y

		if foundletter == True and inletter == False:
			foundletter = False
			end = y
			letters.append((start,end))

		inletter = False

	print "[+] Horizontal positions:", letters

	captcha = ""

	if len(letters) == 4:
		file_names = ["d-0.png", "d-3.png", "d-6.png", "d-9.png", "l-c.png", "l-f.png", "l-i.png", "l-m.png", "l-p.png", "l-s.png", "l-v.png", "l-y.png", "u-b.png", "u-E.png", "u-H.png", "u-k.png", "u-N.png", "u-q.png", "u-t.png", "u-w.png", "u-z.png", "d-1.png", "d-4.png", "d-7.png", "l-a.png", "l-d.png", "l-g.png", "l-j.png", "l-n.png", "l-q.png", "l-t.png", "l-w.png", "l-z.png", "u-c.png", "u-f.png", "u-i.png", "u-l.png", "u-o.png", "u-r.png", "u-u.png", "u-x.png", "d-2.png", "d-5.png", "d-8.png", "l-b.png", "l-e.png", "l-h.png", "l-k.png", "l-o.png", "l-r.png", "l-u.png", "l-x.png", "u-A.png", "u-d.png", "u-G.png", "u-J.png", "u-m.png", "u-p.png", "u-s.png", "u-V.png", "u-y.png"]
		for letter in letters:
			im3 = captcha_filtered.crop(( letter[0], 0, letter[1],captcha_filtered.size[1] ))
			im3 = im3.crop((0, 92, im3.size[0], 220))
			base = im3.convert('L')

			class Fit:
				letter = None
				difference = 0

			best = Fit()

			for letter in file_names:
				#print letter
				current = Fit()
				current.letter = letter

				sample_path = "samples/" + letter
				#print sample_path
				sample = Image.open(sample_path).convert('L').resize(base.size)
				difference = ImageChops.difference(base, sample)

				for x in range(difference.size[0]):
					for y in range(difference.size[1]):
						current.difference += difference.getpixel((x, y))

				if not best.letter or best.difference > current.difference:
					best = current

			#final captcha decoded
			tmp = ''
			tp, letter = best.letter.split('-')
			letter = letter.split('.')[0]
			if tp == 'u':
				tmp = letter.upper()
			else:
				tmp = letter
			print "[+] New leter:", tmp
			captcha = captcha + tmp
		print "[+] Correct captcha:", captcha
	else:
		print "[!] Missing characters in captcha !"

if __name__ == '__main__':
	main("captcha.png")
import wrds
db = wrds.Connection(wrds_username='joe')
db.raw_sql('SELECT date,dji FROM djones.djdaily')
// This variable is not encapsulated.
// Therefore it's missing some context. What is it naming? I dunno.
string name;

// BASIC ENCAPSULATION
// These variables and methods are encapsulated in the Dog class, so
// they make more sense now. They are members of the Dog class.
class Dog {
    string name;
    int age;

    void Bark() {
        Console.WriteLine("Bark!");
    }

    void Rename(string newName) {
        name = newName;
    }
}

// ACCESS MODIFIERS
// The members above have context, but they are accessible by any other
// code. To define access, use access modifiers:
// - public: Member is accessible by any other code in the same assembly
//    or another assembly that references it.
// - private: Member is accessible only by code in the same class or
//    struct.
// - protected: Member is accessible only by code in the same class or
//    any class that is derived from that class.
// - internal: Member is accessible only by code within the same assembly.
// - protected internal: Member is accessible only to code within the
//    same assembly or by any class that is derived from that class.
class Dog {
    private string name;
    private int age;

    public void Bark() {
        Console.WriteLine("Bark!");
    }

    public void Rename(string newName) {
        name = newName;
    }

    public string GetName() {
        return name;
    }

    public void SetAge(int newAge) {
        if(newAge > 0)
            age = newAge;
    }

    public int GetAge() {
        return age;
    }
}

// PROPERTIES
// Now the variables are private and only accessible through the public
// methods. This is great cuz we can control how the private variables
// are accessed and modified, but this class seems bulky for only really
// having 2 variables and 1 unique method. Properties allow us to slim
// it down while still enforcing rules...
class Dog {
    private string name;
    private int age;

    public string Name { get; set; }

    public int Age {
        get { return age; }
        set {
            if( value > 0)
                age = value;
        }
    }

    public void Bark() {
        Console.WriteLine("Bark!");
    }
}
import random
import pygame

# ========== Pygame Config ================

WIDTH = 400
HEIGHT = 400
screen_size = [WIDTH, HEIGHT]

# Define the colors we will use in RGB format
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
BLUE = (0, 0, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)

# Initialize the game engine
pygame.init()

# Set the height and width of the screen
screen = pygame.display.set_mode(screen_size)
pygame.display.set_caption("Ball invaders")


# ========== Functions ===================


def p5_map(n, start1, stop1, start2, stop2):
    """Linearly remap n from the range [start1, stop1] to [start2, stop2],
    mirroring p5.js's map() helper."""
    fraction = (n - start1) / (stop1 - start1)
    return fraction * (stop2 - start2) + start2


# ========== Classes =====================


class Vector:
    """A 2-D displacement with x and y components."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __invert__(self):
        """Return the component-wise negation, so ~v points the other way."""
        return Vector(-self.x, -self.y)


class Position:
    """A mutable 2-D point used as a game-object location."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def coordinates(self):
        """Return the point as an (x, y) tuple."""
        return self.x, self.y

    def is_clear(self, game_):
        """Return True when no position in game_.snake occupies this point.

        NOTE(review): `game_.snake` looks like a leftover from a snake game;
        the Game class in this file has no `snake` attribute -- verify callers.
        """
        for pos in game_.snake.positions:
            # BUG FIX: the original compared the bound method `pos.coordinates`
            # against a tuple, which is always False; the method must be called.
            if pos.coordinates() == self.coordinates():
                return False
        return True

    def update(self, vector):
        """Translate the point in place by the given vector."""
        self.x += vector.x
        self.y += vector.y

    def __eq__(self, other):
        # Fuzzy equality: points match when within the global game's padding.
        return abs(self.x - other.x) < game.object_padding and abs(self.y - other.y) < game.object_padding


VECTOR_SIZE = 5

RIGHT = Vector(VECTOR_SIZE, 0)
LEFT = Vector(-VECTOR_SIZE, 0)
UP = Vector(0, -VECTOR_SIZE)
DOWN = Vector(0, VECTOR_SIZE)


# ========== Game Objects ================


class Shot:
    """Base projectile: a circle that moves vertically; subclasses set direction."""

    def __init__(self, game_, init_pos):
        self.game = game_
        # Copy the coordinates so the shot moves independently of the shooter.
        self.position = Position(init_pos.x, init_pos.y)
        self.speed = 5

    def update(self):
        """Move the shot one frame; overridden by subclasses."""
        pass

    def encounters(self, other):
        """Return True when this shot overlaps another object (fuzzy Position.__eq__)."""
        return self.position == other.position

    def show(self):
        """Draw the shot as a blue circle on the game screen."""
        pygame.draw.circle(self.game.screen, BLUE, self.position.coordinates(), 5)


class FriendlyShot(Shot):
    """A player shot that travels upward (negative y)."""

    def __init__(self, game_, init_pos):
        Shot.__init__(self, game_, init_pos)

    def update(self):
        """Advance the shot upward by its speed."""
        self.position.update(Vector(0, -self.speed))


class EnemyShot(Shot):
    """An enemy shot that travels downward and is drawn in red."""

    def __init__(self, game_, init_pos):
        Shot.__init__(self, game_, init_pos)

    def update(self):
        """Advance the shot downward by its speed."""
        self.position.update(Vector(0, self.speed))

    def show(self):
        """Draw the shot as a red circle on the game screen."""
        pygame.draw.circle(self.game.screen, RED, self.position.coordinates(), 5)


class Spaceship:
    """The player's ship, anchored near the bottom of the screen."""

    def __init__(self, game_):
        self.game = game_
        # Start centered horizontally, 20 px above the bottom edge.
        self.position = Position(WIDTH / 2, HEIGHT - 20)

    def shoot(self):
        """Fire a FriendlyShot from the ship's current position."""
        self.game.friendly_shots.append(FriendlyShot(self.game, self.position))

    def show(self):
        """Draw the ship as a black circle on the game screen."""
        pygame.draw.circle(self.game.screen, BLACK, self.position.coordinates(), 10)


class Enemy:
    """A green invader that jitters in a small up/left/down/right square."""

    def __init__(self, game_, position_, vibration_rate=1):
        self.game = game_
        self.position = position_
        self.vibration_rate = vibration_rate
        self.vibration_pattern = ['U', 'L', 'D', 'R']
        self.vibration_counter = 0

    def vibrate(self):
        """Advance one step of the jitter cycle, wrapping at the pattern end."""
        step = self.vibration_pattern[self.vibration_counter]
        moves = {
            'U': (0, -self.vibration_rate),
            'L': (-self.vibration_rate, 0),
            'D': (0, self.vibration_rate),
            'R': (self.vibration_rate, 0),
        }
        dx, dy = moves.get(step, (0, 0))
        self.position.x += dx
        self.position.y += dy
        self.vibration_counter = (self.vibration_counter + 1) % len(self.vibration_pattern)

    def kill(self):
        """Remove this enemy from the game's enemy list."""
        self.game.enemies.remove(self)

    def shoot(self):
        """Fire an EnemyShot from the enemy's current position."""
        self.game.enemy_shots.append(EnemyShot(self.game, self.position))

    def show(self):
        """Jitter one step, then draw the enemy as a green circle."""
        self.vibrate()
        pygame.draw.circle(self.game.screen, GREEN, self.position.coordinates(), 10)


class Game:
    """Holds all mutable game state: the ship, both shot lists, and enemies."""

    def __init__(self, screen_, object_padding=10):
        self.screen = screen_
        self.ship = Spaceship(self)
        # Hit-test tolerance used by Position.__eq__.
        self.object_padding = object_padding
        self.friendly_shots = []
        self.enemy_shots = []
        self.enemies = []
        self.spawn_enemies()

    def spawn_enemies(self):
        """Create a row of enemies along the top of the screen."""
        positions_ = [Position(x, 20) for x in range(20, WIDTH - 10, 50)]
        for pos in positions_:
            self.enemies.append(Enemy(self, pos))

    def enemies_shoot(self):
        """With 10% probability per frame, make a random enemy fire."""
        shooting_chance = 0.1
        # BUG FIX: random.choice raises IndexError on an empty sequence, so
        # the original crashed once every enemy had been destroyed.
        if self.enemies and random.random() < shooting_chance:
            random.choice(self.enemies).shoot()


if __name__ == '__main__':

    game = Game(screen)

    # Loop until the user clicks the close button.
    done = False
    clock = pygame.time.Clock()

    # Mainloop
    while not done:

        # This limits the while loop to a max of 10 times per second.
        # Leave this out and we will use all CPU we can.
        # NOTE(review): tick(50) actually caps the loop at 50 FPS, not 10.
        clock.tick(50)

        pressed_keys = pygame.key.get_pressed()
        if pressed_keys[pygame.K_RIGHT]:
            if not game.ship.position.x + RIGHT.x >= WIDTH:
                game.ship.position.update(RIGHT)
        elif pressed_keys[pygame.K_LEFT]:
            # NOTE(review): LEFT.x is negative, so `x - LEFT.x` equals x + 5;
            # this guard only trips once the ship is already past the left
            # edge. It should probably be `x + LEFT.x <= 0`.
            if not game.ship.position.x - LEFT.x <= 0:
                game.ship.position.update(LEFT)

        for event in pygame.event.get():  # User did something
            if event.type == pygame.QUIT:  # If user clicked close
                done = True  # Flag that we are done so we exit this loop
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_SPACE:
                    game.ship.shoot()

        # Clear the screen and set the screen background
        screen.fill(WHITE)

        # ===========> UPDATE POSITIONS HERE <========

        game.enemies_shoot()

        for shot in game.friendly_shots:
            shot.update()
            for enemy in game.enemies:
                if shot.encounters(enemy):
                    enemy.kill()
                    # NOTE(review): these `del`s only unbind the local names;
                    # the shot itself is never removed from
                    # game.friendly_shots, so it keeps flying and drawing.
                    del enemy
                    del shot
                    break

        for shot in game.enemy_shots:
            shot.update()
            # Ship hit: terminate the process immediately with a nonzero code.
            if shot.encounters(game.ship):
                exit(1)

        # ===========> START DRAWING HERE <===========

        game.ship.show()

        for shot in game.friendly_shots:
            shot.show()

        for shot in game.enemy_shots:
            shot.show()

        for enemy in game.enemies:
            enemy.show()

        # ===========> END DRAWING HERE <=============

        # Go ahead and update the screen with what we've drawn.
        # This MUST happen after all the other drawing commands.
        pygame.display.flip()
symbols = ['BTCUSDT', 'ETHUSDT']

twm = ThreadedWebsocketManager()
twm.start()

twm.start_multiplex_socket(callback=lambda msg: print('Spot:', msg), streams=[f'{s.lower()}@bookTicker' for s in symbols])
twm.start_futures_multiplex_socket(callback=lambda msg: print('Futures:', msg), streams=[f'{s.lower()}@bookTicker' for s in symbols])
<form action='/event' method='post'>
Year ("yyyy"):  <input type='text' name='year' />
Month ("mm"):  <input type='text' name='month' />
Day ("dd"):  <input type='text' name='day' />
Hour ("hh"):  <input type='text' name='hour' />
Description:  <input type='text' name='info' />
             <input type='submit' name='submit' value='Submit'/>
</form>
>>> a = 1
>>> b = 2
>>> a, b = b, a
>>> a
2
>>> b
1
#importing Autoviz class
from autoviz.AutoViz_Class import AutoViz_Class#Instantiate the AutoViz class
AV = AutoViz_Class()

df = AV.AutoViz('car_design.csv')
from collections import defaultdict, namedtuple, Counter, deque

Counter(words).most_common(6)
challenges_done = [('mike', 10), ('julian', 7), ('bob', 5),
                   ('mike', 11), ('julian', 8), ('bob', 6)]

challenges = defaultdict(list)
for name, challenge in challenges_done:
    challenges[name].append(challenge)

challenges
User = namedtuple('User', 'name role sur')
user = User(name='bob', role='coder', sur='ellepola')
User = namedtuple('User', 'name role sur')
user = User(name='bob', role='coder', sur='ellepola')
def clean(txt):
    """Strip simple HTML artifacts from a pandas string Series.

    Removes <br/> tags, whole <a ...>...</a> runs, the entity fragments
    &amp/&gt/&lt, and replaces non-breaking spaces with regular spaces.

    Parameters
    ----------
    txt : pd.Series
        Series of strings to clean.

    Returns
    -------
    pd.Series
        The cleaned series.
    """
    # BUG FIX: pandas >= 2.0 defaults Series.str.replace to regex=False, so
    # these regex patterns silently stopped matching; pass regex=True.
    txt = txt.str.replace("(<br/>)", "", regex=True)
    txt = txt.str.replace('(<a).*(>).*(</a>)', '', regex=True)
    txt = txt.str.replace('(&amp)', '', regex=True)
    txt = txt.str.replace('(&gt)', '', regex=True)
    txt = txt.str.replace('(&lt)', '', regex=True)
    txt = txt.str.replace('(\xa0)', ' ', regex=True)
    return txt
df['xxx column'] = clean(df['xxx column'])
# Download the helper library from https://www.twilio.com/docs/python/install
import os
from twilio.rest import Client


# Find your Account SID and Auth Token at twilio.com/console
# and set the environment variables. See http://twil.io/secure
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)

message = client.messages.create(
                              from_='+15017122661',
                              body='body',
                              to='+15558675310'
                          )

print(message.sid)
from pathlib import Path
p = Path(r'../data/property_files/final_source_matching_file').glob('**/*')
files = [x for x in p if x.is_file()]
pd.set_option('display.max_columns', 500)
pd.set_option('display.max_rows', 500)
def addNums(a, b):
    """Return the sum of a and b."""
    return a + b
class Solution:
    def addDigits(self, num: int) -> int:
        """Return the digital root of num using the mod-9 shortcut."""
        if num == 0:
            return 0
        remainder = num % 9
        return 9 if remainder == 0 else remainder

#----------------------------------------

class Solution:
    def addDigits(self, num: int) -> int:
        """Digital root in one expression: 1 + (num - 1) % 9, with 0 special-cased."""
        return 0 if num == 0 else 1 + (num - 1) % 9
  
        
class Solution:
    def minDeletionSize(self, strs: List[str]) -> int:
        """Count the columns of strs that are not already in sorted order."""
        unsorted_columns = 0
        for col_idx in range(len(strs[0])):
            column = [row[col_idx] for row in strs]
            if column != sorted(column):
                unsorted_columns += 1
        return unsorted_columns


#" using unzip "

class Solution:
    def minDeletionSize(self, A: List[str]) -> int:
        """Count unsorted columns by transposing the grid with zip(*A)."""
        return sum(1 for column in zip(*A) if list(column) != sorted(column))
        
I was not in a mood to do this; try again later.
new_list = sorted(a_list, key=lambda x: (len(x), x))
mask=np.triu(np.ones_like(corr,dtype=bool))

f ,ax = plt.subplots(figsize=(11,9))
cmap=sns.diverging_palette(230,20, as_cmap=True)
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
           square=True, linewidth=.5, cbar_kws={'shrink':.5})
# Solution with a temporary variable

a = input("Enter variable a: ")
b = input("Enter variable b: ")

print(f"Variable a is {a}: ")
print(f"Variable b is {b}: ")

c = a
a = b
b = c

print(f"Variable a is now {a}: ")
print(f"Variable b is now {b}: ")
public class Main {
  int x = 5;

  public static void main(String[] args) {
    Main myObj = new Main();
    System.out.println(myObj.x);
  }
}
class Person:
    """A simple person record with a name and an age."""

    def __init__(self, name, age):
        self.name = name
        self.age = age


p1 = Person("John", 36)

print(p1.name)
print(p1.age)
for p in ax.patches:
    values= '{:.0f}'.format(p.get_height())
    x = p.get_x() + p.get_width()/2
    y = p.get_height()
    ax.annotate(values, (x, y),ha='center', va ='bottom', fontsize = 11)
for p in ax.patches:
    values= '{:.0f}'.format(p.get_height())
    x = p.get_x() + p.get_width()/2
    y = p.get_height()
    ax.annotate(values, (x, y),ha='center', va ='bottom', fontsize = 11)
import warnings
warnings.filterwarnings('ignore')
def indec_query(query_name='IPC_Nacional', url='https://apis.datos.gob.ar/series/api/series?ids=145.3_INGNACNAL_DICI_M_15'):
    """Fetch a time series from Argentina's datos.gob.ar API and save it as CSV.

    By default it downloads the national CPI (IPC) monthly variation.

    Parameters
    ----------
    query_name : str, optional
        Name adopted for the query; the generated CSV file carries this name.
    url : str, optional
        URL built with the national series query generator
        (https://datosgobar.github.io/series-tiempo-ar-call-generator/).
    """
    r = requests.get(url=url)
    # Fail fast on HTTP errors instead of crashing later on a bad payload.
    r.raise_for_status()

    df = pd.DataFrame(r.json()['data'])

    # The API returns positional columns: 0 = date, 1 = value.
    df.rename(columns={0: 'fecha', 1: query_name}, inplace=True)

    df.to_csv('dataset_' + query_name, index=False)
def bcra_query(variable='dolar_blue', url='https://api.estadisticasbcra.com/usd'):
    """Fetch a series from the BCRA statistics API and save it as CSV.

    Parameters
    ----------
    variable : str, optional
        Column name for the series values; also used in the CSV file name.
    url : str, optional
        API endpoint to query.

    NOTE(review): relies on a module-level `bearer` token defined elsewhere --
    confirm it is set before calling.
    """
    r = requests.get(url=url, headers={'Authorization': 'BEARER ' + bearer})
    # Fail fast on HTTP errors instead of crashing later on a bad payload.
    r.raise_for_status()

    df = pd.DataFrame(r.json())

    # API keys: 'v' = value, 'd' = date.
    df.rename(columns={'v': variable, 'd': 'fecha'}, inplace=True)

    # Dead code removed: the original computed date.today(), stringified it,
    # and discarded the result.
    df.to_csv('dataset_' + variable, index=False)
# Load the data
churn_df = pd.read_pickle("CHURN_1.p")
g = sns.*plot 
ax = g 
for p in ax.patches:
    ax.text(p.get_x() + p.get_width()/2., p.get_height(), '{0:.2f}'.format(p.get_height()), 
        fontsize=12, color='black', ha='center', va='bottom')
from operator import attrgetter

df['duration_dataset'] = (
    df['date_1'].dt.to_period('M') -
    df['date_2'].dt.to_period('M')).apply(attrgetter('n'))
    def create(self, validated_data):
        """Build and save a Snip from validated serializer data.

        Pops the nested 'tags' and 'language' entries, saves the Snip, then
        attaches each tag through the many-to-many relation.

        NOTE(review): this returns serialized data, not the model instance;
        DRF's create() conventionally returns the instance -- verify callers.
        """
        # print(validated_data)
        tags = validated_data.pop("tags")
        language = validated_data.pop("language")
        snip = Snip(**validated_data)
        # languageObj, created = Language.objects.get_or_create(**language)
        snip.language = language
        snip.save()
        for tag in tags:
            # tagObj, created = Tag.objects.get_or_create(**tag)
            # print(tagObj)
            snip.tags.add(tag)
        return ShallowSnipSerializer(snip).data
import pandas as pd
from datetime import datetime

# Snap each timestamp in the series to the first day of its month.
ps = pd.Series([datetime(2014, 1, 7), datetime(2014, 3, 13), datetime(2014, 6, 12)])
new = ps.apply(lambda ts: ts.replace(day=1))
import pandas as pd

# Student marks keyed by subject.
data = {'name': ['Somu', 'Kiku', 'Amol', 'Lini'],
        'physics': [68, 74, 77, 78],
        'chemistry': [84, 56, 73, 69],
        'algebra': [78, 88, 82, 87]}


# Create the dataframe.
df_marks = pd.DataFrame(data)
print('Original DataFrame\n------------------')
print(df_marks)

new_row = {'name': 'Geo', 'physics': 87, 'chemistry': 92, 'algebra': 97}
# Append the row to the dataframe.
# BUG FIX: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat with ignore_index=True is the supported replacement.
df_marks = pd.concat([df_marks, pd.DataFrame([new_row])], ignore_index=True)

print('\n\nNew row added to DataFrame\n--------------------------')
print(df_marks)
class Singleton(type):
    """Metaclass caching a single instance per class.

    The first call to the class constructs and stores the instance; every
    subsequent call returns the stored one.
    """

    _instances = {}

    def __call__(cls, *args, **kwargs):
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance


# Python 2
class MyClass():
    __metaclass__ = Singleton


# Python 3
class MyClass(metaclass=Singleton):
    pass
da = da.assign_coords(year_month=da.time.dt.strftime("%Y-%m"))
result = da.groupby("year_month") - da.groupby("year_month").mean("time")
"""index.py
Usage:
  index.py serve <dir>
  index.py serve <dir> [--port=<port>]
  index.py info <dir>
  index.py (-h | --help)
  index.py --version
Options:
  -h --help     Show this screen.
  --version     Show version.
  --port=<port> Port to bind to [default: 8080].
"""
# -*- coding: utf-8 -*-

import os
import multiprocessing
import sys


import crayons
import delegator
from docopt import docopt
from flask import Flask, request, abort
from livereload import Server as ReloadServer
from whitenoise import WhiteNoise


def yield_files(dir, endswith):
    """Yield os.sep-prefixed relative paths of files under *dir* whose names
    end with *endswith*, skipping directories that start with a period."""
    for root, dirs, files in os.walk(dir):
        # Strip the leading "<dir>/" so paths are relative to dir.
        rel_root = root[len(dir) + 1:]

        # Exclude directories that start with a period.
        if rel_root.startswith('.'):
            continue

        for name in files:
            if name.endswith(endswith):
                yield os.sep.join((rel_root, name))


def do_info():
    """Runs the 'info' command, from the CLI. Currently a no-op placeholder."""
    return None


def convert_dir(dir):
    """Return the absolute path of *dir*, exiting with an error message when
    it is not an existing directory."""
    dir = os.path.abspath(dir)
    # BUG FIX: the original validated with `assert` wrapped in
    # try/except AssertionError -- asserts are stripped under `python -O`,
    # silently disabling the check. Test the condition directly instead.
    if not os.path.isdir(dir):
        print(crayons.red('The directory given must be a valid one!'))
        sys.exit(1)

    return dir

def convert_port(port):
    """Return *port* as an int, defaulting to 8080 when None; exit with an
    error message on a non-numeric value."""
    if port is None:
        # BUG FIX: the original returned the *string* '8080' here while the
        # explicit-port branch returned an int, giving callers an
        # inconsistent type. Always return an int.
        return 8080
    try:
        return int(port)
    except ValueError:
        print(crayons.red('The port given must be a valid number!'))
        sys.exit(1)

def prepare_extras(request):
    """Flatten a Flask request's json/form/args into a list of (key, value)
    pairs.

    Later sources overwrite earlier ones per key; each mapping's values are
    assumed iterable, and every element yields one (key, element) pair.
    """
    extras = {}
    for source in (request.json, request.form, request.args):
        if source:
            extras.update(source)

    return [(key, value) for key, values in extras.items() for value in values]

def find(endswith, dir, path):
    """Return the first file under *dir* ending with *endswith* whose relative
    path contains '<path><endswith>', or None when nothing matches."""
    # FIX: removed leftover Python-2 debug `print` statements -- they were a
    # SyntaxError under Python 3 and spammed stdout for every candidate file.
    target = '{0}{1}'.format(path, endswith)
    for fs_path in yield_files(dir, endswith):
        if target in fs_path:
            return fs_path
    return None

def directory_listing(path):
    """Return an HTML list of <li> links, one per entry in *path*."""
    items = ('<li><a href="{0}">{0}</a></li>'.format(entry) for entry in os.listdir(path))
    return ''.join(items)

def do_serve(dir, port):
    """Runs the 'serve' command, from the CLI."""

    # Convert dir and port to appropriate values.
    dir = convert_dir(dir)
    port = convert_port(port)

    os.chdir(dir)

    app = Flask(__name__)

    @app.route('/', defaults={'path': './'})
    @app.route('/<path:path>')
    def catch_all(path):

        # Support for index.html.
        found = find('index.html', dir, path)
        
        # Support for index.py
        if not found:
            found = find('index.py', dir, path)

        # Support for directory listing.
        if not found:
            found = find('.py', dir, path)
        

        # A wild script was found!
        if found:
            if '.py' in found:
                extras = prepare_extras(request)
            
                for key, value in extras:
                    os.environ[key] = value
                
                c = delegator.run('python {0}'.format(found))

                for key, value in extras:
                    del os.environ[key]

                return c.out

            elif '.html' in found:
                # Strip prepending slashes. 
                if found.startswith('/'):
                    found = found[1:]
                
                # Open the file, and spit out the contents. 
                with open(found) as html:
                    return html.read()

        else:
            if os.path.isdir(path):
                return directory_listing(path)

            abort(404)


    @app.before_request
    def before_request():
        app.add_files(dir, prefix='/')

    @app.after_request
    def after_request(response):
        response.headers['X-Powered-By'] = 'index.py by Kenneth Reitz'
        return response

    app = WhiteNoise(app, root=dir)
    server = ReloadServer(app)
    server.watch('{0}/**'.format(dir))

    # Alert the user.
    print(crayons.yellow('Serving up \'{0}\' on port {1}.'.format(dir, port)))
    server.serve(port=port)


def main():
    """CLI entry point: parse docopt arguments and dispatch to a subcommand."""
    args = docopt(__doc__, version='index.py, version 0.0.0')

    if args['info']:
        do_info()

    if args['serve']:
        do_serve(dir=args['<dir>'], port=args['--port'])


if __name__ == '__main__':
    main()
with open(filename) as f:
    mylist = f.read().splitlines() 
{<key_value>: <value> for <var> in <sequence> if <condition>}
# Sum the integers 1..100 and print the total (5050).
s = sum(range(1, 101))
print(s)
# Fruit name -> count.
fruit = {
    "elderberries": 1,
    "figs": 1,
    "apples": 2,
    "durians": 3,
    "bananas": 5,
    "cherries": 8,
    "grapes": 13,
}

# Flatten the mapping into [name, count] rows for table rendering.
table_data = [[name, count] for name, count in fruit.items()]
from random import randint
x = randint(1,10)
for Name in range (x):
    print('My Name is Hasan')
for x in range (1):
    from random import randint
    x = randint(1,51)
    print('One random number between 1 and 50: ', x)
for y in range (1):
    from random import randint
    y = randint(2,6)
    print('One random number between 2 and 5: ', y)
print("X power y is: ", x**y)
# SECURITY FIX: the original used eval(input(...)), which executes arbitrary
# code typed by the user. float() parses plain numbers safely.
num1 = float(input('Enter the first number: '))
num2 = float(input('Enter the second number: '))
print('The average of the numbers you entered is', (num1 + num2) / 2)
students_period_A = ["Alex", "Briana", "Cheri", "Daniele"]
students_period_B = ["Dora", "Minerva", "Alexa", "Obie"]


# method 1, problematic
for students in students_period_A:
  students_period_B.append(students)
  print(students_period_B)

# method 2, more elegant
all_students = students_period_A + students_period_B
for student in all_students:
  print(student)

print(all_students)


# output 

['Dora', 'Minerva', 'Alexa', 'Obie', 'Alex']
['Dora', 'Minerva', 'Alexa', 'Obie', 'Alex', 'Briana']
['Dora', 'Minerva', 'Alexa', 'Obie', 'Alex', 'Briana', 'Cheri']
['Dora', 'Minerva', 'Alexa', 'Obie', 'Alex', 'Briana', 'Cheri', 'Daniele']
Alex
Briana
Cheri
Daniele
Dora
Minerva
Alexa
Obie
Alex
Briana
Cheri
Daniele
['Alex', 'Briana', 'Cheri', 'Daniele', 'Dora', 'Minerva', 'Alexa', 'Obie', 'Alex', 'Briana', 'Cheri', 'Daniele']
python_topics = ["variables", "control flow", "loops", "modules", "classes"]

#Your code below: 
length = len(python_topics)
index = 0
 
while index < length:
  print("I am learning about "+ python_topics[index])
  index += 1

# output 

I am learning about variables
I am learning about control flow
I am learning about loops
I am learning about modules
I am learning about classes
python_topics = ["variables", "control flow", "loops", "modules", "classes"]

#Your code below: 
length = len(python_topics)
index = 0
 
while index < length:
  print(python_topics[index])
  index += 1

# output 

variables
control flow
loops
modules
classes
with open('dict.csv', 'w') as csv_file:  
    writer = csv.writer(csv_file)
    for key, value in mydict.items():
       writer.writerow([key, value])
import tejapi
tejapi.ApiConfig.api_key = "your key"
TSMC = tejapi.get(
    'TWN/EWPRCD', 
    coid = '2330',
    mdate={'gte':'2020-06-01', 'lte':'2021-04-12'}, 
    opts={'columns': ['mdate','open_d','high_d','low_d','close_d', 'volume']}, 
    paginate=True
    )
UMC = tejapi.get(
    'TWN/EWPRCD', 
    coid = '2303',
    mdate={'gte':'2020-06-01', 'lte':'2021-04-12'},
    opts={'columns': ['mdate','open_d','high_d','low_d','close_d', 'volume']}, 
    paginate=True
    )
UMC = UMC.set_index('mdate')
TSMC = TSMC.set_index('mdate')
import ctypes
# Windows-only. 0x80000002 = ES_CONTINUOUS | ES_DISPLAY_REQUIRED: keeps the
# display on (and the machine awake) until the state is cleared.
ctypes.windll.kernel32.SetThreadExecutionState(0x80000002)
# 0x80000000 = ES_CONTINUOUS alone: restores normal sleep behavior.
ctypes.windll.kernel32.SetThreadExecutionState(0x80000000)
L = [8, 10, 6, 1]

for i in L:
    print(i)
promise = "I will finish the python loops module!"

for promises in range(5):
  print(promise)

# Output 

I will finish the python loops module!
I will finish the python loops module!
I will finish the python loops module!
I will finish the python loops module!
I will finish the python loops module!
def csReverseIntegerBits(n):
    """Return n with the order of its binary digits reversed.

    e.g. 0b1011 (11) -> 0b1101 (13); 0 maps to 0.
    """
    reversed_bits = 0
    while n:
        low_bit = n & 1
        reversed_bits = (reversed_bits << 1) | low_bit
        n >>= 1
    return reversed_bits
class Node:
  """A binary-tree node holding a key plus parent/left/right links."""

  def __init__(self, value):
    self.key = value
    # All links start unset; tree-building code wires them up later.
    self.parent = self.left = self.right = None
# Rows whose Name cell parses as a number get shifted one column to the
# right, moving the stray numeric value out of the Name column.
mask = pd.to_numeric(df['Name'], errors='coerce').notnull()
df[mask] = df[mask].shift(axis=1)
print (df)
  Name  Val Rating
0  ABC  123    B +
1  DEF  234    B +
2  NaN  567     B-
3  GHI  890      D
# Percentage of missing (NaN) values per column, as a small summary frame.
percent_missing = df.isnull().sum() * 100 / len(df)
missing_value_df = pd.DataFrame({'column_name': df.columns,
                                 'percent_missing': percent_missing})
import pandas as pd

# Get unique elements in multiple columns i.e. Name & Age
# BUG FIX: Series.append was deprecated and removed in pandas 2.0;
# pd.concat is the supported way to stack two Series before unique().
uniqueValues = pd.concat([empDfObj['Name'], empDfObj['Age']]).unique()

print('Unique elements in column "Name" & "Age" :')
print(uniqueValues)
import pandas as pd

# create a dataframe with one column
df = pd.DataFrame({"col1": ["a", "b", "a", "c", "a", "a", "a", "c"]})

# setting normalize=True returns relative frequencies (fractions that
# sum to 1) instead of raw counts
item_counts = df["col1"].value_counts(normalize=True)
print(item_counts)
PYTHONPATH=. poetry run pytest tests -W ignore::DeprecationWarning -W ignore::FutureWarning
>>> df2_transposed = df2.T # or df2.transpose()
>>> df2_transposed
              0     1
name      Alice   Bob
score       9.5   8.0
employed  False  True
kids          0     0
# check logs in the worker
docker logs worker --follow

# what to do when postgres reports "No space left on device"
# List all containers
docker ps -a
# Stop them all
docker stop id1 id2 id3 ...
# Prune stopped containers
docker container prune
# Prune unused images
# BUG FIX: removed a stray "(modifié)" chat artifact that broke the command
docker image prune -a
# Prune unused volumes
docker volume prune
# Remove volumes that were not detected automatically
docker volume ls
docker volume rm id1 id2 id3 ...
# Build the phonebook in one literal; insertion order matches the
# original key-by-key assignments.
phonebook = {
    "John": 938477566,
    "Jack": 938377264,
    "Jill": 947662781,
}
print(phonebook)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
import torch
cross_entropy_loss = torch.nn.CrossEntropyLoss()

# Input: f_q (BxCxS) and sampled features from H(G_enc(x))
# Input: f_k (BxCxS) are sampled features from H(G_enc(G(x))
# Input: tau is the temperature used in PatchNCE loss.
# Output: PatchNCE loss
def PatchNCELoss(f_q, f_k, tau=0.07):
    """Contrastive PatchNCE loss: each location's positive is the matching
    location in f_k; all other locations in the same image are negatives."""
    # batch size, channel size, and number of sample locations
    B, C, S = f_q.shape

    # calculate v * v+: BxSx1
    l_pos = (f_k * f_q).sum(dim=1)[:, :, None]

    # calculate v * v-: BxSxS
    l_neg = torch.bmm(f_q.transpose(1, 2), f_k)

    # The diagonal entries are not negatives. Remove them.
    # BUG FIX: masked_fill_ requires a boolean mask; torch.eye() returns a
    # float tensor, which raises on current PyTorch. Also build the mask on
    # the input's device so CUDA inputs work.
    identity_matrix = torch.eye(S, dtype=torch.bool, device=f_q.device)[None, :, :]
    l_neg.masked_fill_(identity_matrix, -float('inf'))

    # calculate logits: (B)x(S)x(S+1)
    logits = torch.cat((l_pos, l_neg), dim=2) / tau

    # return PatchNCE loss: the positive sits at class index 0
    predictions = logits.flatten(0, 1)
    targets = torch.zeros(B * S, dtype=torch.long, device=f_q.device)
    return cross_entropy_loss(predictions, targets)
import requests
# Fetch the page; `html` is a requests.Response object.
html = requests.get('https://google.com')

from bs4 import BeautifulSoup
# BUG FIX: the response variable is named `html`; `product_page` was undefined.
soup = BeautifulSoup(html.content, 'html.parser')

# First <div class="book"> element, or None if absent.
class_book = soup.find('div', {'class': 'book'})
import tempfile

from django.core.files import File
from django.db import models


class Word(models.Model):
    """A word plus a generated text-to-speech MP3 attachment."""
    word = models.CharField(max_length=200)
    audio = models.FileField(upload_to='audio/', blank=True)

    def save(self, *args, **kwargs):
        # BUG FIX: the model field is `word`; `word_vocab` does not exist.
        # NOTE(review): gTTS must be imported elsewhere in this module.
        audio = gTTS(text=self.word, lang='en', slow=True)

        # BUG FIX: gTTS emits binary MP3 bytes, so the temp file must be
        # opened in binary mode, and rewound before Django reads it.
        with tempfile.TemporaryFile(mode='w+b') as f:
            audio.write_to_fp(f)
            f.seek(0)
            file_name = '{}.mp3'.format(self.word)
            self.audio.save(file_name, File(file=f))

        super(Word, self).save(*args, **kwargs)

#The function audio.save(self.word_vocab + ".mp3") won't work in your use case, you must use #write_to_fp or open the file created by this method, as pointed in doccumentation. I hope it helps
python -m main.path.to.file # in terminal
django-admin startproject mysite
 
python manage.py startapp myapp
import pandas as pd
import matplotlib.pyplot as plt

from pandas_profiling import ProfileReport
profile = ProfileReport(gabijos, title='Gabijos g.', html={'style':{'full_width':True}})
profile.to_file("gabijos.html")

mkdir /home/pi/.config/autostart
sudo nano /home/pi/.config/autostart/meteo.desktop


[Desktop Entry]
Type=Application
Name=Meteo
Exec=/usr/bin/python3 /home/pi/mqtt_display_temp_time_01_480.py

(firstEnv)
>>conda install -c anaconda ipykernel
>>python -m ipykernel install --user --name=firstEnv
# Iterate through each line of our list
for each_line in a_list_of_lines:
    script_line_number = script_line_number + 1
    # If an asterisk is found in the line (meaning that a character's lines are starting)
    if each_line.find("*") != -1:
        # Add the character's name to the 'character_appearance_lines' list
        character_appearance_lines.append(each_line)
        # Add the script line number to the appearance list
        character_appearance_lines.append(script_line_number)
import sweetviz as sv

my_report = sv.analyze(my_dataframe)
my_report.show_html() # Default arguments will generate to "SWEETVIZ_REPORT.html"
df_train.loc[df_train.Age.isnull(), 'Age'] = df_train.groupby(['Sex','Pclass','Title']).Age.transform('median')
'''Download videos from youtube.'''
# Author: Stephen J Smith.
# pyknight.com
# 2021.
# To install youtube-dl visit https://youtube-dl.org/
# To upgrade. pip3 install --upgrade youtube-dl

# HOW TO USE THIS SCRIPT.
# 1st: Edit 'path' to a directory on your system.
# 2nd: Cd into the directory where you saved this script.
# 3rd: Run the following command  python3 snatcher.py


import os
import subprocess

# location to store download.
path = '/ENTER/YOUR/DIRECTORY/HERE'

# cd into the above directory.
os.chdir(path)

# confirm you're in the correct directory.
print(os.getcwd()) 

# copy and paste the share link from youtube. 
playList = input('Enter playlist: ')

# retrieve a list of available downloads.
# SECURITY FIX: pass arguments as a list (no shell) so pasted input cannot
# inject shell commands the way os.system('youtube-dl -F ' + playList) could.
subprocess.run(['youtube-dl', '-F', playList])

# select the video quality.
number = input('Enter number: ')

# download.
subprocess.run(['youtube-dl', '-f', number, playList])

import matplotlib.pyplot as plt
import numpy as np
from numpy import save

# Fixed 0-10 axes so clicks map to a known coordinate range.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim([0, 10])
ax.set_ylim([0, 10])

# Accumulates [xdata, ydata] pairs for every click.
points_storage=[]

def onclick(event):
    """Plot each mouse click as a dot and record its data coordinates."""
    print('button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %
          (event.button, event.x, event.y, event.xdata, event.ydata))
    plt.plot(event.xdata, event.ydata, 'o',markersize=5)

    fig.canvas.draw()
    points_storage.append([event.xdata, event.ydata])

cid = fig.canvas.mpl_connect('button_press_event', onclick)
plt.show()

# After the window closes, persist the clicked points to disk.
acumulado =np.asarray(points_storage)

np.save('puntos.npy', acumulado)

print(acumulado)
from matplotlib import pyplot as plt

class LineBuilder:
    """Grow a matplotlib line interactively from mouse clicks.

    The instance registers itself as the button-press callback, so each
    click inside the line's axes appends a point and redraws the canvas.
    """

    def __init__(self, line):
        self.line = line
        self.xs = list(line.get_xdata())
        self.ys = list(line.get_ydata())
        # The instance is callable, so it doubles as the event handler.
        self.cid = line.figure.canvas.mpl_connect('button_press_event', self)

    def __call__(self, event):
        print('click', event)
        # Ignore clicks that land outside this line's axes.
        if event.inaxes != self.line.axes:
            return
        self.xs.append(event.xdata)
        self.ys.append(event.ydata)
        self.line.set_data(self.xs, self.ys)
        self.line.figure.canvas.draw()

fig, ax = plt.subplots()
ax.set_title('click to build line segments')
line, = ax.plot([0], [0])  # empty line
linebuilder = LineBuilder(line)

plt.show()
django-admin startproject mysite

python manage.py startapp myapp
from os import walk

# next() pulls only the first (dirpath, dirnames, filenames) triple, i.e.
# the files directly inside mypath, without recursing.
# NOTE(review): `mypath` must be defined earlier — not visible here.
_, _, filenames = next(walk(mypath))
#!/usr/bin/env python
import subprocess
from multiprocessing import Pool
import os

# Source and destination directories for the backup.
src = "/home/student-03-474f458f89e0/data/prod"
dest = "/home/student-03-474f458f89e0/data/prod_backup"

def run_sync(file):
    """Mirror one entry from src to dest via rsync (-a archive, -r recursive, -q quiet)."""
    print(os.path.join(dest,file))
    subprocess.call(["rsync", "-arq", os.path.join(src,file), os.path.join(dest,file)])

if __name__ == "__main__":
    # One worker per directory entry.
    # NOTE(review): for large directories this spawns len(files) processes;
    # consider capping the pool at os.cpu_count().
    files = os.listdir(src)
    p = Pool(len(files))
    p.map(run_sync, files)
#!/usr/bin/env python3

from multiprocessing import Pool

def run(task):
    """Handle a single task from the pool (demo: just reports it)."""
  # Do something with task here
    print("Handling {}".format(task))

if __name__ == "__main__":
  tasks = ['task1', 'task2', 'task3']
  # Create a pool of specific number of CPUs
  p = Pool(len(tasks))
  # Start each task within the pool
  p.map(run, tasks)
if reg_form.is_valid():
            nuevo_usuario = reg_form.save(commit=False)
            pw_hash = bcrypt.hashpw(clave.encode(), bcrypt.gensalt()).decode() 
            nuevo_usuario.password = pw_hash
            nuevo_usuario.save()
from discord.ext import commands
from os import getenv
from dotenv import load_dotenv

# Bot that responds to commands prefixed with "!".
client = commands.Bot(command_prefix="!")
# Pull variables from a local .env file into the process environment.
load_dotenv()

# Read the token from the environment and start the bot (blocking call).
client.run(str(getenv('BOT_TOKEN')))
# this line will write the code below into a Python script called script.py
%%writefile script.py
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
# Django database settings: local PostgreSQL instance.
# NOTE(review): credentials are hard-coded; move them to environment
# variables before deploying anywhere shared.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'dojoreads_db',
        'USER': 'postgres',
        'PASSWORD': 'root',
        'HOST': '127.0.0.1',
        'PORT': '5432',
    }
}
from requests import PreparedRequest

# ...

@client.event
async def on_member_join(member):
    # Post a welcome-card embed when a member joins, if enabled in config.
    if config['join_leave_message'] is True:
        channel = client.get_channel(config['join_leave_channel'])
        embed = discord.Embed(colour=discord.Colour.green())
        # PreparedRequest is used only to build a correctly URL-encoded
        # query string for the external card-rendering service.
        req = PreparedRequest()
        req.prepare_url(
            url='https://api.xzusfin.repl.co/card?',
            params={
                'avatar': str(member.avatar_url_as(format='png')),
                'middle': 'welcome',
                'name': str(member.name),
                'bottom': str('on ' + member.guild.name),
                'text': '#CCCCCC',
                'avatarborder': '#CCCCCC',
                'avatarbackground': '#CCCCCC',
                'background': '#000000' #or image url
            }
        )
        embed.set_image(url=req.url)
        await channel.send(embed=embed)
size = 30
value = 44
max_value = 100
border_l = '|'
border_r = '|'
fill = '#'
empty = '_'

# Number of filled slots, rounded to the nearest whole slot.
p = round(size*(value/max_value))
ep = size - p
# Build the bar with string repetition instead of char-by-char loops.
bar = border_l + fill * p + empty * ep + border_r

print(bar)
# -------------------------------------------------------------------------------------------
# email retrieving script
# -------------------------------------------------------------------------------------------
#!/usr/bin/env python3

import csv
import sys


def populate_dictionary(filename):
  """Build a {lowercased name: email} dict from a two-column CSV file."""
  email_dict = {}
  with open(filename) as csvfile:
    for row in csv.reader(csvfile, delimiter=','):
      email_dict[row[0].lower()] = row[1]
  return email_dict

def find_email(argv):
  """ Return an email address based on the username given."""
  # Create the username based on the command line input.
  try:
    fullname = str(argv[1] + " " + argv[2])
    # Preprocess the data
    # NOTE(review): '{{ username }}' looks like an unexpanded template
    # placeholder — confirm the path is rendered before deployment.
    email_dict = populate_dictionary('/home/{{ username }}/data/user_emails.csv')
     # If email exists, print it
    if email_dict.get(fullname.lower()):
      return email_dict.get(fullname.lower())
    else:
      return "No email address found"
  except IndexError:
    # Fewer than two name parts were supplied on the command line.
    return "Missing parameters"

def main():
  """Look up the email for the name given on the command line and print it."""
  print(find_email(sys.argv))

if __name__ == "__main__":
  main()


# -------------------------------------------------------------------------------------------
# Unit test script
# -------------------------------------------------------------------------------------------

#!/usr/bin/env python3

import unittest
from emails import find_email


class EmailsTest(unittest.TestCase):
  """Unit tests for find_email (expects user_emails.csv to exist)."""

  def test_basic(self):
    # Known user: both name parts present in the CSV.
    testcase = [None, "Bree", "Campbell"]
    expected = "breee@abc.edu"
    self.assertEqual(find_email(testcase), expected)

  def test_one_name(self):
    # Only one name part given -> IndexError path.
    testcase = [None, "John"]
    expected = "Missing parameters"
    self.assertEqual(find_email(testcase), expected)

  def test_two_name(self):
    # Well-formed name that is absent from the CSV.
    testcase = [None, "Roy","Cooper"]
    expected = "No email address found"
    self.assertEqual(find_email(testcase), expected)

if __name__ == '__main__':
  unittest.main()

my_new_list = [6, 3, 8, "12", 42]

def OrganizeList(myList):
    """Sort a list of strings in place and return it.

    Raises AssertionError when any element is not a string — callers rely
    on AssertionError, so the check remains an assert.
    """
    for item in myList:
        # IDIOM FIX: isinstance is the correct type check (handles
        # subclasses); comparing type() to str is an anti-pattern.
        assert isinstance(item, str), "Word list must be a list of strings"
    myList.sort()
    return myList

print(OrganizeList(my_new_list))
my_list = [27, 5, 9, 6, 8]

def RemoveValue(myVal):
    """Remove myVal from the shared my_list and return it.

    Raises ValueError when myVal is not present.
    """
    if myVal not in my_list:
        raise ValueError("Value must be in the given list")
    my_list.remove(myVal)
    return my_list

print(RemoveValue(27))
print(RemoveValue(27))
def character_frequency(filename):
    """Count how often each character appears in a file.

    Returns a dict mapping character -> count, or None when the file
    cannot be opened.
    """
    # BUG FIX: the original mixed tabs and spaces, which is a syntax error
    # in Python 3; indentation normalized to spaces.
    try:
        f = open(filename)
    except OSError:
        return None

    characters = {}
    for line in f:
        for char in line:
            # get() supplies 0 the first time a character is seen
            characters[char] = characters.get(char, 0) + 1
    f.close()
    # BUG FIX: the original returned characters() — calling the dict raises
    # TypeError; return the dict itself.
    return characters
        
import re

def rearrange_name(name):
	"""Turn "Last, First" into "First Last"; other formats pass through.

	BUG FIX: when the pattern does not match, re.search returns None and
	indexing it raised TypeError — now the input is returned unchanged.
	"""
	result = re.search(r"^([\w .]*), ([\w .]*)$", name)
	if result is None:
		return name
	return "{} {}".format(result[2], result[1])
                       
#!/usr/bin/env python3
import sys
import os
import re

def error_search(log_file):
  """Prompt for an error description and return matching log lines.

  A line matches when it contains "error" and every word of the
  user-supplied description (case-insensitive regex search).
  """
  error = input("What is the error")
  returned_errors = []

  # PERF FIX: build the pattern list once; the original rebuilt it for
  # every line of the log file.
  error_patterns = ["error"]
  for word in error.split(" "):
    error_patterns.append(r"{}".format(word.lower()))

  with open(log_file, mode="r", encoding="UTF-8") as file:
    for log in file.readlines():
      if all(re.search(error_pattern, log.lower()) for error_pattern in error_patterns):
        returned_errors.append(log)
    # FIX: dropped the explicit close(); the with-statement closes the file.
  return returned_errors

def file_output(returned_errors):
  """Write every collected error line to ~/data/errors_found.log."""
  with open(os.path.expanduser("~") + "/data/errors_found.log", "w") as file:
    for error in returned_errors:
      file.write(error)
    # FIX: dropped the explicit close(); the with-statement closes the file.

if __name__ == "__main__":
  log_file = sys.argv[1] # take the first parameter passed as the path of the log file
  returned_errors = error_search(log_file)
  file_output(returned_errors)
  sys.exit(0) # exits python and gives exit status of 0 here
# Z-score scaling of selected columns, written back into a copy.
# NOTE(review): StandardScaler comes from sklearn.preprocessing and `data`
# must be a DataFrame — neither is visible in this snippet.
scaled_features = data.copy()

col_names = ['Age', 'Weight']
features = scaled_features[col_names]
# Fit on the raw values, then transform them.
scaler = StandardScaler().fit(features.values)
features = scaler.transform(features.values)

scaled_features[col_names] = features
print(scaled_features)
mkdir -p ~/.config
docker run -it --name code-server -p 127.0.0.1:5050:8080 \
  -v "$HOME/.config:/home/coder/.config" \
  -v "$PWD:/home/coder/project" \
  -u "$(id -u):$(id -g)" \
  -e "DOCKER_USER=$USER" \
  codercom/code-server:latest
df_query = df_query.assign(comments='NoComment')
params = {
    'empty_line': ('', {}),
    'get_ok': ('GET 200', {'request': 'GET', 'status': '200'}),
    'get_not_found': ('GET 404', {'request': 'GET', 'status': '404'}),
}

@pytest.mark.parametrize('line,expected', list(params.values()), ids=list(params.keys()))
def test_decode(self, line, expected):
    assert decode(line) == expected
#/////////////////////////////////////////////////////////////////////////////////////////////////////////
"""This is a custom python component, and this is the description. Code something!
    Inputs:
        x: This is the description of the x input.
        y: This is the description of the y input.
    Outputs:
        a: This is the description of the a output.
"""
#---------------------------------------------------------------------------------------------------------
#_________________________________________________________________________________________________________
__author__ = "Mode Lab / Jonathan Cortes-Rodriguez"
__version__ = "0.0.2020.00.00"
__date__ = "2020.MM.DD"
#---------------------------------------------------------------------------------------------------------
#_________________________________________________________________________________________________________
"""
ghenv.Component.Name = "Component Name"
ghenv.Component.NickName = "Component Nickname"
ghenv.Component.Description = "What does this component do?"
ghenv.Component.Message = "A Tag that sits below the component"
ghenv.Component.Category = "What's my main?"
ghenv.Component.SubCategory= "What's my sub?"
"""
#---------------------------------------------------------------------------------------------------------
#_________________________________________________________________________________________________________
import re

print(re.search(r"[Pp]ython", "Python"))
print(re.search(r"[a-z]way", "The end of the highway"))
print(re.search(r"cloud[a-zA-Z0-9]", "cloudy"))

# put ^ before a character class to search for anything but the given character class
print(re.search(r"[^a-zA-Z]", "This is a sentence with spaces."))

# | as OR operator
print(re.search(r"cat|dog", "I like dogs."))
print(re.findall(r"cat|dog", "I like both cats and dogs."))
# read
with open('software.csv') as software:
	# BUG FIX: csv.Dicteader -> csv.DictReader
	reader = csv.DictReader(software)
	for row in reader:
		# BUG FIX: normalized the mixed tab/space indentation (SyntaxError)
		print(("{} has {} users").format(row["name"], row["users"]))

# write
users = [ {"name": "Sol Mansi", "username": "solm", "department": "ITT infrastructure"}]
keys = ["name","username","department"]
with open("by_department.csv", "w") as by_department:
	writer = csv.DictWriter(by_department, fieldnames=keys) # requires dictionaries keys as parameter
	writer.writeheader() # create first line based on keys passed
	writer.writerows(users) # BUG FIX: was misspelled "wirter"
import os

def parent_directory():
  """Return the absolute path of the parent of the current working directory."""
  # Join cwd with '..' and normalize it in a single expression.
  return os.path.abspath(os.path.join(os.getcwd(), '..'))

print(parent_directory())
def send_http_request(body):
	"""Stub handler: ignores its argument and prints a greeting."""
	print('hello')
import pandas as pd

link = "https://id.wikipedia.org/wiki/Demografi_Indonesia"
df = pd.read_html(link, header=0)[2]

df = df.rename(columns={'Populasi[4]':'Populasi', 'Luas (km²)[5]':'Luas'})
df = df[['Kode BPS', 'Nama', 'Ibu kota', 'Populasi', 'Luas', 'Pulau']]

df.to_csv("Indonesia.csv", sep=',')
import pandas as pd

def wiki_to_csv(wikiurl: str):
    """Download every non-empty HTML table at wikiurl and save each as CSV.

    Output files are named "<last URL segment> table <index>".
    """
    # BUG FIX: the original read an undefined global `link`; use the
    # parameter (whose `= str` pseudo-default was a broken type hint).
    tname = wikiurl.split("/")[-1]
    tables = pd.read_html(wikiurl, header=0)

    for i, table in enumerate(tables):
        if not table.empty:
            fname = tname + " table " + str(i)
            table.to_csv(fname, sep=',')
import shutil
import psutil

du = shutil.disk_usage("/")
du.free/du.total*100

psutil.cpu_percent(.1)
# Magic 8-ball: pick one of nine canned answers at random.
import random
name="Diego"
question="Do I like this cookie?"

answer=""
# randint is inclusive on both ends, so the else branch is unreachable.
random_number=random.randint(1, 9) 
print(random_number)

if random_number==1:
  answer= "Yes - definetely."
elif random_number==2:
  answer= "It is decidedly so."
elif random_number==3:
  answer= "Without a doubt."
elif random_number==4:
  answer= "Reply hazy, try again."
elif random_number==5:
  answer= "Ask again later."
elif random_number==6:
  answer= "Better not tell you now."
elif random_number==7:
  answer= "My sources say no."
elif random_number==8:
  answer= "Outlook not so good."
elif random_number==9:
  answer= "Very doubtful."
else:
  answer = "Error"

print(name +"asks:" +question)
print("Magic 8-Ball's answer:"+ answer)
print("I have information for the following planets:\n")

print("   1. Venus   2. Mars    3. Jupiter")
print("   4. Saturn  5. Uranus  6. Neptune\n")

weight = 185
planet = 3

# Relative surface-gravity factor and name per menu number. Any value
# outside 1-5 falls through to Neptune, mirroring the original else branch.
_planet_table = {
  1: (0.91, "Venus"),
  2: (0.38, "Mars"),
  3: (2.34, "Jupiter"),
  4: (1.06, "Saturn"),
  5: (0.92, "Uranus"),
}
_factor, _name = _planet_table.get(planet, (1.19, "Neptune"))
weight = weight * _factor
print(_name)
print("Your weight:", weight)
grade = 86
print("letter grade")

# Threshold table replaces the if/elif ladder; the first matching
# cutoff wins, and the for-else prints F when none match.
for _cutoff, _letter in ((90, "A"), (80, "B"), (70, "C"), (60, "D")):
  if grade >= _cutoff:
    print(_letter)
    break
else:
  print("F")
credits = 120
gpa = 1.9

# Both requirements must hold to graduate.
meets_both = credits >= 120 and gpa >= 2.0
if meets_both:
  print("You meet the requirements to graduate!")
else:
  print("You have met at least one of the requirements.")
credits = 120
gpa = 1.8

# Evaluate each requirement once, then report what is missing.
has_credits = credits >= 120
has_gpa = gpa >= 2.0

if not has_credits:
  print("You do not have enough credits to graduate.")
if not has_gpa:
  print("Your GPA is not high enough to graduate.")

if not has_credits and not has_gpa:
  print("You do not meet either requirement to graduate!")
import pandas as pd

# Read every sheet of the workbook into a {sheet name: DataFrame} dict.
# BUG FIX: the keyword is sheet_name; `sheetname` was removed from pandas.
sheets_dict = pd.read_excel('Book1.xlsx', sheet_name=None)

frames = []
for name, sheet in sheets_dict.items():
    sheet['sheet'] = name  # remember which sheet each row came from
    # keep only the last line of multi-line column headers
    sheet = sheet.rename(columns=lambda x: x.split('\n')[-1])
    frames.append(sheet)

# BUG FIX: DataFrame.append was removed from pandas; concatenate instead
# (guard against an empty workbook, where concat([]) would raise).
full_table = pd.concat(frames) if frames else pd.DataFrame()

full_table.reset_index(inplace=True, drop=True)

# BUG FIX: print is a function in Python 3 ("print full_table" was py2).
print(full_table)
qq= dff[~df.astype(str).apply(tuple, 1).isin(dff.astype(str).apply(tuple, 1))]
def f(in_str):
    """Return a (True, uppercased input) pair."""
    return True, in_str.upper()

succeeded, b = f("a") # Automatic tuple unpacking
#nor_xr is  dataarray (var) name
# Convert a CFTimeIndex into a regular pandas DatetimeIndex so the time
# axis interoperates with pandas/matplotlib.
datetimeindex = nor_xr.indexes['time'].to_datetimeindex()

nor_xr['time'] = datetimeindex
# rios is dataarray (var) name
# NOTE(review): rename() returns a new object; assign the result
# (rio = rio.rename(...)) or this rename is silently lost.
rio.rename({'x': 'longitude','y': 'latitude'})
#Write an expression for a string literal consisting of the following ASCII characters:

#Horizontal Tab character
#Newline (ASCII Linefeed) character
#The character with hexadecimal value 7E

"\t\n\x7E"

#https://www.loginradius.com/blog/async/eol-end-of-line-or-newline-characters/#:~:text=LF%20(character%20%3A%20%5Cn%2C,'%20or%20'Newline%20Character'.
#https://stackoverflow.com/questions/4488570/how-do-i-write-a-tab-in-python
# Which of the following are valid ways to specify the string literal foo'bar in Python:
"foo'bar"



# How would you express the constant floating-point value 3.2 × 10-12 in Python:
3.2e-12

#Examples

0.       // = 0.0
-1.23    // = -1.23
23.45e6  // = 23.45 * 10^6
2e-5     // = 2.0 * 10^-5
3E+10    // = 3.0 * 10^10
.09E34   // = 0.09 * 10^34
2.E100L  // = 2.0 * 10^100

#How would you express the hexadecimal value a5 as a base-16 integer constant in Python?
0xa5
#Explanation: the "x" in the 0x prefix marks base-16 (hexadecimal);
#the leading 0 begins the prefix and a5 is the value itself
#Notice that binary and hexadecimal use prefixes to identify the number system. All integer #prefixes are in the form 0?, in which you replace ? with a character that refers to the number #system:

# b: binary (base 2)
# o: octal (base 8)
# d: decimal (base 10)
# x: hexadecimal (base 16)
import pandas as pd
s = pd.Series(list('abca'))
pd.get_dummies(s)
Out[]: 
     a    b    c
0  1.0  0.0  0.0
1  0.0  1.0  0.0
2  0.0  0.0  1.0
3  1.0  0.0  0.0
credits = 118
gpa = 2.0

if credits>=120 or gpa>=2.0:
  print("You have met at least one of the requirements.")
export PATH="$HOME/.pyenv/bin:$PATH"
export PATH="/usr/local/bin:$PATH"

eval "$(pyenv init -)"
eval "$(pyenv virtualenv-init -)"
export LDFLAGS="-L/usr/local/opt/zlib/lib -L/usr/local/opt/bzip2/lib"
export CPPFLAGS="-I/usr/local/opt/zlib/include -I/usr/local/opt/bzip2/include"
x = 20
y = 20

# Write the first if statement here:
if x==y:
   print("These numbers are the same")
print(type(your_data_variable))
https://www.codecademy.com/courses/learn-python-3/articles/python3-user-input
#1
lovely_loveseat_description="""Lovely Loveseat. Tufted polyester blend on wood. 32 inches high x 40 inches wide x 30 inches deep. Red or white."""
lovely_loveseat_price=254.00
#2
stylish_settee_description="""Stylish Settee. Faux leather on birch. 29.50 inches high x 54.75 inches wide x 28 inches deep. Black."""
stylish_settee_price=180.50
#3
luxurious_lamp_description="""Luxurious Lamp. Glass and iron. 36 inches tall. Brown with cream shade."""
luxurious_lamp_price=52.15
#4
sales_tax=0.088
customer_one_total=0
customer_one_itemization = ""
#5
# BUG FIX: the original did `itemization += itemization + description`,
# which duplicates any existing itemization text; append only the new item.
customer_one_itemization += lovely_loveseat_description

# NOTE(review): tax is computed before the prices are added, so it is
# always 0 here — this matches the documented output of 306.15, but is
# probably not the intended order of operations.
customer_one_tax=customer_one_total*sales_tax
customer_one_total=customer_one_total+lovely_loveseat_price+luxurious_lamp_price+customer_one_tax


print("Customer One Items:")
print(customer_one_itemization)
print("Customer One Total:")
print(customer_one_total)

#output Customer One Items:
#Lovely Loveseat. Tufted polyester blend on wood. 32 inches high x 40 inches wide x 30 inches deep. #Red or white.
#Customer One Total:
#306.15
print("FFFFF  M     M")
print("F      MM   MM")
print("FFF    M  M  M")
print("F      M     M")
print("F      M     M")
print("F      M     M")

#https://content.codecademy.com/courses/learn-cpp/hello-world/block-letters-hint.png
total_cost = 5
total_cost += 10
print(total_cost)

#output 15
# Assign the string here
to_you = """Stranger, if you passing meet me and desire to speak to me, why
  should you not speak to me?
And why should I not speak to you?"""


print(to_you)
string1 = "The wind, "
string2 = "which had hitherto carried us along with amazing rapidity, "
string3 = "sank at sunset to a light breeze; "
string4 = "the soft air just ruffled the water and "
string5 = "caused a pleasant motion among the trees as we approached the shore, "
string6 = "from which it wafted the most delightful scent of flowers and hay."

# Define message below:
# join() assembles the sentence in one pass instead of chained "+".
message = "".join([string1, string2, string3, string4, string5, string6])

print(message)
#You’re trying to divide a group into four teams. All of you count off, and you get number 27.

#Find out your team by computing 27 modulo 4. Save the value to my_team.
#example:
print(29 % 5)
# returns 4: 29 // 5 is 5 with a remainder of 4
# example 
my_team = 27 % 4
print(my_team)
#output is 3

# Show the team for every person; range(1, 29) replaces the manual
# while-loop counter and leaves person == 28 afterwards, as before.
for person in range(1, 29):
  print("Person ", str(person), "= Team ", str(person % 4))