Snippets Collections
def transpose(matrix):
    """Return the transpose of *matrix* (a list of equal-length row lists).

    The original looped ``for i in range(row)`` / ``for j in range(col)``
    while indexing ``matrix[j][i]``, which raises IndexError (or silently
    truncates) whenever row != col.  This version handles any rectangular
    matrix, including the empty one.
    """
    if not matrix:
        return []
    return [[matrix[r][c] for r in range(len(matrix))]
            for c in range(len(matrix[0]))]


if __name__ == "__main__":
    row, col = map(int, input().split())
    matrix = [list(map(int, input().split())) for _ in range(row)]
    trans = transpose(matrix)
    for line in trans:
        for value in line:
            print(value, end=" ")  # same trailing-space format as the original
        print()
    print(trans)

    
import math


def round_half_up(x):
    """Round *x* to the nearest integer, with .5 rounding up.

    The original ``int(x - 0.5) + 1`` truncates toward zero, which is wrong
    for negatives (-2.3 -> -1) and for 0 < x < 0.5 (0.2 -> 1);
    ``math.floor(x + 0.5)`` matches it on the positive cases and is correct
    everywhere else.
    """
    return math.floor(x + 0.5)


if __name__ == "__main__":
    x = float(input())
    print(round_half_up(x))
def is_anagram(a, b):
    """Return True when *a* and *b* contain exactly the same characters.

    Case-sensitive.  Comparing sorted character lists already covers the
    length check the original performed separately, and this fixes the
    inconsistent "Not anagram"/"not anagram" messages.
    """
    return sorted(a) == sorted(b)


if __name__ == "__main__":
    x = input()
    y = input()
    print("anagram" if is_anagram(x, y) else "not anagram")
from collections import Counter


def most_frequent_char(s):
    """Return ``(char, count)`` for the most frequent character in *s*.

    The original had broken tab/space indentation (IndentationError), left
    ``s`` unbound when no count exceeded 1, and printed inside the loop.
    Raises IndexError for an empty string.  Among equal counts,
    ``Counter.most_common`` keeps first-insertion order (CPython 3.7+).
    """
    return Counter(s).most_common(1)[0]


if __name__ == "__main__":
    text = input()
    ch, cnt = most_frequent_char(text)
    print(ch, cnt)
# --- Alternative recipes for concatenating DataFrame columns into one ---
# These lines are illustrative alternatives (they would overwrite each other
# if run in sequence); pick the one that fits your dtypes and NaN handling.

# best way: join many already-string columns in a single pass
data['resume'] = data[['Resume_title', 'City', 'State', 'Description', 'work_experiences', 'Educations', 'Skills', 'Certificates', 'Additional Information']].agg(' '.join, axis=1)


# other way: plain + concatenation (operands must already be strings)
df["period"] = df["Year"] + df["quarter"]
df['Period'] = df['Year'] + ' ' + df['Quarter']
# NOTE(review): only Year is cast below — cast quarter too if it is not string
df["period"] = df["Year"].astype(str) + df["quarter"] #If one (or both) of the columns are not string typed
#Beware of NaNs when doing this!
df['period'] = df[['Year', 'quarter', ...]].agg('-'.join, axis=1) #for multiple string columns
df['period'] = df[['Year', 'quarter']].apply(lambda x: ''.join(x), axis=1)
# method cat() of the .str accessor (sep= inserts a separator between values)
df['Period'] = df.Year.str.cat(df.Quarter)
df['Period'] = df.Year.astype(str).str.cat(df.Quarter.astype(str), sep='q')
df['AllTogether'] = df['Country'].str.cat(df[['State', 'City']], sep=' - ') #add parameter na_rep to replace the NaN values with a string if have nan
# string-sum across an arbitrary column list (no separator)
columns = ['whatever', 'columns', 'you', 'choose']
df['period'] = df[columns].astype(str).sum(axis=1)

# a function: join several DataFrame columns into one string Series
from functools import reduce


def str_join(df, sep, *cols):
    """Return a Series of ``df[col]`` values joined with *sep* across *cols*.

    Each column is cast to str first, so numeric columns are safe.
    (Cleaned up from an IPython session paste: the ``...:`` / ``In [4]:``
    prompt residue made the original un-runnable.)
    """
    return reduce(lambda x, y: x.astype(str).str.cat(y.astype(str), sep=sep),
                  [df[col] for col in cols])


# usage: df['cat'] = str_join(df, '-', 'c0', 'c1', 'c2', 'c3')
# Strip punctuation (anything that is not a word character or whitespace)
# from every column of df_drop.
for c in df_drop.columns:
    # raw string avoids invalid-escape warnings for \w and \s;
    # regex=True is explicit because pandas >= 2.0 treats str.replace
    # patterns as literal strings by default
    df_drop[c] = df_drop[c].str.replace(r'[^\w\s]+', '', regex=True)
df_drop = df_drop.astype(str)
df_drop.head()
# label-based slice of columns c1..c4 (inclusive on both ends with .loc)
rmsval = df.loc[:, 'c1':'c4']
def getrms(row):
    """Return the root-mean-square of the values in *row*.

    Generalized from the original hard-coded ``sum(row**2/4)``: dividing by
    ``len(row)`` gives the same result for the 4-column c1:c4 slice this is
    applied to, but also works for any other number of columns.
    """
    return np.sqrt(sum(row ** 2) / len(row))
# row-wise apply (axis=1): one RMS value per DataFrame row, as a new column
df['rms'] = df.apply(getrms,axis=1)
df.head()
import pandas as pd

# Build a small product/price table and export it to CSV.
products = ['Desktop Computer', 'Tablet', 'Printer', 'Laptop']
prices = [850, 200, 150, 1300]
data = {'Product': products,
        'Price': prices
        }

df = pd.DataFrame(data, columns=['Product', 'Price'])

# NOTE: placeholder path from the snippet — substitute a real file path.
df.to_csv(r'Path where you want to store the exported CSV file\File Name.csv')

print(df)
import re

text = 'this is a text'

try:
    # non-greedy capture of everything between the first 'is' and the
    # following 'text' — note the first 'is' is inside the word 'this'
    found = re.search('is(.+?)text', text).group(1)
except AttributeError:
    # re.search returned None (pattern not found), so .group raised
    found = '0 wtitle'  # apply your error handling
# the original ended with a bare `found` (REPL residue, a no-op in a script)
print(found)

# => ' is a '  (group(1) spans from the 'is' inside "this" to the word 'text')
import pandas as pd, re

# Comma-separated incident categories where commas also appear INSIDE
# parentheses; the goal is to split only on the top-level commas.
junk = """Shot - Wounded/Injured, Shot - Dead (murder, accidental, suicide), Suicide - Attempt, Murder/Suicide, Attempted Murder/Suicide (one variable unsuccessful), Institution/Group/Business, Mass Murder (4+ deceased victims excluding the subject/suspect/perpetrator , one location), Mass Shooting (4+ victims injured or killed excluding the subject/suspect"""

# Alternation: a fully parenthesized chunk (no capture group) OR a comma
# followed by captured whitespace.  re.split() removes the matched text and,
# because the pattern has a group, also emits the captured pieces — None
# when the parenthesized branch matched, whitespace for the comma branch.
rx = re.compile(r'\([^()]+\)|,(\s+)')

# Keep only truthy fragments (drops the Nones and empty strings), strip
# them, and drop anything that stripped down to nothing.
data = [x 
        for nugget in rx.split(junk) if nugget
        for x in [nugget.strip()] if x]

df = pd.DataFrame({'incident_characteristics': data})
print(df)
import pyparsing as pp  # third-party dependency, not stdlib

# token for runs of letters/digits (defined here but unused below)
word = pp.Word(pp.alphanums)

s = 'gfgfdAAA1234ZZZuijjk'
# nestedExpr with custom open/close markers: matches 'AAA ... ZZZ' regions
rule = pp.nestedExpr('AAA', 'ZZZ')
for match in rule.searchString(s):
    print(match)
# picking up piece of string between separators
# like calling str.partition twice, but the separators are dropped
def between(left, right, s):
    """Split *s* around the first ``left``...``right`` pair.

    Returns ``(text_before_left, text_between, text_after_right)``.
    A missing separator yields empty trailing pieces, exactly as
    ``str.partition`` does.
    """
    prefix, _, remainder = s.partition(left)
    inner, _, suffix = remainder.partition(right)
    return prefix, inner, suffix

s = "bla bla blaa <a>data</a> lsdjfasdjöf (important notice) 'Daniweb forum' tcha tcha tchaa"
# Python 3 print calls (the original used Python 2 print statements)
print(between('<a>', '</a>', s))
print(between('(', ')', s))
print(between("'", "'", s))

""" Output (Python 3; the original showed Python 2 byte-escape reprs):
('bla bla blaa ', 'data', " lsdjfasdjöf (important notice) 'Daniweb forum' tcha tcha tchaa")
('bla bla blaa <a>data</a> lsdjfasdjöf ', 'important notice', " 'Daniweb forum' tcha tcha tchaa")
('bla bla blaa <a>data</a> lsdjfasdjöf (important notice) ', 'Daniweb forum', ' tcha tcha tchaa')
"""
# TMDB "discover" pagination: fetch every result page of popular dramas
# released since 2004.  Requires `requests` to be imported elsewhere.
# NOTE(review): API key is embedded in the URL — move it to config/env.
discover_api_url = 'https://api.themoviedb.org/3/discover/movie?api_key=2bca7835c548e3242e8ccc0aa44a0513&language=en-US&sort_by=popularity.desc&include_adult=false&include_video=false&primary_release_year=>%3D2004&with_genres=18' 
# the original body was indented as if inside a missing def — dedented here
discover_api = requests.get(discover_api_url).json()
most_popular_films = discover_api["results"]
for page in range(2, discover_api["total_pages"] + 1):
    discover_api = requests.get(discover_api_url + f"&page={page}").json()
    most_popular_films.extend(discover_api["results"])
# build the enumerated view once, after all pages are collected
# (the original rebuilt it inside the loop on every iteration)
response = enumerate(most_popular_films)
star

Sat Sep 04 2021 09:25:33 GMT+0000 (UTC)

#py
star

Sat Aug 21 2021 04:52:00 GMT+0000 (UTC)

#py
star

Sat Aug 21 2021 04:45:16 GMT+0000 (UTC) https://www.programiz.com/python-programming/examples/anagram

#py
star

Sun Aug 15 2021 16:38:54 GMT+0000 (UTC) https://stackoverflow.com/questions/4131123/finding-the-most-frequent-character-in-a-string

#py
star

Thu Jul 01 2021 03:03:11 GMT+0000 (UTC) https://stackoverflow.com/questions/50951955/pytesseract-tesseractnotfound-error-tesseract-is-not-installed-or-its-not-i

#py #ocr #pytesseract #install
star

Tue Jun 29 2021 16:32:19 GMT+0000 (UTC)

#py #dataframe #pandas
star

Tue Jun 29 2021 16:21:45 GMT+0000 (UTC)

#py #dataframe #pandas
star

Mon Jun 28 2021 17:29:44 GMT+0000 (UTC)

#py #dataframe #pandas
star

Mon Jun 28 2021 17:26:17 GMT+0000 (UTC) https://stackoverflow.com/questions/4666973/how-to-extract-the-substring-between-two-markers?noredirect=1&lq=1

#textpreprocessing #nlp #py
star

Mon Jun 28 2021 17:17:19 GMT+0000 (UTC) http://www.daniweb.com/code/snippet289548.html

#textpreprocessing #nlp #py
star

Sat Nov 07 2020 07:36:33 GMT+0000 (UTC)

#dj #py

Save snippets that work with our extensions

Available in the Chrome Web Store Get Firefox Add-on Get VS Code extension