Snippets Collections
git init

git add -A

git commit -m 'Added my project'

git remote add origin git@github.com:sammy/my-new-project.git

git push -u -f origin main  # -u sets the upstream; -f force-overwrites any existing history on origin/main
find . -type f -name '*.txt' | xargs grep 'command'

# The xargs command, when combined with a command like find, uses the output of the first command as arguments to the second.
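
# If filenames may contain spaces, a null-delimited variant is safer (a sketch
# assuming GNU/BSD find and xargs, which support -print0 / -0):

find . -type f -name '*.txt' -print0 | xargs -0 grep 'command'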
#!/usr/bin/env bash

set -o errexit
set -o pipefail

# Function to output details of script.
script_info() {
    cat <<EOF
                                                    
Name:           autobrew.sh
Description:    Automate the installation of macOS 
                applications and packages using homebrew
Author:         Mark Bradley
Requirements:   Command Line Tools (CLT) for Xcode

EOF
}

# Function to set terminal colors if supported.
term_colors() {
    if [[ -t 1 ]]; then
        RED=$(printf '\033[31m')
        GREEN=$(printf '\033[32m')
        YELLOW=$(printf '\033[33m')
        BLUE=$(printf '\033[34m')
        MAGENTA=$(printf '\033[35m')
        CYAN=$(printf '\033[36m')
        BOLD=$(printf '\033[1m')
        RESET=$(printf '\033[0m')
    else
        RED=""
        GREEN=""
        YELLOW=""
        BLUE=""
        MAGENTA=""
        CYAN=""
        BOLD=""
        RESET=""
    fi
}

# Function to output colored or bold terminal messages.
# Usage examples: term_message "This is a default color and style message"
#                 term_message nb "This is a default color bold message"
#                 term_message rb "This is a red bold message"
term_message() {
    local set_color=""
    local set_style=""
    [[ -z "${2}" ]] && echo -ne "${1}" >&2 && return
    [[ ${1:0:1} == "d" ]] && set_color=${RESET}
    [[ ${1:0:1} == "r" ]] && set_color=${RED}
    [[ ${1:0:1} == "g" ]] && set_color=${GREEN}
    [[ ${1:0:1} == "y" ]] && set_color=${YELLOW}
    [[ ${1:0:1} == "b" ]] && set_color=${BLUE}
    [[ ${1:0:1} == "m" ]] && set_color=${MAGENTA}
    [[ ${1:0:1} == "c" ]] && set_color=${CYAN}
    [[ ${1:1:1} == "b" ]] && set_style=${BOLD}
    echo -e "${set_color}${set_style}${2}${RESET}" >&2 && return
}

# Displays a box containing a dash and message
task_start() {
    echo -ne "[-] ${1}"
}

# Displays a box containing a green tick and optional message if required.
task_done() {
    echo -e "\r[\033[0;32m\xE2\x9C\x94\033[0m] ${1}"
}

# Displays a box containing a red cross and optional message if required.
task_fail() {
    echo -e "\r[\033[0;31m\xe2\x9c\x98\033[0m] ${1}"
}

# Function to pause script and check if the user wishes to continue.
check_continue() {
    local response
    while true; do
        read -r -p "Do you wish to continue (y/N)? " response
        case "${response}" in
        [yY][eE][sS] | [yY])
            echo
            break
            ;;
        *)
            echo
            exit
            ;;
        esac
    done
}

# Function to check if a command exists.
command_exists() {
    command -v "${@}" >/dev/null 2>&1
}

install_homebrew() {
    term_message cb "\nInstalling Homebrew..."
    task_start "Checking for Homebrew..."
    if command_exists "brew"; then
        task_done "Homebrew is installed.$(tput el)"
        task_start "Running brew update..."
        if brew update >/dev/null 2>&1; then
            task_done "Brew update completed.$(tput el)"
        else
            task_fail "Brew update failed.$(tput el)"
        fi
        task_start "Running brew upgrade..."
        if brew upgrade >/dev/null 2>&1; then
            task_done "Brew upgrade completed.$(tput el)"
        else
            task_fail "Brew upgrade failed.$(tput el)"
        fi
    else
        task_fail "\n"
        term_message mb "Attempting to install Homebrew..."
        if /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"; then
            task_done "Homebrew installed.\n"
        else
            task_fail "Homebrew install failed.\n"
            exit 1
        fi
    fi
}

brew_packages() {
    if [[ ! -z "$tap_list" ]]; then
        term_message cb "\nAdding additional Homebrew taps..."
        for tap in ${tap_list}; do
            task_start "Checking for tap > ${tap}"
            if brew tap | grep "${tap}" >/dev/null 2>&1 || command_exists "${tap}"; then
                task_done "Tap ${tap} already added.$(tput el)"
            else
                task_fail "\n"
                term_message mb "Attempting to add tap ${tap}..."
                if brew tap "${tap}"; then
                    task_done "Tap ${tap} added.\n"
                else
                    task_fail "Unable to add tap ${tap}.\n"
                fi
            fi
        done
    fi
    if [[ ! -z "$term_list" ]]; then
        term_message cb "\nInstalling brew terminal packages..."
        for pkg in ${term_list}; do
            task_start "Checking for package > ${pkg}"
            if brew list "${pkg}" >/dev/null 2>&1 || command_exists "${pkg}"; then
                task_done "Package ${pkg} already installed.$(tput el)"
            else
                task_fail "\n"
                term_message mb "Attempting to install ${pkg}..."
                if brew install "${pkg}"; then
                    task_done "Package ${pkg} installed.\n"
                else
                    task_fail "Package ${pkg} install failed.\n"
                fi
            fi
        done
    fi
    if [[ ! -z "$cask_list" ]]; then
        term_message cb "\nInstalling brew cask packages..."
        for cask in ${cask_list}; do
            task_start "Checking for cask package > ${cask}"
            if brew list --cask "${cask}" >/dev/null 2>&1; then
                task_done "Package ${cask} already installed.$(tput el)"
            else
                task_fail "\n"
                term_message mb "Attempting to install ${cask}..."
                if brew install --cask "${cask}"; then
                    task_done "Package ${cask} installed.\n"
                else
                    task_fail "Package ${cask} install failed.\n"
                fi
            fi
        done
    fi
}

brew_cleanup() {
    task_start "Running brew cleanup..."
    if brew cleanup >/dev/null 2>&1; then
        task_done "Brew cleanup completed.$(tput el)"
    else
        task_fail "Brew cleanup failed.$(tput el)"
    fi
}

# One function to rule them all.
main() {
    # Customise the following list variables (tap_list, term_list and cask_list).
    # Leave a list blank or comment it out if not required.
    tap_list="qlik-oss/taps"
    term_list="cask git wget mambaforge"
    cask_list="the-unarchiver visual-studio-code google-chrome \
    font-fira-code 1password typora alfred \
    hazel onedrive upic marginnote itau kindle whatsapp zoom \
    noun-project appcleaner"

    clear
    term_colors
    script_info
    check_continue
    install_homebrew
    brew_packages
    brew_cleanup
    term_message gb "\nScript completed."
}

main "${@}"
1. Reboot to Recovery Mode by holding `command-R` during restart

2. Open Utilities → Terminal and type
```
$ csrutil disable
$ reboot
```

3. After rebooting in normal mode, open Terminal, and type
```
$ cd "/etc"
$ echo "0.0.0.0 iprofiles.apple.com" >> hosts
$ echo "0.0.0.0 mdmenrollment.apple.com" >> hosts
$ echo "0.0.0.0 deviceenrollment.apple.com" >> hosts
$ echo "0.0.0.0 gdmf.apple.com" >> hosts
```

4. Reboot to Recovery Mode by holding `command-R` during restart and type
```
$ csrutil enable
$ reboot
```

5. After rebooting in normal mode, open Terminal and type the code below to verify the DEP status
```
$ profiles status -type enrollment
Enrolled via DEP: No
MDM enrollment: No
```
docker-compose down # Stop and remove the containers defined by the docker-compose.yml in the current dir
docker rm -fv $(docker ps -aq) # Remove all containers
sudo lsof -i -P -n | grep <port number> # List who's using the port
# sudo kill -9 <process id> (macOS)
# sudo kill <process id> (Linux)
split
split -v
focus down
split -v

screen -t bash /bin/bash
screen -t deploy1 /usr/bin/ssh deploy1
screen -t deploy2 /usr/bin/ssh deploy2
screen -t deploy3 /usr/bin/ssh deploy3
screen -t deploy4 /usr/bin/ssh deploy4

focus up
focus left
select 1
focus right
select 2
focus left
focus down
select 3
focus right
select 4
ls -R | grep ":$" | sed -e 's/:$//' -e 's/[^-][^\/]*\//--/g' -e 's/^/   /' -e 's/-/|/'

# Output will be
# |---folder
# |------file_1
# ...
$ uglifyjs file1.js file2.js ... --compress --mangle --output out.min.js
#sudo apt install poppler-utils

curl -s "<url of pdf file>" | pdftotext -layout - -

sudo adduser brsmt
sudo usermod -aG sudo brsmt
From the server console:

$> nano /etc/pve/lxc/{machine id, ex:100}.conf

add: 

lxc.cgroup2.devices.allow: c 10:200 rwm
lxc.mount.entry: /dev/net dev/net none bind,create=dir

$> chown 100000:100000 /dev/net/tun
$> chmod 666 /dev/net/tun

$> ls -l /dev/net/tun

Restart the machine
#Backup

gbak -b -v -user SYSDBA -password "masterkey" D:\database.FDB E:\database.fbk

#Restore

gbak -c -user SYSDBA -password masterkey E:\database.fbk E:\database_restore.fdb
#Copy the image

$ docker pull doctorkirk/oracle-19c

#Create local directory

$ mkdir -p /your/custom/path/oracle-19c/oradata
$ cd /your/custom/path/
$ sudo chown -R 54321:54321 oracle-19c/

#Run the Container

docker run --name oracle-19c \
  -p 1521:1521 \
  -e ORACLE_SID=[ORACLE_SID] \
  -e ORACLE_PWD=[ORACLE_PASSWORD] \
  -e ORACLE_CHARACTERSET=[CHARSET] \
  -v /your/custom/path/oracle-19c/oradata/:/opt/oracle/oradata \
doctorkirk/oracle-19c

#Charset: WE8MSWIN1252 (*default), AL16UTF8, US7ASCII
#* If omitted in docker run, the default characterset for this build will be WE8MSWIN1252.
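
# Sanity check (a sketch; replace the [placeholders] with the values used in docker run):

docker exec -it oracle-19c sqlplus system/[ORACLE_PASSWORD]@//localhost:1521/[ORACLE_SID]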
You can determine the version of the primary MDF file of a database by looking at the two bytes at offset 0x12064

SQL Server Version                              Internal DB Version   DB Compat Level   Supported DB Compatibility Levels
SQL Server 2022                                 ?                     160               ?
SQL Server 2019 CTP 3.2 / RC 1 / RC 1.1 / RTM   904                   150               150,140,130,120,110,100
SQL Server 2019 CTP 3.0 / 3.1                   902                   150               150,140,130,120,110,100
SQL Server 2019 CTP 2.3 / 2.4 / 2.5             897                   150               150,140,130,120,110,100
SQL Server 2019 CTP 2.1 / 2.2                   896                   150               150,140,130,120,110,100
SQL Server 2019 CTP 2.0                         895                   150               150,140,130,120,110,100
SQL Server 2017                                 868 / 869             140               140,130,120,110,100
SQL Server 2016                                 852                   130               130,120,110,100
SQL Server 2014                                 782                   120               120,110,100
SQL Server 2012                                 706                   110               110,100,90
SQL Server 2012 CTP1 (a.k.a. SQL Server 2011 Denali)   684            110               110,100,90
SQL Server 2008 R2                              660 / 661             100               100,90,80
SQL Server 2008                                 655                   100               100,90,80
SQL Server 2005 SP2+ with VarDecimal enabled    612                   90                90,80,70
SQL Server 2005                                 611                   90                90,80,70
SQL Server 2000                                 539                   80                80,70
SQL Server 7.0                                  515                   70                70
SQL Server 6.5                                  408                   65                65
SQL Server 6.0                                  406                   60                60
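
# A quick way to read those two bytes from a shell (a sketch, assuming xxd is installed;
# the value is little-endian, e.g. bytes "84 03" mean 0x0384 = 900):

xxd -s 0x12064 -l 2 database.mdf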
# Enable:

xdg-screensaver activate

# Disable:

export DISPLAY=:0.0; xdotool key 27
docker run -v /home/marco:/backup --rm svarcoe/mssql-scripter mssql-scripter -S 172.18.0.3 -d CMUCE -U sa -P CMuce1970@ --schema-and-data -f /backup/mssql-scripter-CMUCE.sql

sqlcmd -S localhost -U SA -Q "BACKUP LOG [demodb] TO DISK = N'/var/opt/mssql/data/demodb_LogBackup.bak' WITH NOFORMAT, NOINIT, NAME = N'demodb_LogBackup', NOSKIP, NOREWIND, NOUNLOAD, STATS = 5"

sqlcmd -S localhost -U SA -Q "RESTORE DATABASE [demodb] FROM DISK = N'/var/opt/mssql/data/demodb.bak' WITH FILE = 1, NOUNLOAD, REPLACE, NORECOVERY, STATS = 5"
curl 'http://router.project-osrm.org/table/v1/driving/13.388860,52.517037;13.397634,52.529407;13.428555,52.523219?annotations=distance,duration'

Response:

{
	"code": "Ok",
	"distances": [
		[0, 1887.3, 3802.9],
		[1903.1, 0, 2845.8],
		[3280.4, 2292.8, 0]
	],
	"durations": [
		[0, 251.5, 384.4],
		[258.1, 0, 363.5],
		[354.7, 301.1, 0]
	],
	"sources": [{
		"hint": "N85xha7OcYUYAAAABQAAAAAAAAAgAAAASjFaQdLNK0AAAAAAsPePQQwAAAADAAAAAAAAABAAAAA_6wAA_kvMAKlYIQM8TMwArVghAwAA7wrV7s3X",
		"distance": 4.231666,
		"location": [13.388798, 52.517033],
		"name": "Friedrichstraße"
	}, {
		"hint": "npYWgHzyeYUGAAAACgAAAAAAAAB2AAAAW7-PQOKcyEAAAAAApq6DQgYAAAAKAAAAAAAAAHYAAAA_6wAAf27MABiJIQOCbswA_4ghAwAAXwXV7s3X",
		"distance": 2.789393,
		"location": [13.397631, 52.529432],
		"name": "Torstraße"
	}, {
		"hint": "oZYWgP___38fAAAAUQAAACYAAAAeAAAAsowKQkpQX0Lx6yZCvsQGQh8AAABRAAAAJgAAAB4AAAA_6wAASufMAOdwIQNL58wA03AhAwMAvxDV7s3X",
		"distance": 2.226595,
		"location": [13.428554, 52.523239],
		"name": "Platz der Vereinten Nationen"
	}],
	"destinations": [{
		"hint": "N85xha7OcYUYAAAABQAAAAAAAAAgAAAASjFaQdLNK0AAAAAAsPePQQwAAAADAAAAAAAAABAAAAA_6wAA_kvMAKlYIQM8TMwArVghAwAA7wrV7s3X",
		"distance": 4.231666,
		"location": [13.388798, 52.517033],
		"name": "Friedrichstraße"
	}, {
		"hint": "npYWgHzyeYUGAAAACgAAAAAAAAB2AAAAW7-PQOKcyEAAAAAApq6DQgYAAAAKAAAAAAAAAHYAAAA_6wAAf27MABiJIQOCbswA_4ghAwAAXwXV7s3X",
		"distance": 2.789393,
		"location": [13.397631, 52.529432],
		"name": "Torstraße"
	}, {
		"hint": "oZYWgP___38fAAAAUQAAACYAAAAeAAAAsowKQkpQX0Lx6yZCvsQGQh8AAABRAAAAJgAAAB4AAAA_6wAASufMAOdwIQNL58wA03AhAwMAvxDV7s3X",
		"distance": 2.226595,
		"location": [13.428554, 52.523239],
		"name": "Platz der Vereinten Nationen"
	}]
}
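
# OSRM reports distances in meters and durations in seconds; a sketch pulling one
# cell out of the matrix (assuming jq is installed):

curl -s 'http://router.project-osrm.org/table/v1/driving/13.388860,52.517037;13.397634,52.529407;13.428555,52.523219?annotations=distance,duration' | jq '.durations[0][1]'
# -> 251.5 (seconds from the 1st to the 2nd coordinate)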
docker run -d -e ACCEPT_EULA=Y -e "SA_PASSWORD=P@ssW0rd" -p 1433:1433 \
  --restart unless-stopped \
  -v /var/opt/mssql/data:/var/opt/mssql/data \
  -v /tmp/:/backups/ \
  --name sqlserver \
  mcr.microsoft.com/mssql/server

#backup:

# /opt/mssql-tools/bin/sqlcmd -S localhost -U SA -P P@ssW0rd -Q "BACKUP DATABASE [dbname] TO DISK = N'/tmp/dbname-full.bak' WITH NOFORMAT, NOINIT, NAME = 'dbname-bak-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"

# /opt/mssql-tools/bin/sqlcmd -S localhost -U SA -P P@ssW0rd -Q "BACKUP LOG [dbname] TO DISK = N'/tmp/dbname-log.bak' WITH NOFORMAT, NOINIT, NAME = N'dbname-bak-log', NOSKIP, NOREWIND, NOUNLOAD, STATS = 5"

#restore:

# /opt/mssql-tools/bin/sqlcmd -S localhost -U SA -P P@ssW0rd -Q "RESTORE DATABASE [dbname] FROM DISK = N'/tmp/dbname-full.bak' WITH FILE = 1, NOUNLOAD, REPLACE, NORECOVERY, STATS = 5"

# /opt/mssql-tools/bin/sqlcmd -S localhost -U SA -P P@ssW0rd -Q "RESTORE LOG [dbname] FROM DISK = N'/var/opt/mssql/data/dbname-log.bak'"


#create login myuser with password ='strongPass';
#create user myuser for login myuser;
#ALTER LOGIN [myuser] enable;
#Increase timeout and max_children:

/etc/php/7.0/fpm/php.ini  =>   default_socket_timeout = 60000
/etc/php/7.0/fpm/pool.d/www.conf  =>   pm.max_children = 20
/etc/php/7.0/fpm/pool.d/www.conf  =>   request_terminate_timeout = 60000

#Increment timeout on /etc/nginx/nginx.conf:
keepalive_timeout 65000;

#Afterwards, restart php-fpm and nginx:

sudo service php7.0-fpm restart
sudo service nginx restart
export ORACLE_SID=$1
export NLS_LANG=AMERICAN_AMERICA.WE8ISO8859P9
export USUARIO=system/org24h
export PATHBACKUP=/respaldo/o24/export
export FILENAME=CMLGDB`date +%d%m%Y%H%M`.DMP
export FILENAMELOG=CMLGDB`date +%d%m%Y%H%M`.log
echo  $PATHBACKUP

rm $PATHBACKUP/*.* -rf

if [ -d $PATHBACKUP ] ; then
	expdp $USUARIO FULL=yes DUMPFILE=dpump_dir1:$FILENAME LOGFILE=dpump_dir1:$FILENAMELOG
	#exp $USUARIO file=$PATHBACKUP/$FILENAME full=yes compress=yes indexes=no consistent=yes log=$PATHBACKUP/$FILENAMELOG
else
	echo "ERROR: Export no encontro el directorio de Respaldo"
	exit 1
fi
docker run -d --restart=always \
        --name oracle \
        --privileged  \
        -e ORACLE_SID=<custom sid> \
        -v /srv/oradata:/u01/app/oracle \
        -p 8080:8080 -p 1521:1521 \
 absolutapps/oracle-12c-ee
#use oracle user from system:

sqlplus "/ as sysdba"

SQL> ALTER USER SYS IDENTIFIED BY [password]; 
SQL> ALTER USER SYSTEM IDENTIFIED BY [password];
docker run -e 'ACCEPT_EULA=Y' \
    -e 'MSSQL_SA_PASSWORD=<YourStrong!Passw0rd>' \
    -p 1433:1433 -v <host directory>/data:/var/opt/mssql/data \
    -v <host directory>/log:/var/opt/mssql/log \
    -v <host directory>/secrets:/var/opt/mssql/secrets \
    -d mcr.microsoft.com/mssql/server:2019-latest
sudo mkdir -p /your/custom/path/oracle-19c/oradata/
sudo chmod -R 777 /your/custom/path/

docker run -d --name oracle19db \
  -p 1521:1521 \
  -e ORACLE_SID=ORCL \
  -e ORACLE_PDB=ORCLDB \
  -e ORACLE_PWD=Oracle123 \
  -e ORACLE_CHARSET=AL32UTF8 \
  -v /your/custom/path/oracle-19c/oradata:/opt/oracle/oradata \
  banglamon/oracle193db:19.3.0-ee

# Charset Value: WE8MSWIN1252, AL16UTF8

# ALTER SESSION SET NLS_DATE_FORMAT = 'RRRR-MM-DD';
# ALTER SESSION SET NLS_TIME_FORMAT = 'HH24:MI:SS';
# ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'RRRR-MM-DD HH24:MI:SS';
# ALTER SESSION SET NLS_TIME_TZ_FORMAT = 'HH24:MI:SS TZR';
# ALTER SESSION SET NLS_TIMESTAMP_TZ_FORMAT = 'RRRR-MM-DD HH24:MI:SS TZR';

# docker exec -it oracle19db bash -c "source /home/oracle/.bashrc; sqlplus /nolog"
# connect sys as sysdba;

# alter session set "_ORACLE_SCRIPT"=true;
# create user sistemas identified by las36horas;
# GRANT CONNECT, RESOURCE, DBA TO sistemas;
# GRANT UNLIMITED TABLESPACE TO sistemas;
$> docker pull haskell
$> docker run -it haskell stack <parameters>


$> git clone https://github.com/jean-lopes/dfm-to-json.git

$> cd dfm-to-json

$> stack setup
$> stack install
$> dfm-to-json --version
# go to path where .git is

# new branch:

$> git checkout -b "<name_of_new_branch>"

# change branch:

$> git checkout "<name_of_branch>"



$> git add <folder1> ... <foldern>

$> git commit -m "<comment>"
    
#example: <branch> = main:

$> git push origin <branch>

#---------------------------------------------------------

# download last changes from branch:

$> git pull origin <branch>
#http://cdrtools.sourceforge.net/private/cdrecord.html

#create iso file:

$> mkisofs -J -r -o output.iso dir_with_files/
#backup

gbak -t -v -user <username> -password "<password>" <host>:/path/to/db.fdb path/to/file.gbk

#restore

gbak -c -v -user <username> -password "<password>" path/to/file.gbk <host>:/path/to/db.fdb
:> docker run -it --name fb --rm -v ~/tmp:/tmp almeida/firebird gbak -b -v 192.168.1.251:c:/host/path/database.fdb /tmp/backup.bak -user sysdba -pass XXXXX
location /worklist/ {
    try_files $uri $uri/ /worklist/index.php$is_args$args;
    fastcgi_split_path_info ^(.+\.php)(/.+)$;
    fastcgi_pass unix:/var/run/php/php7.2-fpm.sock;
    fastcgi_index worklist/index.php;
}

#
# from PHP you can use an index.php and read the requested path from $_SERVER["REQUEST_URI"]
#
gsec -user sysdba -pass masterkey -add billyboy -pw sekrit66 -admin yes
#--> first identify your USB disk:

diskutil list

# --> Example OUTPUT
: '
/dev/disk0 (internal, physical):
   #:                       TYPE NAME                    SIZE       IDENTIFIER
   0:      GUID_partition_scheme                        *1.0 TB     disk0
   1:                        EFI EFI                     209.7 MB   disk0s1
   2:                 Apple_APFS Container disk1         1.0 TB     disk0s2

/dev/disk1 (synthesized):
   #:                       TYPE NAME                    SIZE       IDENTIFIER
   0:      APFS Container Scheme -                      +1.0 TB     disk1
                                 Physical Store disk0s2
   1:                APFS Volume Macintosh HD - Datos    907.8 GB   disk1s1
   2:                APFS Volume Preboot                 81.5 MB    disk1s2
   3:                APFS Volume Recovery                526.6 MB   disk1s3
   4:                APFS Volume VM                      2.1 GB     disk1s4
   5:                APFS Volume Macintosh HD            11.0 GB    disk1s5

/dev/disk2 (external, physical):
   #:                       TYPE NAME                    SIZE       IDENTIFIER
   0:     Apple_partition_scheme                        *248.7 GB   disk2
   1:        Apple_partition_map                         4.1 KB     disk2s1
   2:                  Apple_HFS                         4.1 MB     disk2s2
'

#--> in this example USB stick is disk2 (external, physical):

# --> let's blank complete pendrive:

sudo dd if=/dev/zero of=/dev/disk2 count=1 bs=4096

# --> let's check again:

diskutil list

: '
/dev/disk0 (internal, physical):
   #:                       TYPE NAME                    SIZE       IDENTIFIER
   0:      GUID_partition_scheme                        *1.0 TB     disk0
   1:                        EFI EFI                     209.7 MB   disk0s1
   2:                 Apple_APFS Container disk1         1.0 TB     disk0s2

/dev/disk1 (synthesized):
   #:                       TYPE NAME                    SIZE       IDENTIFIER
   0:      APFS Container Scheme -                      +1.0 TB     disk1
                                 Physical Store disk0s2
   1:                APFS Volume Macintosh HD - Datos    907.8 GB   disk1s1
   2:                APFS Volume Preboot                 81.5 MB    disk1s2
   3:                APFS Volume Recovery                526.6 MB   disk1s3
   4:                APFS Volume VM                      2.1 GB     disk1s4
   5:                APFS Volume Macintosh HD            11.0 GB    disk1s5

/dev/disk2 (external, physical):
   #:                       TYPE NAME                    SIZE       IDENTIFIER
   0:                                                   *248.7 GB   disk2
'

# Then you can run disk utility to initialize / format pendrive.
CMD = $dicom:rs --url "http://ip:8080/dcm4chee-arc/aets/DCM4CHEE/rs" -r "&studyUID=uid1" -r "&studyUID=uid2" --query-ext "&includedefaults=false" --accept-ext="transfer-syntax=1.2.840.10008.1.2.4.70"

weasis://url_encode(CMD)

#js: var link = "weasis://" + encodeURIComponent(CMD)
$ mkdir /var/www/html/vscode

$ htpasswd -c /var/www/html/vscode/pass marco
#for updating: htpasswd /var/www/html/vscode/pass marco

$ docker pull linuxserver/code-server

$ docker run -d \
  --name=code-server \
  -e PUID=1000 \
  -e PGID=1000 \
  -e TZ=America/Caracas \
  -e FILE__PASSWORD=/var/www/html/vscode/pass \
  -p 8943:8443 \
  -v /var/www/html/vscode-config:/config \
  --restart unless-stopped \
  linuxserver/code-server

$ docker start code-server
#!/bin/bash
printf "%-10s%-15s%-15s%s\n" "PID" "MEMORY" "OWNER" "COMMAND"

function sysmon_main() {
        RAWIN=$(ps -o pid,user,%mem,command ax | grep -v PID | awk '/[0-9]*/{print $1 ":" $2 ":" $4}') 
        for i in $RAWIN
        do
                PID=$(echo $i | cut -d: -f1)
                OWNER=$(echo $i | cut -d: -f2)
                COMMAND=$(echo $i | cut -d: -f3)
                MEMORY=$(pmap $PID | tail -n 1 | awk '/[0-9]K/{print $2}')

                printf "%-10s%-15s%-15s%s\n" "$PID" "$OWNER" "$MEMORY" "$COMMAND"
        done
}

sysmon_main | sort -bnr -k3 | head -20
docker ps -q | xargs -n 1 docker inspect --format '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}} {{ .Name }}' | sed 's/ \// /'
sudo ncat --sh-exec "ncat <dest.ip> <dest.port>" -l <local port> --keep-open

#ex:

sudo ncat --sh-exec "ncat 192.168.56.116 8084" -l 8084 --keep-open

#then test: http://localhost:8084
#!/bin/bash

#--- xvfb
sudo apt install -y xvfb

#-- add this into /etc/rc.local:

    #!/bin/sh -e
    Xvfb -ac :99 -screen 0 1024x768x16 &
    exit 0

#-- save & first run:
Xvfb -ac :99 -screen 0 1024x768x16 &

#--- wine
sudo dpkg --add-architecture i386

wget -O- -q https://download.opensuse.org/repositories/Emulators:/Wine:/Debian/xUbuntu_18.04/Release.key | sudo apt-key add -
echo "deb http://download.opensuse.org/repositories/Emulators:/Wine:/Debian/xUbuntu_18.04 ./" | sudo tee /etc/apt/sources.list.d/wine-obs.list

sudo apt update
sudo apt install --install-recommends winehq-stable winetricks

wine --version
wine --help

wineboot -u

winetricks allfonts

#-- install my app at /opt
sudo mkdir -p /opt/report/cache
sudo chmod -R 777 /opt/report
cp ReportService5.exe /opt/report
cd /opt/report

#-- and test it:
DISPLAY=:99 wine ReportService5.exe </dev/null &>/dev/null &

#-- create systemd service:

sudo nano /lib/systemd/system/report-service.service

[Unit]
Description=Reporting service

[Service]
Environment="DISPLAY=:99"
WorkingDirectory=/opt/report
ExecStart=/usr/bin/wine "/opt/report/ReportService5.exe"
ExecStop=/opt/report/stop.sh
User=autana

[Install]
WantedBy=graphical.target

#-- save.

#-- create stop.sh

nano /opt/report/stop.sh

#!/bin/bash
kill $(pgrep ReportService5.exe)
kill -9 $(pgrep winedevice.exe)

#-- save.

sudo chmod +x /opt/report/stop.sh

#-- start service:
sudo systemctl enable report-service
sudo systemctl start report-service

DISPLAY=:99 import -window root -quality 90 /tmp/screenshot.jpg

# edit /etc/environment
# 
# add:
#
# LANG=es_DO.utf-8
# LC_ALL=es_DO.utf-8

# Then: logout ... login, then run this command:

$ sudo dpkg-reconfigure locales
wget -U "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)" -qO - "https://example.com"

# Example
# wget -U "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)" -qO - "https://www.infodolar.com.do/precio-dolar-entidad-banco-popular.aspx" | grep colCompraVenta | grep -Eo "([0-9.]+)" | head -1
$> pdftk file1.pdf file2.pdf file3.pdf cat output outputfile.pdf
ls -lct /etc | tail -1 | awk '{print $6, $7, $8}'
#!/bin/bash

#detect if port 11111 is open; if not, do the action:

netstat -ln | grep ":11111 " > /dev/null 2>&1

if [ $? -eq 1 ]; then
    echo "Port is closed. Doing action..."
fi
#add this lines to /etc/mosquitto/mosquitto.conf

listener 1883
protocol mqtt

listener 9001
protocol websockets

#then restart service:  $> sudo service mosquitto restart
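
#smoke test (a sketch, assuming the mosquitto-clients package is installed):

mosquitto_sub -h localhost -p 1883 -t test/topic &
mosquitto_pub -h localhost -p 1883 -t test/topic -m "hello"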
# Backup:
docker exec -t -u postgres your-db-container pg_dumpall -c > dump_`date +%d-%m-%Y"_"%H_%M_%S`.sql

# Restore:
cat your_dump.sql | docker exec -i your-db-container psql -U postgres
# =============== first let's create user/password:
# 1: user

$> sudo sh -c "echo -n 'sammy:' >> /etc/nginx/.htpasswd"

# 2: password

$> sudo sh -c "openssl passwd -apr1 >> /etc/nginx/.htpasswd"

# You can repeat this process for additional usernames. You can see how the usernames and encrypted passwords are stored within the file by typing:

# let's see what we did:

$> cat /etc/nginx/.htpasswd

# Output (something like)
# sammy:$apr1$wI1/T0nB$jEKuTJHkTOOWkopnXqC1d1

# then, we need to add configuration:
# at /etc/nginx/sites-available/default (or whatever your configuration is):

server {
    listen 80 default_server;
    listen [::]:80 default_server ipv6only=on;

    root /usr/share/nginx/html;
    index index.html index.htm;

    server_name localhost;
    
    location /myrestrictedfolder {                  #<--- new here
        rewrite ^(.*[^/])$ $1/ permanent;           #<--- new here
        auth_basic "Restricted Content";            #<--- new here
        auth_basic_user_file /etc/nginx/.htpasswd;  #<--- new here
    }                                               #<--- new here

    location / {
        try_files $uri $uri/ =404;
    }
}

# then restart nginx daemon:

$> sudo service nginx restart


#you will be asked for basic user/password when entering: http://localhost/myrestrictedfolder/
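
# you can also test from the command line; a sketch with curl (-u sends basic-auth credentials):

$> curl -u sammy http://localhost/myrestrictedfolder/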
$ sudo nano /etc/fstab

#add line:

//<remote host>/<share>  <mount>  cifs  username=<user>,password=<password>,uid=nobody,noperm,file_mode=0777,dir_mode=0777  0  0  

#Ex:
//200.200.0.124/images_autana  /mnt/nas  cifs  username=autana,password=*****,uid=nobody,noperm,file_mode=0777,dir_mode=0777  0  0  
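
#To apply the new entry without rebooting (assuming the mount point exists):

$ sudo mkdir -p /mnt/nas
$ sudo mount -a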
$ find [folder] -type f -exec gdcmscu -L [log] -D --store --call [Target AET] [HOST] [PORT] {} \; &

#Ex:

$ find /mnt/images/dicom/ -type f -exec gdcmscu -L /tmp/output.log -D --store --call AUTANA localhost 11112 {} \; & 
#!/bin/sh

#----------------------------------------------------------------
# To make it work, copy it to /usr/local/bin:
#
#     $> sudo cp verifica_nr /usr/local/bin
#
# Give it execute permission:
#
#     $> sudo chmod +x /usr/local/bin/verifica_nr
#
# Then add it to crontab (every minute):
#
#     $> sudo crontab -e
#     (go to the end and add:)
#     * * * * * /usr/local/bin/verifica_nr
#     (save)
#----------------------------------------------------------------
SERVICE="nrservice"
if ps ax | grep -v grep | grep -v $0 | grep $SERVICE > /dev/null
then
    echo "$SERVICE service running, everything is fine" > /dev/null
else
    sudo service nrservice.sh restart
fi
Over the last few days we've had a couple of issues with Imagick and processing PDFs on our servers. As it turns out, these issues are caused by automatic security updates. Let's look into the issue and its solution.

In Bugsnag, our error reporting service, the following exceptions have been popping up a lot:

not authorized `/path/to/some-pdf.pdf` @ error/constitute.c/ReadImage/412

convert: not authorized `/path/to/some-pdf.pdf` @ error/constitute.c/WriteImage/1028

not authorized `/path/to/some-image.png` @ error/convert.c/ConvertImageCommand/3015

unable to create temporary file `/some/path` Permission denied @ error/pdf.c/ReadPDFImage/465
Upon further investigation it looks like most of our sites and applications dealing with PDFs were actually experiencing issues. The weird thing is, some of these applications are quite old and haven't been updated or even touched for months, whilst others are recent and running the latest versions of packages and OS.

I don't care about your problems, just give me the fix!
A recent ImageMagick security update adds some extra policies regarding PDFs (or more specifically: Ghostscript). We can actually see the diff for this update right here. Luckily, we can edit the policy.xml file ourselves and loosen up security for working with PDFs.

In /etc/ImageMagick-6/policy.xml (or /etc/ImageMagick/policy.xml) find the following line

<policy domain="coder" rights="none" pattern="PDF" />
and change it to allow reading and writing by the PDF coder in ImageMagick:

<policy domain="coder" rights="read|write" pattern="PDF" />
Finally, don't forget to restart your PHP-FPM and optionally queue workers:

sudo service php7.2-fpm restart
If you're experiencing issues with other file types or manipulations, you might need to change some of the other policies as well. The policy.xml file contains some good documentation in the comments. You can read more about the security policy file on ImageMagick's website.
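
If you manage several servers, the same edit can be scripted; a minimal sketch assuming the Ubuntu path /etc/ImageMagick-6/policy.xml:

sudo sed -i 's#rights="none" pattern="PDF"#rights="read|write" pattern="PDF"#' /etc/ImageMagick-6/policy.xml
sudo service php7.2-fpm restart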
/////run once:

/Library/Internet Plug-Ins/JavaAppletPlugin.plugin/Contents/Resources/javawslauncher.app
#!/usr/bin/env python

'''Converts sequence of images to compact PDF while removing speckles,
bleedthrough, etc.

'''

# for some reason pylint complains about members being undefined :(
# pylint: disable=E1101

from __future__ import print_function

import sys
import os
import re
import subprocess
import shlex

from argparse import ArgumentParser

import numpy as np
from PIL import Image
from scipy.cluster.vq import kmeans, vq

######################################################################

def quantize(image, bits_per_channel=None):

    '''Reduces the number of bits per channel in the given image.'''

    if bits_per_channel is None:
        bits_per_channel = 6

    assert image.dtype == np.uint8

    shift = 8-bits_per_channel
    halfbin = (1 << shift) >> 1

    return ((image.astype(int) >> shift) << shift) + halfbin

######################################################################

def pack_rgb(rgb):

    '''Packs a 24-bit RGB triples into a single integer,
works on both arrays and tuples.'''

    orig_shape = None

    if isinstance(rgb, np.ndarray):
        assert rgb.shape[-1] == 3
        orig_shape = rgb.shape[:-1]
    else:
        assert len(rgb) == 3
        rgb = np.array(rgb)

    rgb = rgb.astype(int).reshape((-1, 3))

    packed = (rgb[:, 0] << 16 |
              rgb[:, 1] << 8 |
              rgb[:, 2])

    if orig_shape is None:
        return packed
    else:
        return packed.reshape(orig_shape)

######################################################################

def unpack_rgb(packed):

    '''Unpacks a single integer or array of integers into one or more
24-bit RGB values.

    '''

    orig_shape = None

    if isinstance(packed, np.ndarray):
        assert packed.dtype == int
        orig_shape = packed.shape
        packed = packed.reshape((-1, 1))

    rgb = ((packed >> 16) & 0xff,
           (packed >> 8) & 0xff,
           (packed) & 0xff)

    if orig_shape is None:
        return rgb
    else:
        return np.hstack(rgb).reshape(orig_shape + (3,))

######################################################################

def get_bg_color(image, bits_per_channel=None):

    '''Obtains the background color from an image or array of RGB colors
by grouping similar colors into bins and finding the most frequent
one.

    '''

    assert image.shape[-1] == 3

    quantized = quantize(image, bits_per_channel).astype(int)
    packed = pack_rgb(quantized)

    unique, counts = np.unique(packed, return_counts=True)

    packed_mode = unique[counts.argmax()]

    return unpack_rgb(packed_mode)

######################################################################

def rgb_to_sv(rgb):

    '''Convert an RGB image or array of RGB colors to saturation and
value, returning each one as a separate 32-bit floating point array or
value.

    '''

    if not isinstance(rgb, np.ndarray):
        rgb = np.array(rgb)

    axis = len(rgb.shape)-1
    cmax = rgb.max(axis=axis).astype(np.float32)
    cmin = rgb.min(axis=axis).astype(np.float32)
    delta = cmax - cmin

    saturation = delta.astype(np.float32) / cmax.astype(np.float32)
    saturation = np.where(cmax == 0, 0, saturation)

    value = cmax/255.0

    return saturation, value

######################################################################

def postprocess(output_filename, options):

    '''Runs the postprocessing command on the file provided.'''

    assert options.postprocess_cmd

    base, _ = os.path.splitext(output_filename)
    post_filename = base + options.postprocess_ext

    cmd = options.postprocess_cmd
    cmd = cmd.replace('%i', output_filename)
    cmd = cmd.replace('%o', post_filename)
    cmd = cmd.replace('%e', options.postprocess_ext)

    subprocess_args = shlex.split(cmd)

    if os.path.exists(post_filename):
        os.unlink(post_filename)

    if not options.quiet:
        print('  running "{}"...'.format(cmd), end=' ')
        sys.stdout.flush()

    try:
        result = subprocess.call(subprocess_args)
        before = os.stat(output_filename).st_size
        after = os.stat(post_filename).st_size
    except OSError:
        result = -1

    if result == 0:

        if not options.quiet:
            print('{:.1f}% reduction'.format(
                100*(1.0-float(after)/before)))

        return post_filename

    else:

        sys.stderr.write('warning: postprocessing failed!\n')
        return None

######################################################################

def percent(string):
    '''Convert a string (e.g. "85") to a fraction (e.g. 0.85).'''
    return float(string)/100.0

######################################################################

def get_argument_parser():

    '''Parse the command-line arguments for this program.'''

    parser = ArgumentParser(
        description='convert scanned, hand-written notes to PDF')

    show_default = ' (default %(default)s)'

    parser.add_argument('filenames', metavar='IMAGE', nargs='+',
                        help='files to convert')

    parser.add_argument('-q', dest='quiet', action='store_true',
                        default=False,
                        help='reduce program output')

    parser.add_argument('-b', dest='basename', metavar='BASENAME',
                        default='page',
                        help='output PNG filename base' + show_default)

    parser.add_argument('-o', dest='pdfname', metavar='PDF',
                        default='output.pdf',
                        help='output PDF filename' + show_default)

    parser.add_argument('-v', dest='value_threshold', metavar='PERCENT',
                        type=percent, default='25',
                        help='background value threshold %%'+show_default)

    parser.add_argument('-s', dest='sat_threshold', metavar='PERCENT',
                        type=percent, default='20',
                        help='background saturation '
                        'threshold %%'+show_default)

    parser.add_argument('-n', dest='num_colors', type=int,
                        default='8',
                        help='number of output colors '+show_default)

    parser.add_argument('-p', dest='sample_fraction',
                        metavar='PERCENT',
                        type=percent, default='5',
                        help='%% of pixels to sample' + show_default)

    parser.add_argument('-w', dest='white_bg', action='store_true',
                        default=False, help='make background white')

    parser.add_argument('-g', dest='global_palette',
                        action='store_true', default=False,
                        help='use one global palette for all pages')

    parser.add_argument('-S', dest='saturate', action='store_false',
                        default=True, help='do not saturate colors')

    parser.add_argument('-K', dest='sort_numerically',
                        action='store_false', default=True,
                        help='keep filenames ordered as specified; '
                        'use if you *really* want IMG_10.png to '
                        'precede IMG_2.png')

    parser.add_argument('-P', dest='postprocess_cmd', default=None,
                        help='set postprocessing command (see -O, -C, -Q)')

    parser.add_argument('-e', dest='postprocess_ext',
                        default='_post.png',
                        help='filename suffix/extension for '
                        'postprocessing command')

    parser.add_argument('-O', dest='postprocess_cmd',
                        action='store_const',
                        const='optipng -silent %i -out %o',
                        help='same as -P "%(const)s"')

    parser.add_argument('-C', dest='postprocess_cmd',
                        action='store_const',
                        const='pngcrush -q %i %o',
                        help='same as -P "%(const)s"')

    parser.add_argument('-Q', dest='postprocess_cmd',
                        action='store_const',
                        const='pngquant --ext %e %i',
                        help='same as -P "%(const)s"')

    parser.add_argument('-c', dest='pdf_cmd', metavar="COMMAND",
                        default='convert %i %o',
                        help='PDF command (default "%(default)s")')

    return parser

######################################################################

def get_filenames(options):

    '''Get the filenames from the command line, optionally sorted by
number, so that IMG_10.png is re-arranged to come after IMG_9.png.
This is a nice feature because some scanner programs (like Image
Capture on Mac OS X) automatically number files without leading zeros,
and this way you can supply files using a wildcard and still have the
pages ordered correctly.

    '''

    if not options.sort_numerically:
        return options.filenames

    filenames = []

    for filename in options.filenames:
        basename = os.path.basename(filename)
        root, _ = os.path.splitext(basename)
        matches = re.findall(r'[0-9]+', root)
        if matches:
            num = int(matches[-1])
        else:
            num = -1
        filenames.append((num, filename))

    return [fn for (_, fn) in sorted(filenames)]

######################################################################

def load(input_filename):

    '''Load an image with Pillow and convert it to numpy array. Also
returns the image DPI in x and y as a tuple.'''

    try:
        pil_img = Image.open(input_filename)
    except IOError:
        sys.stderr.write('warning: error opening {}\n'.format(
            input_filename))
        return None, None

    if pil_img.mode != 'RGB':
        pil_img = pil_img.convert('RGB')

    if 'dpi' in pil_img.info:
        dpi = pil_img.info['dpi']
    else:
        dpi = (300, 300)

    img = np.array(pil_img)

    return img, dpi

######################################################################

def sample_pixels(img, options):

    '''Pick a fixed percentage of pixels in the image, returned in random
order.'''

    pixels = img.reshape((-1, 3))
    num_pixels = pixels.shape[0]
    num_samples = int(num_pixels*options.sample_fraction)

    idx = np.arange(num_pixels)
    np.random.shuffle(idx)

    return pixels[idx[:num_samples]]

######################################################################

def get_fg_mask(bg_color, samples, options):

    '''Determine whether each pixel in a set of samples is foreground by
comparing it to the background color. A pixel is classified as a
foreground pixel if either its value or saturation differs from the
background by a threshold.'''

    s_bg, v_bg = rgb_to_sv(bg_color)
    s_samples, v_samples = rgb_to_sv(samples)

    s_diff = np.abs(s_bg - s_samples)
    v_diff = np.abs(v_bg - v_samples)

    return ((v_diff >= options.value_threshold) |
            (s_diff >= options.sat_threshold))

######################################################################

def get_palette(samples, options, return_mask=False, kmeans_iter=40):

    '''Extract the palette for the set of sampled RGB values. The first
palette entry is always the background color; the rest are determined
from foreground pixels by running K-means clustering. Returns the
palette, as well as a mask corresponding to the foreground pixels.

    '''

    if not options.quiet:
        print('  getting palette...')

    bg_color = get_bg_color(samples, 6)

    fg_mask = get_fg_mask(bg_color, samples, options)

    centers, _ = kmeans(samples[fg_mask].astype(np.float32),
                        options.num_colors-1,
                        iter=kmeans_iter)

    palette = np.vstack((bg_color, centers)).astype(np.uint8)

    if not return_mask:
        return palette
    else:
        return palette, fg_mask

######################################################################

def apply_palette(img, palette, options):

    '''Apply the palette to the given image. The first step is to set all
background pixels to the background color; then, nearest-neighbor
matching is used to map each foreground color to the closest one in
the palette.

    '''

    if not options.quiet:
        print('  applying palette...')

    bg_color = palette[0]

    fg_mask = get_fg_mask(bg_color, img, options)

    orig_shape = img.shape

    pixels = img.reshape((-1, 3))
    fg_mask = fg_mask.flatten()

    num_pixels = pixels.shape[0]

    labels = np.zeros(num_pixels, dtype=np.uint8)

    labels[fg_mask], _ = vq(pixels[fg_mask], palette)

    return labels.reshape(orig_shape[:-1])

######################################################################

def save(output_filename, labels, palette, dpi, options):

    '''Save the label/palette pair out as an indexed PNG image.  This
optionally saturates the palette by mapping the smallest color
component to zero and the largest one to 255, and also optionally sets
the background color to pure white.

    '''

    if not options.quiet:
        print('  saving {}...'.format(output_filename))

    if options.saturate:
        palette = palette.astype(np.float32)
        pmin = palette.min()
        pmax = palette.max()
        palette = 255 * (palette - pmin)/(pmax-pmin)
        palette = palette.astype(np.uint8)

    if options.white_bg:
        palette = palette.copy()
        palette[0] = (255, 255, 255)

    output_img = Image.fromarray(labels, 'P')
    output_img.putpalette(palette.flatten())
    output_img.save(output_filename, dpi=dpi)

######################################################################

def get_global_palette(filenames, options):

    '''Fetch the global palette for a series of input files by merging
their samples together into one large array.

    '''

    input_filenames = []

    all_samples = []

    if not options.quiet:
        print('building global palette...')

    for input_filename in filenames:

        img, _ = load(input_filename)
        if img is None:
            continue

        if not options.quiet:
            print('  processing {}...'.format(input_filename))

        samples = sample_pixels(img, options)
        input_filenames.append(input_filename)
        all_samples.append(samples)

    num_inputs = len(input_filenames)

    all_samples = [s[:int(round(float(s.shape[0])/num_inputs))]
                   for s in all_samples]

    all_samples = np.vstack(tuple(all_samples))

    global_palette = get_palette(all_samples, options)

    if not options.quiet:
        print('  done\n')

    return input_filenames, global_palette

######################################################################

def emit_pdf(outputs, options):

    '''Runs the PDF conversion command to generate the PDF.'''

    cmd = options.pdf_cmd
    cmd = cmd.replace('%o', options.pdfname)
    if len(outputs) > 2:
        cmd_print = cmd.replace('%i', ' '.join(outputs[:2] + ['...']))
    else:
        cmd_print = cmd.replace('%i', ' '.join(outputs))
    cmd = cmd.replace('%i', ' '.join(outputs))

    if not options.quiet:
        print('running PDF command "{}"...'.format(cmd_print))

    try:
        result = subprocess.call(shlex.split(cmd))
    except OSError:
        result = -1

    if result == 0:
        if not options.quiet:
            print('  wrote', options.pdfname)
    else:
        sys.stderr.write('warning: PDF command failed\n')

######################################################################

def notescan_main(options):

    '''Main function for this program when run as script.'''

    filenames = get_filenames(options)

    outputs = []

    do_global = options.global_palette and len(filenames) > 1

    if do_global:
        filenames, palette = get_global_palette(filenames, options)

    do_postprocess = bool(options.postprocess_cmd)

    for input_filename in filenames:

        img, dpi = load(input_filename)
        if img is None:
            continue

        output_filename = '{}{:04d}.png'.format(
            options.basename, len(outputs))

        if not options.quiet:
            print('opened', input_filename)

        if not do_global:
            samples = sample_pixels(img, options)
            palette = get_palette(samples, options)

        labels = apply_palette(img, palette, options)

        save(output_filename, labels, palette, dpi, options)

        if do_postprocess:
            post_filename = postprocess(output_filename, options)
            if post_filename:
                output_filename = post_filename
            else:
                do_postprocess = False

        outputs.append(output_filename)

        if not options.quiet:
            print('  done\n')

    emit_pdf(outputs, options)

######################################################################

def main():
    '''Parse args and call notescan_main().'''
    notescan_main(options=get_argument_parser().parse_args())

if __name__ == '__main__':
    main()
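
# Usage (a sketch; the script name noteshrink.py is an assumption):
#
#   python noteshrink.py -n 8 -o notes.pdf scan_*.png
#
# -n sets the number of output colors, -o the output PDF filename.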
: 'http://www.modbusdriver.com/modpoll.html:

Usage: modpoll [options] serialport|host
    Arguments:
    serialport    Serial port when using Modbus ASCII or Modbus RTU protocol
                  COM1, COM2 ...                on Windows
                  /dev/ttyS0, /dev/ttyS1 ...    on Linux
                  /dev/ser1, /dev/ser2 ...      on QNX
    host          Host name or dotted ip address when using MODBUS/TCP protocol
    General options:
    -m ascii      Modbus ASCII protocol
    -m rtu        Modbus RTU protocol (default)
    -m tcp        MODBUS/TCP protocol
    -m enc        Encapsulated Modbus RTU over TCP
    -a #          Slave address (1-255, 1 is default)
    -r #          Start reference (1-65536, 100 is default)
    -c #          Number of values to poll (1-100, 1 is default)
    -t 0          Discrete output (coil) data type
    -t 1          Discrete input data type
    -t 3          16-bit input register data type
    -t 3:hex      16-bit input register data type with hex display
    -t 3:int      32-bit integer data type in input register table
    -t 3:mod      32-bit module 10000 data type in input register table
    -t 3:float    32-bit float data type in input register table
    -t 4          16-bit output (holding) register data type (default)
    -t 4:hex      16-bit output (holding) register data type with hex display
    -t 4:int      32-bit integer data type in output (holding) register table
    -t 4:mod      32-bit module 10000 type in output (holding) register table
    -t 4:float    32-bit float data type in output (holding) register table
    -i            Slave operates on big-endian 32-bit integers
    -f            Slave operates on big-endian 32-bit floats
    -1            Poll only once, otherwise poll every second
    -e            Use Daniel/Enron single register 32-bit mode
    -0            First reference is 0 (PDU addressing) instead 1
    Options for MODBUS/TCP:
    -p #          TCP port number (502 is default)
    Options for Modbus ASCII and Modbus RTU:
    -b #          Baudrate (e.g. 9600, 19200, ...) (9600 is default)
    -d #          Databits (7 or 8 for ASCII protocol, 8 for RTU)
    -s #          Stopbits (1 or 2, 1 is default)
    -p none       No parity
    -p even       Even parity (default)
    -p odd        Odd parity
    -4 #          RS-485 mode, RTS on while transmitting and another # ms after
    -o #          Time-out in seconds (0.01 - 10.0, 1.0 s is default)
'

# Read Holding Registers
# address = 4001 (-a 1) count = 10 (-c 10) port = 5502 (-p 5502)

    modpoll -m tcp -a 1 -c 10 -p 5502 192.168.56.1

# Write Holding Registers
# address = 4001 (-a 1) count = 3 (-c 3) port = 5502 (-p 5502) ... value1 value2 value3

    modpoll -m tcp -a 1 -c 3 -p 5502 192.168.56.1 11 32 56

# To retrieve once 5 floating point values starting from reference 100 with Modbus/TCP from slave device with IP 10.0.0.100:
    
    modpoll -m tcp -t4:float -r 100 -c 5 -1 10.0.0.100
    
    
upstream newserver {
  server 172.16.0.1:80;  # this is new server, by IP address
}

server {
  listen 80;
  server_name subdomain.site.com;
  location / {
    proxy_set_header Host $host;
    proxy_pass http://newserver;
  }
}
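
# test the config and apply it without dropping connections (standard nginx CLI):

sudo nginx -t && sudo service nginx reload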
#from bash command line
#first create folder to save python dependencies:

    > sudo mkdir /var/www/.local
    > sudo mkdir /var/www/.cache
    > sudo chown www-data.www-data /var/www/.local
    > sudo chown www-data.www-data /var/www/.cache

# then install dependencies (imports):

    > sudo -H -u www-data pip install <dep1>
    > sudo -H -u www-data pip install <dep2>
    :
    
# then set user permissions to run your script to www-data user:
# creating a file at /etc/sudoers.d/:

    > sudo nano /etc/sudoers.d/mysudoerfile
    
    www-data ALL=(ALL) NOPASSWD: /usr/bin/python <path of your script here>

# then set execute permissions to your script:

    sudo chmod +x <path of your script here>

# then run your script 
#!/bin/bash
# create_barcode.sh
# sudo apt-get install barcode imagemagick

CODE=$1 #the code ... first parameter
FNAME=$2  #the output filename (.png) .... second parameter

# let's create postscript:
barcode -E -b "$CODE" | convert -density 600 ps:- png:- > $FNAME

# use:
#   
#   bash create_barcode.sh 123456789 output.png  #it autodetect's the preferable encoding
#
#   this creates "output.png"
#
# Security Error:
#
# if you get security error: convert not authorized (ps/png) do this:
#
# edit /etc/ImageMagick-6/policy.xml
#
# disable this:

  <!--policy domain="coder" rights="none" pattern="PS" />-->

# and append this:

  <policy domain="coder" rights="read/write" pattern="PNG,PS" />
# Create Channel
# Create new Bot and get Bot TOKEN (to replace TOKEN_OF_BOT)
# and edit:
# /etc/ssh/sshrc

ip=`echo $SSH_CONNECTION | cut -d " " -f 1`

logger -t ssh-wrapper $USER login from $ip

curl -s -X POST https://api.telegram.org/botTOKEN_OF_BOT/sendMessage \
     -d text="ssh login: $USER from $ip" -d chat_id=@autanaChannel > /dev/null
$ sudo ip route add prohibit <ip address to block>/32

#Ex: sudo ip route add prohibit 58.15.238.31/32
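
#To lift the block later, delete the route:

$ sudo ip route del prohibit 58.15.238.31/32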
$> cd <folder>

$> perl -e 'for(<1.*>){((stat)[9]<(unlink))}'

$> find ./ -name "1.*" -exec rm {} \;

$> for i in 1.*; do rm -rf $i; done
$> sudo apt-get install cifs-utils

$> sudo mkdir /mnt/shared

$> sudo mount -t cifs -o username=guest,password=,rw,iocharset=utf8,file_mode=0777,dir_mode=0777,noperm //<windows address>/the_folder /mnt/shared/
$ ifconfig -a | grep "inet\s" | awk -F'[: ]+' '{ print $4 }'

$ ip addr  | grep "inet\s" | awk -F'[: ]+' '{ print $3 }'
sudo vgdisplay # view VGs/LVs
sudo pvcreate /dev/sdX /dev....
sudo vgextend <name-vg> /dev/sdX
sudo lvextend -l +100%FREE /dev/<name-vg>/root
sudo resize2fs /dev/<name-vg>/root
# create mount folder:

mkdir /tmp/my10mbvirtualdisk

# create file system (filename=filesyst in current folder) (10Mb):

dd if=/dev/zero of=./filesyst bs=10485760 count=1
sudo losetup /dev/loop0 ./filesyst
sudo mkfs.ext3 /dev/loop0

sudo mount /dev/loop0 /tmp/my10mbvirtualdisk


# now you can use /tmp/my10mbvirtualdisk as disk



# destroy:

sudo umount /tmp/my10mbvirtualdisk
sudo losetup -d /dev/loop0
sudo rm ./filesyst
find /src/dir/ -mtime -<n days> -printf %P\\0|rsync --files-from=- --from0 /src/dir/ /dst/dir/
##src: https://www.digitalocean.com/community/tutorials/how-to-set-up-master-slave-replication-on-postgresql-on-an-ubuntu-12-04-vps#configure-the-master-server

############## Master:

psql -c "CREATE USER rep REPLICATION LOGIN CONNECTION LIMIT 1 ENCRYPTED PASSWORD 'yourpassword';"

#//at file /etc/postgresql/9.5/main/pg_hba.conf 

	host    replication     rep     IP_address_of_slave/32   md5

#//at file /etc/postgresql/9.5/main/postgresql.conf

	listen_addresses = 'localhost,IP_address_of_THIS_host'
	wal_level = 'hot_standby'
	archive_mode = on
	archive_command = 'cd .'
	max_wal_senders = 1
	hot_standby = on

service postgresql restart


############### Slave:

service postgresql stop

#//at file /etc/postgresql/9.5/main/pg_hba.conf 

	host    replication     rep     IP_address_of_master/32  md5

#//at file /etc/postgresql/9.5/main/postgresql.conf

	listen_addresses = 'localhost,IP_address_of_THIS_host'
	wal_level = 'hot_standby'
	archive_mode = on
	archive_command = 'cd .'
	max_wal_senders = 1
	hot_standby = on


################## Master:

psql -c "select pg_start_backup('initial_backup');"
rsync -cva --inplace --exclude=*pg_xlog* /var/lib/postgresql/9.5/main/ slave_IP_address:/var/lib/postgresql/9.5/main/
psql -c "select pg_stop_backup();"


################### Slave:

nano /var/lib/postgresql/9.5/main/recovery.conf

	standby_mode = 'on'
	primary_conninfo = 'host=master_IP_address port=5432 user=rep password=yourpassword'
	trigger_file = '/tmp/postgresql.trigger.5432' ##When we want to set SLAVE db to Master (because of original MASTER fail) creating this file is enough. With the existence of this file db will act like MASTER.

service postgresql start

## check that there are no problems:

less /var/log/postgresql/postgresql-9.5-main.log
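
## on the master, confirm the standby is streaming (pg_stat_replication has one row per connected standby):

psql -c "select client_addr, state from pg_stat_replication;"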
#convert VDI to RAW:
$ vboxmanage clonehd --format RAW ubuntu.vdi ubuntu.img

#mount RAW:
$ mount -t ext3 -o loop,rw ./ubuntu.img /mnt
# duration time of file:
#   ej: sox --i -D test.ogg

  sox --i -D <sound file>

# play sound to default output
#    Linux/OSX?: 

  sox <sound file> -d

#    Windows: 

  sox <sound file> -t waveaudio
  
# record sound from default input:
#    Linux/OSX?: 

  sox -t alsa default <output file>
  
#    Windows:
  
  sox -t waveaudio -d <output file>
  
# play sound from starting time (secs) (trim):
#    Linux/OSX?:

  sox <sound file> -d trim <n secs>
  
#    Windows:

  sox <sound file> -t waveaudio trim <n secs>
  
# split big file into small files with equal time fraction:
#    %1n = autoincremental: 1,2,3...

  sox <input file> <output file>_%1n.ogg trim 0 <secs> : newfile : restart
  
# concatenate small files into one:

  sox <input file1> <input file2> ... <input filen> <output file>

# cut silences with tolerance:

  sox in.wav out.wav silence -l 1 0.1 1% -1 2.0 1%
diff -r dir1 dir2 | grep dir1 | awk '{print $4}' > difference1.txt; clear; cat difference1.txt
$ sudo nano /etc/environment

#
# (Append these lines at the end of file:)

http_proxy="http://myproxy.server.com:8080/"
https_proxy="http://myproxy.server.com:8080/"
ftp_proxy="http://myproxy.server.com:8080/"
no_proxy="localhost,127.0.0.1,localaddress,.localdomain.com"
HTTP_PROXY="http://myproxy.server.com:8080/"
HTTPS_PROXY="http://myproxy.server.com:8080/"
FTP_PROXY="http://myproxy.server.com:8080/"
NO_PROXY="localhost,127.0.0.1,localaddress,.localdomain.com"

#
# (save and... )

$ source /etc/environment

# To unset proxies:

# sudo nano /etc/environment
#
# (Remove proxies lines (see above))
#
# (save and then...) 

unset http_proxy
unset https_proxy
unset ftp_proxy
unset no_proxy
unset HTTP_PROXY
unset HTTPS_PROXY
unset FTP_PROXY
unset NO_PROXY

# (that's all)


# ========== using proxies for apt (apt does not obey the /etc/environment proxy configuration):

# (we create a new file at /etc/apt/apt.conf.d/)
#

$ sudo nano /etc/apt/apt.conf.d/95proxies

# (now append these lines...)

Acquire::http::proxy "http://myproxy.server.com:8080/";
Acquire::ftp::proxy "ftp://myproxy.server.com:8080/";
Acquire::https::proxy "https://myproxy.server.com:8080/";

# (save and run "sudo apt update" for trying...)
#
/etc/php/7.0/fpm/pool.d/www.conf:

pm = dynamic
pm.max_children = 30     ; original: 5
pm.start_servers = 3     ; original: 1
pm.min_spare_servers = 2 ; original: 1
pm.max_spare_servers = 4 ; original: 3
pm.max_requests = 500    ; originally commented out
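
# then reload php-fpm to apply the new pool settings (assuming the php7.0-fpm service name):

sudo service php7.0-fpm reload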
pgrep -af <name of running process>

#who is running the most recently created binary in /usr/bin (attack check): (sudo apt-get install inotify-tools)
inotifywait -e create /usr/bin | awk '{print $3}' | xargs pgrep -af

#which binary is behind this process ID?
ls -l /proc/<ID>/exe
#just add this line at the end of /etc/ssh/sshd_config

AllowUsers <thelogin> 
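
#then reload sshd to apply (assuming a systemd distro; the unit is "ssh" on Debian/Ubuntu):

sudo systemctl reload sshd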

#Using a single GhostScript command in my Ubuntu terminal, I was able to reduce a PDF file from 6 MB to approximately 1 MB:

$ gs -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dCompatibilityLevel=1.4 -dPDFSETTINGS=/ebook -sOutputFile=output.pdf input.pdf

# You can use the following parameters for -dPDFSETTINGS instead of /ebook:

# /screen – Lowest quality, lowest size (ugly)
# /ebook – Moderate quality
# /printer – Good quality
# /prepress – Best quality, highest size
#This will report the percentage of memory in use

% free | grep Mem | awk '{print $3/$2 * 100.0}'

#Ex:23.8171

#This will report the percentage of memory that's free

% free | grep Mem | awk '{print $4/$2 * 100.0}'

#Ex:76.5013

#You could create an alias for this command or put this into a tiny shell script. The specific output could be tailored to your needs using formatting commands for the print statement along these lines:

% free | grep Mem | awk '{ printf("free: %.4f %%\n", $4/$2 * 100.0) }'
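
#e.g. a small helper function (the name "memfree" is ours) for your shell rc file:

memfree() { free | grep Mem | awk '{ printf("free: %.4f %%\n", $4/$2 * 100.0) }'; }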
* Open CMD, then type regedit and press Enter.
* Navigate to this path: 

HKEY_CURRENT_USER
 \Control Panel
  \Desktop
  
* And set value:
    
ForegroundLockTimeout DWORD 0x00000000 (0)
#
# first you must establish iptables rules to keep port 22 closed
# and choose the ports to use as the knock combination. I used 3030, 55050 and 7070 (it is
# very important to use ports in no predictable order)
#
#  #-- rules to keep open combination ports:
#

sudo iptables -A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT

#
#  #-- rules to keep ssh port (22) closed:
#

sudo iptables -A INPUT -p tcp -m tcp --dport 22 -j DROP

#
#  #-- then we save iptables
#

sudo iptables-save

#
#  #-- if you want to make these rules "persistent", search for the
#      iptables-persistent package or look at this url
#
#      http://askubuntu.com/questions/119393/how-to-save-rules-of-the-iptables
#
#      it helped me.
#

# debian and derived distros... install knockd:

sudo apt-get install knockd

# we edit /etc/default/knockd: (knockd config file)

sudo nano /etc/default/knockd

# and set:

    START_KNOCKD=0
    
# to

    START_KNOCKD=1
    
# let's create our ports sequence: let's say 3030,55050,7070 = open, and 7070,55050,3030 = close.
# for this we edit /etc/knockd.conf:

sudo nano /etc/knockd.conf
    
[options]
  UseSyslog

[openSSH]
  sequence    = 3030,55050,7070
  seq_timeout = 1
# add our input accept rule to iptables
  command     = /sbin/iptables -I INPUT -s %IP% -p tcp --dport 22 -j ACCEPT
  tcpflags    = syn

[closeSSH]
  sequence    = 7070,55050,3030
  seq_timeout = 1
# delete our input accept rule from iptables
  command     = /sbin/iptables -D INPUT -s %IP% -p tcp --dport 22 -j ACCEPT
  tcpflags    = syn
  
# we start service:

sudo /etc/init.d/knockd start

# That's all, we're done.
# .. and now... How can I open my host's ssh port (22) from a remote location?
# ... just like this (using telnet):

# OPEN:
telnet 192.168.1.33 3030; telnet 192.168.1.33 55050; telnet 192.168.1.33 7070

# you'll see this output in syslog (example with 192.168.1.33):

#  knockd: 192.168.1.33: openSSH: Stage 1
#  knockd: 192.168.1.33: openSSH: Stage 2
#  knockd: 192.168.1.33: openSSH: Stage 3
#  knockd: 192.168.1.33: openSSH: OPEN SESAME
#  knockd: openSSH: running command: /sbin/iptables -I INPUT -s 192.168.1.33...



# and then we CLOSE it:
telnet 192.168.1.33 7070; telnet 192.168.1.33 55050; telnet 192.168.1.33 3030

# you'll see this output in syslog (example with 192.168.1.33):

#  knockd: 192.168.1.33: closeSSH: Stage 1
#  knockd: 192.168.1.33: closeSSH: Stage 2
#  knockd: 192.168.1.33: closeSSH: Stage 3
#  knockd: 192.168.1.33: closeSSH: OPEN SESAME
#  knockd: closeSSH: running command: /sbin/iptables -D INPUT -s 192.168.1.33...

#to skip the first [n] files, tail must be given [n+1]:

$ find <folder> -maxdepth 1 -type f -printf "%T@ %Tc %p\n" | grep -v '/\.' | sort -r | tail -n +60 | grep -Po "\./.*"

$ for f in $(find -maxdepth 1 -type f -print0 | xargs -r0 stat -c %y\ %n | grep -v '\.\/\.' | sort -r | grep -Po '\./.*' | tail -n +61); do
    printf "$f\n"
  done
#install unison

$ sudo apt install unison

# synchronize local folder "/home/user/sync/" with folder "/home/user/sync/" on remote "ssh://user@remotehost.com/" (ssh port 22000)


$ unison -silent -auto -batch /home/user/sync/ ssh://user@remotehost.com//home/user/sync/ \
  -nodeletion ssh://user@remotehost.com//home/user/sync/ \
  -sshargs '-p22000' -logfile /tmp/mylog.txt
# NGINX: add <folder> in /etc/nginx/sites-available/default: 

server {
    :
    location /<folder>/ {
        proxy_pass http://<host>:<port>/;
        proxy_set_header X-Original-Host $http_host;
        proxy_set_header X-Original-Scheme $scheme;
        proxy_set_header X-Forwarded-For $remote_addr;
    }
    :
}
    
# APACHE2: add <folder> in /etc/apache2/sites-available/00-default.conf

<VirtualHost *:80>
    :
        ProxyPass /<folder> http://<host>:<port>/
        ProxyPassReverse /<folder> http://<host>:<port>/

        ProxyRequests Off
        ProxyPreserveHost On

        <proxy>
            Order deny,allow
            Allow from all
        </proxy>
    :
</VirtualHost>


#example: "http://192.168.11.45/demo/" -> "http://192.168.11.45:8080/"

server {
    :
    location /demo/ {
        proxy_pass http://localhost:8080/;
        proxy_set_header X-Original-Host $http_host;
        proxy_set_header X-Original-Scheme $scheme;
        proxy_set_header X-Forwarded-For $remote_addr;
    }
    :
}

<VirtualHost *:80>
    :
        ProxyPass /demo http://localhost:8080/
        ProxyPassReverse /demo http://localhost:8080/

        ProxyRequests Off
        ProxyPreserveHost On

        <proxy>
            Order deny,allow
            Allow from all
        </proxy>
    :
</VirtualHost>

/* other configuration for nginx:
    
server {
    listen        80;
    server_name   example.com *.example.com;
    location / {
        proxy_pass         http://127.0.0.1:5000;
        proxy_http_version 1.1;
        proxy_set_header   Upgrade $http_upgrade;
        proxy_set_header   Connection keep-alive;
        proxy_set_header   Host $host;
        proxy_cache_bypass $http_upgrade;
        proxy_set_header   X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header   X-Forwarded-Proto $scheme;
    }
}
*/
#changes in /etc/nginx/sites-available/default

server {
  server_name example.com;
  root /path/to/root;
  location / {
    # blah blah
  }
  location /demo {
    alias /path/to/root/production/folder/here;
  }
}
# Edit your /etc/postgresql/9.3/main/postgresql.conf, and change the lines as follows:

# Note: If you didn't find the postgresql.conf file, then just type 

$> locate postgresql.conf 

# in a terminal

1) change #log_directory = 'pg_log' to log_directory = 'pg_log'
2) change #log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' to log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log'
3) change #log_statement = 'none' to log_statement = 'all'
4) change #logging_collector = off to logging_collector = on

# Optional: SELECT set_config('log_statement', 'all', true);

sudo /etc/init.d/postgresql restart   # or: sudo service postgresql restart

#Fire query in postgresql: select 2+2

# Find current log in /var/lib/pgsql/9.2/data/pg_log/

#The log files tend to grow a lot over time and might fill your disk. For safety, write a small script that deletes old logs and restarts the postgresql server.
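
# a minimal sketch via cron (assuming logs under /var/lib/pgsql/9.2/data/pg_log/ and a 7-day retention):

0 3 * * * find /var/lib/pgsql/9.2/data/pg_log/ -name '*.log' -mtime +7 -delete && service postgresql restart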
##sudo nano /etc/udev/rules.d/95-monitor-hotplug.rules

SUBSYSTEM=="drm", RUN+="/usr/local/bin/fix_tv_state.sh"

##---------------------



##sudo nano /usr/local/bin/fix_tv_state.sh

#!/bin/sh
#Fix TV state when HDMI link is lost.

export XAUTHORITY=/home/marco/.Xauthority

OUTPUT="HDMI1"
BAD_MODE="1280x720"
GOOD_MODE="1920x1080"

for MODE in $BAD_MODE $GOOD_MODE; do
 sleep 2
 DISPLAY=:0 xrandr --output $OUTPUT --mode $MODE
 sleep 2
done

##--------------------

sudo chmod +x /usr/local/bin/fix_tv_state.sh
sudo udevadm control --reload-rules

# warning: this is not a script; it's a set of instructions.
#these steps create a pptp vpn server so all clients can reach each other.

##################### SERVER SIDE (UBUNTU SERVER 16.04+) ######################

sudo apt-get install pptpd
sudo update-rc.d pptpd defaults

# I had to use this on 16.04... it fixes autostart problem:
sudo systemctl enable pptpd 

#edit file "/etc/pptpd.conf": example using nano: $> sudo nano /etc/pptpd.conf
#add the following lines:
    
    localip 10.20.0.1
    remoteip 10.20.1.100-200 #100 clients
#save it
        
#edit file "/etc/ppp/chap-secrets": example using nano: $> sudo nano /etc/ppp/chap-secrets
#add all clients with fixed ip addresses (change user1, user2... and password1, password2,.. according to your preference):

    user1 pptpd password1 10.20.1.100 
    user2 pptpd password2 10.20.1.101
    user3 pptpd password3 10.20.1.200
    :
#save it

#edit/add this line at "/etc/sysctl.conf":
    net.ipv4.ip_forward = 1
#save change:
sudo sysctl -p

#Configure iptables for forwarding (let clients see all each other):

iptables --table nat --append POSTROUTING --out-interface ppp0 -j MASQUERADE
iptables -I INPUT -s 10.20.0.0/16 -i ppp0 -j ACCEPT
iptables --append FORWARD --in-interface enp0s8 -j ACCEPT
iptables-save

#restart your service:

sudo service pptpd restart


##################### CLIENT SIDE FOR UBUNTU SERVER ######################

## Start client side (Ubuntu Server (w/o GUI)):
##
## ============================================================
## 1) Configure pptp: (Change your <vpn server address>)
##   (in this example we named the provider as "pptpserver")
## ============================================================

sudo apt-get install pptp-linux

sudo nano /etc/ppp/peers/pptpserver

# add the following lines:

pty "pptp <vpn server address> --nolaunchpppd"
lock
noauth
nobsdcomp
nodeflate
name server
password 13132828
remotename pptpserver
persist
maxfail 0
holdoff 5
require-mppe-128

# and save (ctrl-o ctrl-x)

# ==================================================================
# 2) Create a config file that adds the route automatically at startup:
#    this is necessary so the vpn is not used as the default internet route
#    (use the same provider name, in my case "pptpserver")
# ==================================================================

sudo nano /etc/ppp/ip-up.d/pptpserver

# add the following lines:

#!/bin/bash
# This script is called with the following arguments:
# Arg Name
# $1 Interface name
# $2 The tty
# $3 The link speed
# $4 Local IP number
# $5 Peer IP number
# $6 Optional ''ipparam'' value foo
/sbin/route add -net 10.20.0.0 netmask 255.255.0.0 dev ppp0


# and save (ctrl-o ctrl-x)
#... then set execute permission:

sudo chmod +x /etc/ppp/ip-up.d/pptpserver

# ============================================================
#   STARTUP CONNECTION
# ============================================================

# ------------------------------------
# 1) Manual startup:
# ------------------------------------

sudo pon pptpserver

# ------------------------------------
# 2) Auto startup on boot:
# ------------------------------------

#
# a) USING INTERFACES: Edit interfaces file:
#

sudo nano /etc/network/interfaces

# add the following lines to the end:

auto tunnel
iface tunnel inet ppp
  provider pptpserver

# and save (ctrl-o ctrl-x)
# then restart networking:

sudo /etc/init.d/networking restart

#
# b) USING SERVICE SYSTEMCTL
#

sudo nano /etc/systemd/system/pppoe.service

# add these lines:

[Unit]
Description=PPPoE connection
 
[Service]
Type=oneshot
RemainAfterExit=true
ExecStart=/usr/bin/pon pptpserver
ExecStop=/usr/bin/poff -a
 
[Install]
WantedBy=default.target

# and save
# then change permissions:

sudo chmod +x /etc/systemd/system/pppoe.service

# then reload daemons:

systemctl daemon-reload

# and it will connect on boot.

#start:
sudo systemctl start pppoe

#stop:
sudo systemctl stop pppoe
# let's create a backup from a remote postgresql database using pg_dump:
#
#   pg_dump -h [host address] -Fc -o -U [database user] <database name> > [dump file]
#
# later it can be restored at the same remote server using:
#
#   sudo -u postgres pg_restore -C -d postgres mydb_backup.dump
#
#Ex:

pg_dump -h 67.8.78.10 -p 5432 -Fc -o -U myuser mydb > mydb_backup.dump

pg_restore -C -d postgres mydb_backup.dump



#complete (all databases and objects)

pg_dumpall -U myuser -h 67.8.78.10 -p 5432 --clean --file=mydb_backup.dump


#restore from pg_dumpall --clean:

psql -f mydb_backup.dump postgres #it doesn't matter which db you select here
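

# a nightly-backup sketch via cron (the schedule and target path are assumptions):

0 1 * * * pg_dump -h 67.8.78.10 -p 5432 -Fc -o -U myuser mydb > /backups/mydb_$(date +\%F).dump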
#this command shows a list of supported encodings:
#pdftotext -listenc 

#this command converts pdf to html:
#pdftohtml -c -s -enc <encoding> <pdf to convert> <output html file>

#Ex:

pdftohtml -c -s -enc Latin1 test.pdf test.html
convert -density 144 myfile.pdf[0] -resize 10% -background white -alpha remove -strip -quality 90 mypreview.jpg
#!/bin/bash
# Delete all containers

docker rm $(docker ps -a -q)

# Delete all images

docker rmi $(docker images -q)
sudo su
cat /dev/null > /etc/apt/apt.conf
echo 'Acquire::http::Proxy "false";' > /etc/apt/apt.conf.d/proxy
apt-get update 
#for an image (not a running container), use save:
docker save <imagenameortag> | gzip > myimage.tgz

#for a running or paused container, use export:
docker export <containername> | gzip > mycontainer.tgz

#load an image archive created with save:
gunzip -c myimage.tgz | docker load

#import a container archive created with export (creates an image):
gunzip -c mycontainer.tgz | docker import - <newimagename>
find ./ -name <filemask>* -exec dcmodify \
  -m "(0010,0010)=MOLINA^HERNAN" \
  -m "(0010,0020)=3207639" \
  -m "(0010,0030)=19411128" \
  -m "(0010,0040)=M" \
  -m "(0008,0050)=" \
  -m "(0040,0275)[0].(0032,1060)=RMN HOMBRO IZQUIERDO" \
  -m "(0040,0275)[0].(0040,0007)=RMN HOMBRE IZQUIERDO" {} \;
#iptables -A OUTPUT -d <ipaddress> -j DROP

iptables -A OUTPUT -d 119.140.145.206 -j DROP
iptables-save
#> sudo apt-get install nethogs
#> sudo nethogs <network interface>

#example:

$> sudo nethogs eth0
#iptables -A INPUT -s <ipaddress> -j DROP

iptables -A INPUT -s 65.55.44.100 -j DROP
iptables-save

#un-block

iptables -D INPUT -s xx.xxx.xx.xx -j DROP
iptables -D INPUT -s xx.xxx.xx.xx/yy -j DROP
iptables-save
caffeinate -u -t 2
osascript -e 'tell application "System Events" to keystroke "mypassword"'
osascript -e 'tell application "System Events" to keystroke return'
#split the file into pieces:

  $> split --bytes=10M /path/to/bigfile.ext /path/to/image/prefixForPieces

#then put them together again when necessary

  $> cat prefixForPieces* > bigfile.ext
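
#optionally verify the reassembled file (the two checksums should match):

  $> md5sum /path/to/bigfile.ext bigfile.ext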
#!/bin/bash

sudo apt-get install postgresql conquest-common conquest-postgres

sudo su postgres -c "createdb dicomserver"
sudo su postgres -c "createuser dicomserver"
sudo su postgres -c "psql -c \"ALTER USER dicomserver WITH ENCRYPTED PASSWORD 'dicomserver'\""
sudo su postgres -c "psql -c \"GRANT ALL PRIVILEGES ON DATABASE dicomserver TO dicomserver\""

sudo sed -i 's/CONQUESTSRV1/DICOMSERVER/g' /etc/conquest-dicom-server/dicom.ini
sudo sed -i 's/CONQUESTSRV1/DICOMSERVER/g' /etc/conquest-dicom-server/acrnema.map

sudo sed -i 's/SQLServer\s*\=\ conquest/SQLServer\ =\ dicomserver/g' /etc/conquest-dicom-server/dicom.ini
sudo sed -i 's/Username\s*\=\ postgres/Username\ =\ dicomserver/g' /etc/conquest-dicom-server/dicom.ini
sudo sed -i 's/Password\s*\=\ postgres/Password\ =\ dicomserver/g' /etc/conquest-dicom-server/dicom.ini

sudo sed -i 's/DGATE_ENABLE\=false/DGATE_ENABLE\=true/g' /etc/default/dgate

sudo service dgate stop
sudo service postgresql restart
sudo dgate -v -r
sudo service dgate start

#when installed: AET=DICOMSERVER, PORT=11112
$ rsync -avz -e "ssh -p <ssh port number>" <user>@<remote addr>:<remote path/folder> <local path/folder>
$ sudo apt-get install tcpflow
$ sudo tcpflow -p -c -i <netinterface> port <portnum>

# Example: tcpflow -p -c -i eth0 port 80
$ find <folderpath> -name <filemask> -exec <command> <extra parameters> {} \;
#Using dcm4che:

#capture:
$ ffmpeg -an -f video4linux2 -s 640x480  -r 30 -i /dev/video0 -vcodec mpeg4 -vtag DIVX my_test.avi

# convert:
$ jpg2dcm -c mpg2dcm.cfg -ts 1.2.840.10008.1.2.4.100 <mpegfile> <dcmfile>

//---------------------------------------------------------------------

#Send to pacs: dcmtk:
$ dcmsend -d -aec AETITLE <ip address> <dicom port> <dcmfile>

//---------------------------------------------------------------------

#Video props:

$ mplayer video.wmv -identify -vo null -ao null -frames 0 2>/dev/null | egrep "(^ID|VIDEO|AUDIO)"

//---------------------------------------------------------------------

# Use/compare mpg2dcm.config: (at DCM4CHE/BIN/JPG2DCM)

//---------------------------------------------------------------------

# jpg2dcm Sample Configuration for encapsulating MPEG2 MP@ML streams into
# DICOM Video Photographic Image objects
# (s. DICOM Part 3, A.32.7 Video Photographic Image IOD)
# Usage: jpg2dcm -c mpg2dcm.cfg -ts 1.2.840.10008.1.2.4.100 <mpegfile> <dcmfile>

# Patient Module Attributes
# Patient's Name
00100010:
# Patient ID
00100020:
# Issuer of Patient ID
#00100021:
# Patient's Birth Date
00100030:
# Patient's Sex
00100040:

# General Study Module Attributes
# Study Instance UID
#0020000D:
# Study Date
00080020:
# Study Time
00080030:
# Referring Physician's Name
00080090:
# Study ID
00200010:
# Accession Number
00080050:
# Study Description
#00081030:

# General Series Module Attributes
# Modality
00080060:XC
# Series Instance UID
#0020,000E:
# Series Number
00200011:1

# General Equipment Module Attributes
# Manufacturer
00080070:

# General Image Module Attributes
# Instance Number
00200013:1

# Cine Module Attributes
# Frame Time [525-line NTSC]
#00181063:33.33
# Frame Time [625-line PAL]
00181063:40.0
# Multiplexed Audio Channels Description Code Sequence
003A0300

# Multi-frame Module Attributes
#Number of Frames (use dummy value, if unknown)
00280008:1500
# Frame Increment Pointer
00280009:00181063

# Image Pixel Module Attributes (MUST be specified for encapsulating MPEG2 streams)
# (s. DICOM Part 5, 8.2.5 MPEG2 MP@ML IMAGE COMPRESSION for details)
# Samples per Pixel
00280002:3
# Photometric Interpretation
00280004:YBR_PARTIAL_420
# Planar Configuration
00280006:0
# Rows
00280010:480
# Columns
00280011:640
# Bits Allocated
00280100:8
# Bits Stored
00280101:8
# High Bit
00280102:7
# Pixel Representation
00280103:0

# Acquisition Context Module Attributes
# Acquisition Context Sequence
00400555

# VL Image Module Attributes
# Image Type
00080008:ORIGINAL\\PRIMARY
# Lossy Image Compression
00282110:01

# SOP Common Module Attributes
# SOP Class UID
00080016:1.2.840.10008.5.1.4.1.1.77.1.4.1
# SOP Instance UID
#00080018

#----------------------------------------------------------------------------
#convert video to frames:

$ ffmpeg -i test.mp4 -r 24 -f image2 test_files/%05d.png

#----------------------------------------------------------------------------
*> sudo visudo

#find 'root ALL(...' and append this line below:

www-data ALL=NOPASSWD:/usr/local/bin/myscript.sh

#Save

*> sudo cp myscript.sh /usr/local/bin/
*> sudo chmod 777 /usr/local/bin/myscript.sh

#at php script:

<?php

$cmd = shell_exec("/usr/local/bin/myscript.sh params");
echo $cmd;

?>
#Use udisks utility
#sudo apt-get install udisks

$> udisks --show-info /dev/sr0 | grep -c "blank: *1"

#this returns 0 if no blank disc is present, or 1 if a blank disc is present
#!/bin/bash
#
#/etc/init.d/oracledb
#
#Run-level Startup script for the Oracle Listener and Instances
#It relies on the information on /etc/oratab

export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=/u01/app/oracle/product/11.2.0/dbname_1
export ORACLE_OWNR=oracle
export PATH=$PATH:$ORACLE_HOME/bin

if [ ! -f $ORACLE_HOME/bin/dbstart -o ! -d $ORACLE_HOME ]
then
  echo "Oracle startup: cannot start"
  exit 1
fi

case "$1" in
  start)
    #Oracle listener and instance startup
    echo -n "Starting Oracle: "
    su $ORACLE_OWNR -c "$ORACLE_HOME/bin/lsnrctl start"
    su $ORACLE_OWNR -c "$ORACLE_HOME/bin/dbstart $ORACLE_HOME"
    touch /var/lock/oracle
    echo "OK"
    ;;
  stop)
    #Oracle listener and instance shutdown
    echo -n "Shutdown Oracle: "
    su $ORACLE_OWNR -c "$ORACLE_HOME/bin/lsnrctl stop"
    su $ORACLE_OWNR -c "$ORACLE_HOME/bin/dbshut $ORACLE_HOME"
    rm -f /var/lock/oracle
    echo "OK"
    ;;
  reload|restart)
    $0 stop
    $0 start
    ;;
  *)
    echo "Usage: `basename $0` start|stop|restart|reload"
    exit 1
esac

exit 0
##################################################
#!/bin/sh

#----------------------------------------------------------------
# Put this file at /usr/local/bin:
#
#     $> sudo cp verify_nr /usr/local/bin
#
# Set executing permissions:
#
#     $> sudo chmod +x /usr/local/bin/verify_nr
#
# Then create a crontab entry (every minute):
#
#     $> sudo crontab -e
#     #(Go to end and append:)
#     * * * * * /usr/local/bin/verify_nr
#     #(Save)
#----------------------------------------------------------------
SERVICE="nrservice"
if ps ax | grep -v grep | grep -v $0 | grep $SERVICE > /dev/null
then
    echo "$SERVICE service running, everything is fine" > /dev/null
else
    sudo service nrservice.sh restart
fi
$ sudo apt install dcmtk

#Service:

$ storescp -v +xa -pm +uf -fe .dcm -sp --fork -aet MARCO -od ./test_storescp 4006

#Store:

$ storescu -xs localhost 4006 dicom_file.dcm
#disable ping to your station (sudo must apply to the redirection, hence tee):

echo 1 | sudo tee /proc/sys/net/ipv4/icmp_echo_ignore_all

#enable ping back:

echo 0 | sudo tee /proc/sys/net/ipv4/icmp_echo_ignore_all
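
#to keep the setting across reboots (assuming a sysctl.d-based distro; the file name is ours):

echo 'net.ipv4.icmp_echo_ignore_all = 1' | sudo tee /etc/sysctl.d/99-no-ping.conf
sudo sysctl --system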
# list packages installed manually (not part of the initial install and not auto-installed):
$ comm -13 \
  <(gzip -dc /var/log/installer/initial-status.gz | sed -n 's/^Package: //p' | sort) \
  <(comm -23 \
    <(dpkg-query -W -f='${Package}\n' | sed 1d | sort) \
    <(apt-mark showauto | sort) \
  )
$> sudo apt-get install freetds-bin

#At Lazarus:
#Put a TZConnection component (ZConnection1) and set LibraryLocation as shown:

#  ZConnection1.LibraryLocation := 'libsybdb.so.5';

#  and we're done!
$> wget -qO- ipecho.net/plain
$> dig +short myip.opendns.com @resolver1.opendns.com
$> wget -qO- shtuff.it/myip/short
$> wget -qO- whatismyip.akamai.com
$> sudo crontab -e

#then add a line like this:

* * * * * find /path/to/files/ -type f -mtime +<n> -exec rm -rf {} \;

#Ex:
#Delete "*.txt" files older than 1 day from /tmp folder every day at 2:00am:

0 2 * * * find /tmp/* -type f -mtime +1 -exec rm {} \;       #files
0 2 * * * find /tmp/* -type d -mtime +1 -exec rm -rf {} \;   #folders
#Merge file1.pdf and file2.pdf into merged.pdf:

$> gs -dBATCH -dNOPAUSE -q -sDEVICE=pdfwrite -sOutputFile=merged.pdf file1.pdf file2.pdf
$> find ./ -name "<filename/wild cards>" | xargs grep -i "<text to find>"

#Ex:

$> find ./ -name "*.txt" | xargs grep -i "Examples"

#Find all text files (*.txt) containing text 'Examples' from current path (./) and inner.
$> ssh <remote user>@<remote server ip> [-p <remote ssh port>] -L <local port>:<internal target ip>:<internal target port> -fN

#Ex:
#Forward local port 9999 through myremoteserver.com to machine 192.168.0.1, port 80, on the remote network.

$> ssh operator@myremoteserver.com -L 9999:192.168.0.1:80 -fN

#So you can access: "http://localhost:9999/"
#This url will respond as it was "http://192.168.0.1:80/"
sudo ip route add <ip range> via <gateway ip address> dev <interface>
sudo ip addr flush dev <interface>
sudo /etc/init.d/networking restart

#Ex:

sudo ip route add 192.168.32.0/24 via 192.168.32.1 dev eth0
sudo ip addr flush dev eth0
sudo /etc/init.d/networking restart
#Setup the rate control and delay
sudo tc qdisc add dev lo root handle 1: htb default 12 
sudo tc class add dev lo parent 1:1 classid 1:12 htb rate 33kbps ceil 56kbps 
sudo tc qdisc add dev lo parent 1:12 netem delay 400ms
 
#Remove the rate control/delay
sudo tc qdisc del dev lo root
 
#To see what is configured on an interface, do this
sudo tc -s qdisc ls dev lo
 
#Replace lo with eth0/wlan0 to limit speed on the LAN interface
wkhtmltopdf <url1> <url2> ... <urln> <output-pdf-path-filename>
#install debian based:

sudo apt-get install nbtscan

#windows and others: download at http://www.unixwiz.net/tools/nbtscan.html

nbtscan 192.168.0.1-254 //IP Range
nbtscan 192.168.0.0/24  //whole C-class network
nbtscan 192.168.1.0/24  //whole C-class network
nbtscan 172.16.0.0/16   //Whole B-class network
nbtscan 10.0.0.0/8      //whole A-class network
$> sudo su

$> sync ; echo 1 > /proc/sys/vm/drop_caches
$> sync ; echo 2 > /proc/sys/vm/drop_caches
$> sync ; echo 3 > /proc/sys/vm/drop_caches
#install tools qemu-kvm (debian based distros)
$ sudo apt-get install qemu-kvm

#load module
$ sudo modprobe nbd

#create loopback dev for the image
$ sudo qemu-nbd -c /dev/nbd0 <path to virtual disk>.vdi

#mount the partitions, that are exposed as /dev/nbd0pXXX
$ sudo mount  -o noatime,noexec /dev/nbd0p1 /tmp/vdi/

#in the end unmount && shutdown the ndb
$ sudo umount /tmp/vdi/
$ sudo qemu-nbd -d /dev/nbd0
netsh routing ip nat add portmapping "<lan name>" tcp <caller ip> <listening port> <target ip> <target port>
#!/bin/sh

# get conda paths
export ACTIVATE_PATH=$CONDA_PREFIX/etc/conda/activate.d
export DEACTIVATE_PATH=$CONDA_PREFIX/etc/conda/deactivate.d
export ACTIVATE_SCRIPT=$ACTIVATE_PATH/env_vars.sh
export DEACTIVATE_SCRIPT=$DEACTIVATE_PATH/env_vars.sh

#delete existing activation and deactivation scripts
test -e $ACTIVATE_SCRIPT && rm $ACTIVATE_SCRIPT
test -e $DEACTIVATE_SCRIPT && rm $DEACTIVATE_SCRIPT

#create new activation script
mkdir -p $ACTIVATE_PATH
touch $ACTIVATE_SCRIPT
echo "#!/bin/sh" >> $ACTIVATE_SCRIPT
echo "export BELVO_SECRET_ID=\"$(op read "op://Personal/Belvo/add more/Secret ID")\"" >> $ACTIVATE_SCRIPT
echo "export BELVO_SECRET_PASSWORD=\"$(op read "op://Personal/Belvo/add more/Secret password")\"" >> $ACTIVATE_SCRIPT
echo "export CODA_API_KEY=\"$(op read "op://Personal/Coda/add more/automation")\"" >> $ACTIVATE_SCRIPT
echo "export GOOGLE_APPLICATION_CREDENTIALS=\"/Users/jmbenedetto/code/secrets/gcp_automation_service_account_key.json\"" >> $ACTIVATE_SCRIPT

#create deactivate script
mkdir -p $DEACTIVATE_PATH
touch $DEACTIVATE_SCRIPT
echo "#!/bin/sh" >> $DEACTIVATE_SCRIPT
echo "unset BELVO_SECRET_ID" >> $DEACTIVATE_SCRIPT
echo "unset BELVO_SECRET_PASSWORD" >> $DEACTIVATE_SCRIPT
echo "unset CODA_API_KEY" >> $DEACTIVATE_SCRIPT
echo "unset GOOGLE_APPLICATION_CREDENTIALS" >> $DEACTIVATE_SCRIPT
test -e ./file_path/file_name && echo 1 || echo 2
SINCE=`date --date '-2 weeks +2 days' +%F 2>/dev/null || date -v '-2w' -v '+2d' +%F`
bucket=<bucketname>
aws s3api list-objects-v2 --bucket "$bucket" \
    --query 'Contents[?LastModified > `'"$SINCE"'`]'
#!/usr/bin/python
# /usr/local/scripts/check_slave_status.py
import commands
import os
import time

for x in range(0, 4):
        status = commands.getoutput("mysql --login-path=statuser -sN -e \"show slave status\"")
#        SLACK_URL="https://hooks.slack.com/services/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"
        SLACK_URL="https://hooks.slack.com/services/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"       

        for row in status.split("\n"):
                SERVER_NAME = "mysql-in-servicecloud-consolidated-slave-01"
                SLACK_MESSAGE = "<!channel> Problem in \`[Azure] "+SERVER_NAME+" \`: "
                Slave_IO_Running = row.split("\t")[10]
                Slave_SQL_Running = row.split("\t")[11]
                Seconds_Behind_Master = row.split("\t")[32]
                if Slave_IO_Running.find("No")!=-1 or Slave_SQL_Running.find("No")!=-1 or int(Seconds_Behind_Master)>5:
                        SLACK_MESSAGE = SLACK_MESSAGE + "\`Slave_SQL_Running: "+Slave_SQL_Running+"\`; \`Slave_IO_Running: "+Slave_IO_Running+"\`; \`Seconds_Behind_Master: "+Seconds_Behind_Master+"\`"
                        os.system("curl -X POST --data \"payload={\'text\':\'"+SLACK_MESSAGE+"\', \'username\':\'gcp-watchman\', \'icon_emoji\':\':bangbang:\'}\" "+SLACK_URL)
                os.system("curl -i -XPOST 'http://gcp-in-int-grafana.onedirect.in:8086/write?db=collectd' --data-binary 'mysql_slave_lag,slave_name='"+SERVER_NAME+"' value='"+Seconds_Behind_Master+"''")
        time.sleep(10)
#!/usr/bin/env bash

/usr/bin/docker exec -it $(docker ps -q --filter ancestor=200890773558.dkr.ecr.ap-southeast-2.amazonaws.com/vtrack/web) bash
mydate=`date +"%m/%d/%Y -%H:%M:%S"`
current_time=$(date "+%Y.%m.%d-%H.%M.%S")
mytime=`date +%T`

USER=anirban
PW=Bose9711
filename="/var/lib/mysql-files/VFS_Ticket_data.csv"


rm -rf "/var/lib/mysql-files/VFS_Ticket_data.csv"
reportname="/tmp/VFS_Ticket_data_$current_time.csv"
mysql -u$USER -p$PW -e"call onedirect.get_export_to_excel_summary(8112,current_timestamp - interval 2 hour,current_timestamp)">/var/lib/mysql-files/VFS_Ticket_data.csv


mv /var/lib/mysql-files/VFS_Ticket_data.csv $reportname
echo " ****TRANSFER START**** "
echo $reportname
azcopy cp "$reportname" "https://prjdwuatadls.dfs.core.windows.net/vfsbiproject?sp=rwle&st=2022-08-05T05:47:28Z&se=2022-09-05T13:47:28Z&spr=https&sv=2021-06-08&sr=c&sig=GuyhDRcueFwQUdtL7%2FQ%2Bq5IdRFnd3QKpud1dusF%2Bu0E%3D"

echo " ****TRANSFER END**** "
grep -r <pattern> "dir/*/dir/dir/file"

or

grep -r <pattern> "*/dir/dir"

or

// generic
grep -r <pattern> *
//#########################################################################################//
/* -------------------------------------------------------------------

Name : Anon_Resampling

----------------------------------------------------------------------
Original Rule :	Replace with other values from the same domain:
1 - Table Name
2 - Field Name

-------------------------------------------------------------------*/

SUB Anon_Resampling (P_TABLENAME , P_FIELDNAME)


TRACE ##################################################;
TRACE ## Starting Function : Anon_Resampling  ##;
TRACE ## Anonymizing Field : $(P_FIELDNAME) #;
TRACE ##################################################;

//---------------------------------------//

[DistinctValues]:
Load Distinct 
[$(P_FIELDNAME)] as [OldDistinctValue],
RowNo() as [RowID],
Rand() as [Random]
Resident $(P_TABLENAME);

[AnonDistinctMapping]:
Mapping
Load
RowNo(),
[OldDistinctValue];
Load
[OldDistinctValue],
[Random]
Resident [DistinctValues]
Order By [Random];

[AnonDistinctValues]:
LOAD
*,
ApplyMap('AnonDistinctMapping',RowID,'Anon_Error') as [NewDistinctValue]
Resident DistinctValues;

Drop table DistinctValues;

[AnonMapping]:
Mapping
Load
[OldDistinctValue],
[NewDistinctValue]
Resident [AnonDistinctValues];

Drop table AnonDistinctValues;

[AnonValues]:
LOAD
*,
ApplyMap('AnonMapping',[$(P_FIELDNAME)],'Anon_Error') as [Anon_$(P_FIELDNAME)]
Resident $(P_TABLENAME);

Drop table $(P_TABLENAME);

Rename table AnonValues to $(P_TABLENAME);


END SUB

//#########################################################################################//
import pandas as pd
from codaio import Coda, Document, Cell

doc=Document.from_environment('XWykP50uN-')
transactions_table=doc.get_table('grid-bsHZ_AO1l5')

df_new=pd.DataFrame([
    {'Name':'Ricardo','transaction_id':'12dgt'},
    {'Name':'Manoel','transaction_id':'fklsod'},
])
df_new

mapping_dict={
    'Name':'Name',
    'transaction_id':'transaction_id'
}
all_data=[]
for i in range(len(df_new)):
    row_data=[]
    for j in range(len(df_new.columns)):
        row_data.append(Cell(column=mapping_dict[df_new.columns[j]],value_storage=df_new.iloc[i,j]))
    all_data.append(row_data)
transactions_table.upsert_rows(all_data)
#!/bin/sh

#create activate script
export ACTIVATE_PATH=$CONDA_PREFIX/etc/conda/activate.d
mkdir -p $ACTIVATE_PATH
touch $ACTIVATE_PATH/env_vars.sh
echo "#!/bin/sh" >> $ACTIVATE_PATH/env_vars.sh
echo "export VAR_NAME=\"VAR_VALUE\"" >> $ACTIVATE_PATH/env_vars.sh

#create deactivate script
export DEACTIVATE_PATH=$CONDA_PREFIX/etc/conda/deactivate.d
mkdir -p $DEACTIVATE_PATH
touch $DEACTIVATE_PATH/env_vars.sh
echo "#!/bin/sh" >> $DEACTIVATE_PATH/env_vars.sh
echo "unset VAR_NAME" >> $DEACTIVATE_PATH/env_vars.sh
Let vSource='lib://LoB demos:DataFiles/';
Let vDestination= 'lib://LoB demos:DataFiles/';

let vStoreType='qvd';

[Parameters]:
LOAD * INLINE [
    original_file_name, target_file_name
    Employee_Master, A001_Employee Master
	Employee All Regions, A001_Employee All Regions
	Employee Retention Predictions_v3, A001_Employee Retention Predictions
];

FOR i = 0 TO NoOfRows('Parameters') - 1
LET vOriginalFileName = peek('original_file_name', $(i), 'Parameters');
LET vTargetFileName = peek('target_file_name', $(i), 'Parameters');


[$(vOriginalFileName)]: LOAD * from [$(vSource)$(vOriginalFileName)] ($(vStoreType));
STORE [$(vOriginalFileName)] INTO [$(vDestination)$(vTargetFileName)] ($(vStoreType));
DROP TABLE [$(vOriginalFileName)];

NEXT i

exit Script
branchName=$(git branch --show-current)
baseURL="https://$branchName-bmc-org.pantheonsite.io/"
npx percy exec -- cypress run --config baseUrl=$baseURL
Let vSource='lib://LoB demos:DataFiles/';
Let vDestination= 'lib://LoB demos:DataFiles/';

let vExtensionSourceFile='.csv';
let vExtensionDestinationFile='.qvd';
let vStoreTypeSourceFile='txt';
let vStoreTypeDestinationFile='qvd';

[Parameters]:
LOAD * INLINE [
    original_file_name, target_file_name
    Employee_Master, A001_Employee Master
	Employee All Regions, A001_Employee All Regions
	Employee Retention Predictions_v3, A001_Employee Retention Predictions


];

FOR i = 0 TO NoOfRows('Parameters') - 1
LET vOriginalFileName = peek('original_file_name', $(i), 'Parameters');
LET vTargetFileName = peek('target_file_name', $(i), 'Parameters');


[$(vOriginalFileName)]: LOAD * from [$(vSource)$(vOriginalFileName)$(vExtensionSourceFile)] ($(vStoreTypeSourceFile));
STORE [$(vOriginalFileName)] INTO [$(vDestination)$(vTargetFileName)$(vExtensionDestinationFile)] ($(vStoreTypeDestinationFile));
DROP TABLE [$(vOriginalFileName)];

NEXT i

exit Script
// Table_Name is the table to iterate over
// it contains the rows whose field values we read with peek()
FOR i = 0 TO NoOfRows('Table_Name') - 1
// save the Field1 value of row i into vField
LET vField = peek('Field1', $(i), 'Table_Name');
NEXT i

[Parameters]:
LOAD * INLINE [
    Table_name, File_name
    Countries, AR_Countries V1
    Invoice Item Detail,AR_Invoice Item Detail V1
    Product Lines, AR_Product Lines V1
    Invoices, AR_Invoices V1        
    Items,AR_Items V1
    Comments,AR_Comments V1
    DSO,AR_DSO V1
    Link Table,AR_Link Table V1
    Subsidiaries,AR_Subsidiaries V1
    ExchangeRates,AR_ExchangeRates V1
    Accountants,AR_Accountants V1
];
conda env remove --name corrupted_env
#!/usr/bin/expect
set timeout 60
spawn ssh [lindex $argv 1]@[lindex $argv 0]
expect "*?assword" {
    send "[lindex $argv 2]\r"
    }
expect ":~$ " {
    send "
        mkdir -p /home/tools/baeldung/auto-test;
        cd /home/tools/baeldung/auto-test;
        tree
        sshpass -p 'Baels@123' scp -r tools@10.45.67.11:/home/tools/cw/baeldung/get_host_info.sh ./;
        tree
        bash get_host_info.sh\r"
    }
expect ":~$ " {
    send "exit\r"
    }
expect eof
# sudo keep-alive loop: refresh the sudo timestamp every 60s until this shell exits
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &
## starts with ## for comments
## starts with # for commented packages
## optional inline comment after package name with #

## resource monitors
dstat
iotop
sysstat # includes iostat
htop
ncdu
s-tui
ranger
## ranger dep https://github.com/ranger/ranger
python3-chardet
caca-utils
imagemagick
ffmpegthumbnailer
bat
atool
## atool depends on these already
# unrar
# p7zip-full
# unzip
lynx
w3m
elinks
poppler-utils
mupdf-tools
calibre
transmission-cli
mediainfo
libimage-exiftool-perl
odt2txt
jq
fontforge-nox
glances
## glances dep https://github.com/nicolargo/glances
python3-psutil
python3-bottle
hddtemp
python3-netifaces
python3-cpuinfo
python3-pysnmp4
python3-pystache
python3-zeroconf

## system
caffeine
gnome-shell-extensions
gnome-tweak-tool

## shell
neofetch
exa
openssh-server
mosh
tmux
tree
xsel
zsh
curl
git
hub # github

## command line utils
opencc
texlive # https://tex.stackexchange.com/a/504566/73420
lilypond
gitit
graphviz

## filesystem
cifs-utils
samba
# sshfs
zfsutils-linux
cryptsetup # for manually unlock full disk encrypted drives

## programming
cmake
mpich
parallel

## font
fonts-cwtex-kai
fonts-linuxlibertine

## hardware
gsmartcontrol
idle3-tools # WD Green HDD config
lm-sensors
psensor
smartmontools
vainfo # video acceleration
acpi
f3
fancontrol
hardinfo
input-utils # for lsinput

## GUI
keepassxc
chromium-browser
# google-chrome-stable # in pop OS's repo. May need more steps on Ubuntu: https://linuxhint.com/install_google_chrome_ubuntu_ppa/
kitty

## Video
ffmpeg
libbluray-bdj
kodi
vlc
mkvtoolnix # mkvinfo
mpv

## network
nmap
iperf3
wakeonlan
ifenslave
ethtool

## for sanoid
debhelper
libcapture-tiny-perl
libconfig-inifiles-perl
pv
lzop
mbuffer
{{ if ((.Values.foo).bar) }}
{{ .Values.foo.bar }}
{{ end }}
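
# why the extra parentheses: if .Values.foo is absent, {{ if .Values.foo.bar }}
# fails with a nil-pointer template error, while {{ if ((.Values.foo).bar) }}
# simply evaluates to false and the block is skipped.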
##install packages 
#step 1: check which version is required
#step 2: go to GitHub and use: curl -Lo [name of the package] [link]
#step 3: move the file to /usr/local/bin
#step 4: check the file's permissions using ls -al [file]

curl -Lo "deno.zip" "https://github.com/denoland/deno/releases/latest/download/deno-x86_64-unknown-linux-gnu.zip"

# Make file immutable
chattr +i filename

# Make file mutable
chattr -i filename
yarn add @babel/plugin-transform-exponentiation-operator --dev
npm install react-icons --save
function hex() {
    printf "%%%02x\n" "'$1"
}

hex -   # Outputs %2d
hex _   # Outputs %5f
hex .   # Outputs %2e
#!/bin/bash
if hash ntpdate 2>/dev/null; then
    ntpdate pool.ntp.org
else
    echo "'ntpdate' is not installed. Aborting..."; exit 1
fi
#!/bin/sh
set -e
 
echo "Deploying application ..."
 
# Enter maintenance mode
(php artisan down --message 'The app is being (quickly!) updated. Please try again in a minute.') || true
    # Update codebase
    git fetch origin deploy
    git reset --hard origin/deploy
 
    # Install dependencies based on lock file
    composer install --no-interaction --prefer-dist --optimize-autoloader
 
    # Migrate database
    php artisan migrate --force
 
    # Note: If you're using queue workers, this is the place to restart them.
    # ...
 
    # Clear cache
    php artisan optimize
 
    # Reload PHP to update opcache
    echo "" | sudo -S service php7.4-fpm reload
# Exit maintenance mode
php artisan up
 
echo "Application deployed!"
npx cap open ios #open the project in Xcode

npx cap open android #open the project in Android Studio
<dict>
+  <key>NSCameraUsageDescription</key>
+  <string>To be able to scan barcodes</string>
</dict>
<?xml version="1.0" encoding="utf-8"?>
<manifest
  xmlns:android="http://schemas.android.com/apk/res/android"
+  xmlns:tools="http://schemas.android.com/tools" <-- add this line, removing nothing and following this pattern

  package="com.example">

  <application
+    android:hardwareAccelerated="true" <-- add this line, removing nothing and following this pattern
  >
  </application>

+  <uses-permission android:name="android.permission.CAMERA" /><-- add this line, removing nothing and following this pattern

+  <uses-sdk tools:overrideLibrary="com.google.zxing.client.android" /><-- add this line, removing nothing and following this pattern
</manifest>
ionic build --prod

#to target Android, run the following commands:
npm install @capacitor/android
npx cap add android

#to target iOS, run the following commands:
npm install @capacitor/ios
npx cap add ios


#finally, run these last two commands

npx cap sync
npx cap copy android # or ios, depending on which you chose
...

<ion-content class="scanner-hide" *ngIf="scanStatus == false">
  <div class="padding-container center">
    <ion-button color="primary" (click)="scanCode()"><ion-icon slot="start" name="qr-code-outline"></ion-icon> Scan Code</ion-button> <!-- Button that calls the scanner function -->
  </div>
  <ion-card>
    <ion-card-content><h1>{{ result }}</h1></ion-card-content> <!-- shows the scan result -->
  </ion-card>
  
  <div class="scanner-ui"> <!-- While scanning, this class is used -->
    ...Scanner Interface
    </div>
    <div class="ad-spot"></div>
</ion-content>
...
import { BarcodeScanner } from '@capacitor-community/barcode-scanner';



...



export class HomePage {
  public scanStatus:boolean = false; // when the page loads, set the qr scan state to false
  public result:any;

  constructor() {}


  async scanCode () {

    this.setPermissions(); /* request camera permissions */
  
    BarcodeScanner.hideBackground(); // make the background transparent
    this.scanStatus = true; // setting this variable to true brings up the qr code scanner
    document.body.classList.add("qrscanner"); // add the css class we defined in the global stylesheet
    const result = await BarcodeScanner.startScan(); // start scanning and wait for a result
  
  // if the qr scanner detected something, run the code below
    if (result.hasContent) {

        
        this.scanStatus = false; // naturally, turn the scanner off once we have a result
        BarcodeScanner.stopScan(); // stop the scan
        this.result = result.content; // pass the result to the global result variable
        BarcodeScanner.showBackground(); // show the background again
        document.body.classList.remove("qrscanner"); // remove the css class we created in the global stylesheet
    
    }
  }

  async setPermissions(){
    const status = await BarcodeScanner.checkPermission({ force: true }); /* forces the permission prompt; if the user does not accept, the scanner will not work */
    if (status.granted) {
      // the user granted permission
      return true; // if the user accepted the permissions, return true
    }
  
      return false; // if the user did not accept, return false
  }
}
.scanner-ui { display: none; }
.scanner-hide { visibility: visible; }

body.qrscanner { background-color: transparent; }
body.qrscanner .scanner-ui { display: block; }
body.qrscanner .scanner-hide { visibility: hidden; }
ionic start qrcode blank --type=ionic-angular
#variables

nome = "Meu Nome" #whenever you put the value between "" the variable is of type string
#a string variable is a type of variable that holds text

idade = "28" #a string variable

x = 2
y = 5
#whenever you assign a numeric value without "" the variable becomes of type int
#int variables only accept whole numeric values; you cannot mix text in



#examples of what you cannot do
!var@ = 1 
#you cannot, in fact you can't even use punctuation in variable names

total = x + idade 
#you cannot combine different variable types in one
#that is, you cannot add the age "28", which is a string, to an int variable,
#which is specifically a number; it's as if that 28 were written as "twenty eight"





#returned results
print(x+y) #will print the value 7
- mkdir work_dir_company
- nano work_dir_company/.gitignore_company

```
[user]
        email = user@mail.com
        name = userName
[core]
        sshCommand = ssh -i ~/.ssh/id_ed25519_company
```

- nano ~/.gitconfig

```
[includeIf "gitdir:~/work_dir_company/"]
	path = ~/work_dir_company/.gitignore_company
[user]
        email = user@mail.com
        name = userName
[core]
        sshCommand = ssh -i ~/.ssh/id_ed25519_company
```

- Verify with `git config --list`
Settings | Tools | Python Integrated Tools | Docstring format
echo "$USER ALL=(ALL:ALL) NOPASSWD: ALL" | sudo tee "/etc/sudoers.d/dont-prompt-$USER-for-sudo-password"
#!/bin/bash

set -e

if [ -d ~/.local/share/JetBrains/Toolbox ]; then
    echo "JetBrains Toolbox is already installed!"
    exit 0
fi

echo "Start installation..."

wget --show-progress -qO ./toolbox.tar.gz "https://data.services.jetbrains.com/products/download?platform=linux&code=TBA"

TOOLBOX_TEMP_DIR=$(mktemp -d)

tar -C "$TOOLBOX_TEMP_DIR" -xf toolbox.tar.gz
rm ./toolbox.tar.gz

"$TOOLBOX_TEMP_DIR"/*/jetbrains-toolbox

rm -r "$TOOLBOX_TEMP_DIR"

echo "JetBrains Toolbox was successfully installed!"
# Reset
Color_Off='\033[0m'       # Text Reset
 
# Regular Colors
Black='\033[0;30m'        # Black
Red='\033[0;31m'          # Red
Green='\033[0;32m'        # Green
Yellow='\033[0;33m'       # Yellow
Blue='\033[0;34m'         # Blue
Purple='\033[0;35m'       # Purple
Cyan='\033[0;36m'         # Cyan
White='\033[0;37m'        # White
 
# Bold
BBlack='\033[1;30m'       # Black
BRed='\033[1;31m'         # Red
BGreen='\033[1;32m'       # Green
BYellow='\033[1;33m'      # Yellow
BBlue='\033[1;34m'        # Blue
BPurple='\033[1;35m'      # Purple
BCyan='\033[1;36m'        # Cyan
BWhite='\033[1;37m'       # White
 
# Underline
UBlack='\033[4;30m'       # Black
URed='\033[4;31m'         # Red
UGreen='\033[4;32m'       # Green
UYellow='\033[4;33m'      # Yellow
UBlue='\033[4;34m'        # Blue
UPurple='\033[4;35m'      # Purple
UCyan='\033[4;36m'        # Cyan
UWhite='\033[4;37m'       # White
 
# Background
On_Black='\033[40m'       # Black
On_Red='\033[41m'         # Red
On_Green='\033[42m'       # Green
On_Yellow='\033[43m'      # Yellow
On_Blue='\033[44m'        # Blue
On_Purple='\033[45m'      # Purple
On_Cyan='\033[46m'        # Cyan
On_White='\033[47m'       # White
 
# High Intensity
IBlack='\033[0;90m'       # Black
IRed='\033[0;91m'         # Red
IGreen='\033[0;92m'       # Green
IYellow='\033[0;93m'      # Yellow
IBlue='\033[0;94m'        # Blue
IPurple='\033[0;95m'      # Purple
ICyan='\033[0;96m'        # Cyan
IWhite='\033[0;97m'       # White
 
# Bold High Intensity
BIBlack='\033[1;90m'      # Black
BIRed='\033[1;91m'        # Red
BIGreen='\033[1;92m'      # Green
BIYellow='\033[1;93m'     # Yellow
BIBlue='\033[1;94m'       # Blue
BIPurple='\033[1;95m'     # Purple
BICyan='\033[1;96m'       # Cyan
BIWhite='\033[1;97m'      # White
 
# High Intensity backgrounds
On_IBlack='\033[0;100m'   # Black
On_IRed='\033[0;101m'     # Red
On_IGreen='\033[0;102m'   # Green
On_IYellow='\033[0;103m'  # Yellow
On_IBlue='\033[0;104m'    # Blue
On_IPurple='\033[0;105m'  # Purple
On_ICyan='\033[0;106m'    # Cyan
On_IWhite='\033[0;107m'   # White
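 
# A usage sketch (echo -e so the escapes are interpreted):

echo -e "${BGreen}OK${Color_Off} build succeeded"
echo -e "${BRed}FAIL${Color_Off} see the log for details"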
sudo apt install rename
rename 's/$/\.parquet/' *
#Enable the service that synchronizes the clock between the computer and internet time servers:

sudo systemctl enable systemd-timesyncd.service
# Then turn NTP synchronization on:
timedatectl set-ntp true
a=(a b c); x=`echo ${!a[@]}` ;echo ${x: -1} # ArrayMaxIdxNo
Press CTRL + V to enable Visual Block mode.

Using the up and down arrow key, highlight the lines you wish to comment out.

Once you have the lines selected, press the SHIFT + I keys to enter insert mode.

Enter your comment symbol, for example, the # sign, and press the ESC key. Vim will comment out all the highlighted lines.
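
Example: to comment out four lines with #, press CTRL + V, press the down arrow three times, press SHIFT + I, type #, and press ESC.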
sudo ufw allow 8000
sudo ufw delete allow 8000
sudo ufw allow 'Nginx Full'


sudo ufw status #check
zsh -xl
zsh -xl | tee output.file
grep -r "VARIABLE" /* [directories to search] | tee ~/[output file and path] 
timezsh() {
  shell=${1-$SHELL}
  for i in $(seq 1 10); do /usr/bin/time $shell -i -c exit; done
}
$ mkdir pytest_project
$ cd pytest_project
$ python3 -m venv pytest-env
# Download
# Create a folder
$ mkdir actions-runner && cd actions-runner
# Download the latest runner package
$ curl -o actions-runner-linux-x64-2.291.1.tar.gz -L https://github.com/actions/runner/releases/download/v2.291.1/actions-runner-linux-x64-2.291.1.tar.gz
# Optional: Validate the hash
$ echo "1bde3f2baf514adda5f8cf2ce531edd2f6be52ed84b9b6733bf43006d36dcd4c  actions-runner-linux-x64-2.291.1.tar.gz" | shasum -a 256 -c
# Extract the installer
$ tar xzf ./actions-runner-linux-x64-2.291.1.tar.gz
# Configure
# Create the runner and start the configuration experience
$ ./config.sh --url https://github.com/TousssaintThomas/wren.v1.0.0 --token AB7YEM2R2HZDVVBJ3VEFFLLCSI5U6
# Last step, run it!
$ ./run.sh
# Using your self-hosted runner
# Use this YAML in your workflow file for each job
# runs-on: self-hosted
kubectl get pods <-n namespace> <--all-namespaces> -o jsonpath="{.items[*].spec.containers[*].name}" |tr -s '[[:space:]]' '\n' |sort |wc -l

kubectl get pods <-n namespace> <--all-namespaces> -o jsonpath="{.items[*].spec.initContainers[*].name}" |tr -s '[[:space:]]' '\n' |sort |wc -l
while IFS= read -r line; do
    echo "Text read from file: $line"
done < my_filename.txt
curl -v -X GET "https://api-m.sandbox.paypal.com/v1/catalogs/products?page_size=2&page=1&total_required=true" \
-H "Content-Type: application/json" \
-H "Authorization: Bearer Access-Token"
npx wrangler init my-worker
# try it out

cd my-worker && npx wrangler dev
# and then publish it
npx wrangler publish
# visit https://my-worker.<your workers subdomain>.workers.dev
$ npm uninstall -g @cloudflare/wrangler
npx @11ty/eleventy
sudo apt update; sudo apt upgrade -y; sudo apt autoremove -y; clear;
#!/bin/bash
find $1 -type f -exec stat --format '%Y :%y %n' "{}" \; | sort -nr | cut -d: -f2- | head
# in source repo

git checkout -b <new branch> <source branch>

git filter-branch --subdirectory-filter <sub-directory path> -- --all

git remote add <remote name> <remote URL>
  
git fetch <remote name>
  
git push <-u> <new remote> <new branch>
  
# or merge unrelated history and then push

git merge <new remote>/<new branch> --allow-unrelated-histories
# sudo to root, then:
yum install tcpdump
tcpdump -s1500 -ieth0 -vv 'host <IPaddr>' 
i=$((i+1))

((i=i+1))

let "i=i+1"
echo ${SEMVER} | sed 's/\..*//'
if git show-ref --quiet <branch_name>; then
	echo branch exists
fi
// delete branch locally
git branch -d localBranchName

// delete branch remotely
git push origin --delete remoteBranchName
az role definition list --query "sort_by([].{Name:roleName,Id:name}, &Name)" --output table
# Raspberry Pi Tips & Tricks - https://raspberrytips.nl

import Adafruit_DHT

humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT11, 4)

if humidity is not None and temperature is not None:

  # round only after confirming the read succeeded (round(None) would raise)
  humidity = round(humidity, 2)
  temperature = round(temperature, 2)

  print 'Temperature: {0:0.1f}*C'.format(temperature)
  print 'Humidity: {0:0.1f}%'.format(humidity)

else:

  print 'No data received'
#!/bin/sh

curl -s https://status.slack.com/api/v2.0.0/current | \
  jq -r '"Status: " + (if (.status == "active") then "Active Incident" else "Ok" end),"Last Updated: " + .date_updated,if (.active_incidents[] | length) > 0 then "Active Incidents\n" + .active_incidents[] .title else "" end'
gcloud builds submit --pack image=us-central1-docker.pkg.dev/analytics-dev-308300/functions/talentcard-reports-to-landing-zone,env=GOOGLE_FUNCTION_TARGET=start
#!/bin/bash

set -eu -o pipefail # fail on error and report it, debug all lines

sudo -n true
test $? -eq 0 || { echo "you should have sudo privilege to run this script"; exit 1; }

echo installing the must-have pre-requisites
while read -r p ; do sudo apt-get install -y $p ; done < <(cat << "EOF"
    perl
    zip unzip
    exuberant-ctags
    mutt
    libxml-atom-perl
    postgresql-9.6
    libdbd-pgsql
    curl
    wget
    libwww-curl-perl
EOF
)

echo installing the nice-to-have pre-requisites
echo you have 5 seconds to proceed ...
echo or
echo hit Ctrl+C to quit
echo -e "\n"
sleep 6

sudo apt-get install -y tig
ssh -i mykeypair_openssh.ppk <user>@<host ip>
  
# with port
ssh -i mykeypair_openssh.ppk <user>@<host ip> -p 50055
puttygen ~/.ssh/my.ppk -O private-openssh -o ~/.ssh/my_openssh.ppk
sh -c "$(curl -fsSL https://raw.github.com/ohmyzsh/ohmyzsh/master/tools/install.sh)"

mkdir /mnt
mount /dev/vda /mnt
chroot /mnt

touch /etc/cloud/cloud-init.disabled

echo 'root:root' | chpasswd

ssh-keygen -f /etc/ssh/ssh_host_rsa_key -N '' -t rsa
ssh-keygen -f /etc/ssh/ssh_host_dsa_key -N '' -t dsa
ssh-keygen -f /etc/ssh/ssh_host_ed25519_key -N '' -t ed25519

cat <<EOF > /etc/netplan/01-dhcp.yaml 
network:
    version: 2
    ethernets:
        enp0s1:
            dhcp4: true
            addresses: [192.168.64.2/24]
            nameservers:
                addresses: [8.8.8.8, 8.8.4.4]    
EOF

exit
umount /dev/vda
curl -o initrd https://cloud-images.ubuntu.com/focal/current/unpacked/focal-server-cloudimg-arm64-initrd-generic
curl -o kernel.gz https://cloud-images.ubuntu.com/focal/current/unpacked/focal-server-cloudimg-arm64-vmlinuz-generic
gunzip kernel.gz
curl -o disk.tar.gz https://cloud-images.ubuntu.com/releases/focal/release/ubuntu-20.04-server-cloudimg-arm64.tar.gz
youtube-dl --merge-output-format mp4 --postprocessor-args "-strict experimental" -f "bestvideo+bestaudio/best" --embed-thumbnail --add-metadata [url]
if [[ $var ]]; then   # var is set and it is not empty
if [[ ! $var ]]; then # var is not set or it is set to an empty string
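# e.g. a minimal argument guard built from these tests (a sketch):
var="$1"
if [[ ! $var ]]; then
  echo "usage: $0 <argument>" >&2
  exit 1
fi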
wget https://repo.anaconda.com/miniconda/Miniconda3-py39_4.11.0-Linux-x86_64.sh
bash Miniconda3-py39_4.11.0-Linux-x86_64.sh
conda install mamba -n base -c conda-forge
from flask import Flask, jsonify, request

from cashman.model.expense import Expense, ExpenseSchema
from cashman.model.income import Income, IncomeSchema
from cashman.model.transaction_type import TransactionType

app = Flask(__name__)

transactions = [
  Income('Salary', 5000),
  Income('Dividends', 200),
  Expense('pizza', 50),
  Expense('Rock Concert', 100)
]


@app.route('/incomes')
def get_incomes():
  schema = IncomeSchema(many=True)
  incomes = schema.dump(
    filter(lambda t: t.type == TransactionType.INCOME, transactions)
  )
  # note: .data is marshmallow 2.x behavior; in marshmallow 3, dump() returns the data directly
  return jsonify(incomes.data)


@app.route('/incomes', methods=['POST'])
def add_income():
  income = IncomeSchema().load(request.get_json())
  transactions.append(income.data)
  return "", 204


@app.route('/expenses')
def get_expenses():
  schema = ExpenseSchema(many=True)
  expenses = schema.dump(
      filter(lambda t: t.type == TransactionType.EXPENSE, transactions)
  )
  return jsonify(expenses.data)


@app.route('/expenses', methods=['POST'])
def add_expense():
  expense = ExpenseSchema().load(request.get_json())
  transactions.append(expense.data)
  return "", 204


if __name__ == "__main__":
    app.run()
# start the cashman application
./bootstrap.sh &

# get incomes
curl http://localhost:5000/incomes

# add new income
curl -X POST -H "Content-Type: application/json" -d '{
  "description": "lottery",
  "amount": 1000.0
}' http://localhost:5000/incomes

# check if lottery was added
curl localhost:5000/incomes
#!/bin/sh
export FLASK_APP=./cashman/index.py
source $(pipenv --venv)/bin/activate
flask run -h 0.0.0.0
https://maltego-downloads.s3.us-east-2.amazonaws.com/linux/Maltego.v4.3.0.linux.zip
82FCD54A-74bf-2CE93-41d6-A4389de1bd2
npx tsc --init --rootDir src --outDir build \
--esModuleInterop --resolveJsonModule --lib es6 \
--module commonjs --allowJs true --noImplicitAny true
git remote set-url origin <new-url>
git push --set-upstream origin <branch-name>
pip freeze > requirements.txt
adduser USER_NAME
usermod -aG sudo USER_NAME

# Verify new user
grep '^sudo' /etc/group
npx create-html5-boilerplate new-site
#!/usr/bin/env bash

# install ZSH
sudo apt -y install zsh

# Install oh-my-zsh
git clone https://github.com/ohmyzsh/ohmyzsh.git ~/.oh-my-zsh

# Install some external plugins:
git clone https://github.com/zsh-users/zsh-autosuggestions ~/.oh-my-zsh/custom/plugins/zsh-autosuggestions
git clone https://github.com/zsh-users/zsh-completions ~/.oh-my-zsh/custom/plugins/zsh-completions
git clone https://github.com/zsh-users/zsh-syntax-highlighting.git ~/.oh-my-zsh/custom/plugins/zsh-syntax-highlighting

# Set Zsh as your default shell:
chsh -s /bin/zsh
#!/bin/bash

curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh

source $HOME/.cargo/env
#!/usr/bin/env bash

# Update the list of packages
sudo apt-get update

# Install pre-requisite packages.
sudo apt-get install -y wget apt-transport-https software-properties-common

# Download the Microsoft repository GPG keys
wget -q https://packages.microsoft.com/config/ubuntu/20.04/packages-microsoft-prod.deb

# Register the Microsoft repository GPG keys
sudo dpkg -i packages-microsoft-prod.deb

# Update the list of packages after we added packages.microsoft.com
sudo apt-get update

# Install PowerShell
sudo apt-get install -y powershell

# Start PowerShell
pwsh
#!/usr/bin/env bash

sudo apt-get -y update

# set config
export XDG_CONFIG_HOME="$HOME/.config"

# install nvm
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.0/install.sh | bash

export NVM_DIR="$XDG_CONFIG_HOME/nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"  # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion"  # This loads nvm bash_completion

# config
nvm alias default node # Always default to the latest available node version on a shell
nvm set-colors "yMeBg"

# install latest node and npm
nvm install node --latest-npm
nvm install-latest-npm

nvm use node
npm install -g npm

# setup npm
npm login
npm install -g eslint jshint prettier yarn npm-check doctoc tldr speedtest-cli serve
#!/bin/bash

wget https://developer.download.nvidia.com/compute/cuda/repos/wsl-ubuntu/x86_64/cuda-wsl-ubuntu.pin
sudo mv cuda-wsl-ubuntu.pin /etc/apt/preferences.d/cuda-repository-pin-600
wget https://developer.download.nvidia.com/compute/cuda/11.5.1/local_installers/cuda-repo-wsl-ubuntu-11-5-local_11.5.1-1_amd64.deb
sudo dpkg -i cuda-repo-wsl-ubuntu-11-5-local_11.5.1-1_amd64.deb
sudo apt-key add /var/cuda-repo-wsl-ubuntu-11-5-local/7fa2af80.pub
sudo apt-get -y update 
sudo apt-get -y install cuda
#!/bin/bash

curl -fsSL https://deb.nodesource.com/setup_17.x | sudo -E bash -
sudo apt-get install -y nodejs
#!/usr/bin/env sh

sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y autoremove
sudo apt-get -y install neovim
sudo apt-get install -y python-neovim python3-neovim

mkdir -p ~/.dotfiles/neovim/.config/nvim
touch ~/.dotfiles/neovim/.config/nvim/init.vim

printf 'set runtimepath^=~/.vim runtimepath+=~/.vim/after\nlet &packpath = &runtimepath\nsource ~/.vimrc\n' >> ~/.dotfiles/neovim/.config/nvim/init.vim
cd ~/.dotfiles
stow neovim
cd ~
#!/usr/bin/env bash

# update and ensure build-essentials/git
sudo apt update
sudo apt-get install build-essential curl file git

# install homebrew
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"

# add to PATH
echo 'eval "$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)"' >> /home/jimbrig/.profile
eval "$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)"
source ~/.profile

# test
test -d /home/linuxbrew/.linuxbrew && eval $(/home/linuxbrew/.linuxbrew/bin/brew shellenv)
test -r ~/.bash_profile && echo "eval \$($(brew --prefix)/bin/brew shellenv)" >>~/.bash_profile
echo "eval \$($(brew --prefix)/bin/brew shellenv)" >>~/.profile

brew doctor

# gcc installation
brew install gcc

# initial installations
brew install topgrade git-crypt git-cliff
#!/bin/bash

wget https://release.gitkraken.com/linux/gitkraken-amd64.deb
sudo dpkg -i ./gitkraken-amd64.deb
sudo apt-get install -f
gitkraken
#!/usr/bin/env bash

# install github-cli
VERSION=`curl  "https://api.github.com/repos/cli/cli/releases/latest" | grep '"tag_name"' | sed -E 's/.*"([^"]+)".*/\1/' | cut -c2-`
echo $VERSION
mkdir ~/downloads
curl -sSL https://github.com/cli/cli/releases/download/v${VERSION}/gh_${VERSION}_linux_amd64.tar.gz -o ~/downloads/gh_${VERSION}_linux_amd64.tar.gz
cd ~/downloads
tar xvf gh_${VERSION}_linux_amd64.tar.gz
sudo cp gh_${VERSION}_linux_amd64/bin/gh /usr/local/bin/
gh version
sudo cp -r ~/downloads/gh_${VERSION}_linux_amd64/share/man/man1/* /usr/share/man/man1/
# man gh
gh auth login

rm -r ~/downloads
#!/usr/bin/env bash

# install cargo
sudo apt-get update -y
sudo apt-get install -y cargo

printf '\n# Add .cargo to $PATH\nexport PATH="$HOME/.cargo/bin:$PATH"\n' >> ~/.zshrc

cargo install cargo-update
#!/usr/bin/env bash

curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
az --version
az login
#!/usr/bin/env bash

# Install R on WSL
sudo apt-get update -qq -y
sudo apt-get install -y wget git
OS_DISTRIBUTION=$(lsb_release -cs)
wget -O- http://neuro.debian.net/lists/${OS_DISTRIBUTION}.us-nh.full | sudo tee /etc/apt/sources.list.d/neurodebian.sources.list
sudo apt-key adv --recv-keys --keyserver hkp://pool.sks-keyservers.net:80 0xA5D32F012649A5A9
sudo apt-get update

sudo apt-get install libopenblas-base r-base
sudo apt-get update -qq -y
sudo apt-get install -y libgit2-dev
sudo apt-get install -y libcurl4-openssl-dev libssl-dev
sudo apt-get install -y zlib1g-dev libssh2-1-dev libpq-dev libxml2-dev
curl -X POST -H "Content-Type: application/json" -H "x-api-key: YOUR_API_KEY" -d "{ 'name': 'Homer Simpson', 'email': 'homer@gmail.com', 'customer_id': '1234' }" https://api.trysend.com/customers
dmesg | grep "Intel Corporation" -A 1 | grep SerialNumber
#!/bin/bash
File="domainList.txt"
while IFS= read -r line
do
  sed "s/foo.com/$line/g" sample.yml > /path_to_dir/dir_name/"$line".yml
  echo "domain: $line"
done < "$File"
#!/usr/bin/env bash
#
# Returns Magento document root if sourced or prints if launched as standalone
#

mageRoot="$(dirname $(realpath "${0}"))"

while [[ ! -e ${mageRoot}/app/Mage.php && ! -e ${mageRoot}/bin/magento ]]
do
    mageRoot="$(dirname $(realpath ${mageRoot}))"
done

return "${mageRoot}" 2>/dev/null || printf '%s' "${mageRoot}" && exit
sudo apt-get -y install xfce4 && sudo apt-get -y install xubuntu-desktop

sudo apt-get -y install xrdp

echo xfce4-session > ~/.xsession

sudo service xrdp restart

ifconfig | grep inet

# Then connect to the IP that is returned by the last command
LOG=nightly-`date '+%Y-%m-%d_%H:%M:%S'`.log

rsync -av --delete --exclude '$RECYCLE.BIN' --exclude 'System Volume Information' --exclude 'found.000' --exclude 'Recovery' $SOURCE $DESTINATION | tee ~/logs/$LOG
wsl --shutdown
diskpart
# the following commands run inside the diskpart prompt
select vdisk file="C:\WSL-Distros\…\ext4.vhdx"
attach vdisk readonly
compact vdisk
detach vdisk
exit
# Stopping Zigbee2MQTT
sudo systemctl stop zigbee2mqtt

# Starting Zigbee2MQTT
sudo systemctl start zigbee2mqtt

# View the log of Zigbee2MQTT
sudo journalctl -u zigbee2mqtt.service -f
function show_colors() {
  color=16;

  while [ $color -lt 245 ]; do
    echo -e "$color: \\033[38;5;${color}mhello\\033[48;5;${color}mworld\\033[0m"
    ((color++));
  done
}
function colorgrid() {
    iter=16
    while [ $iter -lt 52 ]
    do
        second=$((iter+36))
        third=$((second+36))
        four=$((third+36))
        five=$((four+36))
        six=$((five+36))
        seven=$((six+36))
        if [ $seven -gt 250 ]; then seven=$((seven-251)); fi

        echo -en "\033[38;5;${iter}m█ "
        printf "%03d" $iter
        echo -en "   \033[38;5;${second}m█ "
        printf "%03d" $second
        echo -en "   \033[38;5;${third}m█ "
        printf "%03d" $third
        echo -en "   \033[38;5;${four}m█ "
        printf "%03d" $four
        echo -en "   \033[38;5;${five}m█ "
        printf "%03d" $five
        echo -en "   \033[38;5;${six}m█ "
        printf "%03d" $six
        echo -en "   \033[38;5;${seven}m█ "
        printf "%03d" $seven

        iter=$((iter+1))
        printf '\r\n'
    done
}
git stash                       # skip if all changes are committed
git branch my_feature
git reset --hard origin/master
git checkout my_feature
git stash pop                   # skip if all changes were committed
<a class="twitter-share-button"
  href="https://twitter.com/intent/tweet"
  data-size="large"
  data-text="custom share text"
  data-url="https://dev.twitter.com/web/tweet-button"
  data-hashtags="example,demo"
  data-via="twitterdev"
  data-related="twitterapi,twitter">
Tweet
</a>
echo n > /sys/class/backlight/rpi_backlight/brightness
sudo bash -c "echo 0 > /sys/class/backlight/rpi_backlight/brightness" # if permission denied in above line
#### FIRST ####
sudo nano ~/.bash_profile

#Add this in your .bash_profile
if [ -r ~/.bashrc ]; then
   source ~/.bashrc
fi

#### SECOND ####
sudo nano ~/.bashrc
    
#Add this in your .bashrc
alias sail='bash vendor/bin/sail'
alias composer="/Users/username/composer.phar"
    
#!/bin/bash
# Bash Menu Script Example

PS3='Please enter your choice: '
options=("Option 1" "Option 2" "Option 3" "Quit")
select opt in "${options[@]}"
do
    case $opt in
        "Option 1")
            echo "you chose choice 1"
            ;;
        "Option 2")
            echo "you chose choice 2"
            ;;
        "Option 3")
            echo "you chose choice $REPLY which is $opt"
            ;;
        "Quit")
            break
            ;;
        *) echo "invalid option $REPLY";;
    esac
done
@echo off

SET DOCKER=docker

SET CONTAINER_NAME=linux_sandbox
SET IMAGE_TO_USE=centos:latest
SET IMAGE_TO_USE_SANDBOX=sandbox:%CONTAINER_NAME%

echo Container - %CONTAINER_NAME%
echo Select an Option to Continue:
echo [0] - Container - Create
echo [1] - Container - Start
echo [2] - Container - Stop
echo [3] - Container - Terminal
echo [4] - Container - Destroy

set /p CHOICE="Enter Selection: "

IF "%CHOICE%" == "0" (
%DOCKER% pull "%IMAGE_TO_USE%"

start /MIN "" "%DOCKER%" run -it --privileged --name %CONTAINER_NAME% %IMAGE_TO_USE% bash

timeout 10

%DOCKER% exec -it %CONTAINER_NAME% bash -c "yum -y update; yum clean all"
%DOCKER% exec -it %CONTAINER_NAME% bash -c "yum -y install openssh-server passwd; yum clean all"
%DOCKER% exec -it %CONTAINER_NAME% bash -c "mkdir /var/run/sshd"
%DOCKER% exec -it %CONTAINER_NAME% bash -c "ssh-keygen -t rsa -f /etc/ssh/ssh_host_rsa_key -N ''"
%DOCKER% exec -it %CONTAINER_NAME% bash -c "echo 'root:password' | chpasswd"

%DOCKER% commit %CONTAINER_NAME% %IMAGE_TO_USE_SANDBOX%
%DOCKER% stop %CONTAINER_NAME%
%DOCKER% rm %CONTAINER_NAME%

%DOCKER% run -d --privileged --name %CONTAINER_NAME% -p "22:22" %IMAGE_TO_USE_SANDBOX% /usr/sbin/sshd -D
)

IF "%CHOICE%" == "1" (
%DOCKER% start %CONTAINER_NAME%
)

IF "%CHOICE%" == "2" (
%DOCKER% stop %CONTAINER_NAME%
)

IF "%CHOICE%" == "3" (
%DOCKER% exec -it %CONTAINER_NAME% /bin/bash
)

IF "%CHOICE%" == "4" (
%DOCKER% stop %CONTAINER_NAME%
%DOCKER% rm %CONTAINER_NAME%
%DOCKER% rmi %IMAGE_TO_USE_SANDBOX%
)

pause
2 steps:
1. Ctrl+W
2. Ctrl+V
# WSL2 network port forwarding script v1
#   To enable scripts, run 'Set-ExecutionPolicy -ExecutionPolicy Bypass -Scope CurrentUser' in PowerShell.
#   Pass 'delete' as a parameter to remove existing rules and ports; pass 'list' to show the ports.
#   written by Daehyuk Ahn, Aug-1-2020

# Display all portproxy information
If ($Args[0] -eq "list") {
    netsh interface portproxy show v4tov4;
    exit;
} 

# If elevation needed, start new process
If (-NOT ([Security.Principal.WindowsPrincipal] [Security.Principal.WindowsIdentity]::GetCurrent()).IsInRole([Security.Principal.WindowsBuiltInRole]::Administrator))
{
  # Relaunch as an elevated process:
  Start-Process powershell.exe "-File",('"{0}"' -f $MyInvocation.MyCommand.Path),"$Args runas" -Verb RunAs
  exit
}

# You should modify '$Ports' for your applications 
$Ports = (22,80,443,8080)

# Check WSL ip address
wsl hostname -I | Set-Variable -Name "WSL"
$found = $WSL -match '\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}';
if (-not $found) {
  echo "WSL2 cannot be found. Terminate script.";
  exit;
}

# Remove and Create NetFireWallRule
Remove-NetFireWallRule -DisplayName 'WSL 2 Firewall Unlock';
if ($Args[0] -ne "delete") {
  New-NetFireWallRule -DisplayName 'WSL 2 Firewall Unlock' -Direction Outbound -LocalPort $Ports -Action Allow -Protocol TCP;
  New-NetFireWallRule -DisplayName 'WSL 2 Firewall Unlock' -Direction Inbound -LocalPort $Ports -Action Allow -Protocol TCP;
}

# Add each port into portproxy
$Addr = "0.0.0.0"
Foreach ($Port in $Ports) {
    iex "netsh interface portproxy delete v4tov4 listenaddress=$Addr listenport=$Port | Out-Null";
    if ($Args[0] -ne "delete") {
        iex "netsh interface portproxy add v4tov4 listenaddress=$Addr listenport=$Port connectaddress=$WSL connectport=$Port | Out-Null";
    }
}

# Display all portproxy information
netsh interface portproxy show v4tov4;

# Give the user a chance to see the list above when the script was relaunched elevated
If ($Args[0] -eq "runas" -Or $Args[1] -eq "runas") {
  Write-Host -NoNewLine 'Press any key to close! ';
  $null = $Host.UI.RawUI.ReadKey('NoEcho,IncludeKeyDown');
}
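# Usage (a sketch; assumes the script is saved as wsl2-ports.ps1 -- the filename is illustrative):
#   .\wsl2-ports.ps1          # add the firewall rules and port proxies
#   .\wsl2-ports.ps1 list     # show current portproxy rules
#   .\wsl2-ports.ps1 delete   # remove the rules and port proxies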
ssh-keygen
cat ~/.ssh/id_rsa.pub
git checkout receiving-branch
git merge --squash branch-to-squash
git commit -m 'commit message'
git push
git stash --include-untracked # stashing with untracked files
git stash push -m "stash name" # name stash
git stash list # list stashes
git stash pop stash@{n} # pop stash
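git stash apply stash@{n} # apply a stash without dropping it from the list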
python3 manage.py runserver
heroku run python manage.py db upgrade --app name_of_your_application # upgrade db to heroku
heroku local # run locally on heroku and debug
ocrmypdf "$1" "$1"
$ find . -path './test' -prune -o -name 'file_*' -print
docker run -it -v /var/run/docker.sock:/var/run/docker.sock --volume=/Users/steve/dev/my-project:/my-project --workdir="/node-api" --memory=2g --memory-swap=2g --memory-swappiness=0 --entrypoint=/bin/bash node:9.3.0



set -x
VER="17.12.0-ce"
curl -L -o /tmp/docker-$VER.tgz https://download.docker.com/linux/static/stable/x86_64/docker-$VER.tgz
tar -xz -C /tmp -f /tmp/docker-$VER.tgz
mv /tmp/docker/* /usr/bin
for f in * ; do mv -- "$f" "my-prefix-$f" ; done
Normal Version: 
echo 'nice12343game' | sed -n 's/nice\(.*\)game/\1/p'

Jenkins Version:
sed -n 's/.*exited with code \\(.*\\)/\\1/p' stdout
find . -iname '*.jpg' -exec convert \{} -verbose -sampling-factor 4:2:0 -strip -quality 85\> \{} \;
find . -iname '*.jpg' -exec convert \{} -verbose -resize 400x400\> \{} \;
$fire.auth().createUserWithEmailAndPassword('email', 'password')
# Accenture - 573.0, ACN
# Microsoft - 20601.0, MSFT
# SalesForce - 69444.0, CRM
# Apple - 2355.0, AAPL
# Alphabet - 116351.0, GOOGL
# Amazon - 1703.0, AMZN
# Tesla - 1650150.0, TSLA
# Facebook - 1820060.0, FB
# Adobe - 808.0, ADBE
# Oracle - 23295.0, ORCL

parm = {'boardids': (573.0, 20601.0, 69444.0, 2355.0, 116351.0, 1703.0, 1650150.0, 1820060.0, 808.0, 23295.0)}

# query
comp_net = conn.raw_sql('SELECT * FROM boardex.na_wrds_company_networks WHERE boardid in %(boardids)s limit 100000', params=parm)
import re
bods = conn.get_table('boardex','na_wrds_company_names', columns=['boardid', 'boardname','ticker'])
acn_ser = bods['boardname'].str.contains('accenture', regex=True)
count = 0
for i in acn_ser:
  if i:  # pandas/numpy booleans are not the built-in True, so test truthiness instead of 'is True'
    count += 1
print(count)
ssh suibhne@wrds-cloud.wharton.upenn.edu
CcSgD.V:grKp7Ct
scp -r <username>@<host>:/path/to/source/file /home/user/Desktop/local
Get-Content ~/.ssh/id_rsa.pub | Set-Clipboard
defaults write com.apple.finder AppleShowAllFiles TRUE
killall Finder
defaults write com.apple.dashboard mcx-disabled -boolean YES
killall Dock
defaults write com.apple.screencapture location <location>
killall SystemUIServer
kill -9 $(lsof -ti tcp:8080)
# folder: .idea
git rm --cached -r .idea
# file: myfile.log
git rm --cached myfile.log
rm -r Directory/
rm -rf Directory/ # -f will ignore file permissions
rm index.html app.js
sudo find / -name "libgsl.so.0"
LD_LIBRARY_PATH=$LD_LIBRARY_PATH:<Location>
export LD_LIBRARY_PATH
samtools view -b -F 4 file.bam > mapped.bam
seqkit head -n 100000 input.fa
seqkit range -r 1:100000 input.fa
samtools view -b -f 4 sample.bam > sample.unmapped.bam
if [ "$#" -ne 1 ]
then
  echo "Usage: ..."
  exit 1
fi
# List all networks a container belongs to
docker inspect -f '{{range $key, $value := .NetworkSettings.Networks}}{{$key}} {{end}}' [container]
# List all containers belonging to a network by name
docker network inspect -f '{{range .Containers}}{{.Name}} {{end}}' [network]
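# Related: attach or detach a running container to/from a network
docker network connect [network] [container]
docker network disconnect [network] [container]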
curl -fsSL https://deb.nodesource.com/setup_current.x | sudo -E bash -
sudo apt-get install -y nodejs
use Drupal\Core\DrupalKernel;
use Symfony\Component\HttpFoundation\Request;

if (PHP_SAPI !== 'cli') {
  return;
}

if (version_compare(PHP_VERSION, '5.4.5') < 0) {
  $version = PHP_VERSION;
  echo <<<EOF

ERROR: This script requires at least PHP version 5.4.5. You invoked it with
       PHP version {$version}.
\n
EOF;
  exit;
}
Esc # exit insert mode
:wq | :x # write and quit
gunzip -c /Users/sethshapiro/Downloads/20210305-KochavaTransactions000000000000.gz | awk 'NR==1 {print; exit}'
#!/bin/bash

if [ ! -f composer.json ]; then
    echo "Please make sure to run this script from the root directory of this repo."
    exit 1
fi

composer install
cp .env.example .env
php artisan key:generate
source "$(dirname "$0")/checkout_latest_docs.sh"
npm install
npm run dev
/home/rezaeir/canu-2.1.1/bin/canu -p lambda -d lambda-assembly genomeSize=50000 -nanopore-raw lambda_subsample.fastq.gz
rasusa --input control_lambda_7_2_2021.fastq.gz --coverage 30 --genome-size 50000 --output lambda_subsample.fastq.gz
cat *.fastq.gz > control_lambda_7_2_2021.fastq.gz
./guppy_basecaller --compress_fastq -i <input dir> -s <output dir> --flowcell FLO-MIN106 --kit SQK-LSK109 -x "cuda:0" --gpu_runners_per_device 4 --num_callers 4 --chunks_per_runner 2048 --barcode_kits "EXP-NBD104" --trim_barcodes
for file in *.bam
do
file=${file%.bam}
samtools sort -@ 14 $file.bam > $file.sorted.bam
samtools index -@ 14 -b $file.sorted.bam
samtools idxstats -@ 14 $file.sorted.bam > $file.idxstats.txt
done
#!/usr/bin/env python3
import sys
import subprocess

# Rename each file listed in the input file, replacing "jane" with "jdoe"
with open(sys.argv[1]) as f:
    for line in f:
        old_name = line.strip()
        new_name = old_name.replace("jane", "jdoe")
        subprocess.run(["mv", old_name, new_name])
#!/bin/bash

>oldFiles.txt
files=$(grep " jane " ../data/list.txt | cut -d ' ' -f 3)
for file in $files; do
        if [ -e "$HOME$file" ]; then
                echo "$HOME$file" >> oldFiles.txt
        fi
done
#!/bin/bash

for logfile in /var/log/*log; do
    echo "Processing: $logfile"
    # split each log line on spaces and keep everything from field five onwards
    cut -d" " -f5- "$logfile" | sort | uniq -c | sort -nr | head -5
done
#!/bin/bash

for file in *.HTM; do
  name=$(basename "$file" .HTM) # surround the filename with double quotes to account for file names with spaces
  echo mv "$file" "$name.html" # put echo in front when testing to see what the program would do without actually changing anything yet
done
#!/bin/bash

for fruit in peach orange apple; do # we represent a list in Bash by simply listing values/variables with spaces in between
  echo "I like $fruit!"
done
#!/bin/bash

n=1 # in Bash, there are no spaces allowed when declaring variables
while [ $n -le 5 ]; do # the "[ condition ]"-syntax is equivalent to the "test" command
  echo "Iteration number $n"
  ((n+=1)) # in Bash, we use double parentheses to perform arithmetic operations on variables
done
#!/bin/bash

echo "Starting at: $(date)" # the dollar sign tells the program to execute this file inside of the string and then convert the output of it into a string
echo # print empty line

# add separating line
line="-----------------------------------"

echo "UPTIME"
uptime
echo $line

echo "FREE"
free
echo $line

echo "WHO"; echo; free # you can write commands on the same line separating them by semicolons

echo "Finishing at: $(date)"
# print message
echo "Test!"

# create new directory
mkdir new_directory

# change directory
cd new_directory

# print current working directory
pwd

# copy file
cp ../spider.txt .

# create empty file
touch myfile.txt

# list files and directories in current directory (with additional information using -l, including hidden files -a)
ls
ls -l
ls -la

# combine ls via a pipe to the less command to show only so many entries at a time
ls -l | less # you can quit with "q"

# rename a file
mv myfile.txt emptyfile.txt

# delete files (careful: * removes every file in the current directory)
rm *
  
# delete an empty directory
rmdir new_directory/
  
# list all running process on the computer
ps -ax
ps -ax | grep ping # filter process names through grep

# kill a process
kill 4619 # where 4619 is the process ID (PID)
ffmpeg -i input.mp4 -vcodec libx265 -crf 28 output.mp4 
ffmpeg -i input.mkv -codec copy output.mp4
$ find /home/sk/ostechnix/ -type f -printf '%T+ %p\n' | sort | head -n 1
sudo service postgresql status # check db status
sudo service postgresql start # start running db
sudo service postgresql stop # stop running db
ffmpeg -ss 6.0 -t 70.0 -i /path/to/file.mp4 -filter_complex "[0:v] fps=12,scale=w=640:h=-1,setpts=0.5*PTS,split [a][b];[a] palettegen [p];[b][p] paletteuse" /path/to/output.gif
#!/bin/bash
#PBS -l nodes=1:ppn=16,walltime=0:00:59
#PBS -l mem=62000mb
#PBS -m abe

bar=${foo}
echo "${bar}"


qsub -v foo='qux' myRunScript.sh

git push heroku master
heroku addons:create heroku-postgresql:hobby-dev # create postgres db in heroku app
heroku run python # run python repl with heroku
from app import db
db.create_all()
curl https://raw.githubusercontent.com/TheRemote/PiBenchmarks/master/Storage.sh | sudo bash
#!/usr/bin/env bash

set -e

# Dotfiles' project root directory
ROOTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Host file location
HOSTS="$ROOTDIR/hosts"
# Main playbook
PLAYBOOK="$ROOTDIR/dotfiles.yml"

# Installs ansible
apt-get update && apt-get install -y ansible

# Runs Ansible playbook using our user.
ansible-playbook -i "$HOSTS" "$PLAYBOOK" --ask-become-pass

exit 0
# login/ssh to the machine that should host the webserver, then run:
> frecklecute hello-world.frecklet --domain example.com

# or, install the remote target machine from your local session:
> frecklecute --target admin@example.com \
      hello-world.frecklet --domain example.com
# configuration, save as: hello-world.frecklet

- static-website-from-folder:
    hostname: "{{:: domain ::}}"     # vhost config
    path: /var/www/html
    webserver: apache
    use_https: true
    server_admin: hello@example.com  # for Let's Encrypt
- file-with-content:
    owner: www-data
    path: /var/www/html/index.html
    content: |
      <h1><i>{{:: domain ::}}</i> says "hello", World!</h1>
pip3 install --trusted-host pypi.org --trusted-host files.pythonhosted.org flask-wtf
sudo mkdir /mnt/z
sudo mount -t drvfs Z: /mnt/z
git branch --merged | egrep -v "(^\*|release|dev)" | xargs git branch -d
# Set the URL for your scoped packages.
# For example, a package named `@my-org/bar` will use this URL for download
npm config set @my-org:registry https://private-gitlab.com/api/v4/projects/<your_project_id>/packages/npm/

# Add the token for the scoped packages URL. Replace <your_project_id>
# with the project where your package is located.
npm config set '//private-gitlab.com/api/v4/projects/<your_project_id>/packages/npm/:_authToken' "<your_token>"
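# afterwards, installs in that scope resolve against the private registry
# (the package name is illustrative):
npm install @my-org/bar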
defaults write com.apple.dt.Xcode IDEAdditionalCounterpartSuffixes -array-add "Router" "Interactor" "Builder" && killall Xcode
defaults write com.apple.dt.Xcode IDEAdditionalCounterpartSuffixes -array-add "ViewModel" "View" && killall Xcode
defaults write com.apple.screencapture include-date -bool true
wget https://raw.githubusercontent.com/composer/getcomposer.org/76a7060ccb93902cd7576b67264ad91c8a2700e2/web/installer -O - -q | php -- --quiet
sudo nano /etc/default/grub

# Find the line GRUB_CMDLINE_LINUX_DEFAULT="quiet splash" and replace it with: GRUB_CMDLINE_LINUX_DEFAULT="quiet splash intel_idle.max_cstate=1"
# Save it (CTRL+O)

sudo update-grub
sudo reboot
firebase functions:config:set stripe.secret="STRIPE_SECRET_KEY_HERE"
// BASH
mkdir src
mkdir build
touch src/index.ts
touch .gitignore
touch README.md
npm init -y
npm install express
npm install typescript nodemon concurrently @types/express --save-dev
npx tsc --init

// package.json
...
"scripts": {
  "start:build": "tsc -w",
  "start:run": "nodemon ./build/index.js",
  "start": "concurrently npm:start:*"
},
...

// tsconfig.json
...
"outDir": "./build",
"rootDir": "./src",
...

// .gitignore
node_modules
*.env

// README.md
### Start
```bash
npm run start
```

// src/index.ts
import express from 'express'
const port = 3000
const app = express()

console.log("Hello, World!!!")

logSomething("This is a string that I'm logging")

app.listen(port, () => {
  console.log(`Listening on port ${port}`)
})
touch index.ts
mkdir src
mv index.ts ./src
//Error

PHP Fatal error: Uncaught exception 'ErrorException' with message 'proc_open(): fork failed - Cannot allocate memory' in phar

//Fix

/bin/dd if=/dev/zero of=/var/swap.1 bs=1M count=1024
/sbin/mkswap /var/swap.1
/bin/chmod 0600 /var/swap.1
/sbin/swapon /var/swap.1
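# optionally make the swap file permanent across reboots (a sketch):
echo '/var/swap.1 swap swap defaults 0 0' >> /etc/fstab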
hello@nobo-prod-server:~$ sudo lsof -i:8082
COMMAND    PID USER   FD   TYPE DEVICE SIZE/OFF NODE NAME
node\x20/ 2993 root   20u  IPv6  30271      0t0  TCP *:8082 (LISTEN)
hello@nobo-prod-server:~$ ps 2993
  PID TTY      STAT   TIME COMMAND
 2993 ?        Ssl   28:43 node /home/yasas/nobo-angular/server.js
hello@nobo-prod-server:~$
Restricted: no scripts may be run.

AllSigned: only signed scripts may be run.

RemoteSigned: scripts downloaded from the Internet must be signed in order to run.
	Scripts created on your own workstation are not affected and may be run.

Unrestricted: no restrictions. All scripts may be run.

show the current execution policy
	Get-ExecutionPolicy

change the execution policy
	Set-ExecutionPolicy <mode>

create a user
	New-ADUser
		-Name <login>
		-SamAccountName <login>
		-UserPrincipalName <mail>
		-AccountPassword (ConvertTo-SecureString -AsPlainText <password> -Force)
		-PasswordNeverExpires $true
		-CannotChangePassword $true

enable/disable a user account
	Enable-ADAccount <samaccountname>
	Disable-ADAccount <samaccountname>

run a script
	powershell .\PATH

search for users
	Get-ADUser -Filter *
	Get-ADUser -Filter * | select samAccountName, Name, UserPrincipalName | Export-Csv UserAdUTF8.csv -Encoding UTF8

create a group
	New-ADGroup $groupe -GroupScope Global

add users to a group
	Add-ADGroupMember -identity $groupe -Members <samAccountName>

search for groups
	Get-ADGroup -Filter *

list the users in a group
	Get-ADGroupMember $group | Select-Object name | Export-Csv SEC.csv -Encoding UTF8

IDE for PowerShell
	Notepad.exe
git rev-parse --show-toplevel

# could be enough if executed within a git repo.
# From the git rev-parse man page: --show-toplevel
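# a common use (a minimal sketch): jump to the repo root from anywhere inside it
cd "$(git rev-parse --show-toplevel)"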
# WARNING: This will LOCK ALL PUBLIC REPOSITORIES ON YOUR GITHUB ACCOUNT
#
# dependencies hub and jq
# - hub: hub.github.com
# - jq: https://stedolan.github.io/jq/
#
# A better alternative would be to pipe the repos into a temporary file:
# $ hub api --paginate users/amingilani/repos | jq -r '.[]."full_name"' > repos.txt
# Then manually remove your active repositories
# And archive the remaining:
# $ cat repos.txt | xargs -I {} -n 1 hub api -X PATCH -F archived=true /repos/{}
#
# Anyways, to archive all public repositories in your GitHub account:
#
hub api --paginate users/amingilani/repos | jq -r '.[]."full_name"' | xargs -I {} -n 1 hub api -X PATCH -F archived=true /repos/{}
 https://cronhub.io/ping/1f5e3410-254c-11e8-b61d-55875966d031