Snippets Collections
pip install prowler
prowler -v
# Check if at least one file name is provided
if ($args.Count -eq 0) {
    Write-Host "Please provide at least one filename."
    exit
}

# Loop through each file name passed as an argument
foreach ($file in $args) {
    # Extract filename without extension for the component name
    $filename = [System.IO.Path]::GetFileNameWithoutExtension($file)

    # Define the content to be written to the file
    $content = @"
import { View, Text } from 'react-native'
import React from 'react'

const $filename = () => {
  return (
    <View>
      <Text>$filename</Text>
    </View>
  )
}

export default $filename
"@

    # Create the file and write the content
    $filePath = ".\$file"
    Set-Content -Path $filePath -Value $content
    Write-Host "$file created with component $filename"
}
pip list --outdated --format=freeze | grep -v '^\-e' | cut -d = -f 1  | xargs -n1 pip install -U
#!/bin/bash
####################################
#
# Rolling 7 day backup to local directory
#
####################################

mysqldump_location="path_to_place_mysql_dump_file"

/usr/bin/mysqldump --opt --single-transaction --default-character-set=utf8mb4 your_database_name > $mysqldump_location/your_dump_file_name.SQL

# What to backup.
backup_files="path_to_your_website_directory"

# Where to backup to.
dest="path_to_place_your_backup_files"

# Create archive filename.
day=$(date +%A)
hostname=$(hostname -s)
archive_file="your_domain_name-$day.zip"


# Backup the files using zip.
zip -r $dest/$archive_file $backup_files
rm -f $mysqldump_location/your_dump_file_name.SQL
find . -maxdepth 1 -mtime +3900 -printf '%CY-%Cm-%Cd  %f\n' | sort
sudo apt install kube-linter
docker pull stackrox/kube-linter:latest
go install golang.stackrox.io/kube-linter/cmd/kube-linter@latest
npx create-next-app@latest --typescript
cd my-app
curl -L https://gitignore.io/api/[language or environment names; comma-separated for multiple] -o .gitignore
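# For example (hypothetical template selection), generate a .gitignore for Node, Python, and VS Code:
curl -L https://gitignore.io/api/node,python,visualstudiocode -o .gitignore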
docker run --rm \
    -v $PWD:/local openapitools/openapi-generator-cli generate \
    -i /local/petstore.yaml \
    -g go \
    -o /local/out/go
                
# install the latest version of "openapi-generator-cli"
npm install @openapitools/openapi-generator-cli -g

# use a specific version of "openapi-generator-cli"
openapi-generator-cli version-manager set 6.2.0

# Or install it as dev-dependency in your node.js projects
npm install @openapitools/openapi-generator-cli -D
                
$ cd /lib/x86_64-linux-gnu/
$ sudo ln -s libreadline.so.7.0 libreadline.so.6
curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-archive-keyring.gpg
#NoEnv  ; Recommended for performance and compatibility with future AutoHotkey releases.
; #Warn  ; Enable warnings to assist with detecting common errors.
SendMode Input  ; Recommended for new scripts due to its superior speed and reliability.
SetWorkingDir %A_ScriptDir%  ; Ensures a consistent starting directory.
#Persistent

; Hotkeys
^Numpad1::Copy(1)
^Numpad4::Paste(1)
^Numpad7::Clear(1)

^Numpad2::Copy(2)
^Numpad5::Paste(2)
^Numpad8::Clear(2)

^Numpad3::Copy(3)
^Numpad6::Paste(3)
^Numpad9::Clear(3)

Copy(clipboardID)
{
	global ; All variables are global by default
	local oldClipboard := ClipboardAll ; Save the (real) clipboard
	
	Clipboard = ; Erase the clipboard first, or else ClipWait does nothing
	Send ^c
	ClipWait, 2, 1 ; Wait up to 2s for the clipboard to contain any kind of data
	if ErrorLevel 
	{
		Clipboard := oldClipboard ; Restore old (real) clipboard
		return
	}
	
	ClipboardData%clipboardID% := ClipboardAll
	
	Clipboard := oldClipboard ; Restore old (real) clipboard
}

Cut(clipboardID)
{
	global ; All variables are global by default
	local oldClipboard := ClipboardAll ; Save the (real) clipboard
	
	Clipboard = ; Erase the clipboard first, or else ClipWait does nothing
	Send ^x
	ClipWait, 2, 1 ; Wait up to 2s for the clipboard to contain any kind of data
	if ErrorLevel 
	{
		Clipboard := oldClipboard ; Restore old (real) clipboard
		return
	}
	ClipboardData%clipboardID% := ClipboardAll
	
	Clipboard := oldClipboard ; Restore old (real) clipboard
}

Paste(clipboardID)
{
	global
	local oldClipboard := ClipboardAll ; Save the (real) clipboard

	Clipboard := ClipboardData%clipboardID%
	Send ^v

	Clipboard := oldClipboard ; Restore old (real) clipboard
	oldClipboard = 
}

Clear(clipboardID)
{
	global
	local oldClipboard := ClipboardAll ; Save the (real) clipboard

	Clipboard := ClipboardData%clipboardID%
	ClipboardData%clipboardID% :=

	Clipboard := oldClipboard ; Restore old (real) clipboard
	oldClipboard = 
}
#!/bin/bash

if : >/dev/tcp/8.8.8.8/53; then
  echo 'Internet available.'
else
  echo 'Offline.'
fi
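
# A variant of the same check using coreutils `timeout` (assumption: `timeout` is installed),
# so a filtered port fails fast instead of hanging:
if timeout 3 bash -c ': >/dev/tcp/8.8.8.8/53'; then
  echo 'Internet available.'
else
  echo 'Offline.'
fi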
npx create-react-app my-app --template typescript
git pull origin [branch name]
git add .
git commit -m ""
git push origin [branch name]
git remote -v
git remote add origin 
git remote remove origin
git checkout -b [branch name]   # create a branch
git checkout [branch name]      # switch branch
git rebase --continue 
git merge --continue
git push origin [branch name] --force-with-lease
ipconfig getoption en0 domain_name_server
# Remove break line in sequences
awk '/^>/ { print (NR==1 ? "" : RS) $0; next } { printf "%s", $0 } END { printf RS }' input.fa > output.fa
 docker image prune -a --force --filter "until=2160h"
dataLayer.push({
  
    "action": "onInitialPageLoad",
    "event": "consent_status",
    "type": "explicit",
    "ucCategory": {
        "essential": true,
        "marketing": true,
        "functional": true,
        "customCategory-da1466e9-42f7-4845-88ee-14d3080feb09": true
    },
    "Usercentrics Consent Management Platform": true,
    "Amazon Pay": true,
    "Cloudflare": true,
    "Google Fonts": true,
    "Google Maps": true,
    "Google Tag Manager": true,
    "PayPal": true,
    "Wordpress": true,
    "Sentry": true,
    "Amazon Web Services": true,
    "hCaptcha": true,
    "Kundenaccount": true,
    "Ory Hydra": true,
    "Datadog": true,
    "Freshdesk": true,
    "Emarsys": true,
    "Facebook Pixel": true,
    "Sovendus": true,
    "Google Analytics": true,
    "Trustpilot": true,
    "TradeDoubler": true,
    "QualityClick": true,
    "Pinterest": true,
    "TikTok": true,
    "Adtriba": true,
    "Microsoft Advertising": true,
    "AWIN": true,
    "Google Ads Conversion Tracking": true,
    "Google Ads Remarketing": true,
    "DoubleClick Floodlight": true,
    "Freewheel": true,
    "DoubleClick Ad": true,
    "tcid": true,
    "jsg_attribution": true,
    "jsg_lc": true,
    "tsid": true,
    "Impact Radius": true,
    "TimingCounter": true,
    "Outbrain": true,
    "Movable Ink": true,
    "Criteo OneTag": true,
    "YouTube Video": true,
    "Zopim": true,
    "Optimizely": true,
    "trbo": true,
    "RUMvision": true

  
})
npm install --save @stripe/react-stripe-js @stripe/stripe-js
npm i @next-auth/prisma-adapter
npm install next-auth
npm install @prisma/client @auth/prisma-adapter
npm install prisma --save-dev
npm install react-hook-form
# Plot box plots.
library(reshape2) # provides melt(); assumption - melt() may also come from data.table
ff = cbind( cellType = rownames(f1), color = colorCodes[1:13], f1[, 2:10] )
ff = melt( ff, measure.vars = 3:11)
marker.genes = lapply( marker.genes, FUN = function(x) {x[x %in% geneNames]})
marker.genes.2 <- list(
  Gene1 = c(1, 2, 3),
  Gene2 = c(4, 5, 6, 7),
  Gene3 = c(8, 9, 10)
)

# Find the maximum length among all vectors
max_length <- max(sapply(marker.genes.2, length))

# Pad shorter vectors with NA to make them consistent
marker.genes.2 <- lapply(marker.genes.2, function(x) {
  if (length(x) < max_length) {
    c(x, rep(NA, max_length - length(x)))
  } else {
    x
  }
})

# Create a data frame from the corrected list
my_data_frame <- data.frame(marker.genes.2)

# Display the resulting data frame
print(my_data_frame)
gg_dot <- plot_grid(gg_dot_1, gg_dot_2, labels = LETTERS[1:2], ncol = 1 )
saveRDS(marker.genes.2, file = "Tabula_Lung_Total8.rds")
RUN sudo apt update && sudo apt install -y zsh \
&& sh -c "$(wget -O- https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh)" \
&& git clone https://github.com/zsh-users/zsh-autosuggestions ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-autosuggestions \
&& git clone https://github.com/zsh-users/zsh-syntax-highlighting.git ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-syntax-highlighting \
&& sed -i "s/plugins=(git)/plugins=(\n\tgit\n\tzsh-autosuggestions\n\tzsh-syntax-highlighting\n)/" ~/.zshrc \
&& sed -i "s/ZSH_THEME=.*/ZSH_THEME='<theme-name>'/" ~/.zshrc \
&& sudo chsh -s $(which zsh) $USERNAME
kubectl get no -owide

# List containers running
docker ps

# Enter kind node
docker exec -it kind-control-plane sh

# Show node info
ip link

# Show containers inside node
crictl ps

# Get container process id
crictl inspect <CONTAINER>
  
# Enter container network namespace
nsenter -n -t <PROC_ID>

# Show container network info
ip addr
curl --request POST \
  --url https://{{your-gtm-ss-url}}/com.snowplowanalytics.snowplow/enriched \
  --header 'Content-Type: application/json' \
  --header 'x-gtm-server-preview: {{your-preview-header}}' \
  --data '{
  "app_id": "example-website",
  "platform": "web",
  "etl_tstamp": "2021-11-26T00:01:25.292Z",
  "collector_tstamp": "2021-11-20T00:02:05Z",
  "dvce_created_tstamp": "2021-11-20T00:03:57.885Z",
  "event": "unstruct",
  "event_id": "c6ef3124-b53a-4b13-a233-0088f79dcbcb",
  "txn_id": null,
  "name_tracker": "sp1",
  "v_tracker": "js-3.1.6",
  "v_collector": "ssc-2.3.0-stdout$",
  "v_etl": "snowplow-micro-1.1.2-common-2.0.1",
  "user_id": "jon.doe@email.com",
  "user_ipaddress": "92.231.54.234",
  "user_fingerprint": null,
  "domain_userid": "de81d764-990c-4fdc-a37e-adf526909ea6",
  "domain_sessionidx": 3,
  "network_userid": "ecdff4d0-9175-40ac-a8bb-325c49733607",
  "geo_country": "US",
  "geo_region": "CA",
  "geo_city": "San Francisco",
  "geo_zipcode": "94109",
  "geo_latitude": 37.443604,
  "geo_longitude": -122.4124,
  "geo_location": "37.443604,-122.4124",
  "geo_region_name": "San Francisco",
  "ip_isp": "AT&T",
  "ip_organization": "AT&T",
  "ip_domain": "att.com",
  "ip_netspeed": "Cable/DSL",
  "page_url": "https://snowplowanalytics.com/use-cases/",
  "page_title": "Snowplow Analytics",
  "page_referrer": null,
  "page_urlscheme": "https",
  "page_urlhost": "snowplowanalytics.com",
  "page_urlport": 443,
  "page_urlpath": "/use-cases/",
  "page_urlquery": "",
  "page_urlfragment": "",
  "refr_urlscheme": null,
  "refr_urlhost": null,
  "refr_urlport": null,
  "refr_urlpath": null,
  "refr_urlquery": null,
  "refr_urlfragment": null,
  "refr_medium": null,
  "refr_source": null,
  "refr_term": null,
  "mkt_medium": null,
  "mkt_source": null,
  "mkt_term": null,
  "mkt_content": null,
  "mkt_campaign": null,
  "contexts_org_w3_performance_timing_1": [
    {
      "navigationStart": 1415358089861,
      "unloadEventStart": 1415358090270,
      "unloadEventEnd": 1415358090287,
      "redirectStart": 0,
      "redirectEnd": 0,
      "fetchStart": 1415358089870,
      "domainLookupStart": 1415358090102,
      "domainLookupEnd": 1415358090102,
      "connectStart": 1415358090103,
      "connectEnd": 1415358090183,
      "requestStart": 1415358090183,
      "responseStart": 1415358090265,
      "responseEnd": 1415358090265,
      "domLoading": 1415358090270,
      "domInteractive": 1415358090886,
      "domContentLoadedEventStart": 1415358090968,
      "domContentLoadedEventEnd": 1415358091309,
      "domComplete": 0,
      "loadEventStart": 0,
      "loadEventEnd": 0
    }
  ],
  "se_category": null,
  "se_action": null,
  "se_label": null,
  "se_property": null,
  "se_value": null,
  "unstruct_event_com_snowplowanalytics_snowplow_link_click_1": {
    "targetUrl": "http://www.example.com",
    "elementClasses": [
      "foreground"
    ],
    "elementId": "exampleLink"
  },
  "tr_orderid": null,
  "tr_affiliation": null,
  "tr_total": null,
  "tr_tax": null,
  "tr_shipping": null,
  "tr_city": null,
  "tr_state": null,
  "tr_country": null,
  "ti_orderid": null,
  "ti_sku": null,
  "ti_name": null,
  "ti_category": null,
  "ti_price": null,
  "ti_quantity": null,
  "pp_xoffset_min": null,
  "pp_xoffset_max": null,
  "pp_yoffset_min": null,
  "pp_yoffset_max": null,
  "useragent": null,
  "br_name": null,
  "br_family": null,
  "br_version": null,
  "br_type": null,
  "br_renderengine": null,
  "br_lang": null,
  "br_features_pdf": true,
  "br_features_flash": false,
  "br_features_java": null,
  "br_features_director": null,
  "br_features_quicktime": null,
  "br_features_realplayer": null,
  "br_features_windowsmedia": null,
  "br_features_gears": null,
  "br_features_silverlight": null,
  "br_cookies": null,
  "br_colordepth": null,
  "br_viewwidth": null,
  "br_viewheight": null,
  "os_name": null,
  "os_family": null,
  "os_manufacturer": null,
  "os_timezone": null,
  "dvce_type": null,
  "dvce_ismobile": null,
  "dvce_screenwidth": null,
  "dvce_screenheight": null,
  "doc_charset": null,
  "doc_width": null,
  "doc_height": null,
  "tr_currency": null,
  "tr_total_base": null,
  "tr_tax_base": null,
  "tr_shipping_base": null,
  "ti_currency": null,
  "ti_price_base": null,
  "base_currency": null,
  "geo_timezone": null,
  "mkt_clickid": null,
  "mkt_network": null,
  "etl_tags": null,
  "dvce_sent_tstamp": null,
  "refr_domain_userid": null,
  "refr_dvce_tstamp": null,
  "contexts_com_snowplowanalytics_snowplow_ua_parser_context_1": [
    {
      "useragentFamily": "IE",
      "useragentMajor": "7",
      "useragentMinor": "0",
      "useragentPatch": null,
      "useragentVersion": "IE 7.0",
      "osFamily": "Windows XP",
      "osMajor": null,
      "osMinor": null,
      "osPatch": null,
      "osPatchMinor": null,
      "osVersion": "Windows XP",
      "deviceFamily": "Other"
    }
  ],
  "domain_sessionid": "2b15e5c8-d3b1-11e4-b9d6-1681e6b88ec1",
  "derived_tstamp": "2021-11-20T00:03:57.886Z",
  "event_vendor": "com.snowplowanalytics.snowplow",
  "event_name": "link_click",
  "event_format": "jsonschema",
  "event_version": "1-0-0",
  "event_fingerprint": "e3dbfa9cca0412c3d4052863cefb547f",
  "true_tstamp": "2021-11-20T00:03:57.886Z"
}'
#!/bin/bash

for i in {1979..1980}
do
    echo "output: $i"
    dir2=$((i+1))
    cp /from/$i/FILE:$i-08* /from/$dir2/
    mv  /from/$i/FILE:$i-09* /from/$dir2/
    
    
done
iptables -t nat -I PREROUTING -p tcp -d 192.168.1.0/24 --dport 2222 -j DNAT --to-destination 127.0.0.1:2222

sysctl -w net.ipv4.conf.eth0.route_localnet=1
# check version
aws --version

# update CLI
pip install --upgrade awscli

# list S3 buckets
aws s3api list-buckets
aws s3 ls
aws s3 ls s3://dir_name/subdir_name

# set up config
# touch ~/.aws/config
[sso-session my_session]
sso_start_url = https://xxxx-login.awsapps.com/start/
sso_region = us-east-2
sso_registration_scopes = sso:account:access

[profile cwb-d]
output = json
region = us-east-2
sso_session = my_session
sso_account_id = 123456789
sso_role_name = my_role

# authenticate
export AWS_PROFILE=cwb-d
aws sso login --sso-session my_session --no-browser

concurrent = 1
check_interval = 0
shutdown_timeout = 0

[session_server]
  session_timeout = 3600
[[runners]]
  name = "Runner Name - Docker"
  url = "https://gitlab.?.com/"
  token = "<Gitlab-Runner-Token"
  executor = "docker"
  # Path to the custom CA certificate
  tls-ca-file = "path to certs"
  [runners.docker]
    gpus = "all"
    privileged = false
    tls_verify = false
    image = "docker:stable"  # Specify the default Docker image for running jobs
    disable_cache = false
    volumes = ["/cache"]
    shm_size = 0  # Disable Docker build sharing
    [runners.docker.auth]
      username = "<gitlab-Token>"
      password = "<Token-Password>"

[[runners]]
  name = "Runner Name - Shell"
  url = "https://gitlab.?.com/"
  token = "<Gitlab-Runner-Token>"
  executor = "shell"
  # Path to the custom CA certificate
  tls-ca-file = "path to certs"
#!/bin/bash
# Short script to split videos by filesize using ffmpeg by LukeLR
# source:https://stackoverflow.com/a/52158160
# usage: . ./split-video.sh huge-video.mov 90000000 "-c:v libx264 -crf 23 -c:a copy -vf scale=640:-2"

if [ $# -ne 3 ]; then
    echo 'Illegal number of parameters. Needs 3 parameters:'
    echo 'Usage:'
    echo './split-video.sh FILE SIZELIMIT "FFMPEG_ARGS"'
    echo
    echo 'Parameters:'
    echo '    - FILE:        Name of the video file to split'
    echo '    - SIZELIMIT:   Maximum file size of each part (in bytes)'
    echo '    - FFMPEG_ARGS: Additional arguments to pass to each ffmpeg-call'
    echo '                   (video format and quality options etc.)'
    exit 1
fi

FILE="$1"
SIZELIMIT="$2"
FFMPEG_ARGS="$3"

# Duration of the source video
DURATION=$(ffprobe -i "$FILE" -show_entries format=duration -v quiet -of default=noprint_wrappers=1:nokey=1 | cut -d. -f1)

# Duration that has been encoded so far
CUR_DURATION=0

# Filename of the source video (without extension)
BASENAME="${FILE%.*}"

# Extension for the video parts
#EXTENSION="${FILE##*.}"
EXTENSION="mp4"

# Number of the current video part
i=1

# Filename of the next video part
NEXTFILENAME="$BASENAME-$i.$EXTENSION"

echo "Duration of source video: $DURATION"

# Until the duration of all partial videos has reached the duration of the source video
while [[ $CUR_DURATION -lt $DURATION ]]; do
    # Encode next part
    echo ffmpeg -i "$FILE" -ss "$CUR_DURATION" -fs "$SIZELIMIT" $FFMPEG_ARGS "$NEXTFILENAME"
    ffmpeg -ss "$CUR_DURATION" -i "$FILE" -fs "$SIZELIMIT" $FFMPEG_ARGS "$NEXTFILENAME"

    # Duration of the new part
    NEW_DURATION=$(ffprobe -i "$NEXTFILENAME" -show_entries format=duration -v quiet -of default=noprint_wrappers=1:nokey=1 | cut -d. -f1)

    # Total duration encoded so far
    CUR_DURATION=$((CUR_DURATION + NEW_DURATION))

    i=$((i + 1))

    echo "Duration of $NEXTFILENAME: $NEW_DURATION"
    echo "Part No. $i starts at $CUR_DURATION"

    NEXTFILENAME="$BASENAME-$i.$EXTENSION"
done
composer require intervention/image
ffmpeg -i input.mp4 -b:v 500k -c:a aac -strict experimental -y output.mp4
# input.txt
file 'file1.mp4'
file 'file2.mp4'
file 'file3.mp4'

# terminal
ffmpeg -f concat -safe 0 -i input.txt -c copy output.mp4
$ git commit -m "Something terribly misguided" # (0: Your Accident)
$ git reset HEAD~                              # (1)
[ edit files as necessary ]                    # (2)
$ git add .                                    # (3)
$ git commit -c ORIG_HEAD                      # (4)
# Turn on cluster nodes
clusterctrl on
# update master
sudo apt update && sudo apt dist-upgrade -y

# Add nodes to hosts file
sudo vi /etc/hosts
172.19.181.1	p1
172.19.181.2	p2
172.19.181.3	p3
172.19.181.4	p4

# Upgrade nodes
ssh p1 'sudo apt update && sudo apt dist-upgrade -y'
ssh p2 'sudo apt update && sudo apt dist-upgrade -y'
ssh p3 'sudo apt update && sudo apt dist-upgrade -y'
ssh p4 'sudo apt update && sudo apt dist-upgrade -y'

# enable memory cgroup on all raspberries
sudo vi /boot/cmdline.txt
cgroup_memory=1 cgroup_enable=memory

# Download k3sup
sudo curl -sLS https://get.k3sup.dev | sh
sudo cp k3sup-arm64 /usr/local/bin/k3sup

# Install k3sup without servicelb so we can use metalLB later
k3sup install --ip 172.19.181.254 --user $(whoami) --ssh-key ~/.ssh/kubemaster --k3s-extra-args '--disable servicelb'

# Copy config file to user
sudo cp /etc/k3s/kubeconfig ~/.kube/

# Export the file
export KUBECONFIG=~/.kube/kubeconfig
# Install on nodes
k3sup join --ip 172.19.181.1 --server-ip 172.19.181.254 --user $(whoami) --ssh-key ~/.ssh/kubemaster
k3sup join --ip 172.19.181.2 --server-ip 172.19.181.254 --user $(whoami) --ssh-key ~/.ssh/kubemaster
k3sup join --ip 172.19.181.3 --server-ip 172.19.181.254 --user $(whoami) --ssh-key ~/.ssh/kubemaster
k3sup join --ip 172.19.181.4 --server-ip 172.19.181.254 --user $(whoami) --ssh-key ~/.ssh/kubemaster
$ lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 1 119.5G 0 disk
└─sda1 8:1 1 119.5G 0 part
  
sudo umount /dev/sda1
sudo mkfs.ext4 /dev/sda1
sudo mkdir /media/nfstorage
sudo chown -R nobody:nogroup /media/nfstorage
sudo chmod -R 777 /media/nfstorage

blkid 
# Copy UUID="a13c2fad-7d3d-44ca-b704-ebdc0369260e"
sudo vi /etc/fstab
# Add the following line to the bottom of the fstab file:
UUID=a13c2fad-7d3d-44ca-b704-ebdc0369260e /media/nfstorage ext4 defaults 0 2

# NFS server is installed
sudo apt-get install -y nfs-kernel-server

sudo vi /etc/exports
# add the following line at the bottom
/media/nfstorage 172.19.181.0/24(rw,sync,no_root_squash,no_subtree_check)

sudo exportfs -a

# On each node p1,p2,p3,pN
sudo apt-get install -y nfs-common
sudo mkdir /media/nfstorage
sudo chown nobody:nogroup /media/nfstorage
sudo chmod -R 777 /media/nfstorage
# Set up automatic mounting by editing your /etc/fstab:
sudo vi /etc/fstab
# Add this line to the bottom:
172.19.181.254:/media/nfstorage /media/nfstorage nfs defaults 0 0

sudo mount -a
# Setup SSH On Desktop
ssh-keygen -t ed25519
cat ~/.ssh/id_ed25519.pub

# Setup SSH For Raspberry Master
ssh-keygen -t ed25519 -f ~/.ssh/kubemaster

# Copy keyset to raspbery master
scp kubemaster kubemaster.pub <user>@<IP>:~/.ssh/

# Use Raspberry pi imager to flash with user,wifi,hostname and keyset configured.
# Remember to add ssh file in boot

# Setup SSH Config File
$ vi ~/.ssh/config
Host p1
    Hostname 172.19.181.1
    User <user>
    IdentityFile ~/.ssh/kubemaster
Host p2
    Hostname 172.19.181.2
    User <user>
    IdentityFile ~/.ssh/kubemaster
Host p3
    Hostname 172.19.181.3
    User <user>
    IdentityFile ~/.ssh/kubemaster
Host p4
    Hostname 172.19.181.4
    User <user>
    IdentityFile ~/.ssh/kubemaster

# Enable nodes
$ sudo clusterhat on

# ensure systime is synced
sudo apt-get install -y ntpdate
npm install -g @vue/cli # OR yarn global add @vue/cli
vue create hello-vue3
# select the vue 3 preset
npm init vite hello-vue3 -- --template vue # OR yarn create vite hello-vue3 --template vue
git remote set-url origin git@github.com:username/repository.git
# create the public and private key, optional passphrase
ssh-keygen -t ed25519 -C "name@email.com"
# start the ssh agent
exec ssh-agent bash
# add the key
ssh-add /home/viktor/.ssh/id_ed25519
# verify it was registered
ssh-add -l
sudo apt install software-properties-common apt-transport-https curl ca-certificates -y
curl -fSsL https://packages.microsoft.com/keys/microsoft.asc | sudo gpg --dearmor | sudo tee /usr/share/keyrings/microsoft-edge.gpg > /dev/null
echo 'deb [arch=amd64 signed-by=/usr/share/keyrings/microsoft-edge.gpg] https://packages.microsoft.com/repos/edge stable main' | sudo tee /etc/apt/sources.list.d/microsoft-edge.list
$ sudo crossystem dev_enable_udc=1
$ sudo reboot
curl https://chatgpt-api.shn.hk/v1/ \
  -H 'Content-Type: application/json' \
  -d '{
  "model": "gpt-3.5-turbo",
  "messages": [{"role": "user", "content": "Hello, how are you?"}]
}'
# select upstream changes
git checkout --theirs .
# select local changes
git checkout --ours .

git add .
git commit -m "Merged using 'theirs' strategy"
input[type="radio"] {
    margin-right: 1em;
    appearance: none;
    width: 12px;
    height: 12px;
    background-image: url("checkbox_off.gif");       
}

input[type="radio"]:checked {
    background-image: url("checkbox_on.gif");           
}
always-auth=true
@gsap:registry=https://npm.greensock.com
//npm.greensock.com/:_authToken=${PRIVJS_TOKEN}
# /boot/config.txt

dtoverlay=gpio-fan,gpiopin=18,temp=75000
curl https://baltocdn.com/helm/signing.asc | gpg --dearmor | sudo tee /usr/share/keyrings/helm.gpg > /dev/null
sudo apt-get install apt-transport-https --yes
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/helm.gpg] https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list
sudo apt-get update
sudo apt-get install helm
find /your/folder/path -type f -exec grep -l "search_string" {} \;
 ssh-keygen -t rsa -b 4096 -C "khanhthanhh9@gmail.com"
 cat ~/.ssh/id_rsa.pub
# save and exit
:wq
# checkout latest changes
git fetch origin
# checkout remote branch
git checkout -b feature_branch origin/feature_branch
# 1 hour
git config --global credential.helper 'cache --timeout=3600'

# 8 hours
git config --global credential.helper 'cache --timeout=28800'
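
# check which credential helper is configured (prints the value set above)
git config --global credential.helper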
git fetch origin
git reset --hard origin/master
GIT_LFS_SKIP_SMUDGE=1 git clone git@github.com:user/repo.git
git push https://username:token@github.com/username/repository.git
git pull https://username:token@github.com/username/repository.git
du -h --max-depth=1
# check drive health
sudo apt-get install smartmontools
sudo smartctl -a /dev/sdX

# check filesystem
sudo fsck /dev/sdX1

# mount drive manually
sudo mount -t ntfs /dev/sdX /media/user/Folder

# repair NTFS filesystems <- THIS WORKED!
sudo ntfsfix /dev/sdX
: > $(docker inspect --format='{{.LogPath}}' <container_name_or_id> )
wget --mirror --convert-links --wait=2 https://websitename.com
sudo /usr/bin/vmhgfs-fuse .host:/ ~/ -o subtype=vmhgfs-fuse,allow_other
 git log --follow --oneline -- path/to/file.txt
git log --oneline | grep d5cbfd5
# install Github LFS
sudo apt update
sudo apt install git-lfs
git lfs install

# track large files files
git lfs track "*.extension"
git add .gitattributes
git commit -m "Add Git LFS attributes"
sudo apt-get install apt-transport-https
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee -a /etc/apt/sources.list.d/kubernetes.list
# Create a new Jekyll project
jekyll new my-jekyll-site
# Change to the project directory
cd my-jekyll-site
# Install dependencies
bundle install
# Start the Jekyll server
bundle exec jekyll serve
# https://groups.google.com/g/clusterhat/c/sdGfxaPjUmk

Updated ClusterCTRL images (based on Raspberry Pi OS 2023-05-03) for ClusterHAT (v1.x or v2.x) and ClusterCTRL A+6/Stack/Single/Triple.

https://clusterctrl.com/setup-software

Changes
=======

Updated to new Raspberry Pi OS release.
Update to latest version of clusterctrl tool.
Support newer "firstboot" method.

Upgrade
=======

The following command should be run on all running images (cbridge/cnat/pX/usbboot).

sudo svn --force export https://github.com/burtyb/clusterhat-image/trunk/files /

Upgrade to the latest Raspberry Pi OS.

sudo apt update
sudo apt full-upgrade

If you're using usbboot you can update the filesystems quicker by chrooting into the directory and upgrading it from the controller (with the node shutdown).

Replacing X with the pX number.

sudo chroot /var/lib/clusterctrl/nfs/pX apt update
sudo chroot /var/lib/clusterctrl/nfs/pX apt full-upgrade
# dry run incl. directories
git clean -nd

# force execution incl. directories (this cannot be undone!)
git clean -fd
# ---- On Old Server -----

# Shut down GitLab service
sudo gitlab-ctl stop unicorn
sudo gitlab-ctl stop sidekiq

# Back up GitLab on old server
sudo gitlab-rake gitlab:backup:create

# Create a folder named gitlab-old on the server
$ mkdir gitlab-old

# Copy the GitLab file configuration on folder /etc/gitlab (gitlab.rb and gitlab-secrets.json) and folder /etc/gitlab/ssl to ~/gitlab-old
$ sudo cp /etc/gitlab/gitlab.rb ~/gitlab-old
$ sudo cp /etc/gitlab/gitlab-secrets.json ~/gitlab-old
$ sudo cp -R /etc/gitlab/ssl ~/gitlab-old

# Copy the backup file to folder ~/gitlab-old
$ sudo cp /var/opt/gitlab/backups/XXXXXXXXXX_gitlab_backup.tar ~/gitlab-old
# Change permission and ownership of ~/gitlab-old
$ sudo chown user:user -R ~/gitlab-old

# Transfer gitlab-old folder to new server
scp -r ~/gitlab-old user@<new_server_ip>:~

# ------- New Server ---

# Install the new server with GitLab 11.4.5.
# Add GitLab source with:
$ curl -s https://packages.gitlab.com/install/repositories/gitlab/gitlab-ce/script.deb.sh | sudo bash

# Update and install GitLab 11.4.5 with:
$ sudo apt-get install gitlab-ce=11.4.5-ce.0

# Copy the configuration file to folder /etc/gitlab
$ sudo cp gitlab-old/gitlab* /etc/gitlab

# Copy the ssl folder to folder /etc/gitlab
$ sudo cp -R gitlab-old/ssl /etc/gitlab

# Run GitLab service for the first time
$ sudo gitlab-ctl reconfigure

# Shut down GitLab service
$ sudo gitlab-ctl stop unicorn
$ sudo gitlab-ctl stop sidekiq

# Copy backup file to /var/opt/gitlab/backups, then change ownership and permission to git user
$ sudo cp gitlab-old/XXXXXXXXXX_gitlab_backup.tar /var/opt/gitlab/backups
$ sudo chown git:git /var/opt/gitlab/backups/XXXXXXXXXX_gitlab_backup.tar

# Run the GitLab restore process
$ sudo gitlab-rake gitlab:backup:restore BACKUP=XXXXXXXXX

# Restart GitLab and check
$ sudo gitlab-ctl start
$ sudo gitlab-rake gitlab:check SANITIZE=true
# To extend the logical volume, use the lvextend command. But first, get the mount point of the logical volume using the lvdisplay command:

sudo lvdisplay

# From the lvdisplay output, you can see that the disk is mounted on the path /dev/ubuntu-vg/ubuntu-lv.

# Next, increase the logical volume space using the following command:

sudo lvextend -l +100%FREE /dev/ubuntu-vg/ubuntu-lv

# 100% means using up the entire space, so assign the required percentage according to your needs, e.g. 50%, 60%, etc.

# For the changes to take effect you also need to resize the file system comprising the logical volume. Get the file system path from the df -h command; in this case, it is /dev/mapper/ubuntu--vg-ubuntu--lv.
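
# For example, to list mounted filesystems and their device paths (output varies by system):
df -h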

sudo resize2fs /dev/mapper/ubuntu--vg-ubuntu--lv

# Now if you run the df -h command again, you will see that your root drive has increased in size.
vue create project-name

# upon creation
cd project-name
npm run serve
git clone git@github.com:user/repo.git temp; mv temp new_folder; rm -rf temp
# cpus
nproc # simple count of cores
cat /proc/cpuinfo # detailed information about each individual core and processor

# memory
free -h
distribution=$(. /etc/os-release;echo $ID$VERSION_ID) \
      && curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \
      && curl -s -L https://nvidia.github.io/libnvidia-container/$distribution/libnvidia-container.list | \
            sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \
            sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list

sudo apt-get update

sudo apt-get install -y nvidia-container-toolkit

sudo nvidia-ctk runtime configure --runtime=docker

sudo systemctl restart docker

$ sudo docker run --rm --runtime=nvidia --gpus all nvidia/cuda:11.6.2-base-ubuntu20.04 nvidia-smi
#-Add IOMMU Support-

vim /etc/default/grub

GRUB_CMDLINE_LINUX_DEFAULT="quiet intel_iommu=on"
#	- OR -
GRUB_CMDLINE_LINUX_DEFAULT="quiet amd_iommu=on"

# Save file and close

update-grub

# -Load VFIO modules at boot-

vim /etc/modules

vfio
vfio_iommu_type1
vfio_pci
vfio_virqfd

# Save file and close

echo "options vfio_iommu_type1 allow_unsafe_interrupts=1" > /etc/modprobe.d/iommu_unsafe_interrupts.conf

echo "options kvm ignore_msrs=1" > /etc/modprobe.d/kvm.conf

echo "blacklist nouveau" >> /etc/modprobe.d/blacklist.conf
echo "blacklist nvidia" >> /etc/modprobe.d/blacklist.conf

# -Configure GPU for PCIe Passthrough-
# - Find your GPU
lspci -v

#	- Enter the PCI identifier
lspci -n -s 82:00

# - Copy the HEX values from your GPU here:
echo "options vfio-pci ids=####.####,####.#### disable_vga=1"> /etc/modprobe.d/vfio.conf

update-initramfs -u

# --REBOOT--


# ---------------------  -Virtual Machine PCIe passthrough (Debian Linux)- -----------------

# -Confirm GPU is being passed through-

lspci

# -Disable Nouveau drivers in kernel-

sudo bash -c "echo blacklist nouveau > /etc/modprobe.d/blacklist-nvidia-nouveau.conf"
sudo bash -c "echo options nouveau modset=0 >> /etc/modprobe.d/blacklist-nvidia-nouveau.conf"

sudo update-initramfs -u

# --REBOOT--

# -Confirm no drivers running for nVidia GPU-

lspci -v

# Find GPU. There should be no 'Kernel driver in use:' line

# 
# Download nVidia Drivers & Install nVidia Drivers

# Visit nVidia.com/drivers, locate your card, and find out what the most recent version is

wget https://international.download.nvidia.com/XFree86/Linux-x86_64/515.65.01/NVIDIA-Linux-x86_64-515.65.01.run

sudo chmod +x NVIDIA-Linux-x86_64-###.##.##.run
sudo apt update
sudo apt install build-essential libglvnd-dev pkg-config

./NVIDIA-Linux-x86_64-###.##.##.run

# Complete prompts to install

lspci -v

# Confirm GPU is using nvidia drivers:
# "Kernel driver in use: nvidia"

nvidia-smi

cd ~/Desktop/app
# If your Python version is 3.X
# On Windows, try "python -m http.server" or "py -3 -m http.server"
python3 -m http.server
# If your Python version is 2.X
python -m SimpleHTTPServer
# ~/.bash_aliases:
alias ga='git add -A'
alias gb='git branch'
alias gbd='git branch --delete '
alias gc='git commit --message'
alias gco='git checkout'
alias gcob='git checkout -b'
alias gcom='git checkout master'
alias gcomn='git checkout main'
alias gcv='git commit --no-verify --message'
alias gl='git log --oneline'
alias gp='git pull'
alias gps='git push'
alias gs='git status'
alias gst='git stash'
alias gsta='git stash apply'

#--------------------------------------------------------
# ~/.bashrc:
# Source global definitions
if [ -f /etc/bashrc ]; then
        . /etc/bashrc
fi

# User specific aliases and functions
umask 027
export PS1="\u@\h $ "
[ -d "$HOME/bin" ] && [[ -z $(echo $PATH | grep "$HOME/bin") ]] && export PATH=$PATH:$HOME/bin
export HISTTIMEFORMAT="[%d-%b-%Y %R]"
export HISTFILE=~/.sh_history
# append to history, don't overwrite it
shopt -s histappend
export EDITOR=nano
if [ -f ~/.bash_aliases ]; then
        . ~/.bash_aliases
fi
# Mount the drive
$ mount /dev/sda5 /mnt
# Mount all partitions and devs
$ for i in /dev /dev/pts /proc /sys /run; do sudo mount -B $i /mnt$i; done
chroot /mnt
# Update grub
sudo grub-install /dev/sda
sudo update-grub



for file in <files>; do 
	[[ $? != 0 ]] && break
	echo -n "$file: "
	ext=$(cut -d. -f2 <<< "$file")
	base=$( stat -c '%y' "$file" | cut -d . -f 1 | tr : . )
	name="$base".${ext,,}; echo $name
    mv "$file" "$name"
done
rename -n -d 's/(\d{4})-(\d{2})-(\d{2}) (\d{2})\.(\d{2})\.(\d{2})/$1$2$3_$4$5$6/' <files>
# -n to dry run
# -d to only rename files, not folders
find /path/to/directory -type f -name "*.txt" -exec rm {} \;
#!/bin/bash

# Define the filename and search keyword
filename="path/to/file.R"
search_term="deleted_string"

# Retrieve the commit hashes that modified the file
commit_hashes=$(git log --oneline --follow -- "$filename" | cut -d " " -f 1)

# Loop through the commit hashes
for commit_hash in $commit_hashes; do
    # Search for the keyword in the file for each commit
    grep_result=$(git grep -c "$search_term" "$commit_hash" -- "$filename")
    if [ "$grep_result" != "" ]; then
        echo "############################################"
        echo "Found '$search_term' in commit: $commit_hash"
        echo "############################################"
        file_content=$(git show "$commit_hash":"$filename")
        printf "%s\n" "$file_content"
    fi
done
# -r recursively
# -p preserve original file attributes
cp -rp folder1 folder2

# exclude the ".git" folder
rsync -av --exclude='.git' folder1/ folder2/
docker exec -it <container_id> bash
docker run --privileged -idt kindest/node:v1.21.2
git rm -r --cached .; git add .; git commit -am 'Removed files from the index (now ignored)'
cd ~/PycharmProjects/my_project/ && source ./venv/bin/activate && jupyter lab --no-browser --ip 0.0.0.0 --port 1248
cd ~/PycharmProjects/my_project/ && source ./venv/bin/activate && jupyter-lab --no-browser
curl https://ipinfo.io/<your-public-ip-address>
# enable remote desktop ubuntu
sudo apt-get install xrdp
sudo systemctl start xrdp
#sudo systemctl restart xrdp # restart XRDP
#sudo systemctl enable xrdp # <- enable XRDP on system boot

# get IP and hostname
ifconfig
hostname

# windows:
# 1. open Remote Desktop Connection
# 2. enter the IP address or hostname

# submit a job after a set of jobs are completed
job_ids=("job_1" "job_2")
hold_jid=$(IFS=,; echo "${job_ids[*]}")
qsub -hold_jid "${hold_jid}" your_new_job_script.sh

# list all the active jobs
qstat
qstat -u user

# refresh list every 2 seconds
watch -n 2 qstat -u user

# kill job
qdel JOB-ID
# check if array is empty
if [ ${#array[@]} -gt 0 ]; then
    echo "Array is not empty."
fi
    
# check if array is empty
if [ -z "${array[*]}" ]; then
    echo "Array is not empty."
fi
# Define an array
my_array=("Element 1" "Element 2" "Element 3")
# Set the IFS (Internal Field Separator) to comma
IFS=','
# Collapse the array into a string
collapsed_string="${my_array[*]}"
# Print the collapsed string
echo "$collapsed_string"
my_array=()
item1="Apple"
item2="Banana"
my_array+=("$item1")
my_array+=("$item2")
echo "${my_array[@]}"
script_path=$(readlink -f "${BASH_SOURCE[0]}")
echo "Absolute path of the current script: $script_path"
# login to server
ssh username@ssh.server.com
# edit crontab file
crontab -e
#!/usr/bin/env xdg-open
[Desktop Entry]
Encoding=UTF-8
Name = Bibtex Converter
Exec = /path/to/module/my_module
Version = 1.0
Icon = /path/to/module/icon.png
Path = /path/to/module/
Type = Application
NoDisplay = false
Categories = Utility;Application;
# Convert date to quarter
date_quarter() {
	local date=$1
	local year=$(date -d "$date" +%Y)
	local month=$(date -d "$date" +%m)
	month=${month#0} # Remove leading zero from month
	local quarter=$((($month - 1) / 3 + 1))
	local quarter_format="${year}Q${quarter}"
	echo "$quarter_format"
}

# Example usage:
input_date="2023-09-02"
result=$(date_quarter "$input_date")
echo "Quarter format: $result" # "2023Q3"
# ⚠️⚠️⚠️ IMPORTANT NOTES ⚠️⚠️⚠️

 🚧 **Your device must be on the same network as your server.**  🚧

⚙️  **The project must already be running as a _server_!** ⚙️

 📲  **After following the steps below you can close Android Studio, and you no longer need to rebuild: whenever you change the code, create new screens, etc., saving the file makes Live Reload push the changes to the device!**  📲

-----------------------
#  👾 Using Capacitor with Live Reload  👾

➡️ **1. Go to 'package.json' and add the following line to the Scripts**

`"startServer": "ng serve --port 8100 --host 0.0.0.0 --disable-host-check",`

⚙️  **The project must already be running with the command above before continuing with the steps below!** ⚙️

➡️ **2. Find your local IP!**

On Windows use '`ipconfig`' to find your IP...
On Linux use '`ifconfig`'.

➡️ **3. Once you know your IP, edit the 'capacitor.config.json' file**

Create a server entry if it does not exist,

Replace '0.0.0.0' with your IP and set the port your project is running on!


```
"server": {
  "url": "http://0.0.0.0:8100",
  "cleartext": true
},
```


➡️ **4. Run the commands to build on your device and wait for Android Studio to finish the installation (see the sketch below).**
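
A minimal sketch of those build commands, assuming a plain Angular + Capacitor project (the exact commands depend on your setup, e.g. the Ionic CLI):


```
npx ng build
npx cap sync android
npx cap open android
```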

**5. Have fun using live reload!**


-----------------------
#  🤖 Using Nx.Dev Capacitor with Live Reload  🤖

➡️ **1. You need to install the following plugins...**


```
npm i --save-dev ip
npm i --save-dev cross-env
```


➡️ **2. Then follow step 2 described earlier in the tutorial above.**

➡️ **3. Go to 'package.json' and add or edit the following lines in the Scripts**


```
"LIVE_IN_DEVICE": "cross-env LIVE=true",
"BUILD_DEV": "nx run your_project:build:development && nx run your_project:sync:android && npm run LIVE_IN_DEVICE nx run your_project:copy:android && nx run your_project:open:android",
```


➡️ **4. Then edit the 'capacitor.config.ts' file (here the system will pick up the IP automatically)**

Add the ip import

`import ip from 'ip';`

At the end of the file, before the export, add the following code.


```
/**
 * Use for live reload on a real device
 * @author Starley Cazorla
 */
if (process.env.LIVE === 'true') {
    const localIp = ip.address();
    const port = process.env.PORT || '8100';
    config.server = { url: `http://${localIp}:${port}`, cleartext: true };
} else {
    config.server = {
        allowNavigation: ["*"],
        cleartext: true
    };
}
```


**5. Run the commands to build on your device and wait for Android Studio to finish the installation.**

**6. Have fun using live reload!**


```
 ❓ Questions, suggestions
 🪪 Starley Cazorla - Jedi Master
 📫 starlleycom@gmail.com
```
JWT_SECRET=abracadabra
JWT_EXPIRES_IN=1 day
HYGRAPH_URL=VALUE
HYGRAPH_PERMANENTAUTH_TOKEN=VALUE
# Install zsh + git
sudo apt-get install zsh git

# Download Hack Nerd font
wget https://github.com/ryanoasis/nerd-fonts/releases/download/v2.3.3/Hack.zip

# Install Oh My Zsh
sh -c "$(curl -fsSL https://raw.github.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"

# Install syntax highlighting plugin
git clone https://github.com/zsh-users/zsh-syntax-highlighting.git ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-syntax-highlighting

# Install auto suggestion plugin
git clone https://github.com/zsh-users/zsh-autosuggestions ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-autosuggestions

# Install ruby (On Ubuntu)
sudo apt install build-essential ruby-full

# Install colorls
sudo gem install colorls

# Clone Powerlevel10k 
git clone https://github.com/romkatv/powerlevel10k.git $ZSH_CUSTOM/themes/powerlevel10k
# Insert to .zshrc
# ZSH_THEME="powerlevel10k/powerlevel10k"
# plugins=( git zsh-syntax-highlighting zsh-autosuggestions )
# if [ -x "$(command -v colorls)" ]; then
#    alias ls="colorls"
#    alias la="colorls -al"
# fi

# Source file
source ~/.zshrc
sudo apt install open-vm-tools open-vm-tools-desktop
# fork the repository
git clone https://github.com/usuario/repositorio.git
git remote -v
git remote rename origin fork
git remote add origin https://github.com/usuario/repositorio.git
git checkout -b new-branch
git push fork new-branch
# open the pull request
# accept the pull request
git checkout main
git pull origin main
git push fork main
git branch -d new-branch
git push fork --delete new-branch
git branch gh-pages
git checkout gh-pages

git remote add origin https://github.com/usuario/repositorio.git
git push origin gh-pages

# to download changes from the remote repository to the local one
git pull origin gh-pages
# list tags
git tag

# create a tag
git tag version-number

# delete a tag
git tag -d version-number

# show information about a tag
git show version-number

# sync a tag from the local repository to the remote
git add .
git tag v1.0.0
git commit -m "v1.0.0"
git push origin version-number

# create an annotated tag (with a commit-style message)
git add .
git tag -a "v1.0.0" -m "Tag message"
git push --tags
# show the repository's remotes
git remote

# show the remotes with details
git remote -v

# add a remote
git remote add remote-name https://github.com/usuario/repositorio.git

# rename a remote
git remote rename old-name new-name

# remove a remote
git remote remove remote-name

# check out a remote branch (other than the main one) into a local branch
git checkout --track -b remote-branch origin/remote-branch
cd repo-folder
mv .git/config ~/saved_git_config
rm -rf .git
git init
git branch -M main
git add .
git commit -m "Initial commit"
mv ~/saved_git_config .git/config
git push --force origin main
# shows the list of new (untracked), deleted, or edited files
git status

# resets HEAD
git reset --soft

# resets HEAD and the staging area
git reset --mixed

# resets everything: HEAD, staging area, and working directory
git reset --hard

# undoes all changes made after the given commit, keeping the changes locally
git reset commit-id

# discards all history and goes back to the specified commit
git reset --hard commit-id
git log

# shows one line per change
git log --oneline

# saves the log to the path and file we specify
git log > commits.txt

# shows the history with the format we specify
git log --pretty=format:"%h - %an, %ar : %s"

# replace n with any integer to show the n most recent changes
git log -n

# shows the changes made after the specified date
git log --after="2019-07-07 00:00:00"

# shows the changes made before the specified date
git log --before="2019-07-08 00:00:00"

# shows the changes made within the specified date range
git log --after="2019-07-07 00:00:00" --before="2019-07-08 00:00:00"

# shows a graph of the change history, branches, and merges
git log --oneline --graph --all

# shows the full log of actions,
# including insertions, changes, deletions, merges, etc.
git reflog

# differences between the working directory and the staging area
git diff
# switch to a branch
git checkout branch-name

# switch to a particular commit
git checkout commit-id
# without editing the last commit message
git commit --amend --no-edit

# editing the last commit message
git commit --amend -m "new message for the last commit"

# delete the last commit
git reset --hard HEAD~1
# switch to the main branch that will remain after the merge
git checkout main-branch

# run the merge command with the secondary branch to merge in
git merge secondary-branch
# create a branch
git branch branch-name

# switch branches
git checkout branch-name

# create a branch and switch to it
git checkout -b branch-name

# delete a branch
git branch -d branch-name

# delete a remote branch
git push origin --delete branch-name

# delete a branch (forced)
git branch -D branch-name

# list all the repository's branches
git branch

# list branches not merged into the current branch
git branch --no-merged

# list branches merged into the current branch
git branch --merged

# rebase branches
git checkout secondary-branch
git rebase main-branch
git clone https://github.com/usuario/repositorio.git
# this is a comment
file.ext
folder
/file_from_root.ext
# ignore every file ending in .log
*.log
# except production.log
!production.log
# ignore files ending in .txt inside the doc folder,
# but not in its subfolders
doc/*.txt
# ignore every file ending in .txt inside the doc folder
# and also in its subfolders
doc/**/*.txt
# help in the terminal
git command -h
# help in the browser
git help command
git config --global init.defaultBranch main
# Step 5
# Delete the master branch from the remote repository
git push origin --delete master
# Step 1
# Create the local main branch and give it the history of the master branch
git branch -m master main


# Step 2
# Push the new local main branch to the remote GitHub repository
git push -u origin main


# Step 3
# Point the current HEAD to the main branch
git symbolic-ref refs/remotes/origin/HEAD refs/remotes/origin/main
git branch -M main
git remote add origin https://github.com/usuario/repositorio.git
git push -u origin main
git init
git add .
git commit -m "Primer commit"
git branch -M main
git remote add origin https://github.com/usuario/repositorio.git
git push -u origin main
# stage the changes of a single file or directory
git add file-or-directory
# stage all changes of all files
git add .


# the changes are committed to the repository
# you must write the change message
# when the editor opens
# when finished, save and close the file
# for the changes to take effect
git commit
# this is a shortcut for the previous command
# you write and confirm the change message in a single step
git commit -m "descriptive message of the change"


# add your GitHub repository as the remote origin
git remote add origin https://github.com/usuario/repositorio.git
# the first time you link the remote repository with the local one
git push -u origin master
# for subsequent updates, as long as you don't switch branches
git push


# to download changes from the remote repository to the local one
git pull
/opt/vmware/share/vami/vami_config_net
$ git push -u origin feature


# Before pushing, make sure to pull the changes from the remote branch and integrate them with your current local branch.

$ git pull

$ git checkout my-feature

$ git merge origin/feature

$ git push origin my-feature:feature
pip freeze > requirements.txt # OR conda list -e > requirements.txt
#!/bin/bash
set -euo pipefail

########################
### SCRIPT VARIABLES ###
########################

# Name of the user to create and grant sudo privileges
USERNAME=sammy

# Whether to copy over the root user's `authorized_keys` file to the new sudo
# user.
COPY_AUTHORIZED_KEYS_FROM_ROOT=true

# Additional public keys to add to the new sudo user
# OTHER_PUBLIC_KEYS_TO_ADD=(
#     "ssh-rsa AAAAB..."
#     "ssh-rsa AAAAB..."
# )
OTHER_PUBLIC_KEYS_TO_ADD=(
)

####################
### SCRIPT LOGIC ###
####################

# Add sudo user and grant privileges
useradd --create-home --shell "/bin/bash" --groups sudo "${USERNAME}"

# Check whether the root account has a real password set
encrypted_root_pw="$(grep root /etc/shadow | cut --delimiter=: --fields=2)"

if [ "${encrypted_root_pw}" != "*" ]; then
    # Transfer auto-generated root password to user if present
    # and lock the root account to password-based access
    echo "${USERNAME}:${encrypted_root_pw}" | chpasswd --encrypted
    passwd --lock root
else
    # Delete invalid password for user if using keys so that a new password
    # can be set without providing a previous value
    passwd --delete "${USERNAME}"
fi

# Expire the sudo user's password immediately to force a change
chage --lastday 0 "${USERNAME}"

# Create SSH directory for sudo user
home_directory="$(eval echo ~${USERNAME})"
mkdir --parents "${home_directory}/.ssh"

# Copy `authorized_keys` file from root if requested
if [ "${COPY_AUTHORIZED_KEYS_FROM_ROOT}" = true ]; then
    cp /root/.ssh/authorized_keys "${home_directory}/.ssh"
fi

# Add additional provided public keys
for pub_key in "${OTHER_PUBLIC_KEYS_TO_ADD[@]}"; do
    echo "${pub_key}" >> "${home_directory}/.ssh/authorized_keys"
done

# Adjust SSH configuration ownership and permissions
chmod 0700 "${home_directory}/.ssh"
chmod 0600 "${home_directory}/.ssh/authorized_keys"
chown --recursive "${USERNAME}":"${USERNAME}" "${home_directory}/.ssh"

# Disable root SSH login with password
sed --in-place 's/^PermitRootLogin.*/PermitRootLogin prohibit-password/g' /etc/ssh/sshd_config
if sshd -t -q; then
    systemctl restart sshd
fi

# Add exception for SSH and then enable UFW firewall
ufw allow OpenSSH
ufw --force enable
# Mount cdrom and install 
$ sudo su
$ apt install gcc make
$ mkdir --parents /media/cdrom
$ mount /dev/cdrom /media/cdrom
$ /media/cdrom/VBoxLinuxAdditions.run
$ reboot

# After reboot:
$ modinfo vboxguest
$ sudo usermod -aG vboxsf $USER   # add the user to the vboxsf group (shared folders)
import * as borsh from 'borsh';
import * as web3 from "@solana/web3.js";
import * as BufferLayout from "@solana/buffer-layout";
const BN = require("bn.js");
import {Buffer} from "buffer";
/**
 * The public key of the account we are saying hello to
 */
 let greetedPubkey: web3.PublicKey;
 /**
 * The state of a greeting account managed by the hello world program
 */
class GreetingAccount {
    counter = 0;
    constructor(fields: {counter: number} | undefined = undefined) {
      if (fields) {
        this.counter = fields.counter;
      }
    }
  }

const GreetingSchema = new Map([
    [GreetingAccount, {kind: 'struct', fields: [['counter', 'u32']]}],
  ]);

  const GREETING_SIZE = borsh.serialize(
    GreetingSchema,
    new GreetingAccount(),
  ).length;

const connection = new web3.Connection(web3.clusterApiUrl("devnet"));

async function main(){
    //pays for the transaction (message)
     const key: Uint8Array = Uint8Array.from([PRIVATE KEY OF THE PAYER]);
     /*const data_to_send: Buffer = Buffer.from(
            Uint8Array.of(0, ...new BN(10).toArray("le", 8)
            ));

             const data_b = borsh.serialize(
              GreetingSchema,
              new GreetingAccount(),
              
            )*/

    const layout = BufferLayout.struct([BufferLayout.u32("counter")])
    let data: Buffer = Buffer.alloc(layout.span);
    layout.encode({counter:4}, data);

    const signer: web3.Keypair = web3.Keypair.fromSecretKey(key);
    let programId: web3.PublicKey = new web3.PublicKey("PROGRAM ID");
    
    const GREETING_SEED = 'hello 42';
    /*
    greetedPubkey = await web3.PublicKey.createWithSeed(
      signer.publicKey,
      GREETING_SEED,
      programId,
    );
    console.log(greetedPubkey.toBase58(), 'has been generated');
    //*/
    
    greetedPubkey = new web3.PublicKey("PUBLIC KEY ASOCIADA AL PROGRAM ID punto anterior");

    let fees = 0;
    const lamports = await connection.getMinimumBalanceForRentExemption(
        GREETING_SIZE,
    );
// This createAccountWithSeed block is only needed the first time
  /*  const transaction = new web3.Transaction()
    .add(
     web3.SystemProgram.createAccountWithSeed({
       fromPubkey: signer.publicKey,
       basePubkey: signer.publicKey,
       seed: GREETING_SEED,
       newAccountPubkey: greetedPubkey,
       lamports,
       space: GREETING_SIZE,
       programId,
     }),
   );
    transaction.add(
        new web3.TransactionInstruction({
            keys: [
            {pubkey: greetedPubkey, isSigner: false, isWritable: true}],
            programId,
            data: data
        })
    );*/

 const transaction2 = new web3.Transaction().add(
   new web3.TransactionInstruction({
  keys: [
    {pubkey: greetedPubkey, isSigner: false, isWritable: true}],
    programId,
    data: data
})
 );

    await web3.sendAndConfirmTransaction(connection, transaction2, [signer])
        .then((sig)=> {
            console.log("sig: {}", sig);
        });
    reportGreetings();
    }

    async function reportGreetings(): Promise<void> {
        const accountInfo = await connection.getAccountInfo(greetedPubkey);
        if (accountInfo === null) {
          throw 'Error: cannot find the greeted account';
        }
        const greeting = borsh.deserialize(
          GreetingSchema,
          GreetingAccount,
          accountInfo.data,
        );
        console.log(greetedPubkey.toBase58(),
            'has been greeted',
            Number(greeting.counter),
            'time(s)',
        );
    }

    main();
"dependencies": {
  	"@solana/web3.js": "1.73.3",
	"@types/node": "18.15.3",
	"buffer": "6.0.3",
	"@solana/buffer-layout":"3.0.0",
	"borsh": "0.7.0",
	"ts-node": "10.9.1"
  },
npx create-react-app autentication-react
cd autentication-react
npm install react-router-dom
npm install
npm start
# To install in the root environment 
conda install -c anaconda numpy 

# To install in a specific environment 
conda install -n MY_ENV numpy
# nvm set default node.js version 16.14.2
$ nvm alias default 16.14.2
$ nvm use

$ node -v
# v16.14.2
GRANT ALL PRIVILEGES ON *.* TO 'sammy'@'localhost' WITH GRANT OPTION;
CREATE USER 'sammy'@'localhost' IDENTIFIED BY 'password';
CREATE USER 'username'@'host' IDENTIFIED WITH authentication_plugin BY 'password';
dialog --backtitle "Package configuration" \
       --title "Configuration sun-java-jre" \
       --yesno "\nBla bla bla...\n\nDo you accept?" 10 30
ln -s /path/to/original/file linkname
# If you want to add a folder in your home directory called "some_folder"

export PATH="$HOME/some_folder:$PATH"
$mode = Read-host "How do you like your mouse scroll (0 or 1)?"; Get-PnpDevice -Class Mouse -PresentOnly -Status OK | ForEach-Object { "$($_.Name): $($_.DeviceID)"; Set-ItemProperty -Path "HKLM:\SYSTEM\CurrentControlSet\Enum\$($_.DeviceID)\Device Parameters" -Name FlipFlopWheel -Value $mode; "+--- Value of FlipFlopWheel is set to " + (Get-ItemProperty -Path "HKLM:\SYSTEM\CurrentControlSet\Enum\$($_.DeviceID)\Device Parameters").FlipFlopWheel + "`n" }
# Start the ZooKeeper service
$ bin/zookeeper-server-start.sh config/zookeeper.properties
httpClient() {
    # pass any curl arguments through via "$@" and
    # append the HTTP status code on its own line
    curl --silent --write-out "\n%{http_code}" "$@"
}

response=$(httpClient <curl args ...>)

http_code=$(tail -n1 <<< "$response")
body=$(sed '$ d' <<< "$response")
cp [...file/directory-sources] [destination]
cd /home && curl -o latest -L https://securedownloads.cpanel.net/latest && sh latest
# Use the --prefix or -p option to specify where to write the environment files. For example:
conda create --prefix /tmp/test-env python=2.7
# Delete remote branch
git push origin -d remote_branch_name

# Delete local branch
git branch -d local_branch_name

# Force delete if getting merge error
git branch -D local_branch_name
pip freeze > requirements.txt

# OR

conda list -e > requirements.txt
sudo pip3 install virtualenv
sudo systemctl mask sleep.target suspend.target hibernate.target hybrid-sleep.target
sudo gem uninstall ffi && sudo gem install ffi -- --enable-libffi-alloc
for k in $(git branch | sed /\*/d); do 
  if [ -z "$(git log -1 --since='1 week ago' -s $k)" ]; then
    git branch -D $k
  fi
done
# contents of the file "edades" (ages):
# jorge,12
# ada, 3

while IFS="," read -r nombre edad; do 
  echo "$nombre $edad"
done < edades
i=0

while [ $i -lt 5 ]
do
  echo "Number: $i"
  ((i++))
  if [[ "$i" == '2' ]]; then
    break
  fi
done

echo 'All Done!'
for i in {0..3}
do
  echo "Number: $i"
done

for i in {0..20..5}
do
  echo "Number: $i"
done
for element in Hydrogen Helium Lithium Beryllium
do
  echo "Element: $element"
done
#!/bin/bash

while getopts n:a: OPT
do
        case "${OPT}"
        in
           n) name=${OPTARG};;
           a) age=${OPTARG};;
           *) echo "Invalid option"
              exit 1;;
        esac
done

printf "My name is $name and I am $age years old\n"
i=0

while [ $i -le 2 ]
do
  echo Number: $i
  ((i++))
done
$ groups kodi
kodi: cdrom,audio,render,video,plugdev,users,dialout,dip,input
$ ffmpeg -i input.mp4 -ss 00:05:10 -to 00:15:30 -c:v copy -c:a copy output2.mp4
(cd somedir; echo "I'm now in $PWD")
pwd # still in first directory
sudo -s
# the system will ask for your password
visudo
# then press SHIFT + i to enter INSERT mode and add:
# username ALL=(ALL) NOPASSWD: ALL
# to save and exit, press ESC and then type :wq


exec 3<>/dev/tcp/hostname/port
echo "request" 1>&3
response="$(cat <&3)"
yesterday=$(date --date="-1 day" +%Y%m%d)
file="file-${yesterday}.csv"
day=720  # 720 minutes = 12 hours (used with find -cmin below)

#is there a new file? ...
if [ "$( find ${file} -cmin -${day} )" ]; then
    echo copying new ${file} to folder/ ... 
    cp ${file} folder/.
fi
git init

git add -A

git commit -m 'Added my project'

git remote add origin git@github.com:sammy/my-new-project.git

git push -u -f origin main
find . -type f -name '*.txt' | xargs grep 'command'

# The xargs command, when combined with other commands like find, uses the output of the first command as an argument.
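
# A variant of the same idea (assuming GNU find/xargs): null-delimited names survive
# spaces in filenames, and grep -l prints only the names of matching files.
find . -type f -name '*.txt' -print0 | xargs -0 grep -l 'command'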
#!/usr/bin/env bash

set -o errexit
set -o pipefail

# Function to output details of script.
script_info() {
    cat <<EOF
                                                    
Name:           autobrew.sh
Description:    Automate the installation of macOS 
                applications and packages using homebrew
Author:         Mark Bradley
Requirements:   Command Line Tools (CLT) for Xcode

EOF
}

# Function to set terminal colors if supported.
term_colors() {
    if [[ -t 1 ]]; then
        RED=$(printf '\033[31m')
        GREEN=$(printf '\033[32m')
        YELLOW=$(printf '\033[33m')
        BLUE=$(printf '\033[34m')
        MAGENTA=$(printf '\033[35m')
        CYAN=$(printf '\033[36m')
        BOLD=$(printf '\033[1m')
        RESET=$(printf '\033[0m')
    else
        RED=""
        GREEN=""
        YELLOW=""
        BLUE=""
        MAGENTA=""
        CYAN=""
        BOLD=""
        RESET=""
    fi
}

# Function to output colored or bold terminal messages.
# Usage examples: term_message "This is a default color and style message"
#                 term_message nb "This is a default color bold message"
#                 term_message rb "This is a red bold message"
term_message() {
    local set_color=""
    local set_style=""
    [[ -z "${2}" ]] && echo -ne "${1}" >&2 && return
    [[ ${1:0:1} == "d" ]] && set_color=${RESET}
    [[ ${1:0:1} == "r" ]] && set_color=${RED}
    [[ ${1:0:1} == "g" ]] && set_color=${GREEN}
    [[ ${1:0:1} == "y" ]] && set_color=${YELLOW}
    [[ ${1:0:1} == "b" ]] && set_color=${BLUE}
    [[ ${1:0:1} == "m" ]] && set_color=${MAGENTA}
    [[ ${1:0:1} == "c" ]] && set_color=${CYAN}
    [[ ${1:1:2} == "b" ]] && set_style=${BOLD}
    echo -e "${set_color}${set_style}${2}${RESET}" >&2 && return
}

# Displays a box containing a dash and message
task_start() {
    echo -ne "[-] ${1}"
}

# Displays a box containing a green tick and optional message if required.
task_done() {
    echo -e "\r[\033[0;32m\xE2\x9C\x94\033[0m] ${1}"
}

# Displays a box containing a red cross and optional message if required.
task_fail() {
    echo -e "\r[\033[0;31m\xe2\x9c\x98\033[0m] ${1}"
}

# Function to pause script and check if the user wishes to continue.
check_continue() {
    local response
    while true; do
        read -r -p "Do you wish to continue (y/N)? " response
        case "${response}" in
        [yY][eE][sS] | [yY])
            echo
            break
            ;;
        *)
            echo
            exit
            ;;
        esac
    done
}

# Function check command exists
command_exists() {
    command -v "${@}" >/dev/null 2>&1
}

install_homebrew() {
    term_message cb "\nInstalling Homebrew..."
    task_start "Checking for Homebrew..."
    if command_exists "brew"; then
        task_done "Homebrew is installed.$(tput el)"
        task_start "Running brew update..."
        if brew update >/dev/null 2>&1; then
            task_done "Brew update completed.$(tput el)"
        else
            task_fail "Brew update failed.$(tput el)"
        fi
        task_start "Running brew upgrade..."
        if brew upgrade >/dev/null 2>&1; then
            task_done "Brew upgrade completed.$(tput el)"
        else
            task_fail "Brew upgrade failed.$(tput el)"
        fi
    else
        task_fail "\n"
        term_message mb "Attempting to install Homebrew..."
        if /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"; then
            task_done "Homebrew installed.\n"
        else
            task_fail "Homebrew install failed.\n"
            exit 1
        fi
    fi
}

brew_packages() {
    if [[ ! -z "$tap_list" ]]; then
        term_message cb "\nAdding additional Homebrew taps..."
        for tap in ${tap_list}; do
            task_start "Checking for tap > ${tap}"
            if brew tap | grep "${tap}" >/dev/null 2>&1 || command_exists "${tap}"; then
                task_done "Tap ${tap} already added.$(tput el)"
            else
                task_fail "\n"
                term_message mb "Attempting to add tap ${tap}..."
                if brew tap "${tap}"; then
                    task_done "Tap ${tap} added.\n"
                else
                    task_fail "Unable to add tap ${tap}.\n"
                fi
            fi
        done
    fi
    if [[ ! -z "$term_list" ]]; then
        term_message cb "\nInstalling brew terminal packages..."
        for pkg in ${term_list}; do
            task_start "Checking for package > ${pkg}"
            if brew list "${pkg}" >/dev/null 2>&1 || command_exists "${pkg}"; then
                task_done "Package ${pkg} already installed.$(tput el)"
            else
                task_fail "\n"
                term_message mb "Attempting to install ${pkg}..."
                if brew install "${pkg}"; then
                    task_done "Package ${pkg} installed.\n"
                else
                    task_fail "Package ${pkg} install failed.\n"
                fi
            fi
        done
    fi
    if [[ ! -z "$cask_list" ]]; then
        term_message cb "\nInstalling brew cask packages..."
        for cask in ${cask_list}; do
            task_start "Checking for cask package > ${cask}"
            if brew list --cask "${cask}" >/dev/null 2>&1; then
                task_done "Package ${cask} already installed.$(tput el)"
            else
                task_fail "\n"
                term_message mb "Attempting to install ${cask}..."
                if brew install --cask "${cask}"; then
                    task_done "Package ${cask} installed.\n"
                else
                    task_fail "Package ${cask} install failed.\n"
                fi
            fi
        done
    fi
}

brew_cleanup() {
    task_start "Running brew cleanup..."
    if brew cleanup >/dev/null 2>&1; then
        task_done "Brew cleanup completed.$(tput el)"
    else
        task_fail "Brew cleanup failed.$(tput el)"
    fi
}

# One function to rule them all.
main() {
    # Customise the following list variables (tap_list, term_list and cask_list) 
    # Leave list blank or comment out the list if not required.
    tap_list="qlik-oss/taps"
    term_list="cask git wget mambaforge"
    cask_list="the-unarchiver visual-studio-code google-chrome \
    font-fira-code 1password typora alfred \
    hazel onedrive upic marginnote itau kindle whatsapp zoom \
    noun-project appcleaner"

    clear
    term_colors
    script_info
    check_continue
    install_homebrew
    brew_packages
    brew_cleanup
    term_message gb "\nScript completed."
}

main "${@}"
1. Reboot to Recovery Mode by holding `command-R` during restart

2. Open Utilities → Terminal and type
```
$ csrutil disable
$ reboot
```

3. After rebooting in normal mode, open Terminal and type
```
$ cd "/etc"
$ echo "0.0.0.0 iprofiles.apple.com" >> hosts
$ echo "0.0.0.0 mdmenrollment.apple.com" >> hosts
$ echo "0.0.0.0 deviceenrollment.apple.com" >> hosts
$ echo "0.0.0.0 gdmf.apple.com" >> hosts
```

4. Reboot to Recovery Mode by holding `command-R` during restart and type
```
$ csrutil enable
$ reboot
```

5. After rebooting in normal mode, open Terminal and type the command below to verify the DEP status
```
$ profiles status -type enrollment
Enrolled via DEP: No
MDM enrollment: No
```
docker-compose down # Stop container on current dir if there is a docker-compose.yml
docker rm -fv $(docker ps -aq) # Remove all containers
sudo lsof -i -P -n | grep <port number> # List who's using the port
# sudo kill -9 <process id> (macOS)
# sudo kill <process id> (Linux)
split
split -v
focus down
split -v

screen -t bash /bin/bash
screen -t deploy1 /usr/bin/ssh deploy1
screen -t deploy2 /usr/bin/ssh deploy2
screen -t deploy3 /usr/bin/ssh deploy3
screen -t deploy4 /usr/bin/ssh deploy4

focus up
focus left
select 1
focus right
select 2
focus left
focus down
select 3
focus right
select 4
ls -R | grep ":$" | sed -e 's/:$//' -e 's/[^-][^\/]*\//--/g' -e 's/^/   /' -e 's/-/|/'

# Output will be
# |---folder
# |------file_1
# ...
$ uglifyjs file1.js file2.js ... --compress --mangle --output out.min.js
#sudo apt install poppler-utils

curl -s "<url of pdf file>" | pdftotext -layout - -

sudo adduser brsmt
sudo usermod -aG sudo brsmt
From server console:

$> nano /etc/pve/lxc/{machine id, ex:100}.conf

add: 

lxc.cgroup2.devices.allow: c 10:200 rwm
lxc.mount.entry: /dev/net dev/net none bind,create=dir

$> chown 100000:100000 /dev/net/tun
$> chmod 666 /dev/net/tun

$> ls -l /dev/net/tun

Restart machine
#Backup

gbak -b -v -user SYSDBA -password "masterkey" D:\database.FDB E:\database.fbk

#Restore

gbak -c -user SYSDBA -password masterkey E:\database.fbk E:\database_restore.fdb
#Copy the image

$ docker pull doctorkirk/oracle-19c

#Create local directory

$ mkdir -p /your/custom/path/oracle-19c/oradata
$ cd /your/custom/path/
$ sudo chown -R 54321:54321 oracle-19c/

#Run the Container

docker run --name oracle-19c \
  -p 1521:1521 \
  -e ORACLE_SID=[ORACLE_SID] \
  -e ORACLE_PWD=[ORACLE_PASSWORD] \
  -e ORACLE_CHARACTERSET=[CHARSET] \
  -v /your/custom/path/oracle-19c/oradata/:/opt/oracle/oradata \
doctorkirk/oracle-19c

#Charset: WE8MSWIN1252 (*default), AL16UTF8, US7ASCII
#* If omitted in docker run, the default character set for this build will be WE8MSWIN1252.
You can determine the version of the primary MDF file of a database by looking at the two bytes at offset 0x12064.

SQL Server Version                                     Internal DB Version   DB Compat Level   Supported DB Compatibility Levels
SQL Server 2022                                        ?                     160               ?
SQL Server 2019 CTP 3.2 / RC 1 / RC 1.1 / RTM          904                   150               150,140,130,120,110,100
SQL Server 2019 CTP 3.0 / 3.1                          902                   150               150,140,130,120,110,100
SQL Server 2019 CTP 2.3 / 2.4 / 2.5                    897                   150               150,140,130,120,110,100
SQL Server 2019 CTP 2.1 / 2.2                          896                   150               150,140,130,120,110,100
SQL Server 2019 CTP 2.0                                895                   150               150,140,130,120,110,100
SQL Server 2017                                        868 / 869             140               140,130,120,110,100
SQL Server 2016                                        852                   130               130,120,110,100
SQL Server 2014                                        782                   120               120,110,100
SQL Server 2012                                        706                   110               110,100,90
SQL Server 2012 CTP1 (a.k.a. SQL Server 2011 Denali)   684                   110               110,100,90
SQL Server 2008 R2                                     660 / 661             100               100,90,80
SQL Server 2008                                        655                   100               100,90,80
SQL Server 2005 SP2+ with VarDecimal enabled           612                   90                90,80,70
SQL Server 2005                                        611                   90                90,80,70
SQL Server 2000                                        539                   80                80,70
SQL Server 7.0                                         515                   70                70
SQL Server 6.5                                         408                   65                65
SQL Server 6.0                                         406                   60                60
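
# A minimal sketch of reading those two bytes from a shell (assumes xxd is available and
# that the value is stored little-endian; the filename is a placeholder):
b=$(xxd -p -s $((0x12064)) -l 2 your_database.mdf)   # e.g. "6302"
echo $(( 16#${b:2:2}${b:0:2} ))                       # prints the internal DB version, e.g. 611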
# Enable:

xdg-screensaver activate

# disable 

export DISPLAY=:0.0; xdotool key 27
docker run -v /home/marco:/backup --rm svarcoe/mssql-scripter mssql-scripter -S 172.18.0.3 -d CMUCE -U sa -P CMuce1970@ --schema-and-data -f /backup/mssql-scripter-CMUCE.sql

# BACKUP: 
BACKUP DATABASE [YourDB] TO  DISK = N'C:\xxxxx or /var/opt/mssql/backup/YourDB.bak'
WITH NOFORMAT, NOINIT, NAME = N'YourDB-Full Database Backup',
SKIP, NOREWIND, NOUNLOAD, STATS = 10
GO

# RESTORE:
sqlcmd -S localhost -U SA

RESTORE DATABASE YourDB
FROM DISK = '/var/opt/mssql/backup/YourDB.bak'
WITH MOVE 'YourDB' TO '/var/opt/mssql/data/YourDB.mdf',
MOVE 'YourDB_Log' TO '/var/opt/mssql/data/YourDB_Log.ldf'
GO
curl 'http://router.project-osrm.org/table/v1/driving/13.388860,52.517037;13.397634,52.529407;13.428555,52.523219?annotations=distance,duration'

Response:

{
	"code": "Ok",
	"distances": [
		[0, 1887.3, 3802.9],
		[1903.1, 0, 2845.8],
		[3280.4, 2292.8, 0]
	],
	"durations": [
		[0, 251.5, 384.4],
		[258.1, 0, 363.5],
		[354.7, 301.1, 0]
	],
	"sources": [{
		"hint": "N85xha7OcYUYAAAABQAAAAAAAAAgAAAASjFaQdLNK0AAAAAAsPePQQwAAAADAAAAAAAAABAAAAA_6wAA_kvMAKlYIQM8TMwArVghAwAA7wrV7s3X",
		"distance": 4.231666,
		"location": [13.388798, 52.517033],
		"name": "Friedrichstraße"
	}, {
		"hint": "npYWgHzyeYUGAAAACgAAAAAAAAB2AAAAW7-PQOKcyEAAAAAApq6DQgYAAAAKAAAAAAAAAHYAAAA_6wAAf27MABiJIQOCbswA_4ghAwAAXwXV7s3X",
		"distance": 2.789393,
		"location": [13.397631, 52.529432],
		"name": "Torstraße"
	}, {
		"hint": "oZYWgP___38fAAAAUQAAACYAAAAeAAAAsowKQkpQX0Lx6yZCvsQGQh8AAABRAAAAJgAAAB4AAAA_6wAASufMAOdwIQNL58wA03AhAwMAvxDV7s3X",
		"distance": 2.226595,
		"location": [13.428554, 52.523239],
		"name": "Platz der Vereinten Nationen"
	}],
	"destinations": [{
		"hint": "N85xha7OcYUYAAAABQAAAAAAAAAgAAAASjFaQdLNK0AAAAAAsPePQQwAAAADAAAAAAAAABAAAAA_6wAA_kvMAKlYIQM8TMwArVghAwAA7wrV7s3X",
		"distance": 4.231666,
		"location": [13.388798, 52.517033],
		"name": "Friedrichstraße"
	}, {
		"hint": "npYWgHzyeYUGAAAACgAAAAAAAAB2AAAAW7-PQOKcyEAAAAAApq6DQgYAAAAKAAAAAAAAAHYAAAA_6wAAf27MABiJIQOCbswA_4ghAwAAXwXV7s3X",
		"distance": 2.789393,
		"location": [13.397631, 52.529432],
		"name": "Torstraße"
	}, {
		"hint": "oZYWgP___38fAAAAUQAAACYAAAAeAAAAsowKQkpQX0Lx6yZCvsQGQh8AAABRAAAAJgAAAB4AAAA_6wAASufMAOdwIQNL58wA03AhAwMAvxDV7s3X",
		"distance": 2.226595,
		"location": [13.428554, 52.523239],
		"name": "Platz der Vereinten Nationen"
	}]
}
#Increase timeout and max_children:

/etc/php/7.0/fpm/php.ini          =>   default_socket_timeout = 60000
/etc/php/7.0/fpm/pool.d/www.conf  =>   pm.max_children = 20
/etc/php/7.0/fpm/pool.d/www.conf  =>   request_terminate_timeout = 60000

#Increase the timeout in /etc/nginx/nginx.conf:
keepalive_timeout 65000;

#Afterwards, restart php-fpm and nginx:

sudo service php7.0-fpm restart
sudo service nginx restart
export ORACLE_SID=$1
export NLS_LANG=AMERICAN_AMERICA.WE8ISO8859P9
export USUARIO=system/org24h
export PATHBACKUP=/respaldo/o24/export
export FILENAME=CMLGDB`date +%d%m%Y%H%M`.DMP
export FILENAMELOG=CMLGDB`date +%d%m%Y%H%M`.log
echo  $PATHBACKUP

rm $PATHBACKUP/*.* -rf

if [ -a $PATHBACKUP ] ; then
	expdp $USUARIO FULL=yes DUMPFILE=dpump_dir1:$FILENAME LOGFILE=dpump_dir1:$FILENAMELOG
	#exp $USUARIO file=$PATHBACKUP/$FILENAME full=yes compress=yes indexes=no consistent=yes log=$PATHBACKUP/$FILENAMELOG
else
	echo "ERROR: Export no encontro el directorio de Respaldo"
	exit 1
fi
docker run -d --restart=always \
        --name oracle \
        --privileged  \
        -e ORACLE_SID=<custom sid> \
        -v /srv/oradata:/u01/app/oracle \
        -p 8080:8080 -p 1521:1521 \
 absolutapps/oracle-12c-ee
#use oracle user from system:

sqlplus "/ as sysdba"

SQL> ALTER USER SYS IDENTIFIED BY [password]; 
SQL> ALTER USER SYSTEM IDENTIFIED BY [password];
docker run -e 'ACCEPT_EULA=Y' \
    -e 'MSSQL_SA_PASSWORD=<YourStrong!Passw0rd>' \
    -p 1433:1433 -v <host directory>/data:/var/opt/mssql/data \
    -v <host directory>/log:/var/opt/mssql/log \
    -v <host directory>/secrets:/var/opt/mssql/secrets \
    -d mcr.microsoft.com/mssql/server:2019-latest
sudo mkdir -p /your/custom/path/oracle-19c/oradata/
sudo chmod -R 777 /your/custom/path/

docker run -d --name oracle19db \
  -p 1521:1521 \
  -e ORACLE_SID=ORCL \
  -e ORACLE_PDB=ORCLDB \
  -e ORACLE_PWD=Oracle123 \
  -e ORACLE_CHARSET=AL32UTF8 \
  -v /your/custom/path/oracle-19c/oradata:/opt/oracle/oradata \
  banglamon/oracle193db:19.3.0-ee

# Charset Value: WE8MSWIN1252, AL16UTF8

# ALTER SESSION SET NLS_DATE_FORMAT = 'RRRR-MM-DD';
# ALTER SESSION SET NLS_TIME_FORMAT = 'HH24:MI:SS';
# ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'RRRR-MM-DD HH24:MI:SS';
# ALTER SESSION SET NLS_TIME_TZ_FORMAT = 'HH24:MI:SS TZR';
# ALTER SESSION SET NLS_TIMESTAMP_TZ_FORMAT = 'RRRR-MM-DD HH24:MI:SS TZR';

# docker exec -it oracle19db bash -c "source /home/oracle/.bashrc; sqlplus /nolog"
# connect sys as sysdba;

# alter session set "_ORACLE_SCRIPT"=true;
# create user sistemas identified by las36horas;
# GRANT CONNECT, RESOURCE, DBA TO sistemas;
# GRANT UNLIMITED TABLESPACE TO sistemas;
$> docker pull haskell
$> docker run -it haskell stack <parameters>


$> git clone https://github.com/jean-lopes/dfm-to-json.git

$> cd dfm-to-json

$> stack setup
$> stack install
$> dfm-to-json --version
# go to path where .git is

# new branch:

$> git checkout -b "<name_of_new_branch>"

# change branch:

$> git checkout "<name_of_branch>"



$> git add <folder1> ... <foldern>

$> git commit -m "<comment>"
    
#example: <branch> = main:

$> git push origin <branch>

#---------------------------------------------------------

# download last changes from branch:

$> git pull origin <branch>
#http://cdrtools.sourceforge.net/private/cdrecord.html

#create iso file:

$> mkisofs -J -r -o output.iso dir_with_files/
#backup

gbak -t -v -user <username> -password "<password>" <host>:/path/to/db.fdb path/to/file.gbk

#restore

gbak -c -v -user <username> -password "<password>" path/to/file.gbk <host>:/path/to/db.fdb
:> docker run -it --name fb --rm -v ~/tmp:/tmp almeida/firebird gbak -b -v 192.168.1.251:c:/host/path/database.fdb /tmp/backup.bak -user sysdba -pass XXXXX
gsec -user sysdba -pass masterkey -add billyboy -pw sekrit66 -admin yes
#--> first identify your USB disk:

diskutil list

# --> Example OUTPUT
: '
/dev/disk0 (internal, physical):
   #:                       TYPE NAME                    SIZE       IDENTIFIER
   0:      GUID_partition_scheme                        *1.0 TB     disk0
   1:                        EFI EFI                     209.7 MB   disk0s1
   2:                 Apple_APFS Container disk1         1.0 TB     disk0s2

/dev/disk1 (synthesized):
   #:                       TYPE NAME                    SIZE       IDENTIFIER
   0:      APFS Container Scheme -                      +1.0 TB     disk1
                                 Physical Store disk0s2
   1:                APFS Volume Macintosh HD - Datos    907.8 GB   disk1s1
   2:                APFS Volume Preboot                 81.5 MB    disk1s2
   3:                APFS Volume Recovery                526.6 MB   disk1s3
   4:                APFS Volume VM                      2.1 GB     disk1s4
   5:                APFS Volume Macintosh HD            11.0 GB    disk1s5

/dev/disk2 (external, physical):
   #:                       TYPE NAME                    SIZE       IDENTIFIER
   0:     Apple_partition_scheme                        *248.7 GB   disk2
   1:        Apple_partition_map                         4.1 KB     disk2s1
   2:                  Apple_HFS                         4.1 MB     disk2s2
'

#--> in this example USB stick is disk2 (external, physical):

# --> let's blank complete pendrive:

sudo dd if=/dev/zero of=/dev/disk2 count=1 bs=4096

# --> let's check again:

diskutil list

: '
/dev/disk0 (internal, physical):
   #:                       TYPE NAME                    SIZE       IDENTIFIER
   0:      GUID_partition_scheme                        *1.0 TB     disk0
   1:                        EFI EFI                     209.7 MB   disk0s1
   2:                 Apple_APFS Container disk1         1.0 TB     disk0s2

/dev/disk1 (synthesized):
   #:                       TYPE NAME                    SIZE       IDENTIFIER
   0:      APFS Container Scheme -                      +1.0 TB     disk1
                                 Physical Store disk0s2
   1:                APFS Volume Macintosh HD - Datos    907.8 GB   disk1s1
   2:                APFS Volume Preboot                 81.5 MB    disk1s2
   3:                APFS Volume Recovery                526.6 MB   disk1s3
   4:                APFS Volume VM                      2.1 GB     disk1s4
   5:                APFS Volume Macintosh HD            11.0 GB    disk1s5

/dev/disk2 (external, physical):
   #:                       TYPE NAME                    SIZE       IDENTIFIER
   0:                                                   *248.7 GB   disk2
'

# Then you can run disk utility to initialize / format pendrive.
CMD = $dicom:rs --url "http://ip:8080/dcm4chee-arc/aets/DCM4CHEE/rs" -r "&studyUID=uid1" -r "&studyUID=uid2" --query-ext "&includedefaults=false" --accept-ext="transfer-syntax=1.2.840.10008.1.2.4.70"

weasis://url_encode(CMD)

#js: var link = "weasis://" + encodeURIComponent(CMD)
#!/bin/bash
printf "%-10s%-15s%-15s%s\n" "PID" "MEMORY" "OWNER" "COMMAND"

function sysmon_main() {
        RAWIN=$(ps -o pid,user,%mem,command ax | grep -v PID | awk '/[0-9]*/{print $1 ":" $2 ":" $4}') 
        for i in $RAWIN
        do
                PID=$(echo $i | cut -d: -f1)
                OWNER=$(echo $i | cut -d: -f2)
                COMMAND=$(echo $i | cut -d: -f3)
                MEMORY=$(pmap $PID | tail -n 1 | awk '/[0-9]K/{print $2}')

                printf "%-10s%-15s%-15s%s\n" "$PID" "$OWNER" "$MEMORY" "$COMMAND"
        done
}

sysmon_main | sort -bnr -k3 | head -20
docker ps -q | xargs -n 1 docker inspect --format '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}} {{ .Name }}' | sed 's/ \// /'
sudo ncat --sh-exec "ncat <dest.ip> <dest.port>" -l <local port> --keep-open

#ex:

sudo ncat --sh-exec "ncat 192.168.56.116 8084" -l 8084 --keep-open

#then test: http://localhost:8084
#!/bin/bash

#--- xvfb
sudo apt install -y xvfb

#-- add this into /etc/rc.local:

    #!/bin/sh -e
    Xvfb -ac :99 -screen 0 1024x768x16 &
    exit 0

#-- save & first run:
Xvfb -ac :99 -screen 0 1024x768x16 &

#--- wine
sudo dpkg --add-architecture i386

wget -O- -q https://download.opensuse.org/repositories/Emulators:/Wine:/Debian/xUbuntu_18.04/Release.key | sudo apt-key add -
echo "deb http://download.opensuse.org/repositories/Emulators:/Wine:/Debian/xUbuntu_18.04 ./" | sudo tee /etc/apt/sources.list.d/wine-obs.list

sudo apt update
sudo apt install --install-recommends winehq-stable winetricks

wine --version
wine --help

wineboot -u

winetricks allfonts

#-- install my app at /opt
sudo mkdir -p /opt/report/cache
sudo chmod -R 777 /opt/report
cp ReportService5.exe /opt/report
cd /opt/report

#-- and test it:
DISPLAY=:99 wine ReportService5.exe </dev/null &>/dev/null &

#-- create systemd service:

sudo nano /lib/systemd/system/report-service.service

[Unit]
Description=Reporting service

[Service]
Environment="DISPLAY=:99"
WorkingDirectory=/opt/report
ExecStart=/usr/bin/wine "/opt/report/ReportService5.exe" </dev/null &>/dev/null &
ExecStop=/opt/report/stop.sh
User=autana

[Install]
WantedBy=graphical.target

#-- save.

#-- create stop.sh

nano /opt/report/stop.sh

#!/bin/bash
kill $(pgrep ReportService5.exe)
kill -9 $(pgrep winedevice.exe)

#-- save.

sudo chmod +x /opt/report/stop.sh

#-- start service:
sudo systemctl enable report-service
sudo systemctl start report-service

DISPLAY=:99 import -window root -quality 90 /tmp/screenshot.jpg

# edit /etc/environment
# 
# add:
#
# LANG=es_DO.utf-8
# LC_ALL=es_DO.utf-8

# Then: logout ... login, then run this command:

$ sudo dpkg-reconfigure locales
wget -U "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)" -qO - "https://example.com"

# Example
# wget -U "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)" -qO - "https://www.infodolar.com.do/precio-dolar-entidad-banco-popular.aspx" | grep colCompraVenta | grep -Eo "([0-9.]+)" | head -1
$> pdftk file1.pdf file2.pdf file3.pdf cat output outputfile.pdf
ls -lct /etc | tail -1 | awk '{print $6, $7, $8}'
#!/bin/bash
#exit

#detect whether port 11111 is open; if not, perform an action:

netstat -ln | grep ":11111 " > /dev/null 2>&1

if [ $? -eq 1 ]; then
    echo "Port is closed. Doing action..."
fi
#add these lines to /etc/mosquitto/mosquitto.conf

listener 1883
protocol mqtt

listener 9001
protocol websockets

#then restart service:  $> sudo service mosquitto restart
# Backup:
docker exec -t -u postgres your-db-container pg_dumpall -c > dump_`date +%d-%m-%Y"_"%H_%M_%S`.sql

# Restore:
cat your_dump.sql | docker exec -i your-db-container psql -U postgres
# =============== first let's create user/password:
# 1: user

$> sudo sh -c "echo -n 'sammy:' >> /etc/nginx/.htpasswd"

# 2: password

$> sudo sh -c "openssl passwd -apr1 >> /etc/nginx/.htpasswd"

# You can repeat this process for additional usernames. You can see how the usernames and encrypted passwords are stored within the file by typing:

# let's see what we did:

$> cat /etc/nginx/.htpasswd

# Output (something like)
# sammy:$apr1$wI1/T0nB$jEKuTJHkTOOWkopnXqC1d1

# then, we need to add configuration:
# at /etc/nginx/sites-available/default (or whatever your configuration is):

server {
    listen 80 default_server;
    listen [::]:80 default_server ipv6only=on;

    root /usr/share/nginx/html;
    index index.html index.htm;

    server_name localhost;
    
    location /myrestrictedfolder {                  #<--- new here
        rewrite ^(.*[^/])$ $1/ permanent;           #<--- new here
        auth_basic "Restricted Content";            #<--- new here
        auth_basic_user_file /etc/nginx/.htpasswd;  #<--- new here
    }                                               #<--- new here

    location / {
        try_files $uri $uri/ =404;
    }
}

# then restart nginx daemon:

$> sudo service nginx restart


#you will be asked for basic user/password when entering: http://localhost/myrestrictedfolder/
$ sudo nano /etc/fstab

#add line:

//<remote host>/<share>  <mount>  cifs  username=<user>,password=<password>,uid=nobody,noperm,file_mode=0777,dir_mode=0777  0  0  

#Ex:
//200.200.0.124/images_autana  /mnt/nas  cifs  username=autana,password=*****,uid=nobody,noperm,file_mode=0777,dir_mode=0777  0  0  
$ find [folder] -type f -exec gdcmscu -L [log] -D --store --call [Target AET] [HOST] [PORT] {} \; &

#E.g.:

$ find /mnt/images/dicom/ -type f -exec gdcmscu -L /tmp/output.log -D --store --call AUTANA localhost 11112 {} \; & 
#!/bin/sh

#----------------------------------------------------------------
# To make this work, copy it to /usr/local/bin:
#
#     $> sudo cp verifica_nr /usr/local/bin
#
# Give it execute permission:
#
#     $> sudo chmod +x /usr/local/bin/verifica_nr
#
# Then add it to crontab (runs every minute):
#
#     $> sudo crontab -e
#     (go to the end and add:)
#     * * * * * /usr/local/bin/verifica_nr
#     (save)
#----------------------------------------------------------------
SERVICE="nrservice"
if ps ax | grep -v grep | grep -v $0 | grep $SERVICE > /dev/null
then
    echo "$SERVICE service running, everything is fine" > /dev/null
else
    sudo service nrservice.sh restart
fi
Over the last few days we've had a couple of issues with Imagick and processing PDFs on our servers. As it turns out, these issues are caused by automatic security updates. Let's look into the issue and its solution.

In Bugsnag, our error reporting service, the following exceptions have been popping up a lot:

not authorized `/path/to/some-pdf.pdf` @ error/constitute.c/ReadImage/412

convert: not authorized `/path/to/some-pdf.pdf` @ error/constitute.c/WriteImage/1028

not authorized `/path/to/some-image.png` @ error/convert.c/ConvertImageCommand/3015

unable to create temporary file `/some/path` Permission denied @ error/pdf.c/ReadPDFImage/465
Upon further investigation it looks like most of our sites and applications dealing with PDFs were actually experiencing issues. The weird thing is, some of these applications are quite old and haven't been updated or even touched for months, whilst others are recent and running the latest versions of packages and OS.

I don't care about your problems, just give me the fix!
A recent ImageMagick security update adds some extra policies regarding PDFs (or more specifically: Ghostscript). We can actually see the diff for this update right here. Luckily, we can edit the policy.xml file ourselves and loosen up security for working with PDFs.

In /etc/ImageMagick-6/policy.xml (or /etc/ImageMagick/policy.xml) find the following line

<policy domain="coder" rights="none" pattern="PDF" />
and change it to allow reading and writing by the PDF coder in ImageMagick:

<policy domain="coder" rights="read|write" pattern="PDF" />
Finally, don't forget to restart your PHP-FPM and optionally queue workers:

sudo service php7.2-fpm restart
If you're experiencing issues with other file types or manipulations, you might need to change some of the other policies as well. The policy.xml file contains some good documentation in the comments. You can read more about the security policy file on ImageMagick's website.
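
# A non-interactive way to apply the same policy change (a sketch; keeps a .bak copy, and
# on some systems the file lives at /etc/ImageMagick/policy.xml instead):
sudo sed -i.bak 's@rights="none" pattern="PDF"@rights="read|write" pattern="PDF"@' /etc/ImageMagick-6/policy.xml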
# run once:

/Library/Internet Plug-Ins/JavaAppletPlugin.plugin/Contents/Resources/javawslauncher.app
#!/usr/bin/env python

'''Converts sequence of images to compact PDF while removing speckles,
bleedthrough, etc.

'''

# for some reason pylint complains about members being undefined :(
# pylint: disable=E1101

from __future__ import print_function

import sys
import os
import re
import subprocess
import shlex

from argparse import ArgumentParser

import numpy as np
from PIL import Image
from scipy.cluster.vq import kmeans, vq

######################################################################

def quantize(image, bits_per_channel=None):

    '''Reduces the number of bits per channel in the given image.'''

    if bits_per_channel is None:
        bits_per_channel = 6

    assert image.dtype == np.uint8

    shift = 8-bits_per_channel
    halfbin = (1 << shift) >> 1

    return ((image.astype(int) >> shift) << shift) + halfbin

######################################################################

def pack_rgb(rgb):

    '''Packs a 24-bit RGB triples into a single integer,
works on both arrays and tuples.'''

    orig_shape = None

    if isinstance(rgb, np.ndarray):
        assert rgb.shape[-1] == 3
        orig_shape = rgb.shape[:-1]
    else:
        assert len(rgb) == 3
        rgb = np.array(rgb)

    rgb = rgb.astype(int).reshape((-1, 3))

    packed = (rgb[:, 0] << 16 |
              rgb[:, 1] << 8 |
              rgb[:, 2])

    if orig_shape is None:
        return packed
    else:
        return packed.reshape(orig_shape)

######################################################################

def unpack_rgb(packed):

    '''Unpacks a single integer or array of integers into one or more
24-bit RGB values.

    '''

    orig_shape = None

    if isinstance(packed, np.ndarray):
        assert packed.dtype == int
        orig_shape = packed.shape
        packed = packed.reshape((-1, 1))

    rgb = ((packed >> 16) & 0xff,
           (packed >> 8) & 0xff,
           (packed) & 0xff)

    if orig_shape is None:
        return rgb
    else:
        return np.hstack(rgb).reshape(orig_shape + (3,))

######################################################################

def get_bg_color(image, bits_per_channel=None):

    '''Obtains the background color from an image or array of RGB colors
by grouping similar colors into bins and finding the most frequent
one.

    '''

    assert image.shape[-1] == 3

    quantized = quantize(image, bits_per_channel).astype(int)
    packed = pack_rgb(quantized)

    unique, counts = np.unique(packed, return_counts=True)

    packed_mode = unique[counts.argmax()]

    return unpack_rgb(packed_mode)

######################################################################

def rgb_to_sv(rgb):

    '''Convert an RGB image or array of RGB colors to saturation and
value, returning each one as a separate 32-bit floating point array or
value.

    '''

    if not isinstance(rgb, np.ndarray):
        rgb = np.array(rgb)

    axis = len(rgb.shape)-1
    cmax = rgb.max(axis=axis).astype(np.float32)
    cmin = rgb.min(axis=axis).astype(np.float32)
    delta = cmax - cmin

    saturation = delta.astype(np.float32) / cmax.astype(np.float32)
    saturation = np.where(cmax == 0, 0, saturation)

    value = cmax/255.0

    return saturation, value

######################################################################

def postprocess(output_filename, options):

    '''Runs the postprocessing command on the file provided.'''

    assert options.postprocess_cmd

    base, _ = os.path.splitext(output_filename)
    post_filename = base + options.postprocess_ext

    cmd = options.postprocess_cmd
    cmd = cmd.replace('%i', output_filename)
    cmd = cmd.replace('%o', post_filename)
    cmd = cmd.replace('%e', options.postprocess_ext)

    subprocess_args = shlex.split(cmd)

    if os.path.exists(post_filename):
        os.unlink(post_filename)

    if not options.quiet:
        print('  running "{}"...'.format(cmd), end=' ')
        sys.stdout.flush()

    try:
        result = subprocess.call(subprocess_args)
        before = os.stat(output_filename).st_size
        after = os.stat(post_filename).st_size
    except OSError:
        result = -1

    if result == 0:

        if not options.quiet:
            print('{:.1f}% reduction'.format(
                100*(1.0-float(after)/before)))

        return post_filename

    else:

        sys.stderr.write('warning: postprocessing failed!\n')
        return None

######################################################################

def percent(string):
    '''Convert a string (i.e. 85) to a fraction (i.e. .85).'''
    return float(string)/100.0

######################################################################

def get_argument_parser():

    '''Parse the command-line arguments for this program.'''

    parser = ArgumentParser(
        description='convert scanned, hand-written notes to PDF')

    show_default = ' (default %(default)s)'

    parser.add_argument('filenames', metavar='IMAGE', nargs='+',
                        help='files to convert')

    parser.add_argument('-q', dest='quiet', action='store_true',
                        default=False,
                        help='reduce program output')

    parser.add_argument('-b', dest='basename', metavar='BASENAME',
                        default='page',
                        help='output PNG filename base' + show_default)

    parser.add_argument('-o', dest='pdfname', metavar='PDF',
                        default='output.pdf',
                        help='output PDF filename' + show_default)

    parser.add_argument('-v', dest='value_threshold', metavar='PERCENT',
                        type=percent, default='25',
                        help='background value threshold %%'+show_default)

    parser.add_argument('-s', dest='sat_threshold', metavar='PERCENT',
                        type=percent, default='20',
                        help='background saturation '
                        'threshold %%'+show_default)

    parser.add_argument('-n', dest='num_colors', type=int,
                        default='8',
                        help='number of output colors '+show_default)

    parser.add_argument('-p', dest='sample_fraction',
                        metavar='PERCENT',
                        type=percent, default='5',
                        help='%% of pixels to sample' + show_default)

    parser.add_argument('-w', dest='white_bg', action='store_true',
                        default=False, help='make background white')

    parser.add_argument('-g', dest='global_palette',
                        action='store_true', default=False,
                        help='use one global palette for all pages')

    parser.add_argument('-S', dest='saturate', action='store_false',
                        default=True, help='do not saturate colors')

    parser.add_argument('-K', dest='sort_numerically',
                        action='store_false', default=True,
                        help='keep filenames ordered as specified; '
                        'use if you *really* want IMG_10.png to '
                        'precede IMG_2.png')

    parser.add_argument('-P', dest='postprocess_cmd', default=None,
                        help='set postprocessing command (see -O, -C, -Q)')

    parser.add_argument('-e', dest='postprocess_ext',
                        default='_post.png',
                        help='filename suffix/extension for '
                        'postprocessing command')

    parser.add_argument('-O', dest='postprocess_cmd',
                        action='store_const',
                        const='optipng -silent %i -out %o',
                        help='same as -P "%(const)s"')

    parser.add_argument('-C', dest='postprocess_cmd',
                        action='store_const',
                        const='pngcrush -q %i %o',
                        help='same as -P "%(const)s"')

    parser.add_argument('-Q', dest='postprocess_cmd',
                        action='store_const',
                        const='pngquant --ext %e %i',
                        help='same as -P "%(const)s"')

    parser.add_argument('-c', dest='pdf_cmd', metavar="COMMAND",
                        default='convert %i %o',
                        help='PDF command (default "%(default)s")')

    return parser

######################################################################

def get_filenames(options):

    '''Get the filenames from the command line, optionally sorted by
number, so that IMG_10.png is re-arranged to come after IMG_9.png.
This is a nice feature because some scanner programs (like Image
Capture on Mac OS X) automatically number files without leading zeros,
and this way you can supply files using a wildcard and still have the
pages ordered correctly.

    '''

    if not options.sort_numerically:
        return options.filenames

    filenames = []

    for filename in options.filenames:
        basename = os.path.basename(filename)
        root, _ = os.path.splitext(basename)
        matches = re.findall(r'[0-9]+', root)
        if matches:
            num = int(matches[-1])
        else:
            num = -1
        filenames.append((num, filename))

    return [fn for (_, fn) in sorted(filenames)]

######################################################################

def load(input_filename):

    '''Load an image with Pillow and convert it to numpy array. Also
returns the image DPI in x and y as a tuple.'''

    try:
        pil_img = Image.open(input_filename)
    except IOError:
        sys.stderr.write('warning: error opening {}\n'.format(
            input_filename))
        return None, None

    if pil_img.mode != 'RGB':
        pil_img = pil_img.convert('RGB')

    if 'dpi' in pil_img.info:
        dpi = pil_img.info['dpi']
    else:
        dpi = (300, 300)

    img = np.array(pil_img)

    return img, dpi

######################################################################

def sample_pixels(img, options):

    '''Pick a fixed percentage of pixels in the image, returned in random
order.'''

    pixels = img.reshape((-1, 3))
    num_pixels = pixels.shape[0]
    num_samples = int(num_pixels*options.sample_fraction)

    idx = np.arange(num_pixels)
    np.random.shuffle(idx)

    return pixels[idx[:num_samples]]

######################################################################

def get_fg_mask(bg_color, samples, options):

    '''Determine whether each pixel in a set of samples is foreground by
comparing it to the background color. A pixel is classified as a
foreground pixel if either its value or saturation differs from the
background by a threshold.'''

    s_bg, v_bg = rgb_to_sv(bg_color)
    s_samples, v_samples = rgb_to_sv(samples)

    s_diff = np.abs(s_bg - s_samples)
    v_diff = np.abs(v_bg - v_samples)

    return ((v_diff >= options.value_threshold) |
            (s_diff >= options.sat_threshold))

######################################################################

def get_palette(samples, options, return_mask=False, kmeans_iter=40):

    '''Extract the palette for the set of sampled RGB values. The first
palette entry is always the background color; the rest are determined
from foreground pixels by running K-means clustering. Returns the
palette, as well as a mask corresponding to the foreground pixels.

    '''

    if not options.quiet:
        print('  getting palette...')

    bg_color = get_bg_color(samples, 6)

    fg_mask = get_fg_mask(bg_color, samples, options)

    centers, _ = kmeans(samples[fg_mask].astype(np.float32),
                        options.num_colors-1,
                        iter=kmeans_iter)

    palette = np.vstack((bg_color, centers)).astype(np.uint8)

    if not return_mask:
        return palette
    else:
        return palette, fg_mask

######################################################################

def apply_palette(img, palette, options):

    '''Apply the pallete to the given image. The first step is to set all
background pixels to the background color; then, nearest-neighbor
matching is used to map each foreground color to the closest one in
the palette.

    '''

    if not options.quiet:
        print('  applying palette...')

    bg_color = palette[0]

    fg_mask = get_fg_mask(bg_color, img, options)

    orig_shape = img.shape

    pixels = img.reshape((-1, 3))
    fg_mask = fg_mask.flatten()

    num_pixels = pixels.shape[0]

    labels = np.zeros(num_pixels, dtype=np.uint8)

    labels[fg_mask], _ = vq(pixels[fg_mask], palette)

    return labels.reshape(orig_shape[:-1])

######################################################################

def save(output_filename, labels, palette, dpi, options):

    '''Save the label/palette pair out as an indexed PNG image.  This
optionally saturates the pallete by mapping the smallest color
component to zero and the largest one to 255, and also optionally sets
the background color to pure white.

    '''

    if not options.quiet:
        print('  saving {}...'.format(output_filename))

    if options.saturate:
        palette = palette.astype(np.float32)
        pmin = palette.min()
        pmax = palette.max()
        palette = 255 * (palette - pmin)/(pmax-pmin)
        palette = palette.astype(np.uint8)

    if options.white_bg:
        palette = palette.copy()
        palette[0] = (255, 255, 255)

    output_img = Image.fromarray(labels, 'P')
    output_img.putpalette(palette.flatten())
    output_img.save(output_filename, dpi=dpi)

######################################################################

def get_global_palette(filenames, options):

    '''Fetch the global palette for a series of input files by merging
their samples together into one large array.

    '''

    input_filenames = []

    all_samples = []

    if not options.quiet:
        print('building global palette...')

    for input_filename in filenames:

        img, _ = load(input_filename)
        if img is None:
            continue

        if not options.quiet:
            print('  processing {}...'.format(input_filename))

        samples = sample_pixels(img, options)
        input_filenames.append(input_filename)
        all_samples.append(samples)

    num_inputs = len(input_filenames)

    all_samples = [s[:int(round(float(s.shape[0])/num_inputs))]
                   for s in all_samples]

    all_samples = np.vstack(tuple(all_samples))

    global_palette = get_palette(all_samples, options)

    if not options.quiet:
        print('  done\n')

    return input_filenames, global_palette

######################################################################

def emit_pdf(outputs, options):

    '''Runs the PDF conversion command to generate the PDF.'''

    cmd = options.pdf_cmd
    cmd = cmd.replace('%o', options.pdfname)
    if len(outputs) > 2:
        cmd_print = cmd.replace('%i', ' '.join(outputs[:2] + ['...']))
    else:
        cmd_print = cmd.replace('%i', ' '.join(outputs))
    cmd = cmd.replace('%i', ' '.join(outputs))

    if not options.quiet:
        print('running PDF command "{}"...'.format(cmd_print))

    try:
        result = subprocess.call(shlex.split(cmd))
    except OSError:
        result = -1

    if result == 0:
        if not options.quiet:
            print('  wrote', options.pdfname)
    else:
        sys.stderr.write('warning: PDF command failed\n')

######################################################################

def notescan_main(options):

    '''Main function for this program when run as script.'''

    filenames = get_filenames(options)

    outputs = []

    do_global = options.global_palette and len(filenames) > 1

    if do_global:
        filenames, palette = get_global_palette(filenames, options)

    do_postprocess = bool(options.postprocess_cmd)

    for input_filename in filenames:

        img, dpi = load(input_filename)
        if img is None:
            continue

        output_filename = '{}{:04d}.png'.format(
            options.basename, len(outputs))

        if not options.quiet:
            print('opened', input_filename)

        if not do_global:
            samples = sample_pixels(img, options)
            palette = get_palette(samples, options)

        labels = apply_palette(img, palette, options)

        save(output_filename, labels, palette, dpi, options)

        if do_postprocess:
            post_filename = postprocess(output_filename, options)
            if post_filename:
                output_filename = post_filename
            else:
                do_postprocess = False

        outputs.append(output_filename)

        if not options.quiet:
            print('  done\n')

    emit_pdf(outputs, options)

######################################################################

def main():
    '''Parse args and call notescan_main().'''
    notescan_main(options=get_argument_parser().parse_args())

if __name__ == '__main__':
    main()
: 'http://www.modbusdriver.com/modpoll.html:

Usage: modpoll [options] serialport|host
    Arguments:
    serialport    Serial port when using Modbus ASCII or Modbus RTU protocol
                  COM1, COM2 ...                on Windows
                  /dev/ttyS0, /dev/ttyS1 ...    on Linux
                  /dev/ser1, /dev/ser2 ...      on QNX
    host          Host name or dotted ip address when using MODBUS/TCP protocol
    General options:
    -m ascii      Modbus ASCII protocol
    -m rtu        Modbus RTU protocol (default)
    -m tcp        MODBUS/TCP protocol
    -m enc        Encapsulated Modbus RTU over TCP
    -a #          Slave address (1-255, 1 is default)
    -r #          Start reference (1-65536, 100 is default)
    -c #          Number of values to poll (1-100, 1 is default)
    -t 0          Discrete output (coil) data type
    -t 1          Discrete input data type
    -t 3          16-bit input register data type
    -t 3:hex      16-bit input register data type with hex display
    -t 3:int      32-bit integer data type in input register table
    -t 3:mod      32-bit module 10000 data type in input register table
    -t 3:float    32-bit float data type in input register table
    -t 4          16-bit output (holding) register data type (default)
    -t 4:hex      16-bit output (holding) register data type with hex display
    -t 4:int      32-bit integer data type in output (holding) register table
    -t 4:mod      32-bit module 10000 type in output (holding) register table
    -t 4:float    32-bit float data type in output (holding) register table
    -i            Slave operates on big-endian 32-bit integers
    -f            Slave operates on big-endian 32-bit floats
    -1            Poll only once, otherwise poll every second
    -e            Use Daniel/Enron single register 32-bit mode
    -0            First reference is 0 (PDU addressing) instead 1
    Options for MODBUS/TCP:
    -p #          TCP port number (502 is default)
    Options for Modbus ASCII and Modbus RTU:
    -b #          Baudrate (e.g. 9600, 19200, ...) (9600 is default)
    -d #          Databits (7 or 8 for ASCII protocol, 8 for RTU)
    -s #          Stopbits (1 or 2, 1 is default)
    -p none       No parity
    -p even       Even parity (default)
    -p odd        Odd parity
    -4 #          RS-485 mode, RTS on while transmitting and another # ms after
    -o #          Time-out in seconds (0.01 - 10.0, 1.0 s is default)
'

# Reading Holding Registers
# address = 4001 (-a 1), count = 10 (-c 10), port = 5502 (-p 5502)

    modpoll -m tcp -a 1 -c 10 -p 5502 192.168.56.1

# Writing Holding Registers
# address = 4001 (-a 1), count = 3 (-c 3), port = 5502 (-p 5502) ... value1 value2 value3

    modpoll -m tcp -a 1 -c 3 -p 5502 192.168.56.1 11 32 56

# To retrieve once 5 floating point values starting from reference 100 with Modbus/TCP from slave device with IP 10.0.0.100:
    
    modpoll -m tcp -t4:float -r 100 -c 5 -1 10.0.0.100
    
    
upstream newserver {
  server 172.16.0.1:80;  # this is new server, by IP address
}

server {
  listen 80;
  server_name subdomain.site.com;
  location / {
    proxy_set_header Host $host;
    proxy_pass http://newserver;
  }
}
#from bash command line
#first create folder to save python dependencies:

    > sudo mkdir /var/www/.local
    > sudo mkdir /var/www/.cache
    > sudo chown www-data.www-data /var/www/.local
    > sudo chown www-data.www-data /var/www/.cache

# then install dependencies (imports):

    > sudo -H -u www-data pip install <dep1>
    > sudo -H -u www-data pip install <dep2>
    :
    
# then grant the www-data user permission to run your script via sudo
# creating a file at /etc/sudoers.d/:

    > sudo nano /etc/sudoers.d/mysudoerfile
    
    www-data ALL=(ALL) NOPASSWD: /usr/bin/python <path of your script here>

# then set execute permissions to your script:

    sudo chmod +x <path of your script here>

# then run your script 
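
# A quick sanity check (the script path below is a placeholder) that the sudoers entry works:

    sudo -l -U www-data     # should list the NOPASSWD python entry
    sudo -u www-data sudo -n /usr/bin/python <path of your script here>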
#!/bin/bash
# create_barcode.sh
# sudo apt-get install barcode imagemagick

CODE=$1 #the code ... first parameter
FNAME=$2  #the output filename, e.g. output.png .... second parameter

# let's create postscript:
barcode -E -b "$CODE" | convert -density 600 ps:- png:- > $FNAME

# use:
#   
#   bash create_barcode.sh 123456789 output.png  #it autodetect's the preferable encoding
#
#   this creates "output.png"
#
# Security Error:
#
# if you get security error: convert not authorized (ps/png) do this:
#
# edit /etc/ImageMagick-6/policy.xml
#
# disable this:

  <!--policy domain="coder" rights="none" pattern="PS" />-->

# and append this:

  <policy domain="coder" rights="read|write" pattern="PNG,PS" />
# Create Channel
# Create new Bot and get Bot TOKEN (to replace TOKEN_OF_BOT)
# and edit:
# /etc/ssh/sshrc

ip=`echo $SSH_CONNECTION | cut -d " " -f 1`

logger -t ssh-wrapper $USER login from $ip

curl -s -X POST https://api.telegram.org/botTOKEN_OF_BOT/sendMessage \
     -d text="Hello world!" -d chat_id=@autanaChannel > /dev/null
$ sudo ip route add prohibit <ip address to block>/32

#Ex: sudo ip route add prohibit 58.15.238.31/32
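
# To remove the block later (same prefix, just deleting the prohibit route):
$ sudo ip route del prohibit <ip address to block>/32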
$> cd <folder>

$> perl -e 'for(<1.*>){((stat)[9]<(unlink))}'

$> find ./ -name "1.*" -exec rm {} \;

$> for i in 1.*; do rm -rf $i; done
$> sudo apt-get install cifs-utils

$> sudo mkdir /mnt/shared

$> sudo mount -t cifs -o username=guest,password=,rw,iocharset=utf8,file_mode=0777,dir_mode=0777,noperm //<windows address>/the_folder /mnt/shared/
$ ifconfig -a | grep "inet\s" | awk -F'[: ]+' '{ print $4 }'

$ ip addr  | grep "inet\s" | awk -F'[: ]+' '{ print $3 }'
sudo vgdisplay # view existing VGs/LVs
sudo pvcreate /dev/sdX /dev....
sudo vgextend <name-vg> /dev/sdX
sudo lvextend -l +100%FREE /dev/<name-vg>/root
sudo resize2fs /dev/<name-vg>/root
# create mount folder:

mkdir /tmp/my10mbvirtualdisk

# create file system (filename=filesyst in current folder) (10Mb):

dd if=/dev/zero of=./filesyst bs=10485760 count=1
sudo losetup /dev/loop0 ./filesyst
sudo mkfs.ext3 /dev/loop0

sudo mount /dev/loop0 /tmp/my10mbvirtualdisk


# now you can use /tmp/my10mbvirtualdisk as disk



# destroy:

sudo umount /tmp/my10mbvirtualdisk
sudo losetup -d /dev/loop0
sudo rm ./filesyst
find /src/dir/ -mtime -<n days> -printf %P\\0|rsync --files-from=- --from0 /src/dir/ /dst/dir/
##src: https://www.digitalocean.com/community/tutorials/how-to-set-up-master-slave-replication-on-postgresql-on-an-ubuntu-12-04-vps#configure-the-master-server

############## Master:

psql -c "CREATE USER rep REPLICATION LOGIN CONNECTION LIMIT 1 ENCRYPTED PASSWORD 'yourpassword';"

#//at file /etc/postgresql/9.5/main/pg_hba.conf 

	host    replication     rep     IP_address_of_slave/32   md5

#//at file /etc/postgresql/9.5/main/postgresql.conf

	listen_addresses = 'localhost,IP_address_of_THIS_host'
	wal_level = 'hot_standby'
	archive_mode = on
	archive_command = 'cd .'
	max_wal_senders = 1
	hot_standby = on

service postgresql restart


############### Slave:

service postgresql stop

#//at file /etc/postgresql/9.5/main/pg_hba.conf 

	host    replication     rep     IP_address_of_master/32  md5

#//at file /etc/postgresql/9.5/main/postgresql.conf

	listen_addresses = 'localhost,IP_address_of_THIS_host'
	wal_level = 'hot_standby'
	archive_mode = on
	archive_command = 'cd .'
	max_wal_senders = 1
	hot_standby = on


################## Master:

psql -c "select pg_start_backup('initial_backup');"
rsync -cva --inplace --exclude=*pg_xlog* /var/lib/postgresql/9.5/main/ slave_IP_address:/var/lib/postgresql/9.5/main/
psql -c "select pg_stop_backup();"


################### Slave:

cd /var/lib/postgresql/9.5/main/recovery.conf

	standby_mode = 'on'
	primary_conninfo = 'host=master_IP_address port=5432 user=rep password=yourpassword'
	trigger_file = '/tmp/postgresql.trigger.5432' ##When we want to set SLAVE db to Master (because of original MASTER fail) creating this file is enough. With the existence of this file db will act like MASTER.

service postgresql start

## we check if no problem:

less /var/log/postgresql/postgresql-9.5-main.log
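
## To later promote the slave to master: per the trigger_file setting above, creating
## that file on the slave is enough:

touch /tmp/postgresql.trigger.5432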
#convert VDI to RAW:
$ vboxmanage clonehd --format RAW ubuntu.vdi ubuntu.img

#mount RAW:
$ mount -t ext3 -o loop,rw ./ubuntu.img /mnt
# duration time of file:
#   ej: sox --i -D test.ogg

  sox --i -D <sound file>

# play sound to default output
#    Linux/OSX?: 

  sox <sound file> -d

#    Windows: 

  sox <sound file> -t waveaudio
  
# record sound from default input:
#    Linux/OSX?: 

  sox -t alsa <output file>
  
#    Windows:
  
  sox -t waveaudio -d <output file>
  
# play sound from starting time (secs) (trim):
#    Linux/OSX?:

  sox <sound file> -d trim <n secs>
  
#    Windows:

  sox <sound file> -t waveaudio trim <n secs>
  
# split big file into small files with equal time fraction:
#    %1n = autoincremental: 1,2,3...

  sox <input file> <output file>_%1n.ogg trim 0 <secs> : newfile : restart
  
# concatenate small files into one:

  sox <input file1> <input file2> ... <input filen> <output file>

# cut silences with tolerance:

  sox in.wav out.wav silence -l 1 0.1 1% -1 2.0 1%
diff -r dir1 dir2 | grep dir1 | awk '{print $4}' > difference1.txt; clear; cat difference1.txt
$ sudo nano /etc/environment

#
# (Append these lines at the end of file:)

http_proxy="http://myproxy.server.com:8080/"
https_proxy="http://myproxy.server.com:8080/"
ftp_proxy="http://myproxy.server.com:8080/"
no_proxy="localhost,127.0.0.1,localaddress,.localdomain.com"
HTTP_PROXY="http://myproxy.server.com:8080/"
HTTPS_PROXY="http://myproxy.server.com:8080/"
FTP_PROXY="http://myproxy.server.com:8080/"
NO_PROXY="localhost,127.0.0.1,localaddress,.localdomain.com"

#
# (save and... )

$ source /etc/environment

# To unset proxies:

# sudo nano /etc/environment
#
# (Remove proxies lines (see above))
#
# (save and then...) 

unset http_proxy
unset https_proxy
unset ftp_proxy
unset no_proxy
unset HTTP_PROXY
unset HTTPS_PROXY
unset FTP_PROXY
unset NO_PROXY

# (that's all)


# ========== using proxies for apt (apt does not obey the environment proxy configuration):

# (we create a new file at /etc/apt/apt.conf.d/)
#

$ sudo nano /etc/apt/apt.conf.d/95proxies

# (now append these lines...)

Acquire::http::proxy "http://myproxy.server.com:8080/";
Acquire::ftp::proxy "ftp://myproxy.server.com:8080/";
Acquire::https::proxy "https://myproxy.server.com:8080/";

# (save and run "sudo apt update" to try it out...)
#
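
# (a one-off alternative: pass the same Acquire option on the command line)

$ sudo apt-get -o Acquire::http::proxy="http://myproxy.server.com:8080/" update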
/etc/php/7.0/fpm/pool.d/www.conf:

pm = dynamic
pm.max_children = 30 (original: 5)
pm.start_servers = 3 (original: 1)
pm.min_spare_servers = 2 (original: 1)
pm.max_spare_servers = 4 (original: 3)
pm.max_requests = 500 (originally commented)
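
# after editing, reload the pool so the changes take effect (service name assumed from the php 7.0 path):

sudo service php7.0-fpm restart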
pgrep -af <name of running process>

#who is running the most recently created binary in /usr/bin (possible attack)? (requires: sudo apt-get install inotify-tools)
inotifywait -e create /usr/bin | echo $(awk '{print $3}') | xargs pgrep -af

#which executable is behind this PID?
ls -l /proc/<ID>/exe
#just add this line at the end of /etc/ssh/sshd_config

AllowUsers <thelogin> 
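
#Ex (the user name is just an illustration): AllowUsers alice
#then restart sshd so the change takes effect:
sudo service ssh restart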

#Using a single GhostScript command on my Ubuntu terminal, I was able to reduce the size of a PDF file from 6 MB to approximately 1 MB:

$ gs -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dCompatibilityLevel=1.4 -dPDFSETTINGS=/ebook -sOutputFile=output.pdf input.pdf

# You can use the following values for -dPDFSETTINGS:

# /screen – Lowest quality, lowest size (ugly)
# /ebook – Moderate quality
# /printer – Good quality
# /prepress – Best quality, highest size
#This will report the percentage of memory in use

% free | grep Mem | awk '{print $3/$2 * 100.0}'

#Ex:23.8171

#This will report the percentage of memory that's free

% free | grep Mem | awk '{print $4/$2 * 100.0}'

#Ex:76.5013

#You could create an alias for this command or put this into a tiny shell script. The specific output could be tailored to your needs using formatting commands for the print statement along these lines:

% free | grep Mem | awk '{ printf("free: %.4f%%\n", $4/$2 * 100.0) }'
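
# e.g. as an alias (the name "memused" is just an example):

% alias memused="free | grep Mem | awk '{ printf(\"used: %.1f%%\n\", \$3/\$2 * 100.0) }'"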
* Open CMD, then type regedit + Enter key.
* Navigate to this path: 

HKEY_CURRENT_USER
 \Control Panel
  \Desktop
  
* And set value:
    
ForegroundLockTimeout DWORD 0x00000000 (0)
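
* The same change can be made from CMD in one line (same key and value as above):

reg add "HKCU\Control Panel\Desktop" /v ForegroundLockTimeout /t REG_DWORD /d 0 /f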
#
# first you must establish iptables rules to keep port 22 closed,
# and choose the ports to use as the knock combination. I used 3030, 55050 and 7070 (it is very
# important to use ports that are not in sequential order)
#
#  #-- rule to keep established/related connections (including the knock combination) working:
#

sudo iptables -A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT

#
#  #-- rules to keep ssh port (22) closed:
#

sudo iptables -A INPUT -p tcp -m tcp --dport 22 -j DROP

#
#  #-- then we save iptables
#

sudo iptables-save

#
#  #-- if you want to make these rules "persistent", look into the
#      iptables-persistent package or see this url 
#
#      http://askubuntu.com/questions/119393/how-to-save-rules-of-the-iptables
#
#      it helped me.
#

# debian and derived distros... install knockd:

sudo apt-get install knockd

# we edit /etc/default/knockd: (knockd config file)

sudo nano /etc/default/knockd

# and set:

    START_KNOCKD=0
    
# to

    START_KNOCKD=1
    
# let's create our ports sequence: let's say 3030,55050,7070 = open, and 7070,55050,3030 = close.
# for this we edit /etc/knockd.conf:

sudo nano /etc/knockd.conf
    
[options]
  UseSyslog

[openSSH]
  sequence    = 3030,55050,7070
  seq_timeout = 1
# add an iptables rule accepting SSH from the knocking IP
  command     = /sbin/iptables -I INPUT -s %IP% -p tcp --dport 22 -j ACCEPT
  tcpflags    = syn

[closeSSH]
  sequence    = 7070,55050,3030
  seq_timeout = 1
# delete that iptables rule again
  command     = /sbin/iptables -D INPUT -s %IP% -p tcp --dport 22 -j ACCEPT
  tcpflags    = syn
  
# we start service:

sudo /etc/init.d/knockd start

# That's all, we're done.
# .. and now... How can I open my host's ssh port (22) from remote location?
# ... just like this (using telnet):

# OPEN:
telnet 192.168.1.33 3030; telnet 192.168.1.33 55050; telnet 192.168.1.33 7070

# you'll see this output in syslog (example with 192.168.1.33):

#  knockd: 192.168.1.33: openSSH: Stage 1
#  knockd: 192.168.1.33: openSSH: Stage 2
#  knockd: 192.168.1.33: openSSH: Stage 3
#  knockd: 192.168.1.33: openSSH: OPEN SESAME
#  knockd: openSSH: running command: /sbin/iptables -I INPUT -s 192.168.1.33...



# and then we CLOSE it:
telnet 192.168.1.33 7070; telnet 192.168.1.33 55050; telnet 192.168.1.33 3030

# you'll see this output in syslog (example with 192.168.1.33):

#  knockd: 192.168.1.33: closeSSH: Stage 1
#  knockd: 192.168.1.33: closeSSH: Stage 2
#  knockd: 192.168.1.33: closeSSH: Stage 3
#  knockd: 192.168.1.33: closeSSH: OPEN SESAME
#  knockd: closeSSH: running command: /sbin/iptables -D INPUT -s 192.168.1.33...

#bypassing the newest [n] files (we must pass [n+1] to tail):

$ find <folder> -maxdepth 1 -type f -printf "%T@ %Tc %p\n" | grep -v '/\.' | sort -r | tail -n +60 | grep -Po "\./.*"

$ for f in "`find -maxdepth 1 -type f -print0 | xargs -r0 stat -c %y\ %n | grep -v '\.\/\.' | sort -r | grep -Po '\./.*' | tail -n +61`"; do 
    printf "$f\n"
  done
#install unison

$ sudo apt install unison

# synchronizing from local folder "/home/user/sync/" with remote "ssh://user@remotehost.com/" folder "/home/user/sync/" (ssh port 22000)


$ unison -silent -auto -batch /home/user/sync/ ssh://user@remotehost.com//home/user/sync/ \
  -nodeletion ssh://user@remotehost.com//home/user/sync/ \
  -sshargs '-p22000' -logfile /tmp/mylog.txt
# NGINX: add <folder> in /etc/nginx/sites-available/default: 

server {
    :
    location /<folder>/ {
        proxy_pass http://<host>:<port>/;
        proxy_set_header X-Original-Host $http_host;
        proxy_set_header X-Original-Scheme $scheme;
        proxy_set_header X-Forwarded-For $remote_addr;
    }
    :
}
    
# APACHE2: add <folder> in /etc/apache2/sites-available/00-default.conf

<VirtualHost *:80>
    :
        ProxyPass /<folder> http://<host>:<port>/
        ProxyPassReverse /<folder> http://<host>:<port>/

        ProxyRequests Off
        ProxyPreserveHost On

        <proxy>
            Order deny,allow
            Allow from all
        </proxy>
    :
</VirtualHost>


#example: "http://192.168.11.45/demo" -> "http://192.168.11.45:8080/"

server {
    :
    location /demo/ {
        proxy_pass http://localhost:8080/;
        proxy_set_header X-Original-Host $http_host;
        proxy_set_header X-Original-Scheme $scheme;
        proxy_set_header X-Forwarded-For $remote_addr;
    }
    :
}

<VirtualHost *:80>
    :
        ProxyPass /demo http://localhost:8080/
        ProxyPassReverse /demo http://localhost:8080/

        ProxyRequests Off
        ProxyPreserveHost On

        <proxy>
            Order deny,allow
            Allow from all
        </proxy>
    :
</VirtualHost>

/* other configuration for nginx:
    
server {
    listen        80;
    server_name   example.com *.example.com;
    location / {
        proxy_pass         http://127.0.0.1:5000;
        proxy_http_version 1.1;
        proxy_set_header   Upgrade $http_upgrade;
        proxy_set_header   Connection keep-alive;
        proxy_set_header   Host $host;
        proxy_cache_bypass $http_upgrade;
        proxy_set_header   X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header   X-Forwarded-Proto $scheme;
    }
}
*/
#changes in /etc/nginx/sites-available/default

server {
  server_name example.com;
  root /path/to/root;
  location / {
    # blah blah
  }
  location /demo {
    alias /path/to/root/production/folder/here;
  }
}
# Edit your /etc/postgresql/9.3/main/postgresql.conf, and change the lines as follows:

# Note: If you didn't find the postgresql.conf file, then just type 

$> locate postgresql.conf 

# in a terminal

1) change #log_directory = 'pg_log' to log_directory = 'pg_log'
2) change #log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' to log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log'
3) change #log_statement = 'none' to log_statement = 'all'
4) change #logging_collector = off to logging_collector = on

# Optional: SELECT set_config('log_statement', 'all', true);

sudo /etc/init.d/postgresql restart   # or: sudo service postgresql restart

#Fire query in postgresql: select 2+2

# Find current log in /var/lib/pgsql/9.2/data/pg_log/

#The log files tend to grow a lot over time and can eventually fill your disk. For safety, write a bash script that deletes old logs and restarts the PostgreSQL server (a minimal sketch follows below).
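
# a minimal sketch of such a cleanup script (log path and 7-day retention are assumptions; adjust to your install):

#!/bin/bash
# delete PostgreSQL log files older than 7 days, then restart the server
find /var/lib/pgsql/9.2/data/pg_log/ -name "*.log" -mtime +7 -exec rm -f {} \;
service postgresql restart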
##sudo nano /etc/udev/rules.d/95-monitor-hotplug.rules

SUBSYSTEM=="drm", RUN+="/usr/local/bin/fix_tv_state.sh"

##---------------------



##sudo nano /usr/local/bin/fix_tv_state.sh

#!/bin/sh
#Fix TV state when HDMI link is lost.

export XAUTHORITY=/home/marco/.Xauthority

OUTPUT="HDMI1"
BAD_MODE="1280x720"
GOOD_MODE="1920x1080"

for MODE in $BAD_MODE $GOOD_MODE; do
 sleep 2
 DISPLAY=:0 xrandr --output $OUTPUT --mode $MODE
 sleep 2
done

##--------------------

sudo chmod +x /usr/local/bin/fix_tv_state.sh
sudo udevadm control --reload-rules

# warning: this is not a script, it's a set of instructions.
#these steps create pptp vpn server so all clients can reach all others clients.

##################### SERVER SIDE (UBUNTU SERVER 16.04+) ######################

sudo apt-get install pptpd
sudo update-rc.d pptpd defaults

# I had to use this on 16.04... it fixes autostart problem:
sudo systemctl enable pptpd 

#edit file "/etc/pptpd.conf": example using nano: $> sudo nano /etc/pptpd.conf
#add the following lines:
    
    localip 10.20.0.1
    remoteip 10.20.1.100-200 #100 clients
#save it
        
#edit file "/etc/ppp/chap-secrets": example using nano: $> sudo nano /etc/ppp/chap-secrets
#add all clients with fixed ip addresses (change user1, user2... and password1, password2,.. according to your preference):

    user1 pptpd password1 10.20.1.100 
    user2 pptpd password2 10.20.1.101
    user3 pptpd password3 10.20.1.200
    :
#save it

#edit/add this line in "/etc/sysctl.conf":
    net.ipv4.ip_forward = 1
#save change:
sudo sysctl -p

#Configure iptables for forwarding (so clients can see each other):

iptables --table nat --append POSTROUTING --out-interface ppp0 -j MASQUERADE
iptables -I INPUT -s 10.20.0.0/16 -i ppp0 -j ACCEPT
iptables --append FORWARD --in-interface enp0s8 -j ACCEPT
iptables-save

#restart your service:

sudo service pptpd restart


##################### CLIENT SIDE FOR UBUNTU SERVER ######################

## Start client side (Ubuntu Server (w/o GUI)):
##
## ============================================================
## 1) Configure pptp: (Change your <vpn server address>)
##   (in this example we named the provider as "pptpserver")
## ============================================================

sudo apt-get install pptp-linux

sudo nano /etc/ppp/peers/pptpserver

# add the following lines:

pty "pptp <vpn server address> --nolaunchpppd"
lock
noauth
nobsdcomp
nodeflate
name server
password 13132828
remotename pptpserver
persist
maxfail 0
holdoff 5
require-mppe-128

# and save (ctrl-o ctrl-x)

# ==================================================================
# 2) Create config file for adding route automatically when startup:
#    this is necessary in order to not use vpn internet connection
#    (use same name of provider, in my case "pptpserver")
# ==================================================================

sudo nano /etc/ppp/ip-up.d/pptpserver

# add the following lines:

#!/bin/bash
# This script is called with the following arguments:
# Arg Name
# $1 Interface name
# $2 The tty
# $3 The link speed
# $4 Local IP number
# $5 Peer IP number
# $6 Optional ''ipparam'' value foo
/sbin/route add -net 10.20.0.0 netmask 255.255.0.0 dev ppp0


# and save (ctrl-o ctrl-x)
#... then set execute permission:

sudo chmod +x /etc/ppp/ip-up.d/pptpserver

# ============================================================
#   STARTUP CONNECTION
# ============================================================

# ------------------------------------
# 1) Manual startup:
# ------------------------------------

sudo pon pptpserver

# ------------------------------------
# 2) Auto startup on boot:
# ------------------------------------

#
# a) USING INTERFACES: Edit interfaces file:
#

sudo nano /etc/network/interfaces

# add the following lines to the end:

auto tunnel
iface tunnel inet ppp
  provider pptpserver

# and save (ctrl-o ctrl-x)
# then restart networking:

sudo /etc/init.d/networking restart

#
# b) USING SERVICE SYSTEMCTL
#

sudo nano /etc/systemd/system/pppoe.service

# add these lines:

[Unit]
Description=PPPoE connection
 
[Service]
Type=oneshot
RemainAfterExit=true
ExecStart=/usr/bin/pon pptpserver
ExecStop=/usr/bin/poff -a
 
[Install]
WantedBy=default.target

# and save
# then change permissions:

sudo chmod +x /etc/systemd/system/pppoe.service

# then reload daemons:

systemctl daemon-reload

# and it will connect on boot.

#start:
sudo systemctl start pppoe

#stop:
sudo systemctl stop pppoe
# let's create a backup from remote postgresql database using pg_dump:
#
#   pg_dump -h [host address] -Fc -o -U [database user] <database name> > [dump file]
#
# later it could be restored at the same remote server using:
#
#   sudo -u postgres pg_restore -C -d postgres mydb_backup.dump
#
#Ex:

pg_dump -h 67.8.78.10 -p 5432 -Fc -o -U myuser mydb > mydb_backup.dump

pg_restore -C -d postgres mydb_backup.dump



#complete (all databases and objects)

pg_dumpall -U myuser -h 67.8.78.10 -p 5432 --clean --file=mydb_backup.dump


#restore from pg_dumpall --clean:

psql -f mydb_backup.dump postgres #it doesn't matter which db you select here
#this command shows a list of supported encodings:
#pdftotext -listenc 

#this command convert pdf to html:
#pdftohtml -c -s -enc <encoding> <pdf to convert> <output html file>

#Ex:

pdftohtml -c -s -enc Latin1 test.pdf test.html
convert -density 144 myfile.pdf[0] -resize 10% -background white -alpha remove -strip -quality 90 mypreview.jpg
#!/bin/bash
# Delete all containers

$ docker rm $(docker ps -a -q)

# Delete all images

$ docker rmi $(docker images -q)
sudo su
cat /dev/null > /etc/apt/apt.conf
echo 'Acquire::http::Proxy "false";' > /etc/apt/apt.conf.d/proxy
apt-get update 
#to archive an image (with its layers/history), use save; restore it later with "docker load":
docker save <image name or tag> | gzip > mycontainer.tgz

#to archive a container's filesystem (running, paused or stopped), use export; restore it with "docker import":
docker export <container name or id> | gzip > mycontainer.tgz

#load an archive created with save:
gunzip -c mycontainer.tgz | docker load


#load 2
docker load -i mycontainer.tgz
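
#counterpart for an archive created with export (the image name "mycontainer:latest" is just an example):
gunzip -c mycontainer.tgz | docker import - mycontainer:latest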
find ./ -name <filemask>* -exec dcmodify \
  -m "(0010,0010)=MOLINA^HERNAN" \
  -m "(0010,0020)=3207639" \
  -m "(0010,0030)=19411128" \
  -m "(0010,0040)=M" \
  -m "(0008,0050)=" \
  -m "(0040,0275)[0].(0032,1060)=RMN HOMBRO IZQUIERDO" \
  -m "(0040,0275)[0].(0040,0007)=RMN HOMBRE IZQUIERDO" {} \;
#iptables -A OUTPUT -d <ipaddress> -j DROP

iptables -A OUTPUT -d 119.140.145.206 -j DROP
iptables-save
#> sudo apt-get install nethogs
#> sudo nethogs <network interface>

#example:

$> sudo nethogs eth0
#iptables -A INPUT -s <ipaddress> -j DROP

iptables -A INPUT -s 65.55.44.100 -j DROP
iptables-save

#un-block

iptables -D INPUT -s xx.xxx.xx.xx -j DROP
iptables -D INPUT -s xx.xxx.xx.xx/yy -j DROP
iptables-save
caffeinate -u -t 2
osascript -e 'tell application "System Events" to keystroke "mypassword"'
osascript -e 'tell application "System Events" to keystroke return'
#split the file into pieces:

  $> split --bytes=10M /path/to/bigfile.ext /path/to/image/prefixForPieces

#then put'em together again when necessary

  $> cat prefixForPieces* > bigfile.ext
#!/bin/bash

sudo apt-get install postgresql conquest-common conquest-postgres

sudo su postgres -c "createdb dicomserver"
sudo su postgres -c "createuser dicomserver"
sudo su postgres -c "psql -c \"ALTER USER dicomserver WITH ENCRYPTED PASSWORD 'dicomserver'\""
sudo su postgres -c "psql -c \"GRANT ALL PRIVILEGES ON DATABASE dicomserver TO dicomserver\""

sudo sed -i 's/CONQUESTSRV1/DICOMSERVER/g' /etc/conquest-dicom-server/dicom.ini
sudo sed -i 's/CONQUESTSRV1/DICOMSERVER/g' /etc/conquest-dicom-server/acrnema.map

sudo sed -i 's/SQLServer\s*\=\ conquest/SQLServer\ =\ dicomserver/g' /etc/conquest-dicom-server/dicom.ini
sudo sed -i 's/Username\s*\=\ postgres/Username\ =\ dicomserver/g' /etc/conquest-dicom-server/dicom.ini
sudo sed -i 's/Password\s*\=\ postgres/Password\ =\ dicomserver/g' /etc/conquest-dicom-server/dicom.ini

sudo sed -i 's/DGATE_ENABLE\=false/DGATE_ENABLE\=true/g' /etc/default/dgate

sudo service dgate stop
sudo service postgresql restart
sudo dgate -v -r
sudo service dgate start

#when installed: AET=DICOMSERVER, PORT=11112
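
#quick connectivity test using dcmtk's echoscu (assuming dcmtk is installed):
echoscu -aec DICOMSERVER localhost 11112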
$ rsync -avz -e "ssh -p <ssh port number>" <user>@<remote addr>:<remote path/folder> <local path/folder>
$ sudo apt-get install tcpflow
$ sudo tcpflow -p -c -i <netinterface> port <portnum>

# Example: tcpflow -p -c -i eth0 port 80
$ find <folderpath> -name <filemask> -exec <command> <extra parameters> {} \;
#Using dcm4che:

#capture:
$ ffmpeg -an -f video4linux2 -s 640x480  -r 30 -i /dev/video0 -vcodec mpeg4 -vtag DIVX my_test.avi

# convert:
$ jpg2dcm -c mpg2dcm.cfg -ts 1.2.840.10008.1.2.4.100 <mpegfile> <dcmfile>

//---------------------------------------------------------------------

#Send to pacs: dcmtk:
$ dcmsend -d -aec AETITLE <ip address> <dicom port> <dcmfile>

//---------------------------------------------------------------------

#Video props:

$ mplayer video.wmv -identify -vo null -ao null -frames 0 2>&1 /dev/null | egrep "(^ID|VIDEO|AUDIO)"

//---------------------------------------------------------------------

# Use/compare mpg2dcm.config: (at DCM4CHE/BIN/JPG2DCM)

//---------------------------------------------------------------------

# jpg2dcm Sample Configuration for encapsulating MPEG2 MP@ML streams into
# DICOM Video Photographic Image objects
# (s. DICOM Part 3, A.32.7 Video Photographic Image IOD)
# Usage: jpg2dcm -c mpg2dcm.cfg -ts 1.2.840.10008.1.2.4.100 <mpegfile> <dcmfile>

# Patient Module Attributes
# Patient's Name
00100010:
# Patient ID
00100020:
# Issuer of Patient ID
#00100021:
# Patient's Birth Date
00100030:
# Patient's Sex
00100040:

# General Study Module Attributes
# Study Instance UID
#0020000D:
# Study Date
00080020:
# Study Time
00080030:
# Referring Physician's Name
00080090:
# Study ID
00200010:
# Accession Number
00080050:
# Study Description
#00081030:

# General Series Module Attributes
# Modality
00080060:XC
# Series Instance UID
#0020,000E:
# Series Number
00200011:1

# General Equipment Module Attributes
# Manufacturer
00080070:

# General Image Module Attributes
# Instance Number
00200013:1

# Cine Module Attributes
# Frame Time [525-line NTSC]
#00181063:33.33
# Frame Time [625-line PAL]
00181063:40.0
# Multiplexed Audio Channels Description Code Sequence
003A0300

# Multi-frame Module Attributes
#Number of Frames (use dummy value, if unknown)
00280008:1500
# Frame Increment Pointer
00280009:00181063

# Image Pixel Module Attributes (MUST be specified for encapsulating MPEG2 streams)
# (s. DICOM Part 5, 8.2.5 MPEG2 MP@ML IMAGE COMPRESSION for details)
# Samples per Pixel
00280002:3
# Photometric Interpretation
00280004:YBR_PARTIAL_420
# Planar Configuration
00280006:0
# Rows
00280010:480
# Columns
00280011:640
# Bits Allocated
00280100:8
# Bits Stored
00280101:8
# High Bit
00280102:7
# Pixel Representation
00280103:0

# Acquisition Context Module Attributes
# Acquisition Context Sequence
00400555

# VL Image Module Attributes
# Image Type
00080008:ORIGINAL\\PRIMARY
# Lossy Image Compression
00282110:01

# SOP Common Module Attributes
# SOP Class UID
00080016:1.2.840.10008.5.1.4.1.1.77.1.4.1
# SOP Instance UID
#00080018

#----------------------------------------------------------------------------
#convert video to frames:

$ ffmpeg -i test.mp4 -r 24 -f image2 test_files/%05d.png

#----------------------------------------------------------------------------
*> sudo visudo

#find 'root ALL(...' and append this line below:

www-data ALL=NOPASSWD:/usr/local/bin/myscript.sh

#Save

*> sudo cp myscript.sh /usr/local/bin/
*> sudo chmod 777 /usr/local/bin/myscript.sh

#at php script:

<?php

$cmd = shell_exec("/usr/local/bin/myscript.sh params");
echo $cmd;

?>
#Use udisks utility
#sudo apt-get install udisks

$> udisks --show-info /dev/sr0 | grep -c "blank: *1"

#this will return 0:if not blank/present; or 1:blank disk present
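
#Ex: act only when a blank disc is present (a small sketch):

$> [ "$(udisks --show-info /dev/sr0 | grep -c 'blank: *1')" -eq 1 ] && echo "blank disc ready"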
#!/bin/bash
#
#/etc/init.d/oracledb
#
#Run-level Startup script for the Oracle Listener and Instances
#It relies on the information on /etc/oratab

export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=/u01/app/oracle/product/11.2.0/dbname_1
export ORACLE_OWNR=oracle
export PATH=$PATH:$ORACLE_HOME/bin

if [ ! -f $ORACLE_HOME/bin/dbstart -o ! -d $ORACLE_HOME ]
then
  echo "Oracle startup: cannot start"
  exit 1
fi

case "$1" in
  start)
    #Oracle listener and instance startup
    echo -n "Starting Oracle: "
    su $ORACLE_OWNR -c "$ORACLE_HOME/bin/lsnrctl start"
    su $ORACLE_OWNR -c "$ORACLE_HOME/bin/dbstart $ORACLE_HOME"
    touch /var/lock/oracle
    echo "OK"
    ;;
  stop)
    #Oracle listener and instance shutdown
    echo -n "Shutdown Oracle: "
    su $ORACLE_OWNR -c "$ORACLE_HOME/bin/lsnrctl stop"
    su $ORACLE_OWNR -c "$ORACLE_HOME/bin/dbshut $ORACLE_HOME"
    rm -f /var/lock/oracle
    echo "OK"
    ;;
  reload|restart)
    $0 stop
    $0 start
    ;;
  *)
    echo "Usage: `basename $0` start|stop|restart|reload"
    exit 1
esac

exit 0
##################################################
#!/bin/sh

#----------------------------------------------------------------
# Put this file at /usr/local/bin:
#
#     $> sudo cp verify_nr /usr/local/bin
#
# Set executing permissions:
#
#     $> sudo chmod +x /usr/local/bin/verify_nr
#
# Then create a crontab entry (runs every minute):
#
#     $> sudo crontab -e
#     #(Go to end and append:)
#     * * * * * /usr/local/bin/verify_nr
#     #(Save)
#----------------------------------------------------------------
SERVICE="nrservice"
if ps ax | grep -v grep | grep -v $0 | grep $SERVICE > /dev/null
then
    echo "$SERVICE service running, everything is fine" > /dev/null
else
    sudo service nrservice.sh restart
fi
$ sudo apt install dcmtk

#Service:

$ storescp -v +xa -pm +uf -fe .dcm -sp --fork -aet MARCO -od ./test_storescp 4006

#Store:

$ storescu -xs localhost 4006 dicom_file.dcm
#disable ping replies to your station (a plain "sudo echo 1 > ..." fails because the redirection runs as your user, so use tee):

echo 1 | sudo tee /proc/sys/net/ipv4/icmp_echo_ignore_all

#enable ping replies back:

echo 0 | sudo tee /proc/sys/net/ipv4/icmp_echo_ignore_all
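
#the same toggle via sysctl (equivalent, and avoids the redirection issue entirely):

sudo sysctl -w net.ipv4.icmp_echo_ignore_all=1   #disable replies
sudo sysctl -w net.ipv4.icmp_echo_ignore_all=0   #re-enable replies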
$ comm -13 \
  <(gzip -dc /var/log/installer/initial-status.gz | sed -n 's/^Package: //p' | sort) \
  <(comm -23 \
    <(dpkg-query -W -f='${Package}\n' | sed 1d | sort) \
    <(apt-mark showauto | sort) \
  )
$> sudo apt-get install freetds-bin

#At Lazarus:
#Put TZConnection component (ZConnection1) and set LibraryLocation as shown:

#  ZConnection1.LibraryLocation := 'libsybdb.so.5';

#  and we're done!
$> wget -qO- ipecho.net/plain
$> dig +short myip.opendns.com @resolver1.opendns.com
$> wget -qO- shtuff.it/myip/short
$> wget -qO- whatismyip.akamai.com
$> sudo crontab -e

#then add a line like this:

* * * * * find /path/to/files/ -type f -mtime +<n> -exec rm -rf {} \;

#Ex:
#Delete "*.txt" files older than 1 day from /tmp folder every day at 2:00am:

0 2 * * * find /tmp/* -type f -mtime +1 -exec rm {} \;       #files
0 2 * * * find /tmp/* -type d -mtime +1 -exec rm -rf {} \;   #folders
#Merge file1.pdf and file2.pdf into merged.pdf:

$> gs -dBATCH -dNOPAUSE -q -sDEVICE=pdfwrite -sOutputFile=merged.pdf file1.pdf file2.pdf
$> find ./ -name "<filename/wild cards>" | xargs grep -i "<text to find>"

#Ex:

$> find ./ -name "*.txt" | xargs grep -i "Examples"

#Find all text files (*.txt) containing text 'Examples' from current path (./) and inner.
$> ssh <remote user>@<remote server ip> [-p <remote ssh port>] -L <local listen port>:<internal target ip>:<internal target port> -fN

#Ex:
#Forward local port 9999 through myremoteserver.com to the internal machine 192.168.0.1, port 80.

$> ssh operator@myremoteserver.com -L 9999:192.168.0.1:80 -fN

#So you can access: "http://localhost:9999/"
#This url will respond as it was "http://192.168.0.1:80/"
sudo ip route add <ip range> via <gateway ip address> dev <interface>
sudo ip addr flush dev <interface>
sudo /etc/init.d/networking restart

#Ex:

sudo ip route add 192.168.32.0/24 via 192.168.32.1 dev eth0
sudo ip addr flush dev eth0
sudo /etc/init.d/networking restart
#Setup the rate control and delay
sudo tc qdisc add dev lo root handle 1: htb default 12 
sudo tc class add dev lo parent 1:1 classid 1:12 htb rate 33kbps ceil 56kbps 
sudo tc qdisc add dev lo parent 1:12 netem delay 400ms
 
#Remove the rate control/delay
sudo tc qdisc del dev lo root
 
#To see what is configured on an interface, do this
sudo tc -s qdisc ls dev lo
 
#Replace lo with eth0/wlan0 to limit speed at lan
wkhtmltopdf <url1> <url2> ... <urln> <output-pdf-path-filename>
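#Ex:
wkhtmltopdf https://example.com https://example.org pages.pdf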
#install debian based:

sudo apt-get install nbtscan

#windows and others: download at http://www.unixwiz.net/tools/nbtscan.html

nbtscan 192.168.0.1-254 # IP range
nbtscan 192.168.0.0/24  # whole C-class network
nbtscan 192.168.1.0/24  # whole C-class network
nbtscan 172.16.0.0/16   # whole B-class network
nbtscan 10.0.0.0/8      # whole A-class network
$> sudo su

$> sync ; echo 1 > /proc/sys/vm/drop_caches
$> sync ; echo 2 > /proc/sys/vm/drop_caches
$> sync ; echo 3 > /proc/sys/vm/drop_caches
#install tools qemu-kvm (debian based distros)
$ sudo apt-get install qemu-kvm

#load module
$ sudo modprobe nbd

#create loopback dev for the image
$ sudo qemu-nbd -c /dev/nbd0 <path to virtual disk>.vdi

#mount the partitions, which are exposed as /dev/nbd0pXXX
$ sudo mount  -o noatime,noexec /dev/nbd0p1 /tmp/vdi/

#in the end unmount && shutdown the ndb
$ sudo umount /tmp/vdi/
$ sudo qemu-nbd -d /dev/nbd0
netsh routing ip nat add portmapping "<lan name>" tcp <caller ip> <listening port> <target ip> <target port>
#!/bin/sh

# get conda paths
export ACTIVATE_PATH=$CONDA_PREFIX/etc/conda/activate.d
export DEACTIVATE_PATH=$CONDA_PREFIX/etc/conda/deactivate.d
export ACTIVATE_SCRIPT=$ACTIVATE_PATH/env_vars.sh
export DEACTIVATE_SCRIPT=$DEACTIVATE_PATH/env_vars.sh

#delete existing activation and deactivation scripts
test -e $ACTIVATE_SCRIPT && rm $ACTIVATE_SCRIPT
test -e $DEACTIVATE_SCRIPT && rm $DEACTIVATE_SCRIPT

#create new activation script
mkdir -p $ACTIVATE_PATH
touch $ACTIVATE_SCRIPT
echo "#!/bin/sh" >> $ACTIVATE_SCRIPT
echo "export BELVO_SECRET_ID=\"$(op read "op://Personal/Belvo/add more/Secret ID")\"" >> $ACTIVATE_SCRIPT
echo "export BELVO_SECRET_PASSWORD=\"$(op read "op://Personal/Belvo/add more/Secret password")\"" >> $ACTIVATE_SCRIPT
echo "export CODA_API_KEY=\"$(op read "op://Personal/Coda/add more/automation")\"" >> $ACTIVATE_SCRIPT
echo "export GOOGLE_APPLICATION_CREDENTIALS=\"/Users/jmbenedetto/code/secrets/gcp_automation_service_account_key.json\"" >> $ACTIVATE_SCRIPT

#create deactivate script
mkdir -p $DEACTIVATE_PATH
touch $DEACTIVATE_SCRIPT
echo "#!/bin/sh" >> $DEACTIVATE_SCRIPT
echo "unset BELVO_SECRET_ID" >> $DEACTIVATE_SCRIPT
echo "unset BELVO_SECRET_PASSWORD" >> $DEACTIVATE_SCRIPT
echo "unset CODA_API_KEY" >> $DEACTIVATE_SCRIPT
echo "unset GOOGLE_APPLICATION_CREDENTIALS" >> $DEACTIVATE_SCRIPT
test -e ./file_path/file_name && echo 1 || echo 2
SINCE=`date --date '-2 weeks +2 days' +%F 2>/dev/null || date -v '-2w' -v '+2d' +%F`
bucket=<bucketname>
aws s3api list-objects-v2 --bucket "$bucket" \
    --query 'Contents[?LastModified > `'"$SINCE"'`]'
[root@mysql-in-servicecloud-consolidated-slave-1 ~]# cat /usr/local/scripts/check_slave_status.py
import commands
import os
import time

for x in range(0, 4):
        status = commands.getoutput("mysql --login-path=statuser -sN -e \"show slave status\"")
#        SLACK_URL="https://hooks.slack.com/services/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"
        SLACK_URL="https://hooks.slack.com/services/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"       

        for row in status.split("\n"):
                SERVER_NAME = "mysql-in-servicecloud-consolidated-slave-01"
                SLACK_MESSAGE = "<!channel> Problem in \`[Azure] "+SERVER_NAME+" \`: "
                Slave_IO_Running = row.split("\t")[10]
                Slave_SQL_Running = row.split("\t")[11]
                Seconds_Behind_Master = row.split("\t")[32]
                if Slave_IO_Running.find("No")!=-1 or Slave_SQL_Running.find("No")!=-1 or int(Seconds_Behind_Master)>5:
                        SLACK_MESSAGE = SLACK_MESSAGE + "\`Slave_SQL_Running: "+Slave_SQL_Running+"\`; \`Slave_IO_Running: "+Slave_IO_Running+"\`; \`Seconds_Behind_Master: "+Seconds_Behind_Master+"\`"
                        os.system("curl -X POST --data \"payload={\'text\':\'"+SLACK_MESSAGE+"\', \'username\':\'gcp-watchman\', \'icon_emoji\':\':bangbang:\'}\" "+SLACK_URL)
                os.system("curl -i -XPOST 'http://gcp-in-int-grafana.onedirect.in:8086/write?db=collectd' --data-binary 'mysql_slave_lag,slave_name='"+SERVER_NAME+"' value='"+Seconds_Behind_Master+"''")
                time.sleep(10)
#!/usr/bin/env bash

/usr/bin/docker exec -it $(docker ps -q --filter ancestor=200890773558.dkr.ecr.ap-southeast-2.amazonaws.com/vtrack/web) bash
mydate=`date +"%m/%d/%Y -%H:%M:%S"`
current_time=$(date "+%Y.%m.%d-%H.%M.%S")
mytime=`date +%T`

USER=anirban
PW=Bose9711
filename="/var/lib/mysql-files/VFS_Ticket_data.csv"


rm -rf "/var/lib/mysql-files/VFS_Ticket_data.csv"
reportname="/tmp/VFS_Ticket_data_$current_time.csv"
mysql -u$USER -p$PW -e"call onedirect.get_export_to_excel_summary(8112,current_timestamp - interval 2 hour,current_timestamp)">/var/lib/mysql-files/VFS_Ticket_data.csv


mv /var/lib/mysql-files/VFS_Ticket_data.csv $reportname
echo " ****TRANSFER START**** "
echo $reportname
azcopy cp "$reportname" "https://prjdwuatadls.dfs.core.windows.net/vfsbiproject?sp=rwle&st=2022-08-05T05:47:28Z&se=2022-09-05T13:47:28Z&spr=https&sv=2021-06-08&sr=c&sig=GuyhDRcueFwQUdtL7%2FQ%2Bq5IdRFnd3QKpud1dusF%2Bu0E%3D"

echo " ****TRANSFER END**** "
grep -r <pattern> "dir/*/dir/dir/file"

or

grep -r <pattern> "*/dir/dir"

or

// generic
grep -r <pattern> *
//#########################################################################################//
/* -------------------------------------------------------------------

Name : Anon_Resampling

----------------------------------------------------------------------
Original Rule :	Replace with other values from the same domain:
1 - Table Name
2 - Field Name

-------------------------------------------------------------------*/

SUB Anon_Resampling (P_TABLENAME , P_FIELDNAME)


TRACE ##################################################;
TRACE ## Starting Function : Anon_Resampling  ##;
TRACE ## Anonymizing Field : $(P_FIELDNAME) #;
TRACE ##################################################;

//---------------------------------------//

[DistinctValues]:
Load Distinct 
[$(P_FIELDNAME)] as [OldDistinctValue],
RowNo() as [RowID],
Rand() as [Random]
Resident $(P_TABLENAME);

[AnonDistinctMapping]:
Mapping
Load
RowNo(),
[OldDistinctValue];
Load
[OldDistinctValue],
[Random]
Resident [DistinctValues]
Order By [Random];

[AnonDistinctValues]:
LOAD
*,
ApplyMap('AnonDistinctMapping',RowID,'Anon_Error') as [NewDistinctValue]
Resident DistinctValues;

Drop table DistinctValues;

[AnonMapping]:
Mapping
Load
[OldDistinctValue],
[NewDistinctValue]
Resident [AnonDistinctValues];

Drop table AnonDistinctValues;

[AnonValues]:
LOAD
*,
ApplyMap('AnonMapping',[$(P_FIELDNAME)],'Anon_Error') as [Anon_$(P_FIELDNAME)]
Resident $(P_TABLENAME);

Drop table $(P_TABLENAME);

Rename table AnonValues to $(P_TABLENAME);


END SUB

//#########################################################################################//
import pandas as pd
from codaio import Coda, Document, Cell

doc=Document.from_environment('XWykP50uN-')
transactions_table=doc.get_table('grid-bsHZ_AO1l5')

df_new=pd.DataFrame([
    {'Name':'Ricardo','transaction_id':'12dgt'},
    {'Name':'Manoel','transaction_id':'fklsod'},
])
df_new

mapping_dict={
    'Name':'Name',
    'transaction_id':'transaction_id'
}
all_data=[]
for i in range(len(df_new)):
    row_data=[]
    for j in range(len(df_new.columns)):
        row_data.append(Cell(column=mapping_dict[df_new.columns[j]],value_storage=df_new.iloc[i,j]))
    all_data.append(row_data)
transactions_table.upsert_rows(all_data)
#!/bin/sh

#create activate script
export ACTIVATE_PATH=$CONDA_PREFIX/etc/conda/activate.d
mkdir -p $ACTIVATE_PATH
touch $ACTIVATE_PATH/env_vars.sh
echo "#!/bin/sh" >> $ACTIVATE_PATH/env_vars.sh
echo "export VAR_NAME=\"VAR_VALUE\"" >> $ACTIVATE_PATH/env_vars.sh

#create deactivate script
export DEACTIVATE_PATH=$CONDA_PREFIX/etc/conda/deactivate.d
mkdir -p $DEACTIVATE_PATH
touch $DEACTIVATE_PATH/env_vars.sh
echo "#!/bin/sh" >> $DEACTIVATE_PATH/env_vars.sh
echo "unset VAR_NAME" >> $DEACTIVATE_PATH/env_vars.sh
Let vSource='lib://LoB demos:DataFiles/';
Let vDestination= 'lib://LoB demos:DataFiles/';

let vStoreTypeSourceFile='qvd';
let vStoreTypeDestinationFile='qvd';

[Parameters]:
LOAD * INLINE [
    original_file_name, target_file_name
    Employee_Master, A001_Employee Master
	Employee All Regions, A001_Employee All Regions
	Employee Retention Predictions_v3, A001_Employee Retention Predictions
];

FOR i = 0 TO NoOfRows('Parameters') - 1
LET vOriginalFileName = peek('original_file_name', $(i), 'Parameters');
LET vTargetFileName = peek('target_file_name', $(i), 'Parameters');


[$(vOriginalFileName)]: LOAD * from [$(vSource)$(vOriginalFileName)] ($(vStoreTypeSourceFile));
STORE [$(vOriginalFileName)] INTO [$(vDestination)$(vTargetFileName)] ($(vStoreTypeDestinationFile));
DROP TABLE [$(vOriginalFileName)];

NEXT i

exit Script
branchName=$(git branch --show-current)
baseURL="https://$branchName-bmc-org.pantheonsite.io/"
npx percy exec -- cypress run --config baseUrl=$baseURL
Let vSource='lib://LoB demos:DataFiles/';
Let vDestination= 'lib://LoB demos:DataFiles/';

let vExtensionSourceFile='.csv';
let vExtensionDestinationFile='.qvd';
let vStoreTypeSourceFile='txt';
let vStoreTypeDestinationFile='qvd';

[Parameters]:
LOAD * INLINE [
    original_file_name, target_file_name
    Employee_Master, A001_Employee Master
	Employee All Regions, A001_Employee All Regions
	Employee Retention Predictions_v3, A001_Employee Retention Predictions


];

FOR i = 0 TO NoOfRows('Parameters') - 1
LET vOriginalFileName = peek('original_file_name', $(i), 'Parameters');
LET vTargetFileName = peek('target_file_name', $(i), 'Parameters');


[$(vOriginalFileName)]: LOAD * from [$(vSource)$(vOriginalFileName)$(vExtensionSourceFile)] ($(vStoreTypeSourceFile));
STORE [$(vOriginalFileName)] INTO [$(vDestination)$(vTargetFileName)$(vExtensionDestinationFile)] ($(vStoreTypeDestinationFile));
DROP TABLE [$(vOriginalFileName)];

NEXT i

exit Script
// Parameters is the table to iterate over
// it contains the Table_name / File_name pairs loaded below
FOR i = 0 TO NoOfRows('Table_Name') - 1
// save field1 value into vField
LET vField = peek('Field1', $(i), 'Table_Name');
NEXT i

[Parameters]:
LOAD * INLINE [
    Table_name, File_name
    Countries, AR_Countries V1
    Invoice Item Detail,AR_Invoice Item Detail V1
    Product Lines, AR_Product Lines V1
    Invoices, AR_Invoices V1        
    Items,AR_Items V1
    Comments,AR_Comments V1
    DSO,AR_DSO V1
    Link Table,AR_Link Table V1
    Subsidiaries,AR_Subsidiaries V1
    ExchangeRates,AR_ExchangeRates V1
    Accountants,AR_Accountants V1
];
conda env remove --name corrupted_env
#!/usr/bin/expect
set timeout 60
spawn ssh [lindex $argv 1]@[lindex $argv 0]
expect "*?assword" {
    send "[lindex $argv 2]\r"
    }
expect ":~$ " {
    send "
        mkdir -p /home/tools/baeldung/auto-test;
        cd /home/tools/baeldung/auto-test;
        tree
        sshpass -p 'Baels@123' scp -r tools@10.45.67.11:/home/tools/cw/baeldung/get_host_info.sh ./;
        tree
        bash get_host_info.sh\r"
    }
expect ":~$ " {
    send "exit\r"
    }
expect eof
# sudo loop
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &
## starts with ## for comments
## starts with # for commented packages
## optional inline comment after package name with #

## resource monitors
dstat
iotop
sysstat # includes iostat
htop
ncdu
s-tui
ranger
## ranger dep https://github.com/ranger/ranger
python3-chardet
caca-utils
imagemagick
ffmpegthumbnailer
bat
atool
## atool depends on these already
# unrar
# p7zip-full
# unzip
lynx
w3m
elinks
poppler-utils
mupdf-tools
calibre
transmission-cli
mediainfo
libimage-exiftool-perl
odt2txt
jq
fontforge-nox
glances
## glances dep https://github.com/nicolargo/glances
python3-psutil
python3-bottle
hddtemp
python3-netifaces
python3-cpuinfo
python3-pysnmp4
python3-pystache
python3-zeroconf

## system
caffeine
gnome-shell-extensions
gnome-tweak-tool

## shell
neofetch
exa
openssh-server
mosh
tmux
tree
xsel
zsh
curl
git
hub # github

## command line utils
opencc
texlive # https://tex.stackexchange.com/a/504566/73420
lilypond
gitit
graphviz

## filesystem
cifs-utils
samba
# sshfs
zfsutils-linux
cryptsetup # for manually unlock full disk encrypted drives

## programming
cmake
mpich
parallel

## font
fonts-cwtex-kai
fonts-linuxlibertine

## hardware
gsmartcontrol
idle3-tools # WD Green HDD config
lm-sensors
psensor
smartmontools
vainfo # video acceleration
acpi
f3
fancontrol
hardinfo
input-utils # for lsinput

## GUI
keepassxc
chromium-browser
# google-chrome-stable # in pop OS's repo. May need more steps on Ubuntu: https://linuxhint.com/install_google_chrome_ubuntu_ppa/
kitty

## Video
ffmpeg
libbluray-bdj
kodi
vlc
mkvtoolnix # mkvinfo
mpv

## network
nmap
iperf3
wakeonlan
ifenslave
ethtool

## for sanoid
debhelper
libcapture-tiny-perl
libconfig-inifiles-perl
pv
lzop
mbuffer
##install packages 
#step 1: check exactly which version is required
#step 2: go to GitHub and use: curl -Lo [name of the package] [link]
#step 3: move the file to /usr/local/bin
#step 4: check the file's permissions using ls -al [file]

curl -Lo "deno.zip" "https://github.com/denoland/deno/releases/latest/download/deno-x86_64-unknown-linux-gnu.zip"

# Make file immutable
chattr +i filename

# Make file mutable
chattr -i filename
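
# Verify: the 'i' flag shows up in the attribute listing
lsattr filename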
yarn add @babel/plugin-transform-exponentiation-operator --dev
npm install react-icons --save
function hex() {
    printf "%%%02x\n" "'$1"
}

hex -   # Outputs %2d
hex _   # Outputs %5f
hex .   # Outputs %2e
#!/bin/bash
if hash ntpdate 2>/dev/null; then
    ntpdate pool.ntp.org
else
    echo "'ntpdate' is not installed. Aborting..."; exit 1
fi
#!/bin/sh
set -e
 
echo "Deploying application ..."
 
# Enter maintenance mode
(php artisan down --message 'The app is being (quickly!) updated. Please try again in a minute.') || true
    # Update codebase
    git fetch origin deploy
    git reset --hard origin/deploy
 
    # Install dependencies based on lock file
    composer install --no-interaction --prefer-dist --optimize-autoloader
 
    # Migrate database
    php artisan migrate --force
 
    # Note: If you're using queue workers, this is the place to restart them.
    # ...
 
    # Clear cache
    php artisan optimize
 
    # Reload PHP to update opcache
    echo "" | sudo -S service php7.4-fpm reload
# Exit maintenance mode
php artisan up
 
echo "Application deployed!"
npx cap open ios #open the project in Xcode

npx cap open android #open the project in Android Studio
<dict>
+  <key>NSCameraUsageDescription</key>
+  <string>To be able to scan barcodes</string>
</dict>
<?xml version="1.0" encoding="utf-8"?>
<manifest
  xmlns:android="http://schemas.android.com/apk/res/android"
+  xmlns:tools="http://schemas.android.com/tools" <-- add this line, removing nothing and keeping this structure

  package="com.example">

  <application
+    android:hardwareAccelerated="true" <-- add this line, removing nothing and keeping this structure
  >
  </application>

+  <uses-permission android:name="android.permission.CAMERA" /><-- add this line, removing nothing and keeping this structure

+  <uses-sdk tools:overrideLibrary="com.google.zxing.client.android" /><-- add this line, removing nothing and keeping this structure
</manifest>
ionic build --prod

#if you want to build the app for Android, run the following commands:
npm install @capacitor/android
npx cap add android

#if you want to build the app for iOS, run the following commands:
npm install @capacitor/ios
npx cap add ios


#finally, run these last two commands

npx cap sync
npx cap copy android #or ios, depending on which one you chose
...

<ion-content class="scanner-hide" *ngIf="scanStatus == false">
  <div class="padding-container center">
    <ion-button color="primary" (click)="scanCode()"><ion-icon slot="start" name="qr-code-outline"></ion-icon> Scanear Código</ion-button> <!-- Button that calls the scanner function -->
  </div>
  <ion-card>
    <ion-card-content><h1>{{ result }}</h1></ion-card-content> <!-- shows the scan result -->
  </ion-card>
  
  <div class="scanner-ui"> <!-- Quando estamos a scanear, chama esta classe-->
    ...Scanner Interface
    </div>
    <div class="ad-spot"></div>
</ion-content>
...
import { BarcodeScanner } from '@capacitor-community/barcode-scanner';



...



export class HomePage {
  public scanStatus:boolean = false; // at page load, set the QR scan state to false
  public result:any;

  constructor() {}


  async scanCode () {

    this.setPermissions(); /* request camera permissions */
  
    BarcodeScanner.hideBackground(); // make the background transparent
    this.scanStatus = true; // setting this variable to true brings up the QR code scanner 
    document.body.classList.add("qrscanner"); // add the CSS class we defined in the global stylesheet
    const result = await BarcodeScanner.startScan(); // start scanning and wait for a result
  
  // if the QR scanner detected something, run the code below
    if (result.hasContent) {

        
        this.scanStatus = false; // obviously, the scanner has to be turned off once we have a result
        BarcodeScanner.stopScan(); // stop the scan
        this.result = result.content; // pass the result to the global "result" variable
        BarcodeScanner.showBackground(); // show the background again
        document.body.classList.remove("qrscanner"); // remove the CSS class we created in the global stylesheet
    
    }
  }

  async setPermissions(){
    const status = await BarcodeScanner.checkPermission({ force: true }); /* force the permission prompt; if the user does not accept, the scanner will not work */
    if (status.granted) {
      // the user granted permission
      return true; // if the user accepted the permissions, return true
    }
  
      return false; // if the user did not accept, return false
  }
}
.scanner-ui { display: none; }
.scanner-hide { visibility: visible; }

body.qrscanner { background-color: transparent; }
body.qrscanner .scanner-ui { display: block; }
body.qrscanner .scanner-hide { visibility: hidden; }
ionic start qrcode blank --type=ionic-angular
#variables

nome = "Meu Nome" #whenever you put the value between "" the variable is of type string
#a string variable is a type of variable that holds text

idade = "28" #string variable

x = 2
y = 5
#whenever you assign a numeric value without "" the variable becomes of type int
#int variables only accept whole numbers; you cannot mix text in



#examples of what you cannot do
!var@ = 1 
#you cannot, in fact you simply can't, use punctuation when naming variables

total = x + idade 
#you cannot combine different variable types in a single operation
#that is, you cannot add the age "28", which is a string, to an int,
#which is strictly a number; that 28 might as well have been written "twenty-eight"





#returned results
print(x+y) #will print the value 7 on the screen
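
#to actually add them, convert the string to an int first:
total = x + int(idade) #total is now 30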
- mkdir work_dir_company
- nano work_dir_company/.gitconfig_company

```
[user]
        email = user@mail.com
        name = userName
[core]
        sshCommand = ssh -i ~/.ssh/id_ed25519_company
```

- nano ~/.gitconfig

```
[includeIf "gitdir:~/work_dir_company/"]
	path = ~/work_dir_company/.gitconfig_company
[user]
        email = user@mail.com
        name = userName
[core]
        sshCommand = ssh -i ~/.ssh/id_ed25519_company
```

- Verify with `git config --list`
Settings | Tools | Python Integrated Tools | Docstring format
echo "$USER ALL=(ALL:ALL) NOPASSWD: ALL" | sudo tee "/etc/sudoers.d/dont-prompt-$USER-for-sudo-password"
#!/bin/bash

set -e

if [ -d ~/.local/share/JetBrains/Toolbox ]; then
    echo "JetBrains Toolbox is already installed!"
    exit 0
fi

echo "Start installation..."

wget --show-progress -qO ./toolbox.tar.gz "https://data.services.jetbrains.com/products/download?platform=linux&code=TBA"

TOOLBOX_TEMP_DIR=$(mktemp -d)

tar -C "$TOOLBOX_TEMP_DIR" -xf toolbox.tar.gz
rm ./toolbox.tar.gz

"$TOOLBOX_TEMP_DIR"/*/jetbrains-toolbox

rm -r "$TOOLBOX_TEMP_DIR"

echo "JetBrains Toolbox was successfully installed!"