Snippets Collections
#!/bin/bash

if : >/dev/tcp/8.8.8.8/53; then
  echo 'Internet available.'
else
  echo 'Offline.'
fi
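
# Hedged variant (assumes GNU coreutils `timeout` is available): bound the
# wait so the check cannot hang when the host is unreachable.
if timeout 3 bash -c ': >/dev/tcp/8.8.8.8/53'; then
  echo 'Internet available.'
else
  echo 'Offline.'
fi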
npx create-react-app my-app --template typescript
git pull origin [branch name]
git add .
git commit -m "[commit message]"
git push origin [branch name]
git remote -v
git remote add origin [remote URL]
git remote remove origin
git checkout -b [branch name]  # create a branch and switch to it
git checkout [branch name]  # switch branch
git rebase --continue 
git merge --continue
git push origin [branch name] --force-with-lease
docker run \
   -v $(pwd):/data/project/ \
   -e QODANA_TOKEN="<qodana-cloud-token>" \
   -e QODANA_REMOTE_URL="<project-remote-url>" \
   -e QODANA_BRANCH="<project-branch-name>" \
   -e QODANA_REVISION="<commit-hash>" \
   -e QODANA_JOB_URL="<job-url>" \
   jetbrains/qodana-<linter>
ipconfig getoption en0 domain_name_server
# Remove line breaks in sequences
awk '/^>/ { print (NR==1 ? "" : RS) $0; next } { printf "%s", $0 } END { printf RS }' input.fa > output.fa
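# For instance (hypothetical two-record FASTA), a wrapped record:
#   >seq1
#   ACGT
#   TTGA
# becomes a single sequence line:
#   >seq1
#   ACGTTTGA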
 docker image prune -a --force --filter "until=2160h"
dataLayer.push({
    "action": "onInitialPageLoad",
    "event": "consent_status",
    "type": "explicit",
    "ucCategory": {
        "essential": true,
        "marketing": true,
        "functional": true,
        "customCategory-da1466e9-42f7-4845-88ee-14d3080feb09": true
    },
    "Usercentrics Consent Management Platform": true,
    "Amazon Pay": true,
    "Cloudflare": true,
    "Google Fonts": true,
    "Google Maps": true,
    "Google Tag Manager": true,
    "PayPal": true,
    "Wordpress": true,
    "Sentry": true,
    "Amazon Web Services": true,
    "hCaptcha": true,
    "Kundenaccount": true,
    "Ory Hydra": true,
    "Datadog": true,
    "Freshdesk": true,
    "Emarsys": true,
    "Facebook Pixel": true,
    "Sovendus": true,
    "Google Analytics": true,
    "Trustpilot": true,
    "TradeDoubler": true,
    "QualityClick": true,
    "Pinterest": true,
    "TikTok": true,
    "Adtriba": true,
    "Microsoft Advertising": true,
    "AWIN": true,
    "Google Ads Conversion Tracking": true,
    "Google Ads Remarketing": true,
    "DoubleClick Floodlight": true,
    "Freewheel": true,
    "DoubleClick Ad": true,
    "tcid": true,
    "jsg_attribution": true,
    "jsg_lc": true,
    "tsid": true,
    "Impact Radius": true,
    "TimingCounter": true,
    "Outbrain": true,
    "Movable Ink": true,
    "Criteo OneTag": true,
    "YouTube Video": true,
    "Zopim": true,
    "Optimizely": true,
    "trbo": true,
    "RUMvision": true

  
})
npm install --save @stripe/react-stripe-js @stripe/stripe-js
npm i @next-auth/prisma-adapter
npm install next-auth
npm install @prisma/client @auth/prisma-adapter
npm install prisma --save-dev
npm install react-hook-form
# Plot box plots (melt() is from the reshape2 package)
library(reshape2)
ff = cbind( cellType = rownames(f1), color = colorCodes[1:13], f1[, 2:10] )
ff = melt( ff, measure.vars = 3:11)
marker.genes = lapply( marker.genes, FUN = function(x) {x[x %in% geneNames]})
marker.genes.2 <- list(
  Gene1 = c(1, 2, 3),
  Gene2 = c(4, 5, 6, 7),
  Gene3 = c(8, 9, 10)
)

# Find the maximum length among all vectors
max_length <- max(sapply(marker.genes.2, length))

# Pad shorter vectors with NA to make them consistent
marker.genes.2 <- lapply(marker.genes.2, function(x) {
  if (length(x) < max_length) {
    c(x, rep(NA, max_length - length(x)))
  } else {
    x
  }
})

# Create a data frame from the corrected list
my_data_frame <- data.frame(marker.genes.2)

# Display the resulting data frame
print(my_data_frame)
# plot_grid() is from the cowplot package
gg_dot <- plot_grid(gg_dot_1, gg_dot_2, labels = LETTERS[1:2], ncol = 1 )
saveRDS(marker.genes.2, file = "Tabula_Lung_Total8.rds")
RUN sudo apt update && sudo apt install -y zsh \
    && sh -c "$(wget -O- https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh)" \
    && git clone https://github.com/zsh-users/zsh-autosuggestions ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-autosuggestions \
    && git clone https://github.com/zsh-users/zsh-syntax-highlighting.git ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-syntax-highlighting \
    && sed -i "s/plugins=(git)/plugins=(\n\tgit\n\tzsh-autosuggestions\n\tzsh-syntax-highlighting\n)/" ~/.zshrc \
    && sed -i "s/ZSH_THEME=.*/ZSH_THEME='<theme-name>'/" ~/.zshrc \
    && sudo chsh -s $(which zsh) $USERNAME
kubectl get no -owide

# List containers running
docker ps

# Enter kind node
docker exec -it kind-control-plane sh

# Show node info
ip link

# Show containers inside node
crictl ps

# Get container process id
crictl inspect <CONTAINER>
  
# Enter container network namespace
nsenter -n -t <PROC_ID>

# Show container network info
ip addr
curl --request POST \
  --url https://{{your-gtm-ss-url}}/com.snowplowanalytics.snowplow/enriched \
  --header 'Content-Type: application/json' \
  --header 'x-gtm-server-preview: {{your-preview-header}}' \
  --data '{
  "app_id": "example-website",
  "platform": "web",
  "etl_tstamp": "2021-11-26T00:01:25.292Z",
  "collector_tstamp": "2021-11-20T00:02:05Z",
  "dvce_created_tstamp": "2021-11-20T00:03:57.885Z",
  "event": "unstruct",
  "event_id": "c6ef3124-b53a-4b13-a233-0088f79dcbcb",
  "txn_id": null,
  "name_tracker": "sp1",
  "v_tracker": "js-3.1.6",
  "v_collector": "ssc-2.3.0-stdout$",
  "v_etl": "snowplow-micro-1.1.2-common-2.0.1",
  "user_id": "jon.doe@email.com",
  "user_ipaddress": "92.231.54.234",
  "user_fingerprint": null,
  "domain_userid": "de81d764-990c-4fdc-a37e-adf526909ea6",
  "domain_sessionidx": 3,
  "network_userid": "ecdff4d0-9175-40ac-a8bb-325c49733607",
  "geo_country": "US",
  "geo_region": "CA",
  "geo_city": "San Francisco",
  "geo_zipcode": "94109",
  "geo_latitude": 37.443604,
  "geo_longitude": -122.4124,
  "geo_location": "37.443604,-122.4124",
  "geo_region_name": "San Francisco",
  "ip_isp": "AT&T",
  "ip_organization": "AT&T",
  "ip_domain": "att.com",
  "ip_netspeed": "Cable/DSL",
  "page_url": "https://snowplowanalytics.com/use-cases/",
  "page_title": "Snowplow Analytics",
  "page_referrer": null,
  "page_urlscheme": "https",
  "page_urlhost": "snowplowanalytics.com",
  "page_urlport": 443,
  "page_urlpath": "/use-cases/",
  "page_urlquery": "",
  "page_urlfragment": "",
  "refr_urlscheme": null,
  "refr_urlhost": null,
  "refr_urlport": null,
  "refr_urlpath": null,
  "refr_urlquery": null,
  "refr_urlfragment": null,
  "refr_medium": null,
  "refr_source": null,
  "refr_term": null,
  "mkt_medium": null,
  "mkt_source": null,
  "mkt_term": null,
  "mkt_content": null,
  "mkt_campaign": null,
  "contexts_org_w3_performance_timing_1": [
    {
      "navigationStart": 1415358089861,
      "unloadEventStart": 1415358090270,
      "unloadEventEnd": 1415358090287,
      "redirectStart": 0,
      "redirectEnd": 0,
      "fetchStart": 1415358089870,
      "domainLookupStart": 1415358090102,
      "domainLookupEnd": 1415358090102,
      "connectStart": 1415358090103,
      "connectEnd": 1415358090183,
      "requestStart": 1415358090183,
      "responseStart": 1415358090265,
      "responseEnd": 1415358090265,
      "domLoading": 1415358090270,
      "domInteractive": 1415358090886,
      "domContentLoadedEventStart": 1415358090968,
      "domContentLoadedEventEnd": 1415358091309,
      "domComplete": 0,
      "loadEventStart": 0,
      "loadEventEnd": 0
    }
  ],
  "se_category": null,
  "se_action": null,
  "se_label": null,
  "se_property": null,
  "se_value": null,
  "unstruct_event_com_snowplowanalytics_snowplow_link_click_1": {
    "targetUrl": "http://www.example.com",
    "elementClasses": [
      "foreground"
    ],
    "elementId": "exampleLink"
  },
  "tr_orderid": null,
  "tr_affiliation": null,
  "tr_total": null,
  "tr_tax": null,
  "tr_shipping": null,
  "tr_city": null,
  "tr_state": null,
  "tr_country": null,
  "ti_orderid": null,
  "ti_sku": null,
  "ti_name": null,
  "ti_category": null,
  "ti_price": null,
  "ti_quantity": null,
  "pp_xoffset_min": null,
  "pp_xoffset_max": null,
  "pp_yoffset_min": null,
  "pp_yoffset_max": null,
  "useragent": null,
  "br_name": null,
  "br_family": null,
  "br_version": null,
  "br_type": null,
  "br_renderengine": null,
  "br_lang": null,
  "br_features_pdf": true,
  "br_features_flash": false,
  "br_features_java": null,
  "br_features_director": null,
  "br_features_quicktime": null,
  "br_features_realplayer": null,
  "br_features_windowsmedia": null,
  "br_features_gears": null,
  "br_features_silverlight": null,
  "br_cookies": null,
  "br_colordepth": null,
  "br_viewwidth": null,
  "br_viewheight": null,
  "os_name": null,
  "os_family": null,
  "os_manufacturer": null,
  "os_timezone": null,
  "dvce_type": null,
  "dvce_ismobile": null,
  "dvce_screenwidth": null,
  "dvce_screenheight": null,
  "doc_charset": null,
  "doc_width": null,
  "doc_height": null,
  "tr_currency": null,
  "tr_total_base": null,
  "tr_tax_base": null,
  "tr_shipping_base": null,
  "ti_currency": null,
  "ti_price_base": null,
  "base_currency": null,
  "geo_timezone": null,
  "mkt_clickid": null,
  "mkt_network": null,
  "etl_tags": null,
  "dvce_sent_tstamp": null,
  "refr_domain_userid": null,
  "refr_dvce_tstamp": null,
  "contexts_com_snowplowanalytics_snowplow_ua_parser_context_1": [
    {
      "useragentFamily": "IE",
      "useragentMajor": "7",
      "useragentMinor": "0",
      "useragentPatch": null,
      "useragentVersion": "IE 7.0",
      "osFamily": "Windows XP",
      "osMajor": null,
      "osMinor": null,
      "osPatch": null,
      "osPatchMinor": null,
      "osVersion": "Windows XP",
      "deviceFamily": "Other"
    }
  ],
  "domain_sessionid": "2b15e5c8-d3b1-11e4-b9d6-1681e6b88ec1",
  "derived_tstamp": "2021-11-20T00:03:57.886Z",
  "event_vendor": "com.snowplowanalytics.snowplow",
  "event_name": "link_click",
  "event_format": "jsonschema",
  "event_version": "1-0-0",
  "event_fingerprint": "e3dbfa9cca0412c3d4052863cefb547f",
  "true_tstamp": "2021-11-20T00:03:57.886Z"
}'
#!/bin/bash
# Brace expansion {1979..1980} requires bash, not plain sh

for i in {1979..1980}
do
    echo "output: $i"
    dir2=$((i+1))
    cp /from/$i/FILE:$i-08* /from/$dir2/
    mv /from/$i/FILE:$i-09* /from/$dir2/
done
iptables -t nat -I PREROUTING -p tcp -d 192.168.1.0/24 --dport 2222 -j DNAT --to-destination 127.0.0.1:2222

sysctl -w net.ipv4.conf.eth0.route_localnet=1
# check version
aws --version

# update CLI
pip install --upgrade awscli

# list S3 buckets
aws s3api list-buckets
aws s3 ls
aws s3 ls s3://dir_name/subdir_name

# set up config
# touch ~/.aws/config
[sso-session my_session]
sso_start_url = https://xxxx-login.awsapps.com/start/
sso_region = us-east-2
sso_registration_scopes = sso:account:access

[profile cwb-d]
output = json
region = us-east-2
sso_session = my_session
sso_account_id = 123456789
sso_role_name = my_role

# authenticate
export AWS_PROFILE=cwb-d
aws sso login --sso-session my_session --no-browser

concurrent = 1
check_interval = 0
shutdown_timeout = 0

[session_server]
  session_timeout = 3600
[[runners]]
  name = "Runner Name - Docker"
  url = "https://gitlab.?.com/"
  token = "<Gitlab-Runner-Token>"
  executor = "docker"
  # Path to the custom CA certificate
  tls-ca-file = "path to certs"
  [runners.docker]
    gpus = "all"
    privileged = false
    tls_verify = false
    image = "docker:stable"  # Specify the default Docker image for running jobs
    disable_cache = false
    volumes = ["/cache"]
    shm_size = 0  # Shared memory size in bytes (0 = Docker default)
    [runners.docker.auth]
      username = "<gitlab-Token>"
      password = "<Token-Password>"

[[runners]]
  name = "Runner Name - Shell"
  url = "https://gitlab.?.com/"
  token = "<Gitlab-Runner-Token>"
  executor = "shell"
  # Path to the custom CA certificate
  tls-ca-file = "path to certs"
#!/bin/bash
# Short script to split videos by filesize using ffmpeg by LukeLR
# source: https://stackoverflow.com/a/52158160
# usage: . ./split-video.sh huge-video.mov 90000000 "-c:v libx264 -crf 23 -c:a copy -vf scale=640:-2"

if [ $# -ne 3 ]; then
    echo 'Illegal number of parameters. Needs 3 parameters:'
    echo 'Usage:'
    echo './split-video.sh FILE SIZELIMIT "FFMPEG_ARGS"'
    echo
    echo 'Parameters:'
    echo '    - FILE:        Name of the video file to split'
    echo '    - SIZELIMIT:   Maximum file size of each part (in bytes)'
    echo '    - FFMPEG_ARGS: Additional arguments to pass to each ffmpeg-call'
    echo '                   (video format and quality options etc.)'
    exit 1
fi

FILE="$1"
SIZELIMIT="$2"
FFMPEG_ARGS="$3"

# Duration of the source video
DURATION=$(ffprobe -i "$FILE" -show_entries format=duration -v quiet -of default=noprint_wrappers=1:nokey=1 | cut -d. -f1)

# Duration that has been encoded so far
CUR_DURATION=0

# Filename of the source video (without extension)
BASENAME="${FILE%.*}"

# Extension for the video parts
#EXTENSION="${FILE##*.}"
EXTENSION="mp4"

# Number of the current video part
i=1

# Filename of the next video part
NEXTFILENAME="$BASENAME-$i.$EXTENSION"

echo "Duration of source video: $DURATION"

# Until the duration of all partial videos has reached the duration of the source video
while [[ $CUR_DURATION -lt $DURATION ]]; do
    # Encode next part
    echo ffmpeg -i "$FILE" -ss "$CUR_DURATION" -fs "$SIZELIMIT" $FFMPEG_ARGS "$NEXTFILENAME"
    ffmpeg -ss "$CUR_DURATION" -i "$FILE" -fs "$SIZELIMIT" $FFMPEG_ARGS "$NEXTFILENAME"

    # Duration of the new part
    NEW_DURATION=$(ffprobe -i "$NEXTFILENAME" -show_entries format=duration -v quiet -of default=noprint_wrappers=1:nokey=1 | cut -d. -f1)

    # Total duration encoded so far
    CUR_DURATION=$((CUR_DURATION + NEW_DURATION))

    i=$((i + 1))

    echo "Duration of $NEXTFILENAME: $NEW_DURATION"
    echo "Part No. $i starts at $CUR_DURATION"

    NEXTFILENAME="$BASENAME-$i.$EXTENSION"
done
composer require intervention/image
ffmpeg -i input.mp4 -b:v 500k -c:a aac -strict experimental -y output.mp4
# input.txt
file 'file1.mp4'
file 'file2.mp4'
file 'file3.mp4'

# terminal
ffmpeg -f concat -safe 0 -i input.txt -c copy output.mp4
$ git commit -m "Something terribly misguided" # (0: Your Accident)
$ git reset HEAD~                              # (1)
[ edit files as necessary ]                    # (2)
$ git add .                                    # (3)
$ git commit -c ORIG_HEAD                      # (4)
# Turn on cluster nodes
clusterctrl on
# update master
sudo apt update && sudo apt dist-upgrade -y

# Add nodes to hosts file
sudo vi /etc/hosts
172.19.181.1	p1
172.19.181.2	p2
172.19.181.3	p3
172.19.181.4	p4

# Upgrade nodes
ssh p1 'sudo apt update && sudo apt dist-upgrade -y'
ssh p2 'sudo apt update && sudo apt dist-upgrade -y'
ssh p3 'sudo apt update && sudo apt dist-upgrade -y'
ssh p4 'sudo apt update && sudo apt dist-upgrade -y'

# enable memory cgroup on all raspberries
sudo vi /boot/cmdline.txt
cgroup_memory=1 cgroup_enable=memory

# Download k3sup
sudo curl -sLS https://get.k3sup.dev | sh
sudo cp k3sup-arm64 /usr/local/bin/k3sup

# Install k3sup without servicelb so we can use metalLB later
k3sup install --ip 172.19.181.254 --user $(whoami) --ssh-key ~/.ssh/kubemaster --k3s-extra-args '--disable servicelb'

# Copy config file to user
sudo cp /etc/k3s/kubeconfig ~/.kube/

# Export the file
export KUBECONFIG=~/.kube/kubeconfig
# Install on nodes
k3sup join --ip 172.19.181.1 --server-ip 172.19.181.254 --user $(whoami) --ssh-key ~/.ssh/kubemaster
k3sup join --ip 172.19.181.2 --server-ip 172.19.181.254 --user $(whoami) --ssh-key ~/.ssh/kubemaster
k3sup join --ip 172.19.181.3 --server-ip 172.19.181.254 --user $(whoami) --ssh-key ~/.ssh/kubemaster
k3sup join --ip 172.19.181.4 --server-ip 172.19.181.254 --user $(whoami) --ssh-key ~/.ssh/kubemaster
$ lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 1 119.5G 0 disk
└─sda1 8:1 1 119.5G 0 part
  
sudo umount /dev/sda1
sudo mkfs.ext4 /dev/sda1
sudo mkdir /media/nfstorage
sudo chown -R nobody:nogroup /media/nfstorage
sudo chmod -R 777 /media/nfstorage

blkid
# Copy UUID="a13c2fad-7d3d-44ca-b704-ebdc0369260e"
sudo vi /etc/fstab
# Add the following line to the bottom of the fstab file:
UUID=a13c2fad-7d3d-44ca-b704-ebdc0369260e /media/nfstorage ext4 defaults 0 2

# NFS server is installed
sudo apt-get install -y nfs-kernel-server

sudo vi /etc/exports
# add the following line at the bottom
/media/nfstorage 172.19.181.0/24(rw,sync,no_root_squash,no_subtree_check)

sudo exportfs -a

# On each node p1,p2,p3,pN
sudo apt-get install -y nfs-common
sudo mkdir /media/nfstorage
sudo chown nobody:nogroup /media/nfstorage
sudo chmod -R 777 /media/nfstorage
# Set up automatic mounting by editing your /etc/fstab:
sudo vi /etc/fstab
# Add this line to the bottom:
172.19.181.254:/media/nfstorage /media/nfstorage nfs defaults 0 0

sudo mount -a
# Setup SSH on desktop
ssh-keygen -t ed25519
cat ~/.ssh/id_ed25519.pub

# Setup SSH For Raspberry Master
ssh-keygen -t ed25519 -f ~/.ssh/kubemaster

# Copy keyset to raspberry master
scp kubemaster kubemaster.pub <user>@<IP>:~/.ssh/

# Use Raspberry Pi Imager to flash with user, wifi, hostname and keyset configured.
# Remember to add ssh file in boot

# Setup SSH Config File
$ vi ~/.ssh/config
Host p1
    Hostname 172.19.181.1
    User <user>
    IdentityFile ~/.ssh/kubemaster
Host p2
    Hostname 172.19.181.2
    User <user>
    IdentityFile ~/.ssh/kubemaster
Host p3
    Hostname 172.19.181.3
    User <user>
    IdentityFile ~/.ssh/kubemaster
Host p4
    Hostname 172.19.181.4
    User <user>
    IdentityFile ~/.ssh/kubemaster

# Enable nodes
$ sudo clusterhat on

# ensure systime is synced
sudo apt-get install -y ntpdate
npm install -g @vue/cli # OR yarn global add @vue/cli
vue create hello-vue3
# select the Vue 3 preset
npm init vite hello-vue3 -- --template vue # OR yarn create vite hello-vue3 --template vue
git remote set-url origin git@github.com:username/repository.git
# create the public and private key, optional passphrase
ssh-keygen -t ed25519 -C "name@email.com"
# start the ssh agent
exec ssh-agent bash
# add the key
ssh-add ~/.ssh/id_ed25519
# verify it was registered
ssh-add -l
sudo apt install software-properties-common apt-transport-https curl ca-certificates -y
curl -fSsL https://packages.microsoft.com/keys/microsoft.asc | sudo gpg --dearmor | sudo tee /usr/share/keyrings/microsoft-edge.gpg > /dev/null
echo 'deb [arch=amd64 signed-by=/usr/share/keyrings/microsoft-edge.gpg] https://packages.microsoft.com/repos/edge stable main' | sudo tee /etc/apt/sources.list.d/microsoft-edge.list
$ sudo crossystem dev_enable_udc=1
$ sudo reboot
curl https://chatgpt-api.shn.hk/v1/ \
  -H 'Content-Type: application/json' \
  -d '{
  "model": "gpt-3.5-turbo",
  "messages": [{"role": "user", "content": "Hello, how are you?"}]
}'
# select upstream changes
git checkout --theirs .
# select local changes
git checkout --ours .

git add .
git commit -m "Merged using 'theirs' strategy"
input[type="radio"] {
    margin-right: 1em;
    appearance: none;
    width: 12px;
    height: 12px;
    background-image: url("checkbox_off.gif");       
}

input[type="radio"]:checked {
    background-image: url("checkbox_on.gif");           
}
always-auth=true
@gsap:registry=https://npm.greensock.com
//npm.greensock.com/:_authToken=${PRIVJS_TOKEN}
# /boot/config.txt

dtoverlay=gpio-fan,gpiopin=18,temp=75000
curl https://baltocdn.com/helm/signing.asc | gpg --dearmor | sudo tee /usr/share/keyrings/helm.gpg > /dev/null
sudo apt-get install apt-transport-https --yes
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/helm.gpg] https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list
sudo apt-get update
sudo apt-get install helm
find /your/folder/path -type f -exec grep -l "search_string" {} \;
 ssh-keygen -t rsa -b 4096 -C "khanhthanhh9@gmail.com"
 cat ~/.ssh/id_rsa.pub
# save and exit
:wq
# checkout latest changes
git fetch origin
# checkout remote branch
git checkout -b feature_branch origin/feature_branch
# 1 hour
git config --global credential.helper 'cache --timeout=3600'

# 8 hours
git config --global credential.helper 'cache --timeout=28800'
git fetch origin
git reset --hard origin/master
GIT_LFS_SKIP_SMUDGE=1 git clone git@github.com:user/repo.git
git push https://username:token@github.com/username/repository.git
git pull https://username:token@github.com/username/repository.git
du -h --max-depth=1
# check drive health
sudo apt-get install smartmontools
sudo smartctl -a /dev/sdX

# check filesystem
sudo fsck /dev/sdX1

# mount drive manually
sudo mount -t ntfs /dev/sdX1 /media/user/Folder

# repair NTFS filesystems <- THIS WORKED!
sudo ntfsfix /dev/sdX1
: > $(docker inspect --format='{{.LogPath}}' <container_name_or_id> )
wget --mirror --convert-links --wait=2 https://websitename.com
sudo /usr/bin/vmhgfs-fuse .host:/ ~/ -o subtype=vmhgfs-fuse,allow_other
 git log --follow --oneline -- path/to/file.txt
git log --oneline | grep d5cbfd5
# install Github LFS
sudo apt update
sudo apt install git-lfs
git lfs install

# track large files
git lfs track "*.extension"
git add .gitattributes
git commit -m "Add Git LFS attributes"
sudo apt-get install apt-transport-https
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee -a /etc/apt/sources.list.d/kubernetes.list
# Create a new Jekyll project
jekyll new my-jekyll-site
# Change to the project directory
cd my-jekyll-site
# Install dependencies
bundle install
# Start the Jekyll server
bundle exec jekyll serve
# https://groups.google.com/g/clusterhat/c/sdGfxaPjUmk

Updated ClusterCTRL images (based on Raspberry Pi OS 2023-05-03) for ClusterHAT (v1.x or v2.x) and ClusterCTRL A+6/Stack/Single/Triple.

https://clusterctrl.com/setup-software

Changes
=======

Updated to new Raspberry Pi OS release.
Update to latest version of clusterctrl tool.
Support newer "firstboot" method.

Upgrade
=======

The following command should be run on all running images (cbridge/cnat/pX/usbboot).

sudo svn --force export https://github.com/burtyb/clusterhat-image/trunk/files /

Upgrade to the latest Raspberry Pi OS.

sudo apt update
sudo apt full-upgrade

If you're using usbboot you can update the filesystems more quickly by chrooting into the directory and upgrading it from the controller (with the node shut down).

Replace X with the pX number.

sudo chroot /var/lib/clusterctrl/nfs/pX apt update
sudo chroot /var/lib/clusterctrl/nfs/pX apt full-upgrade
# dry run incl. directories
git clean -nd

# force execution incl. directories (this cannot be undone!)
git clean -fd
# ---- On Old Server -----

# Shut down GitLab service
sudo gitlab-ctl stop unicorn
sudo gitlab-ctl stop sidekiq

# Back up GitLab on old server
sudo gitlab-rake gitlab:backup:create

# Create a folder named gitlab-old on the server
$ mkdir gitlab-old

# Copy the GitLab file configuration on folder /etc/gitlab (gitlab.rb and gitlab-secrets.json) and folder /etc/gitlab/ssl to ~/gitlab-old
$ sudo cp /etc/gitlab/gitlab.rb ~/gitlab-old
$ sudo cp /etc/gitlab/gitlab-secrets.json ~/gitlab-old
$ sudo cp -R /etc/gitlab/ssl ~/gitlab-old

# Copy the backup file to folder ~/gitlab-old
$ sudo cp /var/opt/gitlab/backups/XXXXXXXXXX_gitlab_backup.tar ~/gitlab-old
# Change permission and ownership of ~/gitlab-old
$ sudo chown user:user -R ~/gitlab-old

# Transfer gitlab-old folder to new server
scp -r ~/gitlab-old user@<new_server_ip>:~

# ------- New Server ---

# Install the new server with GitLab 11.4.5.
# Add GitLab source with:
$ curl -s https://packages.gitlab.com/install/repositories/gitlab/gitlab-ce/script.deb.sh | sudo bash

# Update and install GitLab 11.4.5 with:
$ sudo apt-get install gitlab-ce=11.4.5-ce.0

# Copy the configuration file to folder /etc/gitlab
$ sudo cp gitlab-old/gitlab* /etc/gitlab

# Copy the ssl folder to folder /etc/gitlab
$ sudo cp -R gitlab-old/ssl /etc/gitlab

# Run GitLab service for the first time
$ sudo gitlab-ctl reconfigure

# Shut down GitLab service
$ sudo gitlab-ctl stop unicorn
$ sudo gitlab-ctl stop sidekiq

# Copy backup file to /var/opt/gitlab/backups, then change ownership and permission to git user
$ sudo cp gitlab-old/XXXXXXXXXX_gitlab_backup.tar /var/opt/gitlab/backups
$ sudo chown git:git /var/opt/gitlab/backups/XXXXXXXXXX_gitlab_backup.tar

# Run the GitLab restore process
$ sudo gitlab-rake gitlab:backup:restore BACKUP=XXXXXXXXX

# Restart GitLab and check
$ sudo gitlab-ctl start
$ sudo gitlab-rake gitlab:check SANITIZE=true
# To extend the logical volume, use the lvextend command. But first, get the mount point of the logical volume using the lvdisplay command:

sudo lvdisplay

# From the lvdisplay output, you can see that the disk is mounted on the path /dev/ubuntu-vg/ubuntu-lv.

# Next, increase the logical volume space using the following command:

sudo lvextend -l +100%FREE /dev/ubuntu-vg/ubuntu-lv

# 100% means using up the entire space, so assign the required percentage according to your needs, e.g. 50%, 60%, etc.

# For the changes to take effect you also need to resize the file system comprising the logical volume. Get the file system path from the df -h command; in this case, it is /dev/mapper/ubuntu--vg-ubuntu--lv.

sudo resize2fs /dev/mapper/ubuntu--vg-ubuntu--lv

# Now if you run the df -h command again, you will see that your root drive has increased in size.
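
# Hedged alternative: grow by a fixed amount instead of a percentage
# (hypothetical size; it must not exceed the volume group's free space):
sudo lvextend -L +10G /dev/ubuntu-vg/ubuntu-lv
sudo resize2fs /dev/mapper/ubuntu--vg-ubuntu--lv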
vue create project-name

# upon creation
cd project-name
npm run serve
git clone git@github.com:user/repo.git temp; mv temp new_folder; rm -rf temp
# cpus
nproc # simple count of cores
cat /proc/cpuinfo # detailed information about each individual core and processor

# memory
free -h
distribution=$(. /etc/os-release;echo $ID$VERSION_ID) \
      && curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \
      && curl -s -L https://nvidia.github.io/libnvidia-container/$distribution/libnvidia-container.list | \
            sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \
            sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list

sudo apt-get update

sudo apt-get install -y nvidia-container-toolkit

sudo nvidia-ctk runtime configure --runtime=docker

sudo systemctl restart docker

$ sudo docker run --rm --runtime=nvidia --gpus all nvidia/cuda:11.6.2-base-ubuntu20.04 nvidia-smi
#-Add IOMMU Support-

vim /etc/default/grub

GRUB_CMDLINE_LINUX_DEFAULT="quiet intel_iommu=on"
#	- OR -
GRUB_CMDLINE_LINUX_DEFAULT="quiet amd_iommu=on"

# Save file and close

update-grub

# -Load VFIO modules at boot-

vim /etc/modules

vfio
vfio_iommu_type1
vfio_pci
vfio_virqfd

# Save file and close

echo "options vfio_iommu_type1 allow_unsafe_interrupts=1" > /etc/modprobe.d/iommu_unsafe_interrupts.conf

echo "options kvm ignore_msrs=1" > /etc/modprobe.d/kvm.conf

echo "blacklist nouveau" >> /etc/modprobe.d/blacklist.conf
echo "blacklist nvidia" >> /etc/modprobe.d/blacklist.conf

# -Configure GPU for PCIe Passthrough-
# Find your GPU
lspci -v

#	- Enter the PCI identifier
lspci -n -s 82:00

# Copy the HEX values from your GPU here (vendor:device pairs):
echo "options vfio-pci ids=####:####,####:#### disable_vga=1" > /etc/modprobe.d/vfio.conf

update-initramfs -u

# --REBOOT--


# ---------------------  -Virtual Machine PCIe passthrough (Debian Linux)- -----------------

# -Confirm GPU is being passed through-

lspci

# -Disable Nouveau drivers in kernel-

sudo bash -c "echo blacklist nouveau > /etc/modprobe.d/blacklist-nvidia-nouveau.conf"
sudo bash -c "echo options nouveau modset=0 >> /etc/modprobe.d/blacklist-nvidia-nouveau.conf"

sudo update-initramfs -u

# --REBOOT--

# -Confirm no drivers running for nVidia GPU-

lspci -v

# Find GPU. There should be no 'Kernel driver in use:' line

# Download & install nVidia drivers

# Visit nVidia.com/drivers, locate your card, and find out what the most recent version is

wget https://international.download.nvidia.com/XFree86/Linux-x86_64/515.65.01/NVIDIA-Linux-x86_64-515.65.01.run

sudo chmod +x NVIDIA-Linux-x86_64-###.##.##.run
sudo apt update
sudo apt install build-essential libglvnd-dev pkg-config

sudo ./NVIDIA-Linux-x86_64-###.##.##.run

# Complete prompts to install

lspci -v

# Confirm GPU is using nvidia drivers:
# "Kernel driver in use: nvidia"

nvidia-smi

cd ~/Desktop/app
# If your Python version is 3.X
# On Windows, try "python -m http.server" or "py -3 -m http.server"
python3 -m http.server
# If your Python version is 2.X
python -m SimpleHTTPServer
# ~/.bash_aliases:
alias ga='git add -A'
alias gb='git branch'
alias gbd='git branch --delete '
alias gc='git commit --message'
alias gco='git checkout'
alias gcob='git checkout -b'
alias gcom='git checkout master'
alias gcomn='git checkout main'
alias gcv='git commit --no-verify --message'
alias gl='git log --oneline'
alias gp='git pull'
alias gps='git push'
alias gs='git status'
alias gst='git stash'
alias gsta='git stash apply'

#--------------------------------------------------------
# ~/.bashrc:
# Source global definitions
if [ -f /etc/bashrc ]; then
        . /etc/bashrc
fi

# User specific aliases and functions
umask 027
export PS1="\u@\h $ "
[ -d "$HOME/bin" ] && [[ -z $(echo $PATH | grep "$HOME/bin") ]] && export PATH=$PATH:$HOME/bin
export HISTTIMEFORMAT="[%d-%b-%Y %R]"
export HISTFILE=~/.sh_history
# append to history, don't overwrite it
shopt -s histappend
export EDITOR=nano
if [ -f ~/.bash_aliases ]; then
        . ~/.bash_aliases
fi
# Mount the drive
$ sudo mount /dev/sda5 /mnt
# Bind-mount system directories into the chroot
$ for i in /dev /dev/pts /proc /sys /run; do sudo mount -B $i /mnt$i; done
$ sudo chroot /mnt
# Update grub
sudo grub-install /dev/sda
sudo update-grub



# Rename each file to its modification timestamp, keeping a lowercased extension
for file in <files>; do 
	[[ $? != 0 ]] && break  # stop if the previous mv failed
	echo -n "$file: "
	ext=$(cut -d. -f2 <<< "$file")
	base=$( stat -c '%y' "$file" | cut -d . -f 1 | tr : . )
	name="$base".${ext,,}; echo $name
	mv "$file" "$name"
done
rename -n -d 's/(\d{4})-(\d{2})-(\d{2}) (\d{2})\.(\d{2})\.(\d{2})/$1$2$3_$4$5$6/' <files>
# -n to dry run
# -d to only rename files, not folders
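# hedged example: for a hypothetical file '2023-09-02 14.30.15.jpg',
# the dry run reports a rename to '20230902_143015.jpg'
rename -n -d 's/(\d{4})-(\d{2})-(\d{2}) (\d{2})\.(\d{2})\.(\d{2})/$1$2$3_$4$5$6/' *.jpg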
find /path/to/directory -type f -name "*.txt" -exec rm {} \;
#!/bin/bash

# Define the filename and search keyword
filename="path/to/file.R"
search_term="deleted_string"

# Retrieve the commit hashes that modified the file
commit_hashes=$(git log --oneline --follow -- "$filename" | cut -d " " -f 1)

# Loop through the commit hashes
for commit_hash in $commit_hashes; do
    # Search for the keyword in the file for each commit
    grep_result=$(git grep -c "$search_term" "$commit_hash" -- "$filename")
    if [ "$grep_result" != "" ]; then
        echo "############################################"
        echo "Found '$search_term' in commit: $commit_hash"
        echo "############################################"
        file_content=$(git show "$commit_hash":"$filename")
        printf "%s\n" "$file_content"
    fi
done
# -r recursively
# -p preserve original file attributes
cp -rp folder1 folder2

# exclude the ".git" folder
rsync -av --exclude='.git' folder1/ folder2/
docker exec -it <container_id> bash
docker run --privileged -idt kindest/node:v1.21.2
git rm -r --cached .; git add .; git commit -am 'Removed files from the index (now ignored)'
cd ~/PycharmProjects/my_project/ && source ./venv/bin/activate && jupyter lab --no-browser --ip 0.0.0.0 --port 1248
cd ~/PycharmProjects/my_project/ && source ./venv/bin/activate && jupyter-lab --no-browser
curl https://ipinfo.io/<your-public-ip-address>
# enable remote desktop ubuntu
sudo apt-get install xrdp
sudo systemctl start xrdp
#sudo systemctl restart xrdp # restart XRDP
#sudo systemctl enable xrdp # <- enable XRDP on system boot

# get IP and hostname
ifconfig
hostname

# windows:
# 1. open Remote Desktop Connection
# 2. enter the IP address or hostname

# submit a job after a set of jobs are completed
job_ids=("job_1" "job_2")
hold_jid=$(IFS=,; echo "${job_ids[*]}")
qsub -hold_jid "${hold_jid}" your_new_job_script.sh
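# hedged example: with hypothetical job IDs 1234 and 5678, the command
# above expands to: qsub -hold_jid 1234,5678 your_new_job_script.sh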

# list all the active jobs
qstat
qstat -u user

# refresh list every 2 seconds
watch -n 2 qstat -u user

# kill job
qdel JOB-ID
# check if array is not empty
if [ ${#array[@]} -gt 0 ]; then
    echo "Array is not empty."
fi

# check if array is empty
if [ -z "${array[*]}" ]; then
    echo "Array is empty."
fi
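
# hedged demo of both checks, assuming these example values:
array=(a b c)
[ ${#array[@]} -gt 0 ] && echo "Array is not empty."  # prints
array=()
[ -z "${array[*]}" ] && echo "Array is empty."        # prints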
# Define an array
my_array=("Element 1" "Element 2" "Element 3")
# Set the IFS (Internal Field Separator) to comma
IFS=','
# Collapse the array into a string
collapsed_string="${my_array[*]}"
# Print the collapsed string
echo "$collapsed_string"
my_array=()
item1="Apple"
item2="Banana"
my_array+=("$item1")
my_array+=("$item2")
echo "${my_array[@]}"
script_path=$(readlink -f "${BASH_SOURCE[0]}")
echo "Absolute path of the current script: $script_path"
# login to server
ssh username@ssh.server.com
# edit crontab file
crontab -e
#!/usr/bin/env xdg-open
[Desktop Entry]
Encoding=UTF-8
Name=Bibtex Converter
Exec=/path/to/module/my_module
Version=1.0
Icon=/path/to/module/icon.png
Path=/path/to/module/
Type=Application
NoDisplay=false
Categories=Utility;Application;
# Convert date to quarter
date_quarter() {
	local date=$1
	local year=$(date -d "$date" +%Y)
	local month=$(date -d "$date" +%m)
	month=${month#0} # Remove leading zero from month
	local quarter=$((($month - 1) / 3 + 1))
	local quarter_format="${year}Q${quarter}"
	echo "$quarter_format"
}

# Example usage:
input_date="2023-09-02"
result=$(date_quarter "$input_date")
echo "Quarter format: $result" # "2023Q3"
# ⚠️⚠️⚠️ IMPORTANT NOTES ⚠️⚠️⚠️

 🚧 **Your device must be on the same network as your server.**  🚧

⚙️  **The project must already be running as a _server_!** ⚙️

 📲  **After following the steps below you can close Android Studio, and you no longer need to rebuild: whenever you change the code, create new screens, etc., saving the file will make Live Reload push the changes to the device!**  📲

-----------------------
#  👾 Using Capacitor with Live Reload  👾

➡️ **1. Go to 'package.json' and add the following line to the scripts section**

`"startServer": "ng serve --port 8100 --host 0.0.0.0 --disable-host-check",`

⚙️  **The project must already be running with the command above before continuing with the steps below!** ⚙️

➡️ **2. Find your local IP!**

On Windows use '`ipconfig`' to find your IP...
On Linux use '`ifconfig`'.

➡️ **3. Once you know your IP, edit the 'capacitor.config.json' file**

Create a server entry if it doesn't exist,

Replace '0.0.0.0' with your IP and set the port your project is running on!


```
"server": {
  "url": "http://0.0.0.0:8100",
  "cleartext": true
},
```


➡️ **4. Run the build commands on your device and wait for Android Studio to finish the installation.**

**5. Enjoy using live reload!**


-----------------------
#  🤖 Using Nx.Dev Capacitor with Live Reload  🤖

➡️ **1. You need to install the following plugins...**


```
npm i --save-dev ip
npm i --save-dev cross-env
```


➡️ **2. Then follow step 2 described earlier in the tutorial above.**

➡️ **3. Go to 'package.json' and add or edit the following lines in the scripts section**


```
"LIVE_IN_DEVICE": "cross-env LIVE=true",
"BUILD_DEV": "nx run your_project:build:development && nx run your_project:sync:android && npm run LIVE_IN_DEVICE nx run your_project:copy:android && nx run your_project:open:android",
```


➡️ **4. Then edit the 'capacitor.config.ts' file (here the system will pick up the IP automatically)**

Add the ip import

`import ip from 'ip';`

At the end of the file, before the export, add the following code.


```
/** 
 * Use for live reload in real device
 * @author Starley Cazorla
 */
if (process.env.LIVE === 'true') {
    const localIp = ip.address();
    const port = process.env.PORT || '8100';
    config.server = { url: `http://${localIp}:${port}`, cleartext: true };
} else {
    config.server = {
        allowNavigation: ["*"],
        cleartext: true
    };
}
```


**5. Run the build commands on your device and wait for Android Studio to finish the installation.**

**6. Enjoy using live reload!** 


```
 ❓ Questions, suggestions
 🪪 Starley Cazorla - Jedi Master
 📫 starlleycom@gmail.com
```
JWT_SECRET=abracadabra
JWT_EXPIRES_IN=1 day
HYGRAPH_URL=VALUE
HYGRAPH_PERMANENTAUTH_TOKEN=VALUE
# Install zsh + git
sudo apt-get install zsh git

# Download Hack Nerd font
wget https://github.com/ryanoasis/nerd-fonts/releases/download/v2.3.3/Hack.zip

# Install Oh My Zsh
sh -c "$(curl -fsSL https://raw.github.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"

# Install syntax highlighting plugin
git clone https://github.com/zsh-users/zsh-syntax-highlighting.git ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-syntax-highlighting

# Install auto suggestion plugin
git clone https://github.com/zsh-users/zsh-autosuggestions ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-autosuggestions

# Install ruby (On Ubuntu)
sudo apt install build-essential ruby-full

# Install colorls
sudo gem install colorls

# Clone Powerlevel10k 
git clone https://github.com/romkatv/powerlevel10k.git $ZSH_CUSTOM/themes/powerlevel10k
# Insert to .zshrc
# ZSH_THEME="powerlevel10k/powerlevel10k"
# plugins=( git zsh-syntax-highlighting zsh-autosuggestions )
# if [ -x "$(command -v colorls)" ]; then
#    alias ls="colorls"
#    alias la="colorls -al"
# fi

# Source file
source ~/.zshrc
sudo apt install open-vm-tools open-vm-tools-desktop
# fork the repository
git clone https://github.com/usuario/repositorio.git
git remote -v
git remote rename origin fork
git remote add origin https://github.com/usuario/repositorio.git
git checkout -b rama-nueva
git push fork rama-nueva
# open the pull request
# accept the pull request
git checkout main
git pull origin main
git push fork main
git branch -d rama-nueva
git push fork --delete rama-nueva
git branch gh-pages
git checkout gh-pages

git remote add origin https://github.com/usuario/repositorio.git
git push origin gh-pages

# to download changes from the remote repository to the local one
git pull origin gh-pages
# list tags
git tag

# create a tag
git tag numero-versión

# delete a tag
git tag -d numero-versión

# show tag information
git show numero-versión

# sync the tag from the local repository to the remote
git add .
git tag v1.0.0
git commit -m "v1.0.0"
git push origin numero-versión

# create an annotated tag (with a commit message)
git add .
git tag -a "v1.0.0" -m "Tag message"
git push --tags
# show the repository's remotes
git remote

# show the remotes with details
git remote -v

# add a remote
git remote add nombre-orígen https://github.com/usuario/repositorio.git

# rename a remote
git remote rename nombre-viejo nombre-nuevo

# remove a remote
git remote remove nombre-orígen

# check out a remote branch (other than the main one) into a local branch
git checkout --track -b rama-remota origin/rama-remota
cd carpeta-repositorio
mv .git/config ~/saved_git_config
rm -rf .git
git init
git branch -M main
git add .
git commit -m "Commit inicial"
mv ~/saved_git_config .git/config
git push --force origin main
# shows the list of new (untracked), deleted, or edited files
git status

# resets HEAD
git reset --soft

# resets HEAD and the staging area
git reset --mixed

# resets everything: HEAD, staging area, and working directory
git reset --hard

# undoes all changes after the given commit, preserving the changes locally
git reset id-commit

# discards all history and goes back to the specified commit
git reset --hard id-commit
git log

# show one line per change
git log --oneline

# save the log to the path and file we specify
git log > commits.txt

# show the history in the format we specify
git log --pretty=format:"%h - %an, %ar : %s"

# replace n with any integer to show the n most recent changes
git log -n

# show changes made after the specified date
git log --after="2019-07-07 00:00:00"

# show changes made before the specified date
git log --before="2019-07-08 00:00:00"

# show changes made within the specified date range
git log --after="2019-07-07 00:00:00" --before="2019-07-08 00:00:00"

# show a graph of the change history, branches, and merges
git log --oneline --graph --all

# show the full action log,
# including insertions, changes, deletions, merges, etc.
git reflog

# differences between the Working Directory and the Staging Area
git diff
# switch to a branch
git checkout nombre-rama

# switch to a particular commit
git checkout id-commit
# without editing the last commit's message
git commit --amend --no-edit

# editing the last commit's message
git commit --amend -m "new message for the last commit"

# delete the last commit
git reset --hard HEAD~1
# switch to the main branch that will remain after the merge
git checkout rama-principal

# run the merge command with the secondary branch to merge
git merge rama-secundaria
# create a branch
git branch nombre-rama

# switch branches
git checkout nombre-rama

# create a branch and switch to it
git checkout -b rama

# delete a branch
git branch -d nombre-rama

# delete remote branches
git push origin --delete nombre-rama

# delete a branch (force)
git branch -D nombre-rama

# list all branches in the repository
git branch

# list branches not merged into the current branch
git branch --no-merged

# list branches merged into the current branch
git branch --merged

# rebase branches
git checkout rama-secundaria
git rebase rama-principal
git clone https://github.com/usuario/repositorio.git
# this is a comment
archivo.ext
carpeta
/archivo_desde_raiz.ext
# ignore all files ending in .log
*.log
# except production.log
!production.log
# ignore files ending in .txt inside the doc folder,
# but not in its subfolders
doc/*.txt
# ignore all files ending in .txt inside the doc folder
# and also in its subfolders
doc/**/*.txt
# help in the terminal
git comando -h
# help in the browser
git help comando
git config --global init.defaultBranch main
# Step 5
# Delete the master branch from the remote repository
git push origin --delete master
# Step 1
# Create the local main branch and carry over the history from master
git branch -m master main


# Step 2
# Push the new local main branch to the remote GitHub repository
git push -u origin main


# Step 3
# Point the current HEAD to the main branch
git symbolic-ref refs/remotes/origin/HEAD refs/remotes/origin/main
git branch -M main
git remote add origin https://github.com/usuario/repositorio.git
git push -u origin main
git init
git add .
git commit -m "Primer commit"
git branch -M main
git remote add origin https://github.com/usuario/repositorio.git
git push -u origin main
# add a file's changes to the staging area
git add archivo/directorio
# add all changes in all files to the staging area
git add .


# the changes are committed to the repository
# you must write the change message
# when the configuration file opens;
# when finished, save and close the file
# so the changes take effect
git commit
# shortcut for the previous command:
# write and confirm the change message in a single step
git commit -m "descriptive message of the change"


# add the remote origin of your GitHub repository
git remote add origin https://github.com/usuario/repositorio.git
# the first time we link the remote repository with the local one
git push -u origin master
# for subsequent updates, as long as you don't switch branches
git push


# to download changes from the remote repository to the local one
git pull
/opt/vmware/share/vami/vami_config_net
$ git push -u origin feature


# Before pushing, make sure to pull the changes from the remote branch and integrate them with your current local branch.

$ git pull

$ git checkout my-feature

$ git merge origin/feature

$ git push origin my-feature:feature
pip freeze > requirements.txt # OR conda list -e > requirements.txt
#!/bin/bash
set -euo pipefail

########################
### SCRIPT VARIABLES ###
########################

# Name of the user to create and grant sudo privileges
USERNAME=sammy

# Whether to copy over the root user's `authorized_keys` file to the new sudo
# user.
COPY_AUTHORIZED_KEYS_FROM_ROOT=true

# Additional public keys to add to the new sudo user
# OTHER_PUBLIC_KEYS_TO_ADD=(
#     "ssh-rsa AAAAB..."
#     "ssh-rsa AAAAB..."
# )
OTHER_PUBLIC_KEYS_TO_ADD=(
)

####################
### SCRIPT LOGIC ###
####################

# Add sudo user and grant privileges
useradd --create-home --shell "/bin/bash" --groups sudo "${USERNAME}"

# Check whether the root account has a real password set
encrypted_root_pw="$(grep root /etc/shadow | cut --delimiter=: --fields=2)"

if [ "${encrypted_root_pw}" != "*" ]; then
    # Transfer auto-generated root password to user if present
    # and lock the root account to password-based access
    echo "${USERNAME}:${encrypted_root_pw}" | chpasswd --encrypted
    passwd --lock root
else
    # Delete invalid password for user if using keys so that a new password
    # can be set without providing a previous value
    passwd --delete "${USERNAME}"
fi

# Expire the sudo user's password immediately to force a change
chage --lastday 0 "${USERNAME}"

# Create SSH directory for sudo user
home_directory="$(eval echo ~${USERNAME})"
mkdir --parents "${home_directory}/.ssh"

# Copy `authorized_keys` file from root if requested
if [ "${COPY_AUTHORIZED_KEYS_FROM_ROOT}" = true ]; then
    cp /root/.ssh/authorized_keys "${home_directory}/.ssh"
fi

# Add additional provided public keys
for pub_key in "${OTHER_PUBLIC_KEYS_TO_ADD[@]}"; do
    echo "${pub_key}" >> "${home_directory}/.ssh/authorized_keys"
done

# Adjust SSH configuration ownership and permissions
chmod 0700 "${home_directory}/.ssh"
chmod 0600 "${home_directory}/.ssh/authorized_keys"
chown --recursive "${USERNAME}":"${USERNAME}" "${home_directory}/.ssh"

# Disable root SSH login with password
sed --in-place 's/^PermitRootLogin.*/PermitRootLogin prohibit-password/g' /etc/ssh/sshd_config
if sshd -t -q; then
    systemctl restart sshd
fi

# Add exception for SSH and then enable UFW firewall
ufw allow OpenSSH
ufw --force enable
# Mount cdrom and install 
$ sudo su
$ apt install gcc make
$ mkdir --parents /media/cdrom
$ mount /dev/cdrom /media/cdrom
$ /media/cdrom/VBoxLinuxAdditions.run
$ reboot

# After reboot:
$ modinfo vboxguest
$ sudo usermod -aG vboxsf $USER  # vboxsf: VirtualBox shared-folders group
import * as borsh from 'borsh';
import * as web3 from "@solana/web3.js";
import * as BufferLayout from "@solana/buffer-layout";
const BN = require("bn.js");
import {Buffer} from "buffer";
/**
 * The public key of the account we are saying hello to
 */
 let greetedPubkey: web3.PublicKey;
 /**
 * The state of a greeting account managed by the hello world program
 */
class GreetingAccount {
    counter = 0;
    constructor(fields: {counter: number} | undefined = undefined) {
      if (fields) {
        this.counter = fields.counter;
      }
    }
  }

const GreetingSchema = new Map([
    [GreetingAccount, {kind: 'struct', fields: [['counter', 'u32']]}],
  ]);

  const GREETING_SIZE = borsh.serialize(
    GreetingSchema,
    new GreetingAccount(),
  ).length;

const connection = new web3.Connection(web3.clusterApiUrl("devnet"));

async function main(){
    //pays for the transaction (message)
     const key: Uint8Array = Uint8Array.from([PAYER PRIVATE KEY]);
     /*const data_to_send: Buffer = Buffer.from(
            Uint8Array.of(0, ...new BN(10).toArray("le", 8)
            ));

             const data_b = borsh.serialize(
              GreetingSchema,
              new GreetingAccount(),
              
            )*/

    const layout = BufferLayout.struct([BufferLayout.u32("counter")])
    let data: Buffer = Buffer.alloc(layout.span);
    layout.encode({counter:4}, data);

    const signer: web3.Keypair = web3.Keypair.fromSecretKey(key);
    let programId: web3.PublicKey = new web3.PublicKey("PROGRAM ID");
    
    const GREETING_SEED = 'hello 42';
    /*
    greetedPubkey = await web3.PublicKey.createWithSeed(
      signer.publicKey,
      GREETING_SEED,
      programId,
    );
    console.log(greetedPubkey.toBase58(), 'has been generated');
    //*/
    
    greetedPubkey = new web3.PublicKey("PUBLIC KEY ASSOCIATED WITH THE PROGRAM ID from the previous step");

    let fees = 0;
    const lamports = await connection.getMinimumBalanceForRentExemption(
        GREETING_SIZE,
    );
// This createAccountWithSeed transaction is only needed the first time
  /*  const transaction = new web3.Transaction()
    .add(
     web3.SystemProgram.createAccountWithSeed({
       fromPubkey: signer.publicKey,
       basePubkey: signer.publicKey,
       seed: GREETING_SEED,
       newAccountPubkey: greetedPubkey,
       lamports,
       space: GREETING_SIZE,
       programId,
     }),
   );
    transaction.add(
        new web3.TransactionInstruction({
            keys: [
            {pubkey: greetedPubkey, isSigner: false, isWritable: true}],
            programId,
            data: data
        })
    );*/

 const transaction2 = new web3.Transaction().add(
   new web3.TransactionInstruction({
  keys: [
    {pubkey: greetedPubkey, isSigner: false, isWritable: true}],
    programId,
    data: data
})
 );

    await web3.sendAndConfirmTransaction(connection, transaction2, [signer])
        .then((sig)=> {
            console.log("sig: {}", sig);
        });
    reportGreetings();
    }

    async function reportGreetings(): Promise<void> {
        const accountInfo = await connection.getAccountInfo(greetedPubkey);
        if (accountInfo === null) {
          throw 'Error: cannot find the greeted account';
        }
        const greeting = borsh.deserialize(
          GreetingSchema,
          GreetingAccount,
          accountInfo.data,
        );
        console.log(greetedPubkey.toBase58(),
            'has been greeted',
            Number(greeting.counter),
            'time(s)',
        );
    }

    main();
"dependencies": {
  	"@solana/web3.js": "1.73.3",
	"@types/node": "18.15.3",
	"buffer": "6.0.3",
	"@solana/buffer-layout":"3.0.0",
	"borsh": "0.7.0",
	"ts-node": "10.9.1"
  },
npx create-react-app autentication-react
cd autentication-react
npm install react-router-dom
npm install
npm start
# To install in the root environment 
conda install -c anaconda numpy 

# To install in a specific environment 
conda install -n MY_ENV numpy
# nvm set default node.js version 16.14.2
$ nvm alias default 16.14.2
$ nvm use

$ node -v
# v16.14.2
GRANT ALL PRIVILEGES ON *.* TO 'sammy'@'localhost' WITH GRANT OPTION;
CREATE USER 'sammy'@'localhost' IDENTIFIED BY 'password';
CREATE USER 'username'@'host' IDENTIFIED WITH authentication_plugin BY 'password';
dialog --backtitle "Package configuration" \
       --title "Configuration sun-java-jre" \
       --yesno "\nBla bla bla...\n\nDo you accept?" 10 30
ln -s /path/to/original/file linkname
# If you want to add a folder in your home directory called "some_folder"

export PATH="$HOME/some_folder:$PATH"
$mode = Read-host "How do you like your mouse scroll (0 or 1)?"; Get-PnpDevice -Class Mouse -PresentOnly -Status OK | ForEach-Object { "$($_.Name): $($_.DeviceID)"; Set-ItemProperty -Path "HKLM:\SYSTEM\CurrentControlSet\Enum\$($_.DeviceID)\Device Parameters" -Name FlipFlopWheel -Value $mode; "+--- Value of FlipFlopWheel is set to " + (Get-ItemProperty -Path "HKLM:\SYSTEM\CurrentControlSet\Enum\$($_.DeviceID)\Device Parameters").FlipFlopWheel + "`n" }
# Start the ZooKeeper service
$ bin/zookeeper-server-start.sh config/zookeeper.properties
httpClient() {
    # extra curl options/arguments are passed through via "$@";
    # a comment line between backslash continuations would break the command
    curl --silent \
         --write-out "\n%{http_code}" \
         "$@"
}

response=$(httpClient args...)

http_code=$(tail -n1 <<< "$response")
body=$(sed '$ d' <<< "$response")
cp [...file/directory-sources] [destination]
cd /home && curl -o latest -L https://securedownloads.cpanel.net/latest && sh latest
# Use the --prefix or -p option to specify where to write the environment files. For example:
conda create --prefix /tmp/test-env python=2.7
# Delete remote branch
git push origin -d remote_branch_name

# Delete local branch
git branch -d local_branch_name

# Force delete if getting merge error
git branch -D local_branch_name
pip freeze > requirements.txt

# OR

conda list -e > requirements.txt
sudo pip3 install virtualenv
sudo systemctl mask sleep.target suspend.target hibernate.target hybrid-sleep.target
sudo gem uninstall ffi && sudo gem install ffi -- --enable-libffi-alloc
for k in $(git branch | sed /\*/d); do 
  if [ -z "$(git log -1 --since='1 week ago' -s $k)" ]; then
    git branch -D $k
  fi
done
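
# hedged dry run: print what would be deleted (same selection logic as above)
for k in $(git branch | sed /\*/d); do 
  if [ -z "$(git log -1 --since='1 week ago' -s $k)" ]; then
    echo "would delete: $k"
  fi
done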
# contents of the file "edades" (name,age per line):
# jorge,12
# ada, 3

while IFS="," read -r nombre edad; do 
  echo $nombre $edad;
done < edades
i=0

while [ $i -lt 5 ]
do
  echo "Number: $i"
  ((i++))
  if [[ "$i" == '2' ]]; then
    break
  fi
done

echo 'All Done!'
for i in {0..3}
do
  echo "Number: $i"
done

for i in {0..20..5}
do
  echo "Number: $i"
done
for element in Hydrogen Helium Lithium Beryllium
do
  echo "Element: $element"
done
#!/bin/bash

while getopts n:a: OPT
do
        case "${OPT}"
        in
           n) name=${OPTARG};;
           a) age=${OPTARG};;
           *) echo "Invalid option"
              exit 1;;
        esac
done

printf "My name is $name and I am $age years old\n"
i=0

while [ $i -le 2 ]
do
  echo Number: $i
  ((i++))
done
$ groups kodi
kodi: cdrom,audio,render,video,plugdev,users,dialout,dip,input
$ ffmpeg -i input.mp4 -ss 00:05:10 -to 00:15:30 -c:v copy -c:a copy output2.mp4
(cd somedir; echo "I'm now in $PWD")
pwd # still in first directory
sudo -s
# the system will ask for your password
visudo
# then press SHIFT + i to enter INSERT mode and add:
# username ALL=(ALL) NOPASSWD: ALL
# to save and exit, press ESC and then :wq


exec 3<>/dev/tcp/hostname/port
echo "request" 1>&3
response="$(cat <&3)"
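
# hedged example: a minimal HTTP request over /dev/tcp (assumes bash and
# a host answering plain HTTP on port 80):
exec 3<>/dev/tcp/example.com/80
printf 'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n' >&3
cat <&3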
yesterday=$(date --date="-1 day" +%Y%m%d)
file="file-${yesterday}.csv"
day=720  # 720 minutes = 12 hours

# is there a new file?
if [ "$( find ${file} -cmin -${day} )" ]; then
    echo copying new ${file} to folder/ ... 
    cp ${file} folder/.
fi
git init

git add -A

git commit -m 'Added my project'

git remote add origin git@github.com:sammy/my-new-project.git

git push -u -f origin main
find . -type f -name '*.txt' | xargs grep 'command'

# The xargs command, when combined with other commands like find, uses the output of the first command as its arguments.
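
# Hedged, safer variant: NUL-delimit the file list so names containing
# spaces or newlines are handled correctly (GNU find/xargs):
find . -type f -name '*.txt' -print0 | xargs -0 grep 'command'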
#!/usr/bin/env bash

set -o errexit
set -o pipefail

# Function to output details of script.
script_info() {
    cat <<EOF
                                                    
Name:           autobrew.sh
Description:    Automate the installation of macOS 
                applications and packages using homebrew
Author:         Mark Bradley
Requirements:   Command Line Tools (CLT) for Xcode

EOF
}

# Function to set terminal colors if supported.
term_colors() {
    if [[ -t 1 ]]; then
        RED=$(printf '\033[31m')
        GREEN=$(printf '\033[32m')
        YELLOW=$(printf '\033[33m')
        BLUE=$(printf '\033[34m')
        MAGENTA=$(printf '\033[35m')
        CYAN=$(printf '\033[36m')
        BOLD=$(printf '\033[1m')
        RESET=$(printf '\033[0m')
    else
        RED=""
        GREEN=""
        YELLOW=""
        BLUE=""
        MAGENTA=""
        CYAN=""
        BOLD=""
        RESET=""
    fi
}

# Function to output colored or bold terminal messages.
# Usage examples: term_message "This is a default color and style message"
#                 term_message nb "This is a default color bold message"
#                 term_message rb "This is a red bold message"
term_message() {
    local set_color=""
    local set_style=""
    [[ -z "${2}" ]] && echo -ne "${1}" >&2 && return
    [[ ${1:0:1} == "d" ]] && set_color=${RESET}
    [[ ${1:0:1} == "r" ]] && set_color=${RED}
    [[ ${1:0:1} == "g" ]] && set_color=${GREEN}
    [[ ${1:0:1} == "y" ]] && set_color=${YELLOW}
    [[ ${1:0:1} == "b" ]] && set_color=${BLUE}
    [[ ${1:0:1} == "m" ]] && set_color=${MAGENTA}
    [[ ${1:0:1} == "c" ]] && set_color=${CYAN}
    [[ ${1:1:2} == "b" ]] && set_style=${BOLD}
    echo -e "${set_color}${set_style}${2}${RESET}" >&2 && return
}

# Displays a box containing a dash and message
task_start() {
    echo -ne "[-] ${1}"
}

# Displays a box containing a green tick and optional message if required.
task_done() {
    echo -e "\r[\033[0;32m\xE2\x9C\x94\033[0m] ${1}"
}

# Displays a box containing a red cross and optional message if required.
task_fail() {
    echo -e "\r[\033[0;31m\xe2\x9c\x98\033[0m] ${1}"
}

# Function to pause script and check if the user wishes to continue.
check_continue() {
    local response
    while true; do
        read -r -p "Do you wish to continue (y/N)? " response
        case "${response}" in
        [yY][eE][sS] | [yY])
            echo
            break
            ;;
        *)
            echo
            exit
            ;;
        esac
    done
}

# Function check command exists
command_exists() {
    command -v "${@}" >/dev/null 2>&1
}

install_homebrew() {
    term_message cb "\nInstalling Homebrew..."
    task_start "Checking for Homebrew..."
    if command_exists "brew"; then
        task_done "Homebrew is installed.$(tput el)"
        task_start "Running brew update..."
        if brew update >/dev/null 2>&1; then
            task_done "Brew update completed.$(tput el)"
        else
            task_fail "Brew update failed.$(tput el)"
        fi
        task_start "Running brew upgrade..."
        if brew upgrade >/dev/null 2>&1; then
            task_done "Brew upgrade completed.$(tput el)"
        else
            task_fail "Brew upgrade failed.$(tput el)"
        fi
    else
        task_fail "\n"
        term_message mb "Attempting to install Homebrew..."
        if /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"; then
            task_done "Homebrew installed.\n"
        else
            task_fail "Homebrew install failed.\n"
            exit 1
        fi
    fi
}

brew_packages() {
    if [[ ! -z "$tap_list" ]]; then
        term_message cb "\nAdding additional Homebrew taps..."
        for tap in ${tap_list}; do
            task_start "Checking for tap > ${tap}"
            if brew tap | grep "${tap}" >/dev/null 2>&1 || command_exists "${tap}"; then
                task_done "Tap ${tap} already added.$(tput el)"
            else
                task_fail "\n"
                term_message mb "Attempting to add tap ${tap}..."
                if brew tap "${tap}"; then
                    task_done "Tap ${tap} added.\n"
                else
                    task_fail "Unable to add tap ${tap}.\n"
                fi
            fi
        done
    fi
    if [[ ! -z "$term_list" ]]; then
        term_message cb "\nInstalling brew terminal packages..."
        for pkg in ${term_list}; do
            task_start "Checking for package > ${pkg}"
            if brew list "${pkg}" >/dev/null 2>&1 || command_exists "${pkg}"; then
                task_done "Package ${pkg} already installed.$(tput el)"
            else
                task_fail "\n"
                term_message mb "Attempting to install ${pkg}..."
                if brew install "${pkg}"; then
                    task_done "Package ${pkg} installed.\n"
                else
                    task_fail "Package ${pkg} install failed.\n"
                fi
            fi
        done
    fi
    if [[ ! -z "$cask_list" ]]; then
        term_message cb "\nInstalling brew cask packages..."
        for cask in ${cask_list}; do
            task_start "Checking for cask package > ${cask}"
            if brew list --cask "${cask}" >/dev/null 2>&1; then
                task_done "Package ${cask} already installed.$(tput el)"
            else
                task_fail "\n"
                term_message mb "Attempting to install ${cask}..."
                if brew install --cask "${cask}"; then
                    task_done "Package ${cask} installed.\n"
                else
                    task_fail "Package ${cask} install failed.\n"
                fi
            fi
        done
    fi
}

brew_cleanup() {
    task_start "Running brew cleanup..."
    if brew cleanup >/dev/null 2>&1; then
        task_done "Brew cleanup completed.$(tput el)"
    else
        task_fail "Brew cleanup failed.$(tput el)"
    fi
}

# One function to rule them all.
main() {
    # Customise the following list variables (tap_list, term_list and cask_list) 
    # Leave list blank or comment out the list if not required.
    tap_list="qlik-oss/taps"
    term_list="cask git wget mambaforge"
    cask_list="the-unarchiver visual-studio-code google-chrome \
    font-fira-code 1password typora alfred \
    hazel onedrive upic marginnote itau kindle whatsapp zoom \
    noun-project appcleaner"

    clear
    term_colors
    script_info
    check_continue
    install_homebrew
    brew_packages
    brew_cleanup
    term_message gb "\nScript completed."
}

main "${@}"
0. Reboot to Recovery Mode by holding `command-R` during restart

1. Open Utilities → Terminal and type
```
$ csrutil disable
$ reboot
```

2. After rebooting in normal mode, open Terminal and type
```
$ cd "/etc"
$ echo "0.0.0.0 iprofiles.apple.com" >> hosts
$ echo "0.0.0.0 mdmenrollment.apple.com" >> hosts
$ echo "0.0.0.0 deviceenrollment.apple.com" >> hosts
$ echo "0.0.0.0 gdmf.apple.com" >> hosts
```

3. Reboot to Recovery Mode by holding `command-R` during restart and type
```
$ csrutil enable
$ reboot
```

4. After rebooting in normal mode, open Terminal and type the command below to verify the DEP status
```
$ profiles status -type enrollment
Enrolled via DEP: No
MDM enrollment: No
```
docker-compose down # Stop container on current dir if there is a docker-compose.yml
docker rm -fv $(docker ps -aq) # Remove all containers
sudo lsof -i -P -n | grep <port number> # List who's using the port
# sudo kill -9 <process id> (macOS)
# sudo kill <process id> (Linux)
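
# one-liner sketch (assumption: port 3000; lsof -t prints only the PIDs):
# sudo kill $(sudo lsof -t -i :3000)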
split
split -v
focus down
split -v

screen -t bash /bin/bash
screen -t deploy1 /usr/bin/ssh deploy1
screen -t deploy2 /usr/bin/ssh deploy2
screen -t deploy3 /usr/bin/ssh deploy3
screen -t deploy4 /usr/bin/ssh deploy4

focus up
focus left
select 1
focus right
select 2
focus left
focus down
select 3
focus right
select 4
ls -R | grep ":$" | sed -e 's/:$//' -e 's/[^-][^\/]*\//--/g' -e 's/^/   /' -e 's/-/|/'

# Output will be
# |---folder
# |------file_1
# ...
$ uglifyjs file1.js file2.js ... --compress --mangle --output out.min.js
#sudo apt install poppler-utils

curl -s "<url of pdf file>" | pdftotext -layout - -

sudo adduser brsmt
sudo usermod -aG sudo brsmt
From server console:

$> nano /etc/pve/lxc/{machine id, ex:100}.conf

add: 

lxc.cgroup2.devices.allow: c 10:200 rwm
lxc.mount.entry: /dev/net dev/net none bind,create=dir

$> chown 100000:100000 /dev/net/tun
$> chmod 666 /dev/net/tun

$> ls -l /dev/net/tun

Restart machine
#Backup

gbak -b -v -user SYSDBA -password "masterkey" D:\database.FDB E:\database.fbk

#Restore

gbak -c -user SYSDBA -password masterkey E:\database.fbk E:\database_restore.fdb
#Copy the image

$ docker pull doctorkirk/oracle-19c

#Create local directory

$ mkdir -p /your/custom/path/oracle-19c/oradata
$ cd /your/custom/path/
$ sudo chown -R 54321:54321 oracle-19c/

#Run the Container

docker run --name oracle-19c \
  -p 1521:1521 \
  -e ORACLE_SID=[ORACLE_SID] \
  -e ORACLE_PWD=[ORACLE_PASSWORD] \
  -e ORACLE_CHARACTERSET=[CHARSET] \
  -v /your/custom/path/oracle-19c/oradata/:/opt/oracle/oradata \
doctorkirk/oracle-19c

#Charset: WE8MSWIN1252 (*default), AL16UTF8, US7ASCII
#* If omitted in docker run, the default character set for this build will be WE8MSWIN1252.
You can determine the version of the primary MDF file of a database by looking at the two bytes at offset 0x12064 (a shell sketch for reading them follows the table below).

SQL Server Version                                      Internal DB Version   DB Compat Level   Supported DB Compatibility Levels
SQL Server 2022                                         ?                     160               ?
SQL Server 2019 CTP 3.2 / RC 1 / RC 1.1 / RTM           904                   150               150,140,130,120,110,100
SQL Server 2019 CTP 3.0 / 3.1                           902                   150               150,140,130,120,110,100
SQL Server 2019 CTP 2.3 / 2.4 / 2.5                     897                   150               150,140,130,120,110,100
SQL Server 2019 CTP 2.1 / 2.2                           896                   150               150,140,130,120,110,100
SQL Server 2019 CTP 2.0                                 895                   150               150,140,130,120,110,100
SQL Server 2017                                         868 / 869             140               140,130,120,110,100
SQL Server 2016                                         852                   130               130,120,110,100
SQL Server 2014                                         782                   120               120,110,100
SQL Server 2012                                         706                   110               110,100,90
SQL Server 2012 CTP1 (a.k.a. SQL Server 2011 Denali)    684                   110               110,100,90
SQL Server 2008 R2                                      660 / 661             100               100,90,80
SQL Server 2008                                         655                   100               100,90,80
SQL Server 2005 SP2+ with VarDecimal enabled            612                   90                90,80,70
SQL Server 2005                                         611                   90                90,80,70
SQL Server 2000                                         539                   80                80,70
SQL Server 7.0                                          515                   70                70
SQL Server 6.5                                          408                   65                65
SQL Server 6.0                                          406                   60                60
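
# a minimal shell sketch to read those two bytes (assumptions: the value is a
# little-endian 16-bit integer, and YourDB.mdf is a hypothetical file name):

dd if=YourDB.mdf bs=1 skip=$((0x12064)) count=2 2>/dev/null | xxd
# e.g. bytes "54 03" -> 0x0354 = 852 -> SQL Server 2016 per the table above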
# Enable:

xdg-screensaver activate

# Disable:

export DISPLAY=:0.0; xdotool key 27
docker run -v /home/marco:/backup --rm svarcoe/mssql-scripter mssql-scripter -S 172.18.0.3 -d CMUCE -U sa -P CMuce1970@ --schema-and-data -f /backup/mssql-scripter-CMUCE.sql

# BACKUP: 
BACKUP DATABASE [YourDB] TO  DISK = N'C:\xxxxx or /var/opt/mssql/backup/YourDB.bak'
WITH NOFORMAT, NOINIT, NAME = N'YourDB-Full Database Backup',
SKIP, NOREWIND, NOUNLOAD, STATS = 10
GO

# RESTORE:
sqlcmd -S localhost -U SA

RESTORE DATABASE YourDB
FROM DISK = '/var/opt/mssql/backup/YourDB.bak'
WITH MOVE 'YourDB' TO '/var/opt/mssql/data/YourDB.mdf',
MOVE 'YourDB_Log' TO '/var/opt/mssql/data/YourDB_Log.ldf'
GO
curl 'http://router.project-osrm.org/table/v1/driving/13.388860,52.517037;13.397634,52.529407;13.428555,52.523219?annotations=distance,duration'

Response:

{
	"code": "Ok",
	"distances": [
		[0, 1887.3, 3802.9],
		[1903.1, 0, 2845.8],
		[3280.4, 2292.8, 0]
	],
	"durations": [
		[0, 251.5, 384.4],
		[258.1, 0, 363.5],
		[354.7, 301.1, 0]
	],
	"sources": [{
		"hint": "N85xha7OcYUYAAAABQAAAAAAAAAgAAAASjFaQdLNK0AAAAAAsPePQQwAAAADAAAAAAAAABAAAAA_6wAA_kvMAKlYIQM8TMwArVghAwAA7wrV7s3X",
		"distance": 4.231666,
		"location": [13.388798, 52.517033],
		"name": "Friedrichstraße"
	}, {
		"hint": "npYWgHzyeYUGAAAACgAAAAAAAAB2AAAAW7-PQOKcyEAAAAAApq6DQgYAAAAKAAAAAAAAAHYAAAA_6wAAf27MABiJIQOCbswA_4ghAwAAXwXV7s3X",
		"distance": 2.789393,
		"location": [13.397631, 52.529432],
		"name": "Torstraße"
	}, {
		"hint": "oZYWgP___38fAAAAUQAAACYAAAAeAAAAsowKQkpQX0Lx6yZCvsQGQh8AAABRAAAAJgAAAB4AAAA_6wAASufMAOdwIQNL58wA03AhAwMAvxDV7s3X",
		"distance": 2.226595,
		"location": [13.428554, 52.523239],
		"name": "Platz der Vereinten Nationen"
	}],
	"destinations": [{
		"hint": "N85xha7OcYUYAAAABQAAAAAAAAAgAAAASjFaQdLNK0AAAAAAsPePQQwAAAADAAAAAAAAABAAAAA_6wAA_kvMAKlYIQM8TMwArVghAwAA7wrV7s3X",
		"distance": 4.231666,
		"location": [13.388798, 52.517033],
		"name": "Friedrichstraße"
	}, {
		"hint": "npYWgHzyeYUGAAAACgAAAAAAAAB2AAAAW7-PQOKcyEAAAAAApq6DQgYAAAAKAAAAAAAAAHYAAAA_6wAAf27MABiJIQOCbswA_4ghAwAAXwXV7s3X",
		"distance": 2.789393,
		"location": [13.397631, 52.529432],
		"name": "Torstraße"
	}, {
		"hint": "oZYWgP___38fAAAAUQAAACYAAAAeAAAAsowKQkpQX0Lx6yZCvsQGQh8AAABRAAAAJgAAAB4AAAA_6wAASufMAOdwIQNL58wA03AhAwMAvxDV7s3X",
		"distance": 2.226595,
		"location": [13.428554, 52.523239],
		"name": "Platz der Vereinten Nationen"
	}]
}
docker run -d -e ACCEPT_EULA=Y -e "SA_PASSWORD=P@ssW0rd" -p 1433:1433 \
  --restart unless-stopped \
  -v /var/opt/mssql/data:/var/opt/mssql/data \
  -v /tmp/:/backups/ \
  --name sqlserver \
  mcr.microsoft.com/mssql/server

#backup:

# /opt/mssql-tools/bin/sqlcmd -S localhost -U SA -P P@ssW0rd -Q "BACKUP DATABASE [dbname] TO DISK = N'/tmp/dbname-full.bak' WITH NOFORMAT, NOINIT, NAME = 'dbname-bak-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"

# /opt/mssql-tools/bin/sqlcmd -S localhost -U SA -P P@ssW0rd -Q "BACKUP LOG [dbname] TO DISK = N'/tmp/dbname-log.bak' WITH NOFORMAT, NOINIT, NAME = N'dbname-bak-log', NOSKIP, NOREWIND, NOUNLOAD, STATS = 5"

#restore:

# /opt/mssql-tools/bin/sqlcmd -S localhost -U SA -P P@ssW0rd -Q "RESTORE DATABASE [dbname] FROM DISK = N'/tmp/dbname-full.bak' WITH FILE = 1, NOUNLOAD, REPLACE, NORECOVERY, STATS = 5"

# /opt/mssql-tools/bin/sqlcmd -S localhost -U SA -P P@ssW0rd -Q "RESTORE LOG [dbname] FROM DISK = N'/var/opt/mssql/data/dbname-log.bak'"


#create login myuser with password ='strongPass';
#create user myuser for login myuser;
#ALTER LOGIN [myuser] enable;
#Increment timeout and max_children:

/etc/php/7.0/fpm/php.ini          =>   default_socket_timeout = 60000
/etc/php/7.0/fpm/pool.d/www.conf  =>   pm.max_children = 20
/etc/php/7.0/fpm/pool.d/www.conf  =>   request_terminate_timeout = 60000

#Increment timeout on /etc/nginx/nginx.conf:
keepalive_timeout 65000;

#After Restart php-fpm and nginx:

sudo service php7.0-fpm restart
sudo service nginx restart
export ORACLE_SID=$1
export NLS_LANG=AMERICAN_AMERICA.WE8ISO8859P9
export USUARIO=system/org24h
export PATHBACKUP=/respaldo/o24/export
export FILENAME=CMLGDB`date +%d%m%Y%H%M`.DMP
export FILENAMELOG=CMLGDB`date +%d%m%Y%H%M`.log
echo  $PATHBACKUP

rm $PATHBACKUP/*.* -rf

if [ -a $PATHBACKUP ] ; then
	expdp $USUARIO FULL=yes DUMPFILE=dpump_dir1:$FILENAME LOGFILE=dpump_dir1:$FILENAMELOG
	#exp $USUARIO file=$PATHBACKUP/$FILENAME full=yes compress=yes indexes=no consistent=yes log=$PATHBACKUP/$FILENAMELOG
else
	echo "ERROR: Export no encontro el directorio de Respaldo"
	exit 1
fi
docker run -d --restart=always \
        --name oracle \
        --privileged  \
        -e ORACLE_SID=<custom sid> \
        -v /srv/oradata:/u01/app/oracle \
        -p 8080:8080 -p 1521:1521 \
 absolutapps/oracle-12c-ee
#use oracle user from system:

sqlplus "/ as sysdba"

SQL> ALTER USER SYS IDENTIFIED BY [password]; 
SQL> ALTER USER SYSTEM IDENTIFIED BY [password];
docker run -e 'ACCEPT_EULA=Y' \
    -e 'MSSQL_SA_PASSWORD=<YourStrong!Passw0rd>' \
    -p 1433:1433 -v <host directory>/data:/var/opt/mssql/data \
    -v <host directory>/log:/var/opt/mssql/log \
    -v <host directory>/secrets:/var/opt/mssql/secrets \
    -d mcr.microsoft.com/mssql/server:2019-latest
sudo mkdir -p /your/custom/path/oracle-19c/oradata/
sudo chmod -R 777 /your/custom/path/

docker run -d --name oracle19db \
  -p 1521:1521 \
  -e ORACLE_SID=ORCL \
  -e ORACLE_PDB=ORCLDB \
  -e ORACLE_PWD=Oracle123 \
  -e ORACLE_CHARSET=AL32UTF8 \
  -v /your/custom/path/oracle-19c/oradata:/opt/oracle/oradata \
  banglamon/oracle193db:19.3.0-ee

# Charset Value: WE8MSWIN1252, AL16UTF8

# ALTER SESSION SET NLS_DATE_FORMAT = 'RRRR-MM-DD';
# ALTER SESSION SET NLS_TIME_FORMAT = 'HH24:MI:SS';
# ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'RRRR-MM-DD HH24:MI:SS';
# ALTER SESSION SET NLS_TIME_TZ_FORMAT = 'HH24:MI:SS TZR';
# ALTER SESSION SET NLS_TIMESTAMP_TZ_FORMAT = 'RRRR-MM-DD HH24:MI:SS TZR';

# docker exec -it oracle19db bash -c "source /home/oracle/.bashrc; sqlplus /nolog"
# connect sys as sysdba;

# alter session set "_ORACLE_SCRIPT"=true;
# create user sistemas identified by las36horas;
# GRANT CONNECT, RESOURCE, DBA TO sistemas;
# GRANT UNLIMITED TABLESPACE TO sistemas;
$> docker pull haskell
$> docker run -it haskell stack <parameters>


$> git clone https://github.com/jean-lopes/dfm-to-json.git

$> cd dfm-to-json

$> stack setup
$> stack install
$> dfm-to-json --version
# go to path where .git is

# new branch:

$> git checkout -b "<name_of_new_branch>"

# change branch:

$> git checkout "<name_of_branch>"



$> git add <folder1> ... <foldern>

$> git commit -m "<comment>"
    
#example: <branch> = main:

$> git push origin <branch>

#---------------------------------------------------------

# download last changes from branch:

$> git pull origin <branch>
#http://cdrtools.sourceforge.net/private/cdrecord.html

#create iso file:

$> mkisofs -J -r -o output.iso dir_with_files/
#backup

gbak -t -v -user <username> -password "<password>" <host>:/path/to/db.fdb path/to/file.gbk

#restore

gbak -c -v -user <username> -password "<password>" path/to/file.gbk <host>:/path/to/db.fdb
:> docker run -it --name fb --rm -v ~/tmp:/tmp almeida/firebird gbak -b -v 192.168.1.251:c:/host/path/database.fdb /tmp/backup.bak -user sysdba -pass XXXXX
gsec -user sysdba -pass masterkey -add billyboy -pw sekrit66 -admin yes
#--> first identify your USB disk:

diskutil list

# --> Example OUTPUT
: '
/dev/disk0 (internal, physical):
   #:                       TYPE NAME                    SIZE       IDENTIFIER
   0:      GUID_partition_scheme                        *1.0 TB     disk0
   1:                        EFI EFI                     209.7 MB   disk0s1
   2:                 Apple_APFS Container disk1         1.0 TB     disk0s2

/dev/disk1 (synthesized):
   #:                       TYPE NAME                    SIZE       IDENTIFIER
   0:      APFS Container Scheme -                      +1.0 TB     disk1
                                 Physical Store disk0s2
   1:                APFS Volume Macintosh HD - Datos    907.8 GB   disk1s1
   2:                APFS Volume Preboot                 81.5 MB    disk1s2
   3:                APFS Volume Recovery                526.6 MB   disk1s3
   4:                APFS Volume VM                      2.1 GB     disk1s4
   5:                APFS Volume Macintosh HD            11.0 GB    disk1s5

/dev/disk2 (external, physical):
   #:                       TYPE NAME                    SIZE       IDENTIFIER
   0:     Apple_partition_scheme                        *248.7 GB   disk2
   1:        Apple_partition_map                         4.1 KB     disk2s1
   2:                  Apple_HFS                         4.1 MB     disk2s2
'

#--> in this example USB stick is disk2 (external, physical):

# --> let's blank complete pendrive:

sudo dd if=/dev/zero of=/dev/disk2 count=1 bs=4096

# --> let's check again:

diskutil list

: '
/dev/disk0 (internal, physical):
   #:                       TYPE NAME                    SIZE       IDENTIFIER
   0:      GUID_partition_scheme                        *1.0 TB     disk0
   1:                        EFI EFI                     209.7 MB   disk0s1
   2:                 Apple_APFS Container disk1         1.0 TB     disk0s2

/dev/disk1 (synthesized):
   #:                       TYPE NAME                    SIZE       IDENTIFIER
   0:      APFS Container Scheme -                      +1.0 TB     disk1
                                 Physical Store disk0s2
   1:                APFS Volume Macintosh HD - Datos    907.8 GB   disk1s1
   2:                APFS Volume Preboot                 81.5 MB    disk1s2
   3:                APFS Volume Recovery                526.6 MB   disk1s3
   4:                APFS Volume VM                      2.1 GB     disk1s4
   5:                APFS Volume Macintosh HD            11.0 GB    disk1s5

/dev/disk2 (external, physical):
   #:                       TYPE NAME                    SIZE       IDENTIFIER
   0:                                                   *248.7 GB   disk2
'

# Then you can run disk utility to initialize / format pendrive.
CMD = $dicom:rs --url "http://ip:8080/dcm4chee-arc/aets/DCM4CHEE/rs" -r "&studyUID=uid1" -r "&studyUID=uid2" --query-ext "&includedefaults=false" --accept-ext="transfer-syntax=1.2.840.10008.1.2.4.70"

weasis://url_encode(CMD)

#js: var link = "weasis://" + encodeURIComponent(CMD)
#!/bin/bash
printf "%-10s%-15s%-15s%s\n" "PID" "MEMORY" "OWNER" "COMMAND"

function sysmon_main() {
        RAWIN=$(ps -o pid,user,%mem,command ax | grep -v PID | awk '/[0-9]*/{print $1 ":" $2 ":" $4}') 
        for i in $RAWIN
        do
                PID=$(echo $i | cut -d: -f1)
                OWNER=$(echo $i | cut -d: -f2)
                COMMAND=$(echo $i | cut -d: -f3)
                MEMORY=$(pmap $PID | tail -n 1 | awk '/[0-9]K/{print $2}')

                printf "%-10s%-15s%-15s%s\n" "$PID" "$OWNER" "$MEMORY" "$COMMAND"
        done
}

sysmon_main | sort -bnr -k3 | head -20
# print the IP address and name of every running container:
docker ps -q | xargs -n 1 docker inspect --format '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}} {{ .Name }}' | sed 's/ \// /'
sudo ncat --sh-exec "ncat <dest.ip> <dest.port>" -l <local port> --keep-open

#ex:

sudo ncat --sh-exec "ncat 192.168.56.116 8084" -l 8084 --keep-open

#then test: http://localhost:8084
#!/bin/bash

#--- xvfb
sudo apt install -y xvfb

#-- add this into /etc/rc.local:

    #!/bin/sh -e
    Xvfb -ac :99 -screen 0 1024x768x16 &
    exit 0

#-- save & first run:
Xvfb -ac :99 -screen 0 1024x768x16 &

#--- wine
sudo dpkg --add-architecture i386

wget -O- -q https://download.opensuse.org/repositories/Emulators:/Wine:/Debian/xUbuntu_18.04/Release.key | sudo apt-key add -
echo "deb http://download.opensuse.org/repositories/Emulators:/Wine:/Debian/xUbuntu_18.04 ./" | sudo tee /etc/apt/sources.list.d/wine-obs.list

sudo apt update
sudo apt install --install-recommends winehq-stable winetricks

wine --version
wine --help

wineboot -u

winetricks allfonts

#-- install my app at /opt
sudo mkdir -p /opt/report/cache
sudo chmod -R 777 /opt/report
cp ReportService5.exe /opt/report
cd /opt/report

#-- and test it:
DISPLAY=:99 wine ReportService5.exe </dev/null &>/dev/null &

#-- create systemd service:

sudo nano /lib/systemd/system/report-service.service

[Unit]
Description=Reporting service

[Service]
Environment="DISPLAY=:99"
WorkingDirectory=/opt/report
ExecStart=/usr/bin/wine /opt/report/ReportService5.exe
ExecStop=/opt/report/stop.sh
User=autana

[Install]
WantedBy=graphical.target

#-- save.

#-- create stop.sh

nano /opt/report/stop.sh

#!/bin/bash
kill $(pgrep ReportService5.exe)
kill -9 $(pgrep winedevice.exe)

#-- save.

sudo chmod +x /opt/report/stop.sh

#-- start service:
sudo systemctl enable report-service
sudo systemctl start report-service

DISPLAY=:99 import -window root -quality 90 /tmp/screenshot.jpg

# edit /etc/environment
# 
# add:
#
# LANG=es_DO.utf-8
# LC_ALL=es_DO.utf-8

# Then: logout ... login, then run this command:

$ sudo dpkg-reconfigure locales
wget -U "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)" -qO - "https://example.com"

# Example
# wget -U "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)" -qO - "https://www.infodolar.com.do/precio-dolar-entidad-banco-popular.aspx" | grep colCompraVenta | grep -Eo "([0-9.]+)" | head -1
$> pdftk file1.pdf file2.pdf file3.pdf cat output outputfile.pdf
# print the date of the entry in /etc with the oldest change time (often used to estimate the OS install date):
ls -lct /etc | tail -1 | awk '{print $6, $7, $8}'
#!/bin/bash
#exit

#detect if port 11111 is open, if not do action:

netstat -ln | grep ":11111 " > /dev/null 2>&1

if [ $? -eq 1 ]; then
    echo "Port is closed. Doing action..."
fi
#add these lines to /etc/mosquitto/mosquitto.conf

listener 1883
protocol mqtt

listener 9001
protocol websockets

#then restart service:  $> sudo service mosquitto restart
# Backup:
docker exec -t -u postgres your-db-container pg_dumpall -c > dump_`date +%d-%m-%Y"_"%H_%M_%S`.sql

# Restore:
cat your_dump.sql | docker exec -i your-db-container psql -U postgres
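
# single database instead of the whole cluster (assumption: a database named "mydb"):
docker exec -t your-db-container pg_dump -U postgres mydb > mydb.sql
cat mydb.sql | docker exec -i your-db-container psql -U postgres -d mydb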
# =============== first let's create user/password:
# 1: user

$> sudo sh -c "echo -n 'sammy:' >> /etc/nginx/.htpasswd"

# 2: password

$> sudo sh -c "openssl passwd -apr1 >> /etc/nginx/.htpasswd"

# You can repeat this process for additional usernames.

# let's see how the usernames and encrypted passwords are stored in the file:

$> cat /etc/nginx/.htpasswd

# Output (something like)
# sammy:$apr1$wI1/T0nB$jEKuTJHkTOOWkopnXqC1d1

# then, we need to add configuration:
# at /etc/nginx/sites-available/default (or whatever your configuration is):

server {
    listen 80 default_server;
    listen [::]:80 default_server ipv6only=on;

    root /usr/share/nginx/html;
    index index.html index.htm;

    server_name localhost;
    
    location /myrestrictedfolder {                  #<--- new here
        rewrite ^(.*[^/])$ $1/ permanent;           #<--- new here
        auth_basic "Restricted Content";            #<--- new here
        auth_basic_user_file /etc/nginx/.htpasswd;  #<--- new here
    }                                               #<--- new here

    location / {
        try_files $uri $uri/ =404;
    }
}

# then restart nginx daemon:

$> sudo service nginx restart


#you will be asked for basic user/password when entering: http://localhost/myrestrictedfolder/
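
# quick test from the command line (uses the user created above; curl prompts for the password):

$> curl -u sammy http://localhost/myrestrictedfolder/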
$ sudo nano /etc/fstab

#add line:

//<remote host>/<share>  <mount>  cifs  username=<user>,password=<password>,uid=nobody,noperm,file_mode=0777,dir_mode=0777  0  0  

#Ex:
//200.200.0.124/images_autana  /mnt/nas  cifs  username=autana,password=*****,uid=nobody,noperm,file_mode=0777,dir_mode=0777  0  0  
$ find [folder] -type f -exec gdcmscu -L [log] -D --store --call [Target AET] [HOST] [PORT] {} \; &

#Ex:

$ find /mnt/images/dicom/ -type f -exec gdcmscu -L /tmp/output.log -D --store --call AUTANA localhost 11112 {} \; & 
#!/bin/sh

#----------------------------------------------------------------
# To make this work, copy it to /usr/local/bin:
#
#     $> sudo cp verifica_nr /usr/local/bin
#
# Give it execute permission:
#
#     $> sudo chmod +x /usr/local/bin/verifica_nr
#
# Then add it to crontab (runs every minute):
#
#     $> sudo crontab -e
#     (go to the end and add:)
#     * * * * * /usr/local/bin/verifica_nr
#     (save)
#----------------------------------------------------------------
SERVICE="nrservice"
if ps ax | grep -v grep | grep -v $0 | grep $SERVICE > /dev/null
then
    echo "$SERVICE service running, everything is fine" > /dev/null
else
    sudo service nrservice.sh restart
fi
Over the last few days we've had a couple of issues with Imagick and processing PDFs on our servers. As it turns out, these issues are caused by automatic security updates. Let's look into the issue and its solution.

In Bugsnag, our error reporting service, the following exceptions have been popping up a lot:

not authorized `/path/to/some-pdf.pdf` @ error/constitute.c/ReadImage/412

convert: not authorized `/path/to/some-pdf.pdf` @ error/constitute.c/WriteImage/1028

not authorized `/path/to/some-image.png` @ error/convert.c/ConvertImageCommand/3015

unable to create temporary file `/some/path` Permission denied @ error/pdf.c/ReadPDFImage/465
Upon further investigation it looks like most of our sites and applications dealing with PDFs were actually experiencing issues. The weird thing is, some of these applications are quite old and haven't been updated or even touched for months, whilst others are recent and running the latest versions of packages and OS.

I don't care about your problems, just give me the fix!
A recent ImageMagick security update adds some extra policies regarding PDFs (or more specifically: Ghostscript). We can actually see the diff for this update right here. Luckily, we can edit the policy.xml file ourselves and loosen up security for working with PDFs.

In /etc/ImageMagick-6/policy.xml (or /etc/ImageMagick/policy.xml) find the following line

<policy domain="coder" rights="none" pattern="PDF" />
and change it to allow reading and writing by the PDF coder in ImageMagick:

<policy domain="coder" rights="read|write" pattern="PDF" />
Finally, don't forget to restart your PHP-FPM and optionally queue workers:

sudo service php7.2-fpm restart
If you're experiencing issues with other file types or manipulations, you might need to change some of the other policies as well. The policy.xml file contains some good documentation in the comments. You can read more about the security policy file on ImageMagick's website.
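
If you manage several servers, the one-line edit can also be scripted; a minimal sketch, assuming the Debian/Ubuntu path used above:

sudo sed -i 's#rights="none" pattern="PDF"#rights="read|write" pattern="PDF"#' /etc/ImageMagick-6/policy.xml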
/////run once:

/Library/Internet Plug-Ins/JavaAppletPlugin.plugin/Contents/Resources/javawslauncher.app
#!/usr/bin/env python

'''Converts sequence of images to compact PDF while removing speckles,
bleedthrough, etc.

'''

# for some reason pylint complains about members being undefined :(
# pylint: disable=E1101

from __future__ import print_function

import sys
import os
import re
import subprocess
import shlex

from argparse import ArgumentParser

import numpy as np
from PIL import Image
from scipy.cluster.vq import kmeans, vq

######################################################################

def quantize(image, bits_per_channel=None):

    '''Reduces the number of bits per channel in the given image.'''

    if bits_per_channel is None:
        bits_per_channel = 6

    assert image.dtype == np.uint8

    shift = 8-bits_per_channel
    halfbin = (1 << shift) >> 1

    return ((image.astype(int) >> shift) << shift) + halfbin

######################################################################

def pack_rgb(rgb):

    '''Packs 24-bit RGB triples into a single integer,
works on both arrays and tuples.'''

    orig_shape = None

    if isinstance(rgb, np.ndarray):
        assert rgb.shape[-1] == 3
        orig_shape = rgb.shape[:-1]
    else:
        assert len(rgb) == 3
        rgb = np.array(rgb)

    rgb = rgb.astype(int).reshape((-1, 3))

    packed = (rgb[:, 0] << 16 |
              rgb[:, 1] << 8 |
              rgb[:, 2])

    if orig_shape is None:
        return packed
    else:
        return packed.reshape(orig_shape)

######################################################################

def unpack_rgb(packed):

    '''Unpacks a single integer or array of integers into one or more
24-bit RGB values.

    '''

    orig_shape = None

    if isinstance(packed, np.ndarray):
        assert packed.dtype == int
        orig_shape = packed.shape
        packed = packed.reshape((-1, 1))

    rgb = ((packed >> 16) & 0xff,
           (packed >> 8) & 0xff,
           (packed) & 0xff)

    if orig_shape is None:
        return rgb
    else:
        return np.hstack(rgb).reshape(orig_shape + (3,))

######################################################################

def get_bg_color(image, bits_per_channel=None):

    '''Obtains the background color from an image or array of RGB colors
by grouping similar colors into bins and finding the most frequent
one.

    '''

    assert image.shape[-1] == 3

    quantized = quantize(image, bits_per_channel).astype(int)
    packed = pack_rgb(quantized)

    unique, counts = np.unique(packed, return_counts=True)

    packed_mode = unique[counts.argmax()]

    return unpack_rgb(packed_mode)

######################################################################

def rgb_to_sv(rgb):

    '''Convert an RGB image or array of RGB colors to saturation and
value, returning each one as a separate 32-bit floating point array or
value.

    '''

    if not isinstance(rgb, np.ndarray):
        rgb = np.array(rgb)

    axis = len(rgb.shape)-1
    cmax = rgb.max(axis=axis).astype(np.float32)
    cmin = rgb.min(axis=axis).astype(np.float32)
    delta = cmax - cmin

    saturation = delta.astype(np.float32) / cmax.astype(np.float32)
    saturation = np.where(cmax == 0, 0, saturation)

    value = cmax/255.0

    return saturation, value

######################################################################

def postprocess(output_filename, options):

    '''Runs the postprocessing command on the file provided.'''

    assert options.postprocess_cmd

    base, _ = os.path.splitext(output_filename)
    post_filename = base + options.postprocess_ext

    cmd = options.postprocess_cmd
    cmd = cmd.replace('%i', output_filename)
    cmd = cmd.replace('%o', post_filename)
    cmd = cmd.replace('%e', options.postprocess_ext)

    subprocess_args = shlex.split(cmd)

    if os.path.exists(post_filename):
        os.unlink(post_filename)

    if not options.quiet:
        print('  running "{}"...'.format(cmd), end=' ')
        sys.stdout.flush()

    try:
        result = subprocess.call(subprocess_args)
        before = os.stat(output_filename).st_size
        after = os.stat(post_filename).st_size
    except OSError:
        result = -1

    if result == 0:

        if not options.quiet:
            print('{:.1f}% reduction'.format(
                100*(1.0-float(after)/before)))

        return post_filename

    else:

        sys.stderr.write('warning: postprocessing failed!\n')
        return None

######################################################################

def percent(string):
    '''Convert a string (i.e. 85) to a fraction (i.e. .85).'''
    return float(string)/100.0

######################################################################

def get_argument_parser():

    '''Parse the command-line arguments for this program.'''

    parser = ArgumentParser(
        description='convert scanned, hand-written notes to PDF')

    show_default = ' (default %(default)s)'

    parser.add_argument('filenames', metavar='IMAGE', nargs='+',
                        help='files to convert')

    parser.add_argument('-q', dest='quiet', action='store_true',
                        default=False,
                        help='reduce program output')

    parser.add_argument('-b', dest='basename', metavar='BASENAME',
                        default='page',
                        help='output PNG filename base' + show_default)

    parser.add_argument('-o', dest='pdfname', metavar='PDF',
                        default='output.pdf',
                        help='output PDF filename' + show_default)

    parser.add_argument('-v', dest='value_threshold', metavar='PERCENT',
                        type=percent, default='25',
                        help='background value threshold %%'+show_default)

    parser.add_argument('-s', dest='sat_threshold', metavar='PERCENT',
                        type=percent, default='20',
                        help='background saturation '
                        'threshold %%'+show_default)

    parser.add_argument('-n', dest='num_colors', type=int,
                        default='8',
                        help='number of output colors '+show_default)

    parser.add_argument('-p', dest='sample_fraction',
                        metavar='PERCENT',
                        type=percent, default='5',
                        help='%% of pixels to sample' + show_default)

    parser.add_argument('-w', dest='white_bg', action='store_true',
                        default=False, help='make background white')

    parser.add_argument('-g', dest='global_palette',
                        action='store_true', default=False,
                        help='use one global palette for all pages')

    parser.add_argument('-S', dest='saturate', action='store_false',
                        default=True, help='do not saturate colors')

    parser.add_argument('-K', dest='sort_numerically',
                        action='store_false', default=True,
                        help='keep filenames ordered as specified; '
                        'use if you *really* want IMG_10.png to '
                        'precede IMG_2.png')

    parser.add_argument('-P', dest='postprocess_cmd', default=None,
                        help='set postprocessing command (see -O, -C, -Q)')

    parser.add_argument('-e', dest='postprocess_ext',
                        default='_post.png',
                        help='filename suffix/extension for '
                        'postprocessing command')

    parser.add_argument('-O', dest='postprocess_cmd',
                        action='store_const',
                        const='optipng -silent %i -out %o',
                        help='same as -P "%(const)s"')

    parser.add_argument('-C', dest='postprocess_cmd',
                        action='store_const',
                        const='pngcrush -q %i %o',
                        help='same as -P "%(const)s"')

    parser.add_argument('-Q', dest='postprocess_cmd',
                        action='store_const',
                        const='pngquant --ext %e %i',
                        help='same as -P "%(const)s"')

    parser.add_argument('-c', dest='pdf_cmd', metavar="COMMAND",
                        default='convert %i %o',
                        help='PDF command (default "%(default)s")')

    return parser

######################################################################

def get_filenames(options):

    '''Get the filenames from the command line, optionally sorted by
number, so that IMG_10.png is re-arranged to come after IMG_9.png.
This is a nice feature because some scanner programs (like Image
Capture on Mac OS X) automatically number files without leading zeros,
and this way you can supply files using a wildcard and still have the
pages ordered correctly.

    '''

    if not options.sort_numerically:
        return options.filenames

    filenames = []

    for filename in options.filenames:
        basename = os.path.basename(filename)
        root, _ = os.path.splitext(basename)
        matches = re.findall(r'[0-9]+', root)
        if matches:
            num = int(matches[-1])
        else:
            num = -1
        filenames.append((num, filename))

    return [fn for (_, fn) in sorted(filenames)]

######################################################################

def load(input_filename):

    '''Load an image with Pillow and convert it to numpy array. Also
returns the image DPI in x and y as a tuple.'''

    try:
        pil_img = Image.open(input_filename)
    except IOError:
        sys.stderr.write('warning: error opening {}\n'.format(
            input_filename))
        return None, None

    if pil_img.mode != 'RGB':
        pil_img = pil_img.convert('RGB')

    if 'dpi' in pil_img.info:
        dpi = pil_img.info['dpi']
    else:
        dpi = (300, 300)

    img = np.array(pil_img)

    return img, dpi

######################################################################

def sample_pixels(img, options):

    '''Pick a fixed percentage of pixels in the image, returned in random
order.'''

    pixels = img.reshape((-1, 3))
    num_pixels = pixels.shape[0]
    num_samples = int(num_pixels*options.sample_fraction)

    idx = np.arange(num_pixels)
    np.random.shuffle(idx)

    return pixels[idx[:num_samples]]

######################################################################

def get_fg_mask(bg_color, samples, options):

    '''Determine whether each pixel in a set of samples is foreground by
comparing it to the background color. A pixel is classified as a
foreground pixel if either its value or saturation differs from the
background by a threshold.'''

    s_bg, v_bg = rgb_to_sv(bg_color)
    s_samples, v_samples = rgb_to_sv(samples)

    s_diff = np.abs(s_bg - s_samples)
    v_diff = np.abs(v_bg - v_samples)

    return ((v_diff >= options.value_threshold) |
            (s_diff >= options.sat_threshold))

######################################################################

def get_palette(samples, options, return_mask=False, kmeans_iter=40):

    '''Extract the palette for the set of sampled RGB values. The first
palette entry is always the background color; the rest are determined
from foreground pixels by running K-means clustering. Returns the
palette, as well as a mask corresponding to the foreground pixels.

    '''

    if not options.quiet:
        print('  getting palette...')

    bg_color = get_bg_color(samples, 6)

    fg_mask = get_fg_mask(bg_color, samples, options)

    centers, _ = kmeans(samples[fg_mask].astype(np.float32),
                        options.num_colors-1,
                        iter=kmeans_iter)

    palette = np.vstack((bg_color, centers)).astype(np.uint8)

    if not return_mask:
        return palette
    else:
        return palette, fg_mask

######################################################################

def apply_palette(img, palette, options):

    '''Apply the palette to the given image. The first step is to set all
background pixels to the background color; then, nearest-neighbor
matching is used to map each foreground color to the closest one in
the palette.

    '''

    if not options.quiet:
        print('  applying palette...')

    bg_color = palette[0]

    fg_mask = get_fg_mask(bg_color, img, options)

    orig_shape = img.shape

    pixels = img.reshape((-1, 3))
    fg_mask = fg_mask.flatten()

    num_pixels = pixels.shape[0]

    labels = np.zeros(num_pixels, dtype=np.uint8)

    labels[fg_mask], _ = vq(pixels[fg_mask], palette)

    return labels.reshape(orig_shape[:-1])

######################################################################

def save(output_filename, labels, palette, dpi, options):

    '''Save the label/palette pair out as an indexed PNG image.  This
optionally saturates the palette by mapping the smallest color
component to zero and the largest one to 255, and also optionally sets
the background color to pure white.

    '''

    if not options.quiet:
        print('  saving {}...'.format(output_filename))

    if options.saturate:
        palette = palette.astype(np.float32)
        pmin = palette.min()
        pmax = palette.max()
        palette = 255 * (palette - pmin)/(pmax-pmin)
        palette = palette.astype(np.uint8)

    if options.white_bg:
        palette = palette.copy()
        palette[0] = (255, 255, 255)

    output_img = Image.fromarray(labels, 'P')
    output_img.putpalette(palette.flatten())
    output_img.save(output_filename, dpi=dpi)

######################################################################

def get_global_palette(filenames, options):

    '''Fetch the global palette for a series of input files by merging
their samples together into one large array.

    '''

    input_filenames = []

    all_samples = []

    if not options.quiet:
        print('building global palette...')

    for input_filename in filenames:

        img, _ = load(input_filename)
        if img is None:
            continue

        if not options.quiet:
            print('  processing {}...'.format(input_filename))

        samples = sample_pixels(img, options)
        input_filenames.append(input_filename)
        all_samples.append(samples)

    num_inputs = len(input_filenames)

    all_samples = [s[:int(round(float(s.shape[0])/num_inputs))]
                   for s in all_samples]

    all_samples = np.vstack(tuple(all_samples))

    global_palette = get_palette(all_samples, options)

    if not options.quiet:
        print('  done\n')

    return input_filenames, global_palette

######################################################################

def emit_pdf(outputs, options):

    '''Runs the PDF conversion command to generate the PDF.'''

    cmd = options.pdf_cmd
    cmd = cmd.replace('%o', options.pdfname)
    if len(outputs) > 2:
        cmd_print = cmd.replace('%i', ' '.join(outputs[:2] + ['...']))
    else:
        cmd_print = cmd.replace('%i', ' '.join(outputs))
    cmd = cmd.replace('%i', ' '.join(outputs))

    if not options.quiet:
        print('running PDF command "{}"...'.format(cmd_print))

    try:
        result = subprocess.call(shlex.split(cmd))
    except OSError:
        result = -1

    if result == 0:
        if not options.quiet:
            print('  wrote', options.pdfname)
    else:
        sys.stderr.write('warning: PDF command failed\n')

######################################################################

def notescan_main(options):

    '''Main function for this program when run as script.'''

    filenames = get_filenames(options)

    outputs = []

    do_global = options.global_palette and len(filenames) > 1

    if do_global:
        filenames, palette = get_global_palette(filenames, options)

    do_postprocess = bool(options.postprocess_cmd)

    for input_filename in filenames:

        img, dpi = load(input_filename)
        if img is None:
            continue

        output_filename = '{}{:04d}.png'.format(
            options.basename, len(outputs))

        if not options.quiet:
            print('opened', input_filename)

        if not do_global:
            samples = sample_pixels(img, options)
            palette = get_palette(samples, options)

        labels = apply_palette(img, palette, options)

        save(output_filename, labels, palette, dpi, options)

        if do_postprocess:
            post_filename = postprocess(output_filename, options)
            if post_filename:
                output_filename = post_filename
            else:
                do_postprocess = False

        outputs.append(output_filename)

        if not options.quiet:
            print('  done\n')

    emit_pdf(outputs, options)

######################################################################

def main():
    '''Parse args and call notescan_main().'''
    notescan_main(options=get_argument_parser().parse_args())

if __name__ == '__main__':
    main()
: 'http://www.modbusdriver.com/modpoll.html:

Usage: modpoll [options] serialport|host
    Arguments:
    serialport    Serial port when using Modbus ASCII or Modbus RTU protocol
                  COM1, COM2 ...                on Windows
                  /dev/ttyS0, /dev/ttyS1 ...    on Linux
                  /dev/ser1, /dev/ser2 ...      on QNX
    host          Host name or dotted ip address when using MODBUS/TCP protocol
    General options:
    -m ascii      Modbus ASCII protocol
    -m rtu        Modbus RTU protocol (default)
    -m tcp        MODBUS/TCP protocol
    -m enc        Encapsulated Modbus RTU over TCP
    -a #          Slave address (1-255, 1 is default)
    -r #          Start reference (1-65536, 100 is default)
    -c #          Number of values to poll (1-100, 1 is default)
    -t 0          Discrete output (coil) data type
    -t 1          Discrete input data type
    -t 3          16-bit input register data type
    -t 3:hex      16-bit input register data type with hex display
    -t 3:int      32-bit integer data type in input register table
    -t 3:mod      32-bit module 10000 data type in input register table
    -t 3:float    32-bit float data type in input register table
    -t 4          16-bit output (holding) register data type (default)
    -t 4:hex      16-bit output (holding) register data type with hex display
    -t 4:int      32-bit integer data type in output (holding) register table
    -t 4:mod      32-bit module 10000 type in output (holding) register table
    -t 4:float    32-bit float data type in output (holding) register table
    -i            Slave operates on big-endian 32-bit integers
    -f            Slave operates on big-endian 32-bit floats
    -1            Poll only once, otherwise poll every second
    -e            Use Daniel/Enron single register 32-bit mode
    -0            First reference is 0 (PDU addressing) instead 1
    Options for MODBUS/TCP:
    -p #          TCP port number (502 is default)
    Options for Modbus ASCII and Modbus RTU:
    -b #          Baudrate (e.g. 9600, 19200, ...) (9600 is default)
    -d #          Databits (7 or 8 for ASCII protocol, 8 for RTU)
    -s #          Stopbits (1 or 2, 1 is default)
    -p none       No parity
    -p even       Even parity (default)
    -p odd        Odd parity
    -4 #          RS-485 mode, RTS on while transmitting and another # ms after
    -o #          Time-out in seconds (0.01 - 10.0, 1.0 s is default)
'

# Reading Holding Registers
# address = 4001 (-a 1), count = 10 (-c 10), port = 5502 (-p 5502)

    modpoll -m tcp -a 1 -c 10 -p 5502 192.168.56.1

# Writing Holding Registers
# address = 4001 (-a 1), count = 3 (-c 3), port = 5502 (-p 5502) ... value1 value2 value3

    modpoll -m tcp -a 1 -c 3 -p 5502 192.168.56.1 11 32 56

# To retrieve once 5 floating point values starting from reference 100 with Modbus/TCP from slave device with IP 10.0.0.100:
    
    modpoll -m tcp -t4:float -r 100 -c 5 -1 10.0.0.100
    
    
upstream newserver {
  server 172.16.0.1:80;  # this is new server, by IP address
}

server {
  listen 80;
  server_name subdomain.site.com;
  location / {
    proxy_set_header Host $host;
    proxy_pass http://newserver;
  }
}
#from bash command line
#first create folders to save python dependencies:

    > sudo mkdir /var/www/.local
    > sudo mkdir /var/www/.cache
    > sudo chown www-data.www-data /var/www/.local
    > sudo chown www-data.www-data /var/www/.cache

# then install dependencies (imports):

    > sudo -H -u www-data pip install <dep1>
    > sudo -H -u www-data pip install <dep2>
    :
    
# then set user permissions to run your script to www-data user:
# creating a file at /etc/sudoers.d/:

    > sudo nano /etc/sudoers.d/mysudoerfile
    
    www-data ALL=(ALL) NOPASSWD: /usr/bin/python <path of your script here>

# then set execute permissions to your script:

    sudo chmod +x <path of your script here>

# then run your script 
#!/bin/bash
# create_barcode.sh
# sudo apt-get install barcode imagemagick

CODE=$1 #the code ... first parameter
FNAME=$2  #the output .png filename .... second parameter

# let's create postscript:
barcode -E -b "$CODE" | convert -density 600 ps:- png:- > $FNAME

# use:
#   
#   bash create_barcode.sh 123456789 output.png  #it autodetects the preferred encoding
#
#   this creates "output.png"
#
# Security Error:
#
# if you get security error: convert not authorized (ps/png) do this:
#
# edit /etc/ImageMagick-6/policy.xml
#
# disable this:

  <!--policy domain="coder" rights="none" pattern="PS" />-->

# and append this:

  <policy domain="coder" rights="read/write" pattern="PNG,PS" />
# Create Channel
# Create new Bot and get Bot TOKEN (to replace TOKEN_OF_BOT)
# and edit:
# /etc/ssh/sshrc

ip=`echo $SSH_CONNECTION | cut -d " " -f 1`

logger -t ssh-wrapper $USER login from $ip

curl -s -X POST https://api.telegram.org/botTOKEN_OF_BOT/sendMessage \
     -d text="ssh login: $USER from $ip" -d chat_id=@autanaChannel > /dev/null
$ sudo ip route add prohibit <ip address to block>/32

#Ex: sudo ip route add prohibit 58.15.238.31/32
# three equivalent ways to delete all files matching 1.*:

$> cd <folder>

$> perl -e 'for(<1.*>){((stat)[9]<(unlink))}'

$> find ./ -name "1.*" -exec rm {} \;

$> for i in 1.*; do rm -rf $i; done
$> sudo apt-get install cifs-utils

$> sudo mkdir /mnt/shared

$> sudo mount -t cifs -o username=guest,password=,rw,iocharset=utf8,file_mode=0777,dir_mode=0777,noperm //<windows address>/the_folder /mnt/shared/
$ ifconfig -a | grep "inet\s" | awk -F'[: ]+' '{ print $4 }'

$ ip addr  | grep "inet\s" | awk -F'[: ]+' '{ print $3 }'
sudo vgdisplay # show volume groups / LVM layout
sudo pvcreate /dev/sdX /dev....
sudo vgextend <name-vg> /dev/sdX
sudo lvextend -l +100%FREE /dev/<name-vg>/root
sudo resize2fs /dev/<name-vg>/root
# create mount folder:

mkdir /tmp/my10mbvirtualdisk

# create file system (filename=filesyst in current folder) (10Mb):

dd if=/dev/zero of=./filesyst bs=10485760 count=1
sudo losetup /dev/loop0 ./filesyst
sudo mkfs.ext3 /dev/loop0

sudo mount /dev/loop0 /tmp/my10mbvirtualdisk


# now you can use /tmp/my10mbvirtualdisk as disk



# destroy:

sudo umount /tmp/my10mbvirtualdisk
sudo losetup -d /dev/loop0
sudo rm ./filesyst
# copy files modified within the last <n days> from /src/dir/ to /dst/dir/, preserving relative paths:
find /src/dir/ -mtime -<n days> -printf %P\\0|rsync --files-from=- --from0 /src/dir/ /dst/dir/
##src: https://www.digitalocean.com/community/tutorials/how-to-set-up-master-slave-replication-on-postgresql-on-an-ubuntu-12-04-vps#configure-the-master-server

############## Master:

psql -c "CREATE USER rep REPLICATION LOGIN CONNECTION LIMIT 1 ENCRYPTED PASSWORD 'yourpassword';"

#//at file /etc/postgresql/9.5/main/pg_hba.conf 

	host    replication     rep     IP_address_of_slave/32   md5

#//at file /etc/postgresql/9.5/main/postgresql.conf

	listen_addresses = 'localhost,IP_address_of_THIS_host'
	wal_level = 'hot_standby'
	archive_mode = on
	archive_command = 'cd .'
	max_wal_senders = 1
	hot_standby = on

service postgresql restart


############### Slave:

service postgresql stop

#//at file /etc/postgresql/9.5/main/pg_hba.conf 

	host    replication     rep     IP_address_of_master/32  md5

#//at file /etc/postgresql/9.5/main/postgresql.conf

	listen_addresses = 'localhost,IP_address_of_THIS_host'
	wal_level = 'hot_standby'
	archive_mode = on
	archive_command = 'cd .'
	max_wal_senders = 1
	hot_standby = on


################## Master:

psql -c "select pg_start_backup('initial_backup');"
rsync -cva --inplace --exclude=*pg_xlog* /var/lib/postgresql/9.5/main/ slave_IP_address:/var/lib/postgresql/9.5/main/
psql -c "select pg_stop_backup();"


################### Slave:

# create /var/lib/postgresql/9.5/main/recovery.conf with:

	standby_mode = 'on'
	primary_conninfo = 'host=master_IP_address port=5432 user=rep password=yourpassword'
	trigger_file = '/tmp/postgresql.trigger.5432' ## When we want to promote the SLAVE to master (because the original MASTER failed), creating this file is enough. Once the file exists, the db will act as MASTER.

service postgresql start

## we check if no problem:

less /var/log/postgresql/postgresql-9.5-main.log
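
## optional check from the master (the pg_stat_replication view exists since 9.1):

psql -c "select client_addr, state from pg_stat_replication;"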
#convert VDI to RAW:
$ vboxmanage clonehd --format RAW ubuntu.vdi ubuntu.img

#mount RAW:
$ mount -t ext3 -o loop,rw ./ubuntu.img /mnt
# duration time of file:
#   ej: sox --i -D test.ogg

  sox --i -D <sound file>

# play sound to default output
#    Linux/OSX?: 

  sox <sound file> -d

#    Windows: 

  sox <sound file> -t waveaudio
  
# record sound from default input:
#    Linux/OSX?: 

  sox -t alsa default <output file>
  
#    Windows:
  
  sox -t waveaudio -d <output file>
  
# play sound from starting time (secs) (trim):
#    Linux/OSX?:

  sox <sound file> -d trim <n secs>
  
#    Windows:

  sox <sound file> -t waveaudio trim <n secs>
  
# split big file into small files with equal time fraction:
#    %1n = autoincremental: 1,2,3...

  sox <input file> <output file>_%1n.ogg trim 0 <secs> : newfile : restart
  
# concatenate small files into one:

  sox <input file1> <input file2> ... <input filen> <output file>

# cut silences with tolerance:

  sox in.wav out.wav silence -l 1 0.1 1% -1 2.0 1%
diff -r dir1 dir2 | grep dir1 | awk '{print $4}' > difference1.txt; clear; cat difference1.txt
$ sudo nano /etc/environment

#
# (Append these lines at the end of file:)

http_proxy="http://myproxy.server.com:8080/"
https_proxy="http://myproxy.server.com:8080/"
ftp_proxy="http://myproxy.server.com:8080/"
no_proxy="localhost,127.0.0.1,localaddress,.localdomain.com"
HTTP_PROXY="http://myproxy.server.com:8080/"
HTTPS_PROXY="http://myproxy.server.com:8080/"
FTP_PROXY="http://myproxy.server.com:8080/"
NO_PROXY="localhost,127.0.0.1,localaddress,.localdomain.com"

#
# (save and... )

$ source /etc/environment

# To unset proxies:

# sudo nano /etc/environment
#
# (Remove proxies lines (see above))
#
# (save and them...) 

unset http_proxy
unset https_proxy
unset ftp_proxy
unset no_proxy
unset HTTP_PROXY
unset HTTPS_PROXY
unset FTP_PROXY
unset NO_PROXY

# (that's all)


# ========== using proxies for apt (it does not obey proxy configuration):

# (we create a new file at /etc/apt/apt.conf.d/)
#

$ sudo nano /etc/apt/apt.conf.d/95proxies

# (now append this lines...)

Acquire::http::proxy "http://myproxy.server.com:8080/";
Acquire::ftp::proxy "ftp://myproxy.server.com:8080/";
Acquire::https::proxy "https://myproxy.server.com:8080/";

# (save and run "sudo apt update" for trying...)
#
/etc/php/7.0/fpm/pool.d/www.conf:

pm = dynamic
pm.max_children = 30 (original: 5)
pm.start_servers = 3 (original: 1)
pm.min_spare_servers = 2 (original: 1)
pm.max_spare_servers = 4 (original: 3)
pm.max_requests = 500 (originally commented)
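
# rough sizing heuristic for pm.max_children (an assumption to check against your own monitoring):
#   pm.max_children ≈ (RAM reserved for PHP-FPM) / (average per-worker memory)
#   e.g. 1024 MB / 35 MB ≈ 29 -> pm.max_children = 30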
pgrep -af <name of running process>

#who is running last created binary in /usr/bin (attack): (sudo apt-get install inotify-tools)
inotifywait -e create /usr/bin | awk '{print $3}' | xargs pgrep -af

#which process is calling this ID?
ls -l /proc/<ID>/exe
#just add this line at the end of /etc/ssh/sshd_config

AllowUsers <thelogin> 

#Using a single GhostScript command in my Ubuntu terminal, I was able to reduce a PDF file from 6 MB to approximately 1 MB:

$ gs -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dCompatibilityLevel=1.4 -dPDFSETTINGS=/ebook -sOutputFile=output.pdf input.pdf

# You can use the following parameters for -dPDFSETTINGS instead of /ebook:

# /screen – Lowest quality, lowest size (ugly)
# /ebook – Moderate quality
# /printer – Good quality
# /prepress – Best quality, highest size
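
# batch sketch (assumption: compress every PDF in the current folder into ./out/):

mkdir -p out
for f in *.pdf; do
  gs -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dCompatibilityLevel=1.4 \
     -dPDFSETTINGS=/ebook -sOutputFile="out/$f" "$f"
done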
#This will report the percentage of memory in use

% free | grep Mem | awk '{print $3/$2 * 100.0}'

#Ex:23.8171

#This will report the percentage of memory that's free

% free | grep Mem | awk '{print $4/$2 * 100.0}'

#Ex:76.5013

#You could create an alias for this command or put this into a tiny shell script. The specific output could be tailored to your needs using formatting commands for the print statement along these lines:

% free | grep Mem | awk '{ printf("free: %.4f %%\n", $4/$2 * 100.0) }'
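
# the tiny script suggested above could look like this (a sketch; save it and chmod +x):

#!/bin/bash
free | awk '/Mem/ { printf("used: %.1f%%  free: %.1f%%\n", $3/$2*100, $4/$2*100) }'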
* Open CMD, then type regedit + Enter key.
* Look this route: 

HKEY_CURRENT_USER
 \Control Panel
  \Desktop
  
* And set value:
    
ForegroundLockTimeout DWORD 0x00000000 (0)
#
# first you must establish iptables rules to keep port 22 closed
# and pick the ports to use as the combination. I used 3030, 55050 and 7070
# (it is very important to use unsorted ports)
#
#  #-- rule to allow already-established connections (so existing sessions survive the port 22 DROP):
#

sudo iptables -A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT

#
#  #-- rules to keep ssh port (22) closed:
#

sudo iptables -A INPUT -p tcp -m tcp --dport 22 -j DROP

#
#  #-- then we save iptables
#

sudo iptables-save

#
#  #-- if you want to know how to make these rules "persistent", search for info about
#      the iptables-persistent package or look at this url 
#
#      http://askubuntu.com/questions/119393/how-to-save-rules-of-the-iptables
#
#      it helped me.
#

# debian and derived distros... install knockd:

sudo apt-get install knockd

# we edit /etc/default/knockd: (knockd config file)

sudo nano /etc/default/knockd

# and set:

    START_KNOCKD=0
    
# to

    START_KNOCKD=1
    
# let's create our ports sequence: let's say 3030,55050,7070 = open, and 7070,55050,3030 = close.
# for this we edit /etc/knockd.conf:

sudo nano /etc/knockd.conf
    
[options]
  UseSyslog

[openSSH]
  sequence    = 3030,55050,7070
  seq_timeout = 1
# add our input access to iptables  
  command     = /sbin/iptables -I INPUT -s %IP% -p tcp --dport 22 -j ACCEPT
  tcpflags    = syn

[closeSSH]
  sequence    = 7070,55050,3030
  seq_timeout = 1
# delete our input access to iptables
  command     = /sbin/iptables -D INPUT -s %IP% -p tcp --dport 22 -j ACCEPT
  tcpflags    = syn
  
# we start service:

sudo /etc/init.d/knockd start

# That's all, we're done.
# .. and now... How can I open my host's ssh port (22) from a remote location?
# ... just like this (using telnet):

# OPEN:
telnet 192.168.1.33 3030; telnet 192.168.1.33 55050; telnet 192.168.1.33 7070

# you'll see this output in syslog (example with 192.168.1.33):

#  knockd: 192.168.1.33: openSSH: Stage 1
#  knockd: 192.168.1.33: openSSH: Stage 2
#  knockd: 192.168.1.33: openSSH: Stage 3
#  knockd: 192.168.1.33: openSSH: OPEN SESAME
#  knockd: openSSH: running command: /sbin/iptables -I INPUT -s 192.168.1.33...



# and then we CLOSE it:
telnet 192.168.1.33 7070; telnet 192.168.1.33 55050; telnet 192.168.1.33 3030

# you'll see this output in syslog (example with 192.168.1.33):

#  knockd: 192.168.1.33: closeSSH: Stage 1
#  knockd: 192.168.1.33: closeSSH: Stage 2
#  knockd: 192.168.1.33: closeSSH: Stage 3
#  knockd: 192.168.1.33: closeSSH: OPEN SESAME
#  knockd: closeSSH: running command: /sbin/iptables -D INPUT -s 192.168.1.33...
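
# alternatively, the knockd package ships a "knock" client that sends the whole
# sequence in one command (a sketch, same example host/ports):

# OPEN:
knock 192.168.1.33 3030 55050 7070

# CLOSE:
knock 192.168.1.33 7070 55050 3030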

#listing all but the newest [n] files (in tail we must use [n+1]):

$ find <folder> -maxdepth 1 -type f -printf "%T@ %Tc %p\n" | grep -v '/\.' | sort -r | tail -n +60 | grep -Po "\./.*"

$ for f in `find -maxdepth 1 -type f -print0 | xargs -r0 stat -c %y\ %n | grep -v '\.\/\.' | sort -r | grep -Po '\./.*' | tail -n +61`; do
    printf "$f\n"
  done
#install unison

$ sudo apt install unison

# synchronize local folder "/home/user/sync/" with folder "/home/user/sync/" on remote host "ssh://user@remotehost.com/" (ssh port 22000)


$ unison -silent -auto -batch /home/user/sync/ ssh://user@remotehost.com//home/user/sync/ \
  -nodeletion ssh://user@remotehost.com//home/user/sync/ \
  -sshargs '-p22000' -logfile /tmp/mylog.txt
# NGINX: add <folder> in /etc/nginx/sites-available/default: 

server {
    :
    location /<folder>/ {
        proxy_pass http://<host>:<port>/;
        proxy_set_header X-Original-Host $http_host;
        proxy_set_header X-Original-Scheme $scheme;
        proxy_set_header X-Forwarded-For $remote_addr;
    }
    :
}
    
# APACHE2: add <folder> in /etc/apache2/sites-available/00-default.conf

<VirtualHost *:80>
    :
        ProxyPass /<folder> http://<host>:<port>/
        ProxyPassReverse /<folder> http://<host>:<port>/

        ProxyRequests Off
        ProxyPreserveHost On

        <proxy>
            Order deny,allow
            Allow from all
        </proxy>
    :
</VirtualHost>


#example: "http://192.168.11.45/demo" -> "http://192.168.11.45:8080/"

server {
    :
    location /demo/ {
        proxy_pass http://localhost:8080/;
        proxy_set_header X-Original-Host $http_host;
        proxy_set_header X-Original-Scheme $scheme;
        proxy_set_header X-Forwarded-For $remote_addr;
    }
    :
}

<VirtualHost *:80>
    :
        ProxyPass /demo http://localhost:8080/
        ProxyPassReverse /demo http://localhost:8080/

        ProxyRequests Off
        ProxyPreserveHost On

        <proxy>
            Order deny,allow
            Allow from all
        </proxy>
    :
</VirtualHost>
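
# note: for the Apache example to work, the proxy modules must be enabled first
# (a sketch for Debian/Ubuntu):

sudo a2enmod proxy proxy_http
sudo systemctl reload apache2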

/* other configuration for nginx:
    
server {
    listen        80;
    server_name   example.com *.example.com;
    location / {
        proxy_pass         http://127.0.0.1:5000;
        proxy_http_version 1.1;
        proxy_set_header   Upgrade $http_upgrade;
        proxy_set_header   Connection keep-alive;
        proxy_set_header   Host $host;
        proxy_cache_bypass $http_upgrade;
        proxy_set_header   X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header   X-Forwarded-Proto $scheme;
    }
}
*/
#changes in /etc/nginx/sites-available/default

server {
  server_name example.com;
  root /path/to/root;
  location / {
    # blah blah
  }
  location /demo {
    alias /path/to/root/production/folder/here;
  }
}
# Edit your /etc/postgresql/9.3/main/postgresql.conf, and change the lines as follows:

# Note: If you can't find the postgresql.conf file, then just type 

$> locate postgresql.conf 

# in a terminal

1) change #log_directory = 'pg_log' to log_directory = 'pg_log'
2) change #log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' to log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log'
3) change #log_statement = 'none' to log_statement = 'all'
4) change #logging_collector = off to logging_collector = on

# Optional: SELECT set_config('log_statement', 'all', true);

sudo /etc/init.d/postgresql restart or sudo service postgresql restart

#Fire query in postgresql: select 2+2

# Find current log in /var/lib/pgsql/9.2/data/pg_log/

#The log files tend to grow a lot over time and might fill your disk. For safety, write a bash script that deletes old logs and restarts the postgresql server.
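
#A minimal sketch of such a cleanup (assumes the pg_log path above; adjust path and retention to your setup):

find /var/lib/pgsql/9.2/data/pg_log/ -name "*.log" -mtime +7 -delete
sudo service postgresql restart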
##sudo nano /etc/udev/rules.d/95-monitor-hotplug.rules

SUBSYSTEM=="drm", RUN+="/usr/local/bin/fix_tv_state.sh"

##---------------------



##sudo nano /usr/local/bin/fix_tv_state.sh

#!/bin/sh
#Fix TV state when HDMI link is lost.

export XAUTHORITY=/home/marco/.Xauthority

OUTPUT="HDMI1"
BAD_MODE="1280x720"
GOOD_MODE="1920x1080"

for MODE in $BAD_MODE $GOOD_MODE; do
 sleep 2
 DISPLAY=:0 xrandr --output $OUTPUT --mode $MODE
 sleep 2
done

##--------------------

sudo chmod +x /usr/local/bin/fix_tv_state.sh
sudo udevadm control --reload-rules
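
# to verify the rule fires, watch drm events while re-plugging the cable (a sketch):
# udevadm monitor --subsystem-match=drm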

# warning: this is not a script, it's a set of instructions.
#these steps create a pptp vpn server so that all clients can reach all other clients.

##################### SERVER SIDE (UBUNTU SERVER 16.04+) ######################

sudo apt-get install pptpd
sudo update-rc.d pptpd defaults

# I had to use this on 16.04... it fixes autostart problem:
sudo systemctl enable pptpd 

#edit file "/etc/pptpd.conf": example using nano: $> sudo nano /etc/pptpd.conf
#add the following lines:
    
    localip 10.20.0.1
    remoteip 10.20.1.100-200 #100 clients
#save it
        
#edit file "/etc/ppp/chap-secrets": example using nano: $> sudo nano /etc/ppp/chap-secrets
#add all clients with fixed ip addresses (change user1, user2... and password1, password2,.. according to your preference):

    user1 pptpd password1 10.20.1.100 
    user2 pptpd password2 10.20.1.101
    user3 pptpd password3 10.20.1.200
    :
#save it

#edit/add this line at "/etc/sysctl.conf":
    net.ipv4.ip_forward = 1
#save change:
sudo sysctl -p

#Configure iptables for forwarding (lets clients see each other):

iptables --table nat --append POSTROUTING --out-interface ppp0 -j MASQUERADE
iptables -I INPUT -s 10.20.0.0/16 -i ppp0 -j ACCEPT
iptables --append FORWARD --in-interface enp0s8 -j ACCEPT
iptables-save

#restart your service:

sudo service pptpd restart


##################### CLIENT SIDE FOR UBUNTU SERVER ######################

## Start client side (Ubuntu Server (w/o GUI)):
##
## ============================================================
## 1) Configure pptp: (Change your <vpn server address>)
##   (in this example we named the provider as "pptpserver")
## ============================================================

sudo apt-get install pptp-linux

sudo nano /etc/ppp/peers/pptpserver

# add the following lines:

pty "pptp <vpn server address> --nolaunchpppd"
lock
noauth
nobsdcomp
nodeflate
name user1
password password1
remotename pptpserver
persist
maxfail 0
holdoff 5
require-mppe-128

# and save (ctrl-o ctrl-x)

# ==================================================================
# 2) Create config file for adding route automatically when startup:
#    this is necessary so that internet traffic is not routed through the vpn
#    (use same name of provider, in my case "pptpserver")
# ==================================================================

sudo nano /etc/ppp/ip-up.d/pptpserver

# add the following lines:

#!/bin/bash
# This script is called with the following arguments:
# Arg Name
# $1 Interface name
# $2 The tty
# $3 The link speed
# $4 Local IP number
# $5 Peer IP number
# $6 Optional ''ipparam'' value foo
/sbin/route add -net 10.20.0.0 netmask 255.255.0.0 dev ppp0


# and save (ctrl-o ctrl-x)
#... then set execute permission:

sudo chmod +x /etc/ppp/ip-up.d/pptpserver

# ============================================================
#   STARTUP CONNECTION
# ============================================================

# ------------------------------------
# 1) Manual startup:
# ------------------------------------

sudo pon pptpserver

# ------------------------------------
# 2) Auto startup on boot:
# ------------------------------------

#
# a) USING INTERFACES: Edit interfaces file:
#

sudo nano /etc/network/interfaces

# add the following lines to the end:

auto tunnel
iface tunnel inet ppp
  provider pptpserver

# and save (ctrl-o ctrl-x)
# then restart networking:

sudo /etc/init.d/networking restart

#
# b) USING SERVICE SYSTEMCTL
#

sudo nano /etc/systemd/system/pppoe.service

# add these lines:

[Unit]
Description=PPPoE connection
 
[Service]
Type=oneshot
RemainAfterExit=true
ExecStart=/usr/bin/pon pptpserver
ExecStop=/usr/bin/poff -a
 
[Install]
WantedBy=default.target

# and save
# then change permissions:

sudo chmod +x /etc/systemd/system/pppoe.service

# then reload daemons:

systemctl daemon-reload

# and it will connect on boot.

#start:
sudo systemctl start pppoe

#stop:
sudo systemctl stop pppoe
# let's create a backup from remote postgresql database using pg_dump:
#
#   pg_dump -h [host address] -Fc -o -U [database user] <database name> > [dump file]
#
# later it could be restored at the same remote server using:
#
#   sudo -u postgres pg_restore -C -d postgres mydb_backup.dump
#
#Ex:

pg_dump -h 67.8.78.10 -p 5432 -Fc -o -U myuser mydb > mydb_backup.dump

pg_restore -C -d postgres mydb_backup.dump



#complete (all databases and objects)

pg_dumpall -U myuser -h 67.8.78.10 -p 5432 --clean --file=mydb_backup.dump


#restore from pg_dumpall --clean:

psql -f mydb_backup.dump postgres #it doesn't matter which db you select here
#this command shows a list of supported encodings:
#pdftotext -listenc 

#this command converts pdf to html:
#pdftohtml -c -s -enc <encoding> <pdf to convert> <output html file>

#Ex:

pdftohtml -c -s -enc Latin1 test.pdf test.html
convert -density 144 myfile.pdf[0] -resize 10% -background white -alpha remove -strip -quality 90 mypreview.jpg
#!/bin/bash
# Delete all containers

$ docker rm $(docker ps -a -q)

# Delete all images

$ docker rmi $(docker images -q)
sudo su
cat /dev/null > /etc/apt/apt.conf
echo 'Acquire::http::Proxy "false";' > /etc/apt/apt.conf.d/proxy
apt-get update 
#for an image (not a running container), use save:
docker save <imagenameortag> | gzip > mycontainer.tgz

#for a running or paused container, use export:
docker export <containername> | gzip > mycontainer.tgz

#load an archive created with save:
gunzip -c mycontainer.tgz | docker load


#load 2
docker load -i mycontainer.tgz
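
#note: archives created with "export" hold a filesystem snapshot, not an image,
#so they are restored with "import" instead of "load" (a sketch, with a hypothetical image name):
gunzip -c mycontainer.tgz | docker import - myimage:latest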
find ./ -name "<filemask>*" -exec dcmodify \
  -m "(0010,0010)=MOLINA^HERNAN" \
  -m "(0010,0020)=3207639" \
  -m "(0010,0030)=19411128" \
  -m "(0010,0040)=M" \
  -m "(0008,0050)=" \
  -m "(0040,0275)[0].(0032,1060)=RMN HOMBRO IZQUIERDO" \
  -m "(0040,0275)[0].(0040,0007)=RMN HOMBRE IZQUIERDO" {} \;
#iptables -A OUTPUT -d <ipaddress> -j DROP

iptables -A OUTPUT -d 119.140.145.206 -j DROP
iptables-save
#> sudo apt-get install nethogs
#> sudo nethogs <network interface>

#example:

$> sudo nethogs eth0
#iptables -A INPUT -s <ipaddress> -j DROP

iptables -A INPUT -s 65.55.44.100 -j DROP
iptables-save

#un-block

iptables -D INPUT -s xx.xxx.xx.xx -j DROP
iptables -D INPUT -s xx.xxx.xx.xx/yy -j DROP
iptables-save
caffeinate -u -t 2
osascript -e 'tell application "System Events" to keystroke "mypassword"'
osascript -e 'tell application "System Events" to keystroke return'
#split the file into pieces:

  $> split --bytes=10M /path/to/bigfile.ext /path/to/image/prefixForPieces

#then put'em together again when necessary

  $> cat prefixForPieces* > bigfile.ext
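
#optionally verify integrity after reassembly (a sketch):

  $> sha256sum /path/to/bigfile.ext bigfile.ext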
#!/bin/bash

sudo apt-get install postgresql conquest-common conquest-postgres

sudo su postgres -c "createdb dicomserver"
sudo su postgres -c "createuser dicomserver"
sudo su postgres -c "psql -c \"ALTER USER dicomserver WITH ENCRYPTED PASSWORD 'dicomserver'\""
sudo su postgres -c "psql -c \"GRANT ALL PRIVILEGES ON DATABASE dicomserver TO dicomserver\""

sudo sed -i 's/CONQUESTSRV1/DICOMSERVER/g' /etc/conquest-dicom-server/dicom.ini
sudo sed -i 's/CONQUESTSRV1/DICOMSERVER/g' /etc/conquest-dicom-server/acrnema.map

sudo sed -i 's/SQLServer\s*\=\ conquest/SQLServer\ =\ dicomserver/g' /etc/conquest-dicom-server/dicom.ini
sudo sed -i 's/Username\s*\=\ postgres/Username\ =\ dicomserver/g' /etc/conquest-dicom-server/dicom.ini
sudo sed -i 's/Password\s*\=\ postgres/Password\ =\ dicomserver/g' /etc/conquest-dicom-server/dicom.ini

sudo sed -i 's/DGATE_ENABLE\=false/DGATE_ENABLE\=true/g' /etc/default/dgate

sudo service dgate stop
sudo service postgresql restart
sudo dgate -v -r
sudo service dgate start

#when installed: AET=DICOMSERVER, PORT=11112
$ rsync -avz -e "ssh -p <ssh port number>" <user>@<remote addr>:<remote path/folder> <local path/folder>
$ sudo apt-get install tcpflow
$ sudo tcpflow -p -c -i <netinterface> port <portnum>

# Example: tcpflow -p -c -i eth0 port 80
$ find <folderpath> -name <filemask> -exec <command> <extra parameters> {} \;
#Using dcm4che:

#capture:
$ ffmpeg -an -f video4linux2 -s 640x480  -r 30 -i /dev/video0 -vcodec mpeg4 -vtag DIVX my_test.avi

# convert:
$ jpg2dcm -c mpg2dcm.cfg -ts 1.2.840.10008.1.2.4.100 <mpegfile> <dcmfile>

//---------------------------------------------------------------------

#Send to pacs: dcmtk:
$ dcmsend -d -aec AETITLE <ip address> <dicom port> <dcmfile>

//---------------------------------------------------------------------

#Video props:

$ mplayer video.wmv -identify -vo null -ao null -frames 0 2>/dev/null | egrep "(^ID|VIDEO|AUDIO)"

//---------------------------------------------------------------------

# Use/compare mpg2dcm.config: (at DCM4CHE/BIN/JPG2DCM)

//---------------------------------------------------------------------

# jpg2dcm Sample Configuration for encapsulating MPEG2 MP@ML streams into
# DICOM Video Photographic Image objects
# (s. DICOM Part 3, A.32.7 Video Photographic Image IOD)
# Usage: jpg2dcm -c mpg2dcm.cfg -ts 1.2.840.10008.1.2.4.100 <mpegfile> <dcmfile>

# Patient Module Attributes
# Patient's Name
00100010:
# Patient ID
00100020:
# Issuer of Patient ID
#00100021:
# Patient's Birth Date
00100030:
# Patient's Sex
00100040:

# General Study Module Attributes
# Study Instance UID
#0020000D:
# Study Date
00080020:
# Study Time
00080030:
# Referring Physician's Name
00080090:
# Study ID
00200010:
# Accession Number
00080050:
# Study Description
#00081030:

# General Series Module Attributes
# Modality
00080060:XC
# Series Instance UID
#0020,000E:
# Series Number
00200011:1

# General Equipment Module Attributes
# Manufacturer
00080070:

# General Image Module Attributes
# Instance Number
00200013:1

# Cine Module Attributes
# Frame Time [525-line NTSC]
#00181063:33.33
# Frame Time [625-line PAL]
00181063:40.0
# Multiplexed Audio Channels Description Code Sequence
003A0300

# Multi-frame Module Attributes
#Number of Frames (use dummy value, if unknown)
00280008:1500
# Frame Increment Pointer
00280009:00181063

# Image Pixel Module Attributes (MUST be specified for encapsulating MPEG2 streams)
# (s. DICOM Part 5, 8.2.5 MPEG2 MP@ML IMAGE COMPRESSION for details)
# Samples per Pixel
00280002:3
# Photometric Interpretation
00280004:YBR_PARTIAL_420
# Planar Configuration
00280006:0
# Rows
00280010:480
# Columns
00280011:640
# Bits Allocated
00280100:8
# Bits Stored
00280101:8
# High Bit
00280102:7
# Pixel Representation
00280103:0

# Acquisition Context Module Attributes
# Acquisition Context Sequence
00400555

# VL Image Module Attributes
# Image Type
00080008:ORIGINAL\\PRIMARY
# Lossy Image Compression
00282110:01

# SOP Common Module Attributes
# SOP Class UID
00080016:1.2.840.10008.5.1.4.1.1.77.1.4.1
# SOP Instance UID
#00080018

#----------------------------------------------------------------------------
#convert video to frames:

$ ffmpeg -i test.mp4 -r 24 -f image2 test_files/%05d.png

#----------------------------------------------------------------------------
*> sudo visudo

#find the line starting with 'root ALL=(ALL...' and append this line below it:

www-data ALL=NOPASSWD:/usr/local/bin/myscript.sh

#Save

*> sudo cp myscript.sh /usr/local/bin/
*> sudo chmod 777 /usr/local/bin/myscript.sh

#at php script:

<?php

$cmd = shell_exec("sudo /usr/local/bin/myscript.sh params");
echo $cmd;

?>
#Use udisks utility
#sudo apt-get install udisks

$> udisks --show-info /dev/sr0 | grep -c "blank: *1"

#this returns 0 if no blank disc is present, or 1 if a blank disc is present
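
#usage in a script (a sketch):

if [ "$(udisks --show-info /dev/sr0 | grep -c 'blank: *1')" -eq 1 ]; then
  echo "blank disc present"
fi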
#!/bin/bash
#
#/etc/init.d/oracledb
#
#Run-level Startup script for the Oracle Listener and Instances
#It relies on the information on /etc/oratab

export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=/u01/app/oracle/product/11.2.0/dbname_1
export ORACLE_OWNR=oracle
export PATH=$PATH:$ORACLE_HOME/bin

if [ ! -f $ORACLE_HOME/bin/dbstart -o ! -d $ORACLE_HOME ]
then
  echo "Oracle startup: cannot start"
  exit 1
fi

case "$1" in
  start)
    #Oracle listener and instance startup
    echo -n "Starting Oracle: "
    su $ORACLE_OWNR -c "$ORACLE_HOME/bin/lsnrctl start"
    su $ORACLE_OWNR -c "$ORACLE_HOME/bin/dbstart $ORACLE_HOME"
    touch /var/lock/oracle
    echo "OK"
    ;;
  stop)
    #Oracle listener and instance shutdown
    echo -n "Shutdown Oracle: "
    su $ORACLE_OWNR -c "$ORACLE_HOME/bin/lsnrctl stop"
    su $ORACLE_OWNR -c "$ORACLE_HOME/bin/dbshut $ORACLE_HOME"
    rm -f /var/lock/oracle
    echo "OK"
    ;;
  reload|restart)
    $0 stop
    $0 start
    ;;
  *)
    echo "Usage: `basename $0` start|stop|restart|reload"
    exit 1
esac

exit 0
##################################################
#!/bin/sh

#----------------------------------------------------------------
# Put this file at /usr/local/bin:
#
#     $> sudo cp verify_nr /usr/local/bin
#
# Set executing permissions:
#
#     $> sudo chmod +x /usr/local/bin/verify_nr
#
# Then create crontab (cada minuto):
#
#     $> sudo crontab -e
#     #(Go to end and append:)
#     * * * * * /usr/local/bin/verify_nr
#     #(Save)
#----------------------------------------------------------------
SERVICE="nrservice"
if ps ax | grep -v grep | grep -v $0 | grep $SERVICE > /dev/null
then
    echo "$SERVICE service running, everything is fine" > /dev/null
else
    sudo service nrservice.sh restart
fi
$ sudo apt install dcmtk

#Service:

$ storescp -v +xa -pm +uf -fe .dcm -sp --fork -aet MARCO -od ./test_storescp 4006

#Store:

$ storescu -xs localhost 4006 dicom_file.dcm
#disable ping to your station:

echo 1 | sudo tee /proc/sys/net/ipv4/icmp_echo_ignore_all

#enable ping back:

echo 0 | sudo tee /proc/sys/net/ipv4/icmp_echo_ignore_all
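
#to make the setting survive reboots (a sketch):

echo 'net.ipv4.icmp_echo_ignore_all = 1' | sudo tee -a /etc/sysctl.conf
sudo sysctl -p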
$ comm -13 \
  <(gzip -dc /var/log/installer/initial-status.gz | sed -n 's/^Package: //p' | sort) \
  <(comm -23 \
    <(dpkg-query -W -f='${Package}\n' | sed 1d | sort) \
    <(apt-mark showauto | sort) \
  )
$> sudo apt-get install freetds-bin

#At Lazarus:
#Put TZConnection component (ZConnection1) and set LibraryLocation as shown:

#  ZConnection1.LibraryLocation := 'libsybdb.so.5';

#  and we're done!
$> wget -qO- ipecho.net/plain
$> dig +short myip.opendns.com @resolver1.opendns.com
$> wget -qO- shtuff.it/myip/short
$> wget -qO- whatismyip.akamai.com
$> sudo crontab -e

#then add a line like this:

* * * * * find /path/to/files/ -type f -mtime +<n> -exec rm -rf {} \;

#Ex:
#Delete "*.txt" files older than 1 day from /tmp folder every day at 2:00am:

0 2 * * * find /tmp/* -type f -mtime +1 -exec rm {} \;       #files
0 2 * * * find /tmp/* -type d -mtime +1 -exec rm -rf {} \;   #folders
#Merge file1.pdf and file2.pdf into merged.pdf:

$> gs -dBATCH -dNOPAUSE -q -sDEVICE=pdfwrite -sOutputFile=merged.pdf file1.pdf file2.pdf
$> find ./ -name "<filename/wild cards>" | xargs grep -i "<text to find>"

#Ex:

$> find ./ -name "*.txt" | xargs grep -i "Examples"

#Find all text files (*.txt) containing text 'Examples' from current path (./) and inner.
$> ssh <remote user>@<remote server ip> [-p <remote ssh port>] -L <local port>:<internal target ip>:<internal target port> -fN

#Ex:
#Forward local port 9999 through myremoteserver.com to machine 192.168.0.1, port 80, on the remote network.

$> ssh operator@myremoteserver.com -L 9999:192.168.0.1:80 -fN

#So you can access: "http://localhost:9999/"
#This url will respond as it was "http://192.168.0.1:80/"
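
#Quick test of the tunnel (a sketch):

$> curl -I http://localhost:9999/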
sudo ip route add <ip range> via <gateway ip address> dev <interface>
sudo ip addr flush dev <interface>
sudo /etc/init.d/networking restart

#Ex:

sudo ip route add 192.168.32.0/24 via 192.168.32.1 dev eth0
sudo ip addr flush dev eth0
sudo /etc/init.d/networking restart
#Setup the rate control and delay
sudo tc qdisc add dev lo root handle 1: htb default 12 
sudo tc class add dev lo parent 1:1 classid 1:12 htb rate 33kbps ceil 56kbps 
sudo tc qdisc add dev lo parent 1:12 netem delay 400ms
 
#Remove the rate control/delay
sudo tc qdisc del dev lo root
 
#To see what is configured on an interface, do this
sudo tc -s qdisc ls dev lo
 
#Replace lo with eth0/wlan0 to limit speed on the LAN interface
wkhtmltopdf <url1> <url2> ... <urln> <output-pdf-path-filename>
#install debian based:

sudo apt-get install nbtscan

#windows and others: download at http://www.unixwiz.net/tools/nbtscan.html

nbtscan 192.168.0.1-254 # IP range
nbtscan 192.168.0.0/24  # whole C-class network
nbtscan 192.168.1.0/24  # whole C-class network
nbtscan 172.16.0.0/16   # whole B-class network
nbtscan 10.0.0.0/8      # whole A-class network
$> sudo su

$> sync ; echo 1 > /proc/sys/vm/drop_caches
$> sync ; echo 2 > /proc/sys/vm/drop_caches
$> sync ; echo 3 > /proc/sys/vm/drop_caches
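
# (1 drops the page cache; 2 drops dentries and inodes; 3 drops both.
#  the preceding sync flushes dirty pages first so no data is lost)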
#install tools qemu-kvm (debian based distros)
$ sudo apt-get install qemu-kvm

#load module
$ sudo modprobe nbd

#create loopback dev for the image
$ sudo qemu-nbd -c /dev/nbd0 <path to virtual disk>.vdi

#mount the partitions, that are exposed as /dev/nbd0pXXX
$ sudo mount  -o noatime,noexec /dev/nbd0p1 /tmp/vdi/

#in the end unmount && shut down the nbd
$ sudo umount /tmp/vdi/
$ sudo qemu-nbd -d /dev/nbd0
netsh routing ip nat add portmapping "<lan name>" tcp <caller ip> <listening port> <target ip> <target port>
#!/bin/sh

# get conda paths
export ACTIVATE_PATH=$CONDA_PREFIX/etc/conda/activate.d
export DEACTIVATE_PATH=$CONDA_PREFIX/etc/conda/deactivate.d
export ACTIVATE_SCRIPT=$ACTIVATE_PATH/env_vars.sh
export DEACTIVATE_SCRIPT=$DEACTIVATE_PATH/env_vars.sh

#delete existing activation and deactivation scripts
test -e $ACTIVATE_SCRIPT && rm $ACTIVATE_SCRIPT
test -e $DEACTIVATE_SCRIPT && rm $DEACTIVATE_SCRIPT

#create new activation script
mkdir -p $ACTIVATE_PATH
touch $ACTIVATE_SCRIPT
echo "#!/bin/sh" >> $ACTIVATE_SCRIPT
echo "export BELVO_SECRET_ID=\"$(op read "op://Personal/Belvo/add more/Secret ID")\"" >> $ACTIVATE_SCRIPT
echo "export BELVO_SECRET_PASSWORD=\"$(op read "op://Personal/Belvo/add more/Secret password")\"" >> $ACTIVATE_SCRIPT
echo "export CODA_API_KEY=\"$(op read "op://Personal/Coda/add more/automation")\"" >> $ACTIVATE_SCRIPT
echo "export GOOGLE_APPLICATION_CREDENTIALS=\"/Users/jmbenedetto/code/secrets/gcp_automation_service_account_key.json\"" >> $ACTIVATE_SCRIPT

#create deactivate script
mkdir -p $DEACTIVATE_PATH
touch $DEACTIVATE_SCRIPT
echo "#!/bin/sh" >> $DEACTIVATE_SCRIPT
echo "unset BELVO_SECRET_ID" >> $DEACTIVATE_SCRIPT
echo "unset BELVO_SECRET_PASSWORD" >> $DEACTIVATE_SCRIPT
echo "unset CODA_API_KEY" >> $DEACTIVATE_SCRIPT
echo "unset GOOGLE_APPLICATION_CREDENTIALS" >> $DEACTIVATE_SCRIPT
test -e ./file_path/file_name && echo 1 || echo 2
SINCE=`date --date '-2 weeks +2 days' +%F 2>/dev/null || date -v '-2w' -v '+2d' +%F`
bucket=<bucketname>
aws s3api list-objects-v2 --bucket "$bucket" \
    --query 'Contents[?LastModified > `'"$SINCE"'`]'
[root@mysql-in-servicecloud-consolidated-slave-1 ~]# cat /usr/local/scripts/check_slave_status.py
import commands
import os
import time

for x in range(0, 4):
        status = commands.getoutput("mysql --login-path=statuser -sN -e \"show slave status\"")
#        SLACK_URL="https://hooks.slack.com/services/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"
        SLACK_URL="https://hooks.slack.com/services/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"       

        for row in status.split("\n"):
                SERVER_NAME = "mysql-in-servicecloud-consolidated-slave-01"
                SLACK_MESSAGE = "<!channel> Problem in \`[Azure] "+SERVER_NAME+" \`: "
                Slave_IO_Running = row.split("\t")[10]
                Slave_SQL_Running = row.split("\t")[11]
                Seconds_Behind_Master = row.split("\t")[32]
                if Slave_IO_Running.find("No")!=-1 or Slave_SQL_Running.find("No")!=-1 or int(Seconds_Behind_Master)>5:
                        SLACK_MESSAGE = SLACK_MESSAGE + "\`Slave_SQL_Running: "+Slave_SQL_Running+"\`; \`Slave_IO_Running: "+Slave_IO_Running+"\`; \`Seconds_Behind_Master: "+Seconds_Behind_Master+"\`"
                        os.system("curl -X POST --data \"payload={\'text\':\'"+SLACK_MESSAGE+"\', \'username\':\'gcp-watchman\', \'icon_emoji\':\':bangbang:\'}\" "+SLACK_URL)
                os.system("curl -i -XPOST 'http://gcp-in-int-grafana.onedirect.in:8086/write?db=collectd' --data-binary 'mysql_slave_lag,slave_name='"+SERVER_NAME+"' value='"+Seconds_Behind_Master+"''")
        time.sleep(10)
#!/usr/bin/env bash

/usr/bin/docker exec -it $(docker ps -q --filter ancestor=200890773558.dkr.ecr.ap-southeast-2.amazonaws.com/vtrack/web) bash
mydate=`date +"%m/%d/%Y -%H:%M:%S"`
current_time=$(date "+%Y.%m.%d-%H.%M.%S")
mytime=`date +%T`

USER=anirban
PW=Bose9711
filename="/var/lib/mysql-files/VFS_Ticket_data.csv"


rm -rf "/var/lib/mysql-files/VFS_Ticket_data.csv"
reportname="/tmp/VFS_Ticket_data_$current_time.csv"
mysql -u$USER -p$PW -e"call onedirect.get_export_to_excel_summary(8112,current_timestamp - interval 2 hour,current_timestamp)">/var/lib/mysql-files/VFS_Ticket_data.csv


mv /var/lib/mysql-files/VFS_Ticket_data.csv $reportname
echo " ****TRANSFER START**** "
echo $reportname
azcopy cp "$reportname" "https://prjdwuatadls.dfs.core.windows.net/vfsbiproject?sp=rwle&st=2022-08-05T05:47:28Z&se=2022-09-05T13:47:28Z&spr=https&sv=2021-06-08&sr=c&sig=GuyhDRcueFwQUdtL7%2FQ%2Bq5IdRFnd3QKpud1dusF%2Bu0E%3D"

echo " ****TRANSFER END**** "
grep -r <pattern> "dir/*/dir/dir/file"

or

grep -r <pattern> "*/dir/dir"

or

// generic
grep -r <pattern> *
//#########################################################################################//
/* -------------------------------------------------------------------

Name : Anon_Resampling

----------------------------------------------------------------------
Original Rule :	Replace with other values from the same domain:
1 - Table Name
2 - Field Name

-------------------------------------------------------------------*/

SUB Anon_Resampling (P_TABLENAME , P_FIELDNAME)


TRACE ##################################################;
TRACE ## Starting Function : Anon_Resampling  ##;
TRACE ## Anonymizing Field : $(P_FIELDNAME) #;
TRACE ##################################################;

//---------------------------------------//

[DistinctValues]:
Load Distinct 
[$(P_FIELDNAME)] as [OldDistinctValue],
RowNo() as [RowID],
Rand() as [Random]
Resident $(P_TABLENAME);

[AnonDistinctMapping]:
Mapping
Load
RowNo(),
[OldDistinctValue];
Load
[OldDistinctValue],
[Random]
Resident [DistinctValues]
Order By [Random];

[AnonDistinctValues]:
LOAD
*,
ApplyMap('AnonDistinctMapping',RowID,'Anon_Error') as [NewDistinctValue]
Resident DistinctValues;

Drop table DistinctValues;

[AnonMapping]:
Mapping
Load
[OldDistinctValue],
[NewDistinctValue]
Resident [AnonDistinctValues];

Drop table AnonDistinctValues;

[AnonValues]:
LOAD
*,
ApplyMap('AnonMapping',[$(P_FIELDNAME)],'Anon_Error') as [Anon_$(P_FIELDNAME)]
Resident $(P_TABLENAME);

Drop table $(P_TABLENAME);

Rename table AnonValues to $(P_TABLENAME);


END SUB

//#########################################################################################//
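
// Usage sketch (assumes a loaded table [Customers] with a field [Name]):
//
//   Call Anon_Resampling('Customers', 'Name');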
import pandas as pd
from codaio import Coda, Document, Cell

doc=Document.from_environment('XWykP50uN-')
transactions_table=doc.get_table('grid-bsHZ_AO1l5')

df_new=pd.DataFrame([
    {'Name':'Ricardo','transaction_id':'12dgt'},
    {'Name':'Manoel','transaction_id':'fklsod'},
])
df_new

mapping_dict={
    'Name':'Name',
    'transaction_id':'transaction_id'
}
all_data=[]
for i in range(len(df_new)):
    row_data=[]
    for j in range(len(df_new.columns)):
        row_data.append(Cell(column=mapping_dict[df_new.columns[j]],value_storage=df_new.iloc[i,j]))
    all_data.append(row_data)
transactions_table.upsert_rows(all_data)
#!/bin/sh

#create activate script
export ACTIVATE_PATH=$CONDA_PREFIX/etc/conda/activate.d
mkdir -p $ACTIVATE_PATH
touch $ACTIVATE_PATH/env_vars.sh
echo "#!/bin/sh" >> $ACTIVATE_PATH/env_vars.sh
echo "export VAR_NAME=\"VAR_VALUE\"" >> $ACTIVATE_PATH/env_vars.sh

#create deactivate script
export DEACTIVATE_PATH=$CONDA_PREFIX/etc/conda/deactivate.d
mkdir -p $DEACTIVATE_PATH
touch $DEACTIVATE_PATH/env_vars.sh
echo "#!/bin/sh" >> $DEACTIVATE_PATH/env_vars.sh
echo "unset VAR_NAME" >> $DEACTIVATE_PATH/env_vars.sh
Let vSource='lib://LoB demos:DataFiles/';
Let vDestination= 'lib://LoB demos:DataFiles/';

let vStoreTypeSourceFile='qvd';
let vStoreTypeDestinationFile='qvd';

[Parameters]:
LOAD * INLINE [
    original_file_name, target_file_name
    Employee_Master, A001_Employee Master
	Employee All Regions, A001_Employee All Regions
	Employee Retention Predictions_v3, A001_Employee Retention Predictions
];

FOR i = 0 TO NoOfRows('Parameters') - 1
LET vOriginalFileName = peek('original_file_name', $(i), 'Parameters');
LET vTargetFileName = peek('target_file_name', $(i), 'Parameters');


[$(vOriginalFileName)]: LOAD * from [$(vSource)$(vOriginalFileName)] ($(vStoreTypeSourceFile));
STORE [$(vOriginalFileName)] INTO [$(vDestination)$(vTargetFileName)] ($(vStoreTypeDestinationFile));
DROP TABLE [$(vOriginalFileName)];

NEXT i

exit Script
branchName=$(git branch --show-current)
baseURL="https://$branchName-bmc-org.pantheonsite.io/"
npx percy exec -- cypress run --config baseUrl=$baseURL
Let vSource='lib://LoB demos:DataFiles/';
Let vDestination= 'lib://LoB demos:DataFiles/';

let vExtensionSourceFile='.csv';
let vExtensionDestinationFile='.qvd';
let vStoreTypeSourceFile='txt';
let vStoreTypeDestinationFile='qvd';

[Parameters]:
LOAD * INLINE [
    original_file_name, target_file_name
    Employee_Master, A001_Employee Master
	Employee All Regions, A001_Employee All Regions
	Employee Retention Predictions_v3, A001_Employee Retention Predictions


];

FOR i = 0 TO NoOfRows('Parameters') - 1
LET vOriginalFileName = peek('original_file_name', $(i), 'Parameters');
LET vTargetFileName = peek('target_file_name', $(i), 'Parameters');


[$(vOriginalFileName)]: LOAD * from [$(vSource)$(vOriginalFileName)$(vExtensionSourceFile)] ($(vStoreTypeSourceFile));
STORE [$(vOriginalFileName)] INTO [$(vDestination)$(vTargetFileName)$(vExtensionDestinationFile)] ($(vStoreTypeDestinationFile));
DROP TABLE [$(vOriginalFileName)];

NEXT i

exit Script
// Parameters is the table to iterate over
// it contains one row per table/file pair
FOR i = 0 TO NoOfRows('Table_Name') - 1
// save field1 value into vField
LET vField = peek('Field1', $(i), 'Table_Name');
NEXT i

[Parameters]:
LOAD * INLINE [
    Table_name, File_name
    Countries, AR_Countries V1
    Invoice Item Detail,AR_Invoice Item Detail V1
    Product Lines, AR_Product Lines V1
    Invoices, AR_Invoices V1        
    Items,AR_Items V1
    Comments,AR_Comments V1
    DSO,AR_DSO V1
    Link Table,AR_Link Table V1
    Subsidiaries,AR_Subsidiaries V1
    ExchangeRates,AR_ExchangeRates V1
    Accountants,AR_Accountants V1
];
conda env remove --name corrupted_env
#!/usr/bin/expect
set timeout 60
spawn ssh [lindex $argv 1]@[lindex $argv 0]
expect "*?assword" {
    send "[lindex $argv 2]\r"
    }
expect ":~$ " {
    send "
        mkdir -p /home/tools/baeldung/auto-test;
        cd /home/tools/baeldung/auto-test;
        tree
        sshpass -p 'Baels@123' scp -r tools@10.45.67.11:/home/tools/cw/baeldung/get_host_info.sh ./;
        tree
        bash get_host_info.sh\r"
    }
expect ":~$ " {
    send "exit\r"
    }
expect eof
# sudo loop
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &
## starts with ## for comments
## starts with # for commented packages
## optional inline comment after package name with #

## resource monitors
dstat
iotop
sysstat # includes iostat
htop
ncdu
s-tui
ranger
## ranger dep https://github.com/ranger/ranger
python3-chardet
caca-utils
imagemagick
ffmpegthumbnailer
bat
atool
## atool depends on these already
# unrar
# p7zip-full
# unzip
lynx
w3m
elinks
poppler-utils
mupdf-tools
calibre
transmission-cli
mediainfo
libimage-exiftool-perl
odt2txt
jq
fontforge-nox
glances
## glances dep https://github.com/nicolargo/glances
python3-psutil
python3-bottle
hddtemp
python3-netifaces
python3-cpuinfo
python3-pysnmp4
python3-pystache
python3-zeroconf

## system
caffeine
gnome-shell-extensions
gnome-tweak-tool

## shell
neofetch
exa
openssh-server
mosh
tmux
tree
xsel
zsh
curl
git
hub # github

## command line utils
opencc
texlive # https://tex.stackexchange.com/a/504566/73420
lilypond
gitit
graphviz

## filesystem
cifs-utils
samba
# sshfs
zfsutils-linux
cryptsetup # for manually unlock full disk encrypted drives

## programming
cmake
mpich
parallel

## font
fonts-cwtex-kai
fonts-linuxlibertine

## hardware
gsmartcontrol
idle3-tools # WD Green HDD config
lm-sensors
psensor
smartmontools
vainfo # video acceleration
acpi
f3
fancontrol
hardinfo
input-utils # for lsinput

## GUI
keepassxc
chromium-browser
# google-chrome-stable # in pop OS's repo. May need more steps on Ubuntu: https://linuxhint.com/install_google_chrome_ubuntu_ppa/
kitty

## Video
ffmpeg
libbluray-bdj
kodi
vlc
mkvtoolnix # mkvinfo
mpv

## network
nmap
iperf3
wakeonlan
ifenslave
ethtool

## for sanoid
debhelper
libcapture-tiny-perl
libconfig-inifiles-perl
pv
lzop
mbuffer
##install packages
#step 1: check which version is required
#step 2: go to GitHub and download it with: curl -Lo [name of the package] [link]
#step 3: move the file to /usr/local/bin
#step 4: check the file's permissions with ls -al [file]

curl -Lo "deno.zip" "https://github.com/denoland/deno/releases/latest/download/deno-x86_64-unknown-linux-gnu.zip"

# Make file immutable
chattr +i filename

# Make file mutable
chattr -i filename
yarn add @babel/plugin-transform-exponentiation-operator --dev
npm install react-icons --save
function hex() {
    printf "%%%02x\n" "'$1"
}

hex -   # Outputs %2d
hex _   # Outputs %5f
hex .   # Outputs %2e
#!/bin/bash
if hash ntpdate 2>/dev/null; then
    ntpdate pool.ntp.org
else
    echo "'ntpdate' is not installed. Aborting..."; exit 1
fi
#!/bin/sh
set -e
 
echo "Deploying application ..."
 
# Enter maintenance mode
(php artisan down --message 'The app is being (quickly!) updated. Please try again in a minute.') || true
    # Update codebase
    git fetch origin deploy
    git reset --hard origin/deploy
 
    # Install dependencies based on lock file
    composer install --no-interaction --prefer-dist --optimize-autoloader
 
    # Migrate database
    php artisan migrate --force
 
    # Note: If you're using queue workers, this is the place to restart them.
    # ...
 
    # Clear cache
    php artisan optimize
 
    # Reload PHP to update opcache
    echo "" | sudo -S service php7.4-fpm reload
# Exit maintenance mode
php artisan up
 
echo "Application deployed!"
npx cap open ios #open the project in Xcode

npx cap open android #open the project in Android Studio
<dict>
+  <key>NSCameraUsageDescription</key>
+  <string>To be able to scan barcodes</string>
</dict>
<?xml version="1.0" encoding="utf-8"?>
<manifest
  xmlns:android="http://schemas.android.com/apk/res/android"
+  xmlns:tools="http://schemas.android.com/tools" <-- add this line, removing nothing and following this pattern

  package="com.example">

  <application
+    android:hardwareAccelerated="true" <-- add this line, removing nothing and following this pattern
  >
  </application>

+  <uses-permission android:name="android.permission.CAMERA" /><-- add this line, removing nothing and following this pattern

+  <uses-sdk tools:overrideLibrary="com.google.zxing.client.android" /><-- add this line, removing nothing and following this pattern
</manifest>
ionic build --prod

#if you want to build the app for Android, run the following command:
npm install @capacitor/android
npx cap add android

#if you want to build the app for iOS, run the following command:
npm install @capacitor/ios
npx cap add ios


#at the end, run these last two commands

npx cap sync
npx cap copy android or ios #depending on which you chose
...

<ion-content class="scanner-hide" *ngIf="scanStatus == false">
  <div class="padding-container center">
    <ion-button color="primary" (click)="scanCode()"><ion-icon slot="start" name="qr-code-outline"></ion-icon> Scanear Código</ion-button> <!-- Botão que chama a função do scanner-->
  </div>
  <ion-card>
    <ion-card-content><h1>{{ result }}</h1></ion-card-content> <!-- shows the scan result -->
  </ion-card>
  
  <div class="scanner-ui"> <!-- Quando estamos a scanear, chama esta classe-->
    ...Scanner Interface
    </div>
    <div class="ad-spot"></div>
</ion-content>
...
import { BarcodeScanner } from '@capacitor-community/barcode-scanner';



...



export class HomePage {
  public scanStatus:boolean = false; // when the page loads, set the qr scan status to false
  public result:any;

  constructor() {}


  async scanCode () {

    this.setPermissions(); /* requests the camera permissions */
  
    BarcodeScanner.hideBackground(); // makes the background transparent
    this.scanStatus = true; // setting this variable to true brings up the qr code scanner 
    document.body.classList.add("qrscanner"); // adds the css class we defined in the global stylesheet
    const result = await BarcodeScanner.startScan(); // starts scanning and waits for a result
  
  // if the qr scanner detected something, run the code below
    if (result.hasContent) {

        
        this.scanStatus = false; // obviously, the scanner must be turned off once we get a result
        BarcodeScanner.stopScan(); // stops the scan
        this.result = result.content; // passes the result to the global result variable
        BarcodeScanner.showBackground(); // shows the background again
        document.body.classList.remove("qrscanner"); // removes the css class we created in the global stylesheet
    
    }
  }

  async setPermissions(){
    const status = await BarcodeScanner.checkPermission({ force: true }); /* forces the permission prompt; if the user does not accept, the scanner won't work */
    if (status.granted) {
      // the user granted permission
      return true; // if the user accepted the permissions, return true
    }
  
      return false; // if the user did not accept, return false
  }
}
.scanner-ui { display: none; }
.scanner-hide { visibility: visible; }

body.qrscanner { background-color: transparent; }
body.qrscanner .scanner-ui { display: block; }
body.qrscanner .scanner-hide { visibility: hidden; }
ionic start qrcode blank --type=ionic-angular
#variables

nome = "Meu Nome" #whenever you put the value between "" the variable is of type string
#a string variable is a type of variable that holds text

idade = "28" #string variable

x = 2
y = 5
#whenever you assign a numeric value without "" the variable becomes of type int
#int variables only accept whole numbers; you cannot mix text into them



#examples of what you cannot do
!var@ = 1 
#you cannot, in fact you simply can't, use punctuation when naming variables

total = x + idade 
#you cannot combine different variable types in a single expression
#that is, you cannot add the age "28", which is a string, to an int,
#which is strictly a number; it's as if that 28 were written as "twenty-eight"





#returned results
print(x+y) #will print the value 7
- mkdir work_dir_company
- nano work_dir_company/.gitconfig_company

```
[user]
        email = user@mail.com
        name = userName
[core]
        sshCommand = ssh -i ~/.ssh/id_ed25519_company
```

- nano ~/.gitconfig

```
[includeIf "gitdir:~/work_dir_company/"]
	path = ~/work_dir_company/.gitconfig_company
[user]
        email = user@mail.com
        name = userName
[core]
        sshCommand = ssh -i ~/.ssh/id_ed25519_company
```

- Verify with `git config --list`
Settings | Tools | Python Integrated Tools | Docstring format
echo "$USER ALL=(ALL:ALL) NOPASSWD: ALL" | sudo tee "/etc/sudoers.d/dont-prompt-$USER-for-sudo-password"
#!/bin/bash

set -e

if [ -d ~/.local/share/JetBrains/Toolbox ]; then
    echo "JetBrains Toolbox is already installed!"
    exit 0
fi

echo "Start installation..."

wget --show-progress -qO ./toolbox.tar.gz "https://data.services.jetbrains.com/products/download?platform=linux&code=TBA"

TOOLBOX_TEMP_DIR=$(mktemp -d)

tar -C "$TOOLBOX_TEMP_DIR" -xf toolbox.tar.gz
rm ./toolbox.tar.gz

"$TOOLBOX_TEMP_DIR"/*/jetbrains-toolbox

rm -r "$TOOLBOX_TEMP_DIR"

echo "JetBrains Toolbox was successfully installed!"
# Reset
Color_Off='\033[0m'       # Text Reset
 
# Regular Colors
Black='\033[0;30m'        # Black
Red='\033[0;31m'          # Red
Green='\033[0;32m'        # Green
Yellow='\033[0;33m'       # Yellow
Blue='\033[0;34m'         # Blue
Purple='\033[0;35m'       # Purple
Cyan='\033[0;36m'         # Cyan
White='\033[0;37m'        # White
 
# Bold
BBlack='\033[1;30m'       # Black
BRed='\033[1;31m'         # Red
BGreen='\033[1;32m'       # Green
BYellow='\033[1;33m'      # Yellow
BBlue='\033[1;34m'        # Blue
BPurple='\033[1;35m'      # Purple
BCyan='\033[1;36m'        # Cyan
BWhite='\033[1;37m'       # White
 
# Underline
UBlack='\033[4;30m'       # Black
URed='\033[4;31m'         # Red
UGreen='\033[4;32m'       # Green
UYellow='\033[4;33m'      # Yellow
UBlue='\033[4;34m'        # Blue
UPurple='\033[4;35m'      # Purple
UCyan='\033[4;36m'        # Cyan
UWhite='\033[4;37m'       # White
 
# Background
On_Black='\033[40m'       # Black
On_Red='\033[41m'         # Red
On_Green='\033[42m'       # Green
On_Yellow='\033[43m'      # Yellow
On_Blue='\033[44m'        # Blue
On_Purple='\033[45m'      # Purple
On_Cyan='\033[46m'        # Cyan
On_White='\033[47m'       # White
 
# High Intensity
IBlack='\033[0;90m'       # Black
IRed='\033[0;91m'         # Red
IGreen='\033[0;92m'       # Green
IYellow='\033[0;93m'      # Yellow
IBlue='\033[0;94m'        # Blue
IPurple='\033[0;95m'      # Purple
ICyan='\033[0;96m'        # Cyan
IWhite='\033[0;97m'       # White
 
# Bold High Intensity
BIBlack='\033[1;90m'      # Black
BIRed='\033[1;91m'        # Red
BIGreen='\033[1;92m'      # Green
BIYellow='\033[1;93m'     # Yellow
BIBlue='\033[1;94m'       # Blue
BIPurple='\033[1;95m'     # Purple
BICyan='\033[1;96m'       # Cyan
BIWhite='\033[1;97m'      # White
 
# High Intensity backgrounds
On_IBlack='\033[0;100m'   # Black
On_IRed='\033[0;101m'     # Red
On_IGreen='\033[0;102m'   # Green
On_IYellow='\033[0;103m'  # Yellow
On_IBlue='\033[0;104m'    # Blue
On_IPurple='\033[0;105m'  # Purple
On_ICyan='\033[0;106m'    # Cyan
On_IWhite='\033[0;107m'   # White
sudo apt install rename
rename 's/$/\.parquet/' *
#Enable the service to activate time synchronization between the computer and internet time servers:

sudo systemctl enable systemd-timesyncd.service
# We start the service :
timedatectl set-ntp true
a=(a b c); x=`echo ${!a[@]}` ;echo ${x: -1} # ArrayMaxIdxNo
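# a slightly clearer equivalent (a sketch; also handles sparse arrays):
a=(a b c); idx=(${!a[@]}); echo ${idx[@]: -1} # max array index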
Press CTRL + V to enable Visual Block mode.

Using the up and down arrow key, highlight the lines you wish to comment out.

Once you have the lines selected, press the SHIFT + I keys to enter insert mode.

Enter your command symbol, for example, # sign, and press the ESC key. Vim will comment out all the highlighted lines.
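
Alternatively, after selecting the lines in visual mode, a substitute command does the same thing (a sketch):

:'<,'>s/^/# /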
sudo ufw allow 8000
sudo ufw delete allow 8000
sudo ufw allow 'Nginx Full'


sudo ufw status #check
zsh -xl
zsh -xl | tee output.file
grep -r "VARIABLE" /* [directories to search] | tee ~/[output file and path] 
timezsh() {
  shell=${1-$SHELL}
  for i in $(seq 1 10); do /usr/bin/time $shell -i -c exit; done
}
$ mkdir pytest_project
$ cd pytest_project
$ python3 -m venv pytest-env
# Download
# Create a folder
$ mkdir actions-runner && cd actions-runner
# Download the latest runner package
$ curl -o actions-runner-linux-x64-2.291.1.tar.gz -L https://github.com/actions/runner/releases/download/v2.291.1/actions-runner-linux-x64-2.291.1.tar.gz
# Optional: Validate the hash
$ echo "1bde3f2baf514adda5f8cf2ce531edd2f6be52ed84b9b6733bf43006d36dcd4c  actions-runner-linux-x64-2.291.1.tar.gz" | shasum -a 256 -c
# Extract the installer
$ tar xzf ./actions-runner-linux-x64-2.291.1.tar.gz
# Configure
# Create the runner and start the configuration experience
$ ./config.sh --url https://github.com/TousssaintThomas/wren.v1.0.0 --token AB7YEM2R2HZDVVBJ3VEFFLLCSI5U6
# Last step, run it!
$ ./run.sh
# Using your self-hosted runner
# Use this YAML in your workflow file for each job
# runs-on: self-hosted
kubectl get pods <-n namespace> <--all-namespace> -o jsonpath="{.items[*].spec.containers[*].name}" |tr -s '[[:space:]]' '\n' |sort |wc -l

kubectl get pods <-n namespace> <--all-namespace> -o jsonpath="{.items[*].spec.initContainers[*].name}" |tr -s '[[:space:]]' '\n' |sort |wc -l
while IFS= read -r line; do
    echo "Text read from file: $line"
done < my_filename.txt
curl -v -X GET "https://api-m.sandbox.paypal.com/v1/catalogs/products?page_size=2&page=1&total_required=true" \
-H "Content-Type: application/json" \
-H "Authorization: Bearer Access-Token"
npx wrangler init my-worker
# try it out

cd my-worker && npx wrangler dev
# and then publish it
npx wrangler publish
# visit https://my-worker.<your workers subdomain>.workers.dev
$ npm uninstall -g @cloudflare/wrangler
npx @11ty/eleventy
sudo apt update; sudo apt upgrade -y; sudo apt autoremove -y; clear;
#!/bin/bash
find $1 -type f -exec stat --format '%Y :%y %n' "{}" \; | sort -nr | cut -d: -f2- | head
# in source repo

git checkout -b <new branch> <source branch>

git filter-branch --subdirectory-filter <sub-directory path> -- --all

git remote add <remote name> <remote URL>
  
git fetch <remote name>
  
git push <-u> <new remote> <new branch>
  
# or merge unrelated history and then push

git merge <new remote> --allow-unrelated-histories
sudo to root
yum install tcpdump
tcpdump -s1500 -ieth0 -vv 'host <IPaddr>' 
i=$((i+1))

((i=i+1))

let "i=i+1"
echo ${SEMVER} | sed 's/\..*//'
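# minor and patch can be extracted the same way (a sketch):
echo ${SEMVER} | cut -d. -f2   # minor
echo ${SEMVER} | cut -d. -f3   # patch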
if git show-ref --quiet <branch_name>; then
	echo branch exists
fi
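
# note: show-ref without --verify matches any ref with that name (tags, remotes);
# to test specifically for a local branch, this form is safer (a sketch):

if git show-ref --verify --quiet refs/heads/<branch_name>; then
	echo branch exists
fi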
// delete branch locally
git branch -d localBranchName

// delete branch remotely
git push origin --delete remoteBranchName
az role definition list --query "sort_by([].{Name:roleName,Id:name}, &Name)" --output table
# Raspberry Pi Tips & Tricks - https://raspberrytips.nl

import Adafruit_DHT

humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT11, 4)

if humidity is not None and temperature is not None:

  humidity = round(humidity, 2)
  temperature = round(temperature, 2)

  print 'Temperature: {0:0.1f}*C'.format(temperature)
  print 'Humidity: {0:0.1f}%'.format(humidity)

else:

  print 'No data received'
#!/bin/sh

curl -s https://status.slack.com/api/v2.0.0/current | \
  jq -r '"Status: " + (if (.status == "active") then "Active Incident" else "Ok" end),"Last Updated: " + .date_updated,if (.active_incidents[] | length) > 0 then "Active Incidents\n" + .active_incidents[] .title else "" end'
gcloud builds submit --pack image=us-central1-docker.pkg.dev/analytics-dev-308300/functions/talentcard-reports-to-landing-zone,env=GOOGLE_FUNCTION_TARGET=start
#!/bin/bash

set -eu -o pipefail # fail on error and report it, debug all lines

sudo -n true
test $? -eq 0 || { echo "you should have sudo privilege to run this script"; exit 1; }

echo installing the must-have pre-requisites
while read -r p ; do sudo apt-get install -y $p ; done < <(cat << "EOF"
    perl
    zip unzip
    exuberant-ctags
    mutt
    libxml-atom-perl
    postgresql-9.6
    libdbd-pgsql
    curl
    wget
    libwww-curl-perl
EOF
)

echo installing the nice-to-have pre-requisites
echo you have 5 seconds to proceed ...
echo or
echo hit Ctrl+C to quit
echo -e "\n"
sleep 6

sudo apt-get install -y tig
ssh -i mykeypair_openssh.ppk <user>@<host ip>
  
# with port
ssh -i mykeypair_openssh.ppk <user>@<host ip> -p 50055
puttygen ~/.ssh/my.ppk -O private-openssh -o ~/.ssh/my_openssh.ppk
sh -c "$(curl -fsSL https://raw.github.com/ohmyzsh/ohmyzsh/master/tools/install.sh)"

mkdir /mnt
mount /dev/vda /mnt
chroot /mnt

touch /etc/cloud/cloud-init.disabled

echo 'root:root' | chpasswd

ssh-keygen -f /etc/ssh/ssh_host_rsa_key -N '' -t rsa
ssh-keygen -f /etc/ssh/ssh_host_dsa_key -N '' -t dsa
ssh-keygen -f /etc/ssh/ssh_host_ed25519_key -N '' -t ed25519

cat <<EOF > /etc/netplan/01-dhcp.yaml 
network:
    version: 2
    ethernets:
        enp0s1:
            dhcp4: true
            addresses: [192.168.64.2/24]
            nameservers:
                addresses: [8.8.8.8, 8.8.4.4]    
EOF

exit
umount /dev/vda
curl -o initrd https://cloud-images.ubuntu.com/focal/current/unpacked/focal-server-cloudimg-arm64-initrd-generic
curl -o kernel.gz https://cloud-images.ubuntu.com/focal/current/unpacked/focal-server-cloudimg-arm64-vmlinuz-generic
gunzip kernel.gz
curl -o disk.tar.gz https://cloud-images.ubuntu.com/releases/focal/release/ubuntu-20.04-server-cloudimg-arm64.tar.gz
youtube-dl --merge-output-format mp4 --postprocessor-args "-strict experimental" -f "bestvideo+bestaudio/best" --embed-thumbnail --add-metadata [url]
if [[ $var ]]; then   # var is set and it is not empty
if [[ ! $var ]]; then # var is not set or it is set to an empty string
wget https://repo.anaconda.com/miniconda/Miniconda3-py39_4.11.0-Linux-x86_64.sh
bash Miniconda3-py39_4.11.0-Linux-x86_64.sh
conda install mamba -n base -c conda-forge
from flask import Flask, jsonify, request

from cashman.model.expense import Expense, ExpenseSchema
from cashman.model.income import Income, IncomeSchema
from cashman.model.transaction_type import TransactionType

app = Flask(__name__)

transactions = [
  Income('Salary', 5000),
  Income('Dividends', 200),
  Expense('pizza', 50),
  Expense('Rock Concert', 100)
]


@app.route('/incomes')
def get_incomes():
  schema = IncomeSchema(many=True)
  incomes = schema.dump(
    filter(lambda t: t.type == TransactionType.INCOME, transactions)
  )
  return jsonify(incomes.data)


@app.route('/incomes', methods=['POST'])
def add_income():
  income = IncomeSchema().load(request.get_json())
  transactions.append(income.data)
  return "", 204


@app.route('/expenses')
def get_expenses():
  schema = ExpenseSchema(many=True)
  expenses = schema.dump(
      filter(lambda t: t.type == TransactionType.EXPENSE, transactions)
  )
  return jsonify(expenses.data)


@app.route('/expenses', methods=['POST'])
def add_expense():
  expense = ExpenseSchema().load(request.get_json())
  transactions.append(expense.data)
  return "", 204


if __name__ == "__main__":
    app.run()
# start the cashman application
./bootstrap.sh &

# get incomes
curl http://localhost:5000/incomes

# add new income
curl -X POST -H "Content-Type: application/json" -d '{
  "description": "lottery",
  "amount": 1000.0
}' http://localhost:5000/incomes

# check if lottery was added
curl localhost:5000/incomes
#!/bin/sh
export FLASK_APP=./cashman/index.py
source $(pipenv --venv)/bin/activate
flask run -h 0.0.0.0
https://maltego-downloads.s3.us-east-2.amazonaws.com/linux/Maltego.v4.3.0.linux.zip
82FCD54A-74bf-2CE93-41d6-A4389de1bd2
npx tsc --init --rootDir src --outDir build \
--esModuleInterop --resolveJsonModule --lib es6 \
--module commonjs --allowJs true --noImplicitAny true
# Zsh
1. zsh: https://github.com/ohmyzsh/ohmyzsh/wiki/Installing-ZSH
2. Oh My Zsh: https://github.com/ohmyzsh/ohmyzsh
3. Powerlevel10k: https://github.com/romkatv/powerlevel10k

	// Don't forget to set the theme and install the fonts
	https://github.com/romkatv/powerlevel10k-media/raw/master/MesloLGS%20NF%20Regular.ttf
	https://github.com/romkatv/powerlevel10k-media/raw/master/MesloLGS%20NF%20Bold.ttf
	https://github.com/romkatv/powerlevel10k-media/raw/master/MesloLGS%20NF%20Italic.ttf
	https://github.com/romkatv/powerlevel10k-media/raw/master/MesloLGS%20NF%20Bold%20Italic.ttf

	// Execute this to configure the theme
	p10k configure

# Plugins:
// One Dark colour theme
  https://github.com/one-dark/iterm-one-dark-theme

// Search the history of previously used commands:
  https://github.com/junegunn/fzf

// Command line auto suggestions:
  https://github.com/zsh-users/zsh-autosuggestions
  
// Quick switch to directories
  https://github.com/agkozak/zsh-z
  
// Found these on this blog post https://udaraw.com/iterm-plugins
git remote set-url origin <new-url>
git push --set-upstream origin <branch-name>
pip freeze > requirements.txt
adduser USER_NAME
usermod -aG sudo USER_NAME

# Verify new user
grep '^sudo' /etc/group
npx create-html5-boilerplate new-site
#!/usr/bin/env bash

# install ZSH
sudo apt -y install zsh

# Install oh-my-zsh
git clone https://github.com/robbyrussell/oh-my-zsh.git ~/.oh-my-zsh # GitHub no longer serves the git:// protocol

# Install some external plugins:
git clone https://github.com/zsh-users/zsh-autosuggestions ~/.oh-my-zsh/custom/plugins/zsh-autosuggestions
git clone https://github.com/zsh-users/zsh-completions ~/.oh-my-zsh/custom/plugins/zsh-completions
git clone https://github.com/zsh-users/zsh-syntax-highlighting.git ~/.oh-my-zsh/custom/plugins/zsh-syntax-highlighting

# Set Zsh as your default shell:
chsh -s /bin/zsh
#!/bin/bash

curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh

source $HOME/.cargo/env
#!/usr/bin/env bash

# Update the list of packages
sudo apt-get update

# Install pre-requisite packages.
sudo apt-get install -y wget apt-transport-https software-properties-common

# Download the Microsoft repository GPG keys
wget -q https://packages.microsoft.com/config/ubuntu/20.04/packages-microsoft-prod.deb

# Register the Microsoft repository GPG keys
sudo dpkg -i packages-microsoft-prod.deb

# Update the list of packages after we added packages.microsoft.com
sudo apt-get update

# Install PowerShell
sudo apt-get install -y powershell

# Start PowerShell
pwsh
#!/usr/bin/env bash

sudo apt-get -y update

# set config
export XDG_CONFIG_HOME="$HOME/.config" # no spaces around '=' in shell assignments

# install nvm
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.0/install.sh | bash

export NVM_DIR="$XDG_CONFIG_HOME/nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"  # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion"  # This loads nvm bash_completion

# config
nvm alias default node # Always default to the latest available node version on a shell
nvm set-colors "yMeBg"

# install latest node and npm
nvm install node --latest-npm
nvm install-latest-npm

nvm use node
npm install -g npm

# setup npm
npm login
npm install -g eslint jshint prettier yarn npm-check doctoc tldr speedtest-cli serve
#!/bin/bash

wget https://developer.download.nvidia.com/compute/cuda/repos/wsl-ubuntu/x86_64/cuda-wsl-ubuntu.pin
sudo mv cuda-wsl-ubuntu.pin /etc/apt/preferences.d/cuda-repository-pin-600
wget https://developer.download.nvidia.com/compute/cuda/11.5.1/local_installers/cuda-repo-wsl-ubuntu-11-5-local_11.5.1-1_amd64.deb
sudo dpkg -i cuda-repo-wsl-ubuntu-11-5-local_11.5.1-1_amd64.deb
sudo apt-key add /var/cuda-repo-wsl-ubuntu-11-5-local/7fa2af80.pub
sudo apt-get -y update 
sudo apt-get -y install cuda
#!/bin/bash

curl -fsSL https://deb.nodesource.com/setup_17.x | sudo -E bash -
sudo apt-get install -y nodejs
#!/usr/bin/env sh

sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y autoremove
sudo apt-get -y install neovim
sudo apt-get install python-neovim python3-neovim

mkdir -p ~/.dotfiles/neovim/.config/nvim
touch ~/.dotfiles/neovim/.config/nvim/init.vim

printf 'set runtimepath^=~/.vim runtimepath+=~/.vim/after\nlet &packpath = &runtimepath\nsource ~/.vimrc\n' >> ~/.dotfiles/neovim/.config/nvim/init.vim
cd .dotfiles
stow neovim
cd ~
#!/usr/bin/env bash

# update and ensure build-essentials/git
sudo apt update
sudo apt-get install build-essential curl file git

# install homebrew
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"

# add to PATH
echo 'eval "$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)"' >> /home/jimbrig/.profile
eval "$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)"
source ~/.profile

# test
test -d /home/linuxbrew/.linuxbrew && eval $(/home/linuxbrew/.linuxbrew/bin/brew shellenv)
test -r ~/.bash_profile && echo "eval \$($(brew --prefix)/bin/brew shellenv)" >>~/.bash_profile
echo "eval \$($(brew --prefix)/bin/brew shellenv)" >>~/.profile

brew doctor

# gcc installation
brew install gcc

# initial installations
brew install topgrade git-crypt git-cliff
#!/bin/bash

wget https://release.gitkraken.com/linux/gitkraken-amd64.deb
sudo dpkg -i ./gitkraken-amd64.deb
sudo apt-get install -f
gitkraken
#!/usr/bin/env bash

# install github-cli
VERSION=`curl  "https://api.github.com/repos/cli/cli/releases/latest" | grep '"tag_name"' | sed -E 's/.*"([^"]+)".*/\1/' | cut -c2-`
echo $VERSION
mkdir ~/downloads
curl -sSL https://github.com/cli/cli/releases/download/v${VERSION}/gh_${VERSION}_linux_amd64.tar.gz -o ~/downloads/gh_${VERSION}_linux_amd64.tar.gz
cd ~/downloads
tar xvf gh_${VERSION}_linux_amd64.tar.gz
sudo cp gh_${VERSION}_linux_amd64/bin/gh /usr/local/bin/
gh version
sudo cp -r ~/downloads/gh_${VERSION}_linux_amd64/share/man/man1/* /usr/share/man/man1/
# man gh
gh auth login

rm -r ~/downloads
#!/usr/bin/env bash

# install cargo
sudo apt-get update -y
sudo apt-get install -y cargo

printf '\n# Add .cargo to $PATH\nexport PATH="$HOME/.cargo/bin:$PATH"\n' >> ~/.zshrc # printf interprets \n; a quoted ~ would not expand, so use $HOME

cargo install cargo-update
#!/usr/bin/env bash

curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
az --version
az login
#!/usr/bin/env bash

# Install R on WSL
sudo apt-get update -qq -y
sudo apt-get install -y wget git
OS_DISTRIBUTION=$(lsb_release -cs)
wget -O- http://neuro.debian.net/lists/${OS_DISTRIBUTION}.us-nh.full | sudo tee /etc/apt/sources.list.d/neurodebian.sources.list
sudo apt-key adv --recv-keys --keyserver hkp://pool.sks-keyservers.net:80 0xA5D32F012649A5A9
sudo apt-get update

sudo apt-get install libopenblas-base r-base
sudo apt-get update -qq -y
sudo apt-get install -y libgit2-dev
sudo apt-get install -y libcurl4-openssl-dev libssl-dev
sudo apt-get install -y zlib1g-dev libssh2-1-dev libpq-dev libxml2-dev
curl -X POST -H "Content-Type: application/json" -H "x-api-key: YOUR_API_KEY" -d '{ "name": "Homer Simpson", "email": "homer@gmail.com", "customer_id": "1234" }' https://api.trysend.com/customers
dmesg | grep "Intel Corporation" -A 1 | grep SerialNumber
#!/bin/bash
File="domainList.txt"
while IFS= read -r line
do
  sed "s/foo.com/$line/g" sample.yml > "/path_to_dir/dir_name/$line.yml"

  echo "domain: $line"
done < "$File"
#!/usr/bin/env bash
#
# Returns Magento document root if sourced or prints if launched as standalone
#

mageRoot="$(dirname "$(realpath "${0}")")"

while [[ ! -e ${mageRoot}/app/Mage.php && ! -e ${mageRoot}/bin/magento ]]
do
    mageRoot="$(dirname "$(realpath "${mageRoot}")")"
done

return "${mageRoot}" 2>/dev/null || printf '%s' "${mageRoot}" && exit
sudo apt-get -y install xfce4 && sudo apt-get -y install xubuntu-desktop

sudo apt-get -y install xrdp

echo xfce4-session > ~/.xsession

sudo service xrdp restart

ifconfig | grep inet

# Then connect to the IP that is returned by the last command
# Screenshot: https://adamtheautomator.com/wp-content/uploads/2019/09/windows-subsystem-linux-gui.png
LOG=nightly-`date '+%Y-%m-%d_%H:%M:%S'`.log

rsync -av --delete --exclude '$RECYCLE.BIN' --exclude 'System Volume Information' --exclude 'found.000' --exclude 'Recovery' "$SOURCE" "$DESTINATION" | tee ~/logs/"$LOG" # backslashes inside single quotes are literal, so the escaped spaces would never match
wsl --shutdown
diskpart
# open window Diskpart
select vdisk file="C:\WSL-Distros\…\ext4.vhdx"
attach vdisk readonly
compact vdisk
detach vdisk
exit
# Stopping Zigbee2MQTT
sudo systemctl stop zigbee2mqtt

# Starting Zigbee2MQTT
sudo systemctl start zigbee2mqtt

# View the log of Zigbee2MQTT
sudo journalctl -u zigbee2mqtt.service -f
function show_colors() {
  color=16;
  
  while [ $color -lt 245 ]; do
    echo -e "$color: \\033[38;5;${color}mhello\\033[48;5;${color}mworld\\033[0m"
	((color++));
  done  
}
function colorgrid() {
    iter=16
    while [ $iter -lt 52 ]
    do
        second=$[$iter+36]
        third=$[$second+36]
        four=$[$third+36]
        five=$[$four+36]
        six=$[$five+36]
        seven=$[$six+36]
        if [ $seven -gt 250 ];then seven=$[$seven-251]; fi

        echo -en "\033[38;5;$(echo $iter)m█ "
        printf "%03d" $iter
        echo -en "   \033[38;5;$(echo $second)m█ "
        printf "%03d" $second
        echo -en "   \033[38;5;$(echo $third)m█ "
        printf "%03d" $third
        echo -en "   \033[38;5;$(echo $four)m█ "
        printf "%03d" $four
        echo -en "   \033[38;5;$(echo $five)m█ "
        printf "%03d" $five
        echo -en "   \033[38;5;$(echo $six)m█ "
        printf "%03d" $six
        echo -en "   \033[38;5;$(echo $seven)m█ "
        printf "%03d" $seven

        iter=$[$iter+1]
        printf '\r\n'
    done
}
git stash                       # skip if all changes are committed
git branch my_feature
git reset --hard origin/master
git checkout my_feature
git stash pop                   # skip if all changes were committed
<a class="twitter-share-button"
  href="https://twitter.com/intent/tweet"
  data-size="large"
  data-text="custom share text"
  data-url="https://dev.twitter.com/web/tweet-button"
  data-hashtags="example,demo"
  data-via="twitterdev"
  data-related="twitterapi,twitter">
Tweet
</a>
echo n > /sys/class/backlight/rpi_backlight/brightness # replace n with the desired brightness value
sudo bash -c "echo 0 > /sys/class/backlight/rpi_backlight/brightness" # if permission denied in above line
#### FIRST ####
nano ~/.bash_profile # no sudo needed for files in your own home directory

#Add this in your .bash_profile
if [ -r ~/.bashrc ]; then
   source ~/.bashrc
fi

#### SECOND ####
nano ~/.bashrc
    
#Add this in your .bashrc
alias sail='bash vendor/bin/sail'
alias composer="/Users/username/composer.phar"
    
#!/bin/bash
# Bash Menu Script Example

PS3='Please enter your choice: '
options=("Option 1" "Option 2" "Option 3" "Quit")
select opt in "${options[@]}"
do
    case $opt in
        "Option 1")
            echo "you chose choice 1"
            ;;
        "Option 2")
            echo "you chose choice 2"
            ;;
        "Option 3")
            echo "you chose choice $REPLY which is $opt"
            ;;
        "Quit")
            break
            ;;
        *) echo "invalid option $REPLY";;
    esac
done
@echo off

SET DOCKER=docker

SET CONTAINER_NAME=linux_sandbox
SET IMAGE_TO_USE=centos:latest
SET IMAGE_TO_USE_SANDBOX=sandbox:%CONTAINER_NAME%

echo Container - %CONTAINER_NAME%
echo Select an Option to Continue:
echo [0] - Container - Create
echo [1] - Container - Start
echo [2] - Container - Stop
echo [3] - Container - Terminal
echo [4] - Container - Destroy

set /p CHOICE="Enter Selection: "

IF "%CHOICE%" == "0" (
%DOCKER% pull "%IMAGE_TO_USE%"

start /MIN "" "%DOCKER%" run -it --privileged --name %CONTAINER_NAME% %IMAGE_TO_USE% bash

timeout 10

%DOCKER% exec -it %CONTAINER_NAME% bash -c "yum -y update; yum clean all"
%DOCKER% exec -it %CONTAINER_NAME% bash -c "yum -y install openssh-server passwd; yum clean all"
%DOCKER% exec -it %CONTAINER_NAME% bash -c "mkdir /var/run/sshd"
%DOCKER% exec -it %CONTAINER_NAME% bash -c "ssh-keygen -t rsa -f /etc/ssh/ssh_host_rsa_key -N ''"
%DOCKER% exec -it %CONTAINER_NAME% bash -c "echo 'root:password' | chpasswd"

%DOCKER% commit %CONTAINER_NAME% %IMAGE_TO_USE_SANDBOX%
%DOCKER% stop %CONTAINER_NAME%
%DOCKER% rm %CONTAINER_NAME%

%DOCKER% run -d --privileged --name %CONTAINER_NAME% -p "22:22" %IMAGE_TO_USE_SANDBOX% /usr/sbin/sshd -D
)

IF "%CHOICE%" == "1" (
%DOCKER% start %CONTAINER_NAME%
)

IF "%CHOICE%" == "2" (
%DOCKER% stop %CONTAINER_NAME%
)

IF "%CHOICE%" == "3" (
%DOCKER% exec -it %CONTAINER_NAME% /bin/bash
)

IF "%CHOICE%" == "4" (
%DOCKER% stop %CONTAINER_NAME%
%DOCKER% rm %CONTAINER_NAME%
%DOCKER% rmi %IMAGE_TO_USE_SANDBOX%
)

pause
2 steps:
1. Ctrl+W
2. Ctrl+V
# WSL2 network port forwarding script v1
#   To allow scripts to run, execute 'Set-ExecutionPolicy -ExecutionPolicy Bypass -Scope CurrentUser' in PowerShell.
#   Pass 'delete' as a parameter to delete existing rules and ports; pass 'list' to show ports.
#   written by Daehyuk Ahn, Aug-1-2020

# Display all portproxy information
If ($Args[0] -eq "list") {
    netsh interface portproxy show v4tov4;
    exit;
} 

# If elevation needed, start new process
If (-NOT ([Security.Principal.WindowsPrincipal] [Security.Principal.WindowsIdentity]::GetCurrent()).IsInRole([Security.Principal.WindowsBuiltInRole]::Administrator))
{
  # Relaunch as an elevated process:
  Start-Process powershell.exe "-File",('"{0}"' -f $MyInvocation.MyCommand.Path),"$Args runas" -Verb RunAs
  exit
}

# You should modify '$Ports' for your applications 
$Ports = (22,80,443,8080)

# Check WSL ip address
wsl hostname -I | Set-Variable -Name "WSL"
$found = $WSL -match '\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}';
if (-not $found) {
  echo "WSL2 cannot be found. Terminate script.";
  exit;
}

# Remove and Create NetFireWallRule
Remove-NetFireWallRule -DisplayName 'WSL 2 Firewall Unlock';
if ($Args[0] -ne "delete") {
  New-NetFireWallRule -DisplayName 'WSL 2 Firewall Unlock' -Direction Outbound -LocalPort $Ports -Action Allow -Protocol TCP;
  New-NetFireWallRule -DisplayName 'WSL 2 Firewall Unlock' -Direction Inbound -LocalPort $Ports -Action Allow -Protocol TCP;
}

# Add each port into portproxy
$Addr = "0.0.0.0"
Foreach ($Port in $Ports) {
    iex "netsh interface portproxy delete v4tov4 listenaddress=$Addr listenport=$Port | Out-Null";
    if ($Args[0] -ne "delete") {
        iex "netsh interface portproxy add v4tov4 listenaddress=$Addr listenport=$Port connectaddress=$WSL connectport=$Port | Out-Null";
    }
}

# Display all portproxy information
netsh interface portproxy show v4tov4;

# Give the user a chance to see the list above when relaunched elevated
If ($Args[0] -eq "runas" -Or $Args[1] -eq "runas") {
  Write-Host -NoNewLine 'Press any key to close! ';
  $null = $Host.UI.RawUI.ReadKey('NoEcho,IncludeKeyDown');
}
ssh-keygen
cat ~/.ssh/id_rsa.pub
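# Or copy the public key straight into a server's authorized_keys (user@host is a placeholder):
ssh-copy-id user@host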
git checkout receiving-branch
git merge --squash branch-to-squash
git commit -m 'commit message' # -m, not --amend: the squash merge needs a new commit
git push
git stash --include-untracked # stashing with untracked files
git stash push -m "stash name" # name stash
git stash list # list stashes
git stash pop stash@{n} # pop stash
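git stash apply stash@{n} # (added for completeness) apply a stash without removing it from the list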
python3 manage.py runserver
heroku run python manage.py db upgrade --app name_of_your_application # upgrade db to heroku
heroku local # run locally on heroku and debug
ocrmypdf "$1" "$1"
$ find . -path './test' -prune -o -name 'file_*' -print # -path (not -name) is needed to match and prune the directory
docker run -it -v /var/run/docker.sock:/var/run/docker.sock --volume=/Users/steve/dev/my-project:/my-project --workdir="/node-api" --memory=2g --memory-swap=2g --memory-swappiness=0 --entrypoint=/bin/bash node:9.3.0



set -x
VER="17.12.0-ce"
curl -L -o /tmp/docker-$VER.tgz https://download.docker.com/linux/static/stable/x86_64/docker-$VER.tgz
tar -xz -C /tmp -f /tmp/docker-$VER.tgz
mv /tmp/docker/* /usr/bin
for f in * ; do mv -- "$f" "my-prefix-$f" ; done
Normal version:
echo 'nice12343game' | sed -n 's/nice\(.*\)game/\1/p'

Jenkins version (backslashes doubled because the expression is embedded in a Groovy string):
sed -n 's/.*exited with code \\(.*\\)/\\1/p' stdout
find . -iname '*.jpg' -exec convert \{} -verbose -sampling-factor 4:2:0 -strip -quality 85 \{} \;
find . -iname '*.jpg' -exec convert \{} -verbose -resize 400x400\> \{} \;
$fire.auth().createUserWithEmailAndPassword('email', 'password')
# Accenture - 573.0, ACN
# Microsoft - 20601.0, MSFT
# SalesForce - 69444.0, CRM
# Apple - 2355.0, AAPL
# Alphabet - 116351.0, GOOGL
# Amazon - 1703.0, AMZN
# Tesla - 1650150.0, TSLA
# Facebook - 1820060.0, FB
# Adobe - 808.0, ADBE
# Oracle - 23295.0, ORCL

parm = {'boardids': (573.0, 20601.0, 69444.0, 2355.0, 116351.0, 1703.0, 1650150.0, 1820060.0, 808.0, 23295.0)}

# query
comp_net = conn.raw_sql('SELECT * FROM boardex.na_wrds_company_networks WHERE boardid in %(boardids)s limit 100000', params=parm)
bods = conn.get_table('boardex', 'na_wrds_company_names', columns=['boardid', 'boardname', 'ticker'])
acn_ser = bods['boardname'].str.contains('accenture', regex=True)
# Sum the boolean Series rather than testing 'i is True', which fails for
# numpy booleans; fillna(False) handles rows with missing names.
count = int(acn_ser.fillna(False).sum())
print(count)
ssh suibhne@wrds-cloud.wharton.upenn.edu
CcSgD.V:grKp7Ct
scp -r <username>@<host>:/path/to/source/file /home/user/Desktop/local
Get-Content ~/.ssh/id_rsa.pub | Set-Clipboard
defaults write com.apple.finder AppleShowAllFiles TRUE
killall Finder
defaults write com.apple.dashboard mcx-disabled -boolean YES
killall Dock
defaults write com.apple.screencapture location <location>
killall SystemUIServer
kill -9 $(lsof -ti tcp:8080)
# folder: .idea
git rm --cached -r .idea
# file: myfile.log
git rm --cached myfile.log
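# Then, assuming you also want Git to ignore them going forward:
echo '.idea/' >> .gitignore
echo 'myfile.log' >> .gitignore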
rm -r Directory/
rm -rf Directory/ # -f removes without prompting, even for write-protected files
rm index.html app.js
sudo find / -name "libgsl.so.0"
LD_LIBRARY_PATH=$LD_LIBRARY_PATH:<Location>
export LD_LIBRARY_PATH
samtools view -b -F 4 file.bam > mapped.bam
seqkit head -n 100000 input.fa
seqkit range -r 1:100000 input.fa
samtools view -b -f 4 sample.bam > sample.unmapped.bam
if [ "$#" -ne 1 ]
then
  echo "Usage: ..."
  exit 1
fi
# List all networks a container belongs to
docker inspect -f '{{range $key, $value := .NetworkSettings.Networks}}{{$key}} {{end}}' [container]
# List all containers belonging to a network by name
docker network inspect -f '{{range .Containers}}{{.Name}} {{end}}' [network]
curl -fsSL https://deb.nodesource.com/setup_current.x | sudo -E bash -
sudo apt-get install -y nodejs
use Drupal\Core\DrupalKernel;
use Symfony\Component\HttpFoundation\Request;

if (PHP_SAPI !== 'cli') {
  return;
}

if (version_compare(PHP_VERSION, '5.4.5') < 0) {
  $version = PHP_VERSION;
  echo <<<EOF

ERROR: This script requires at least PHP version 5.4.5. You invoked it with
       PHP version {$version}.
\n
EOF;
  exit;
}
Esc # exit insert mode
:wq | :x # write and quit
gunzip -c /Users/sethshapiro/Downloads/20210305-KochavaTransactions000000000000.gz | awk 'NR==1 {print; exit}'
#!/bin/bash

if [ ! -f composer.json ]; then
    echo "Please make sure to run this script from the root directory of this repo."
    exit 1
fi

composer install
cp .env.example .env
php artisan key:generate
source "$(dirname "$0")/checkout_latest_docs.sh"
npm install
npm run dev
/home/rezaeir/canu-2.1.1/bin/canu -p lambda -d lambda-assembly genomeSize=50000 -nanopore-raw lambda_subsample.fastq.gz
rasusa --input control_lambda_7_2_2021.fastq.gz --coverage 30 --genome-size 50000 --output lambda_subsample.fastq.gz
cat *.fastq.gz > control_lambda_7_2_2021.fastq.gz
./guppy_basecaller --compress_fastq -i <input dir> -s <output dir> --flowcell FLO-MIN106 --kit SQK-LSK109 -x "cuda:0" --gpu_runners_per_device 4 --num_callers 4 --chunks_per_runner 2048 --barcode_kits "EXP-NBD104" --trim_barcodes
for file in *.bam # glob only BAM files so sorted outputs aren't reprocessed
do
file=${file%.bam} # strip just the .bam suffix
samtools sort -@ 14 "$file.bam" > "$file.sorted.bam"
samtools index -@ 14 -b "$file.sorted.bam"
samtools idxstats -@ 14 "$file.sorted.bam" > "$file.idxstats.txt"
done
#!/usr/bin/env python3
import sys
import subprocess

# Rename each file listed in the input file, replacing "jane" with "jdoe";
# the with-block closes the file automatically, so no explicit close() is needed.
with open(sys.argv[1]) as f:
    for line in f:
        old_name = line.strip()
        new_name = old_name.replace("jane", "jdoe")
        subprocess.run(["mv", old_name, new_name])
#!/bin/bash

> oldFiles.txt
files=$(grep " jane " ../data/list.txt | cut -d ' ' -f 3)
for file in $files; do
        if [ -e "$HOME$file" ]; then
                echo "$HOME$file" >> oldFiles.txt
        fi
done
#!/bin/bash

for logfile in /var/log/*log; do
	echo "Processing: $logfile"
    # split each log entry on spaces and keep everything from field five onwards
    cut -d" " -f5- $logfile | sort | uniq -c | sort -nr | head -5
done
#!/bin/bash

for file in *.HTM; do
  name=$(basename "$file" .HTM) # surround the filename with double quotes to account for file names with spaces
  echo mv "$file" "$name.html" # put echo in front when testing to see what the program would do without actually changing anything yet
done
#!/bin/bash

for fruit in peach orange apple; do # we represent a list in Bash by simply listing values/variables with spaces in between
  	echo "I like $fruit!"
done
#!/bin/bash

n=1 # in Bash, there are no spaces allowed when declaring variables
while [ $n -le 5 ]; do # the "[ condition ]"-syntax is equivalent to the "test" command
  echo "Iteration number $n"
  ((n+=1)) # in Bash, we use double parentheses to perform arithmetic operations on variables
done
#!/bin/bash

echo "Starting at: $(date)" # the dollar sign tells the program to execute this file inside of the string and then convert the output of it into a string
echo # print empty line

# add separating line
line="-----------------------------------"

echo "UPTIME"
uptime
echo $line

echo "FREE"
free
echo $line

echo "WHO"; echo; free # you can write commands on the same line separating them by semicolons

echo "Finishing at: $(date)"
# print message
echo "Test!"

# create new directory
mkdir new_directory

# change directory
cd new_directory

# print current working directory
pwd

# copy file
cp ../spider.txt .

# create empty file
touch myfile.txt

# list files and directories in current directory (with additional information using -l, including hidden files -a)
ls
ls -l
ls -la

# combine ls via a pipe to the less command to show only so many entries at a time
ls -l | less # you can quit with "q"

# rename a file
mv myfile.txt emptyfile.txt

# delete all files in the current directory
rm *
  
# delete an empty directory
rmdir new_directory/
  
# list all running processes on the computer
ps ax
ps ax | grep ping # filter process names through grep

# kill a process
kill 4619 # where 4619 is the process ID (PID)
ffmpeg -i input.mp4 -vcodec libx265 -crf 28 output.mp4 
ffmpeg -i input.mkv -codec copy output.mp4
$ find /home/sk/ostechnix/ -type f -printf '%T+ %p\n' | sort | head -n 1
sudo service postgresql status # check db status
sudo service postgresql start # start running db
sudo service postgresql stop # stop running db
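sudo service postgresql restart # restart db, e.g. after a config change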
ffmpeg -ss 6.0 -t 70.0 -i /path/to/file.mp4 -filter_complex "[0:v] fps=12,scale=w=640:h=-1,setpts=0.5*PTS,split [a][b];[a] palettegen [p];[b][p] paletteuse" /path/to/output.gif
#!/bin/bash
#PBS -l nodes=1:ppn=16,walltime=0:00:59
#PBS -l mem=62000mb
#PBS -m abe

bar=${foo}
echo "${bar}"


qsub -v foo='qux' myRunScript.sh

git push heroku master
heroku addons:create heroku-postgresql:hobby-dev # create postgres db in heroku app
heroku run python # run python repl with heroku
from app import db
db.create_all()
curl https://raw.githubusercontent.com/TheRemote/PiBenchmarks/master/Storage.sh | sudo bash
#!/usr/bin/env bash

set -e

# Dotfiles' project root directory
ROOTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Host file location
HOSTS="$ROOTDIR/hosts"
# Main playbook
PLAYBOOK="$ROOTDIR/dotfiles.yml"

# Installs ansible
apt-get update && apt-get install -y ansible

# Runs Ansible playbook using our user.
ansible-playbook -i "$HOSTS" "$PLAYBOOK" --ask-become-pass

exit 0
# login/ssh to the machine that should host the webserver, then run:
> frecklecute hello-world.frecklet --domain example.com

# or, install the remote target machine from your local session:
> frecklecute --target admin@example.com \
      hello-world.frecklet --domain example.com
# configuration, save as: hello-world.frecklet

- static-website-from-folder:
    hostname: "{{:: domain ::}}"     # vhost config
    path: /var/www/html
    webserver: apache
    use_https: true
    server_admin: hello@example.com  # for Let's Encrypt
- file-with-content:
    owner: www-data
    path: /var/www/html/index.html
    content: |
      <h1><i>{{:: domain ::}}</i> says "hello", World!</h1>
pip3 install --trusted-host pypi.org --trusted-host files.pythonhosted.org flask-wtf
sudo mkdir /mnt/z
sudo mount -t drvfs Z: /mnt/z
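# Unmount when finished:
sudo umount /mnt/z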
git branch --merged | egrep -v "(^\*|release|dev)" | xargs git branch -d
# Set URL for your scoped packages.
# For example package with name `@foo/bar` will use this URL for download
npm config set @my-org:registry https://private-gitlab.com/api/v4/projects/<your_project_id>/packages/npm/

# Add the token for the scoped packages URL. Replace <your_project_id>
# with the project where your package is located.
npm config set '//private-gitlab.com/api/v4/projects/<your_project_id>/packages/npm/:_authToken' "<your_token>"
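# Quick sanity check that the scoped registry was set:
npm config get @my-org:registry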
defaults write com.apple.dt.Xcode IDEAdditionalCounterpartSuffixes -array-add "Router" "Interactor" "Builder" && killall Xcode
defaults write com.apple.dt.Xcode IDEAdditionalCounterpartSuffixes -array-add "ViewModel" "View" && killall Xcode
defaults write com.apple.screencapture include-date -bool true
wget https://raw.githubusercontent.com/composer/getcomposer.org/76a7060ccb93902cd7576b67264ad91c8a2700e2/web/installer -O - -q | php -- --quiet
sudo nano /etc/default/grub

# In that file, find the line GRUB_CMDLINE_LINUX_DEFAULT="quiet splash" and replace it with: GRUB_CMDLINE_LINUX_DEFAULT="quiet splash intel_idle.max_cstate=1"
# Save it (CTRL+O)

sudo update-grub
sudo reboot
firebase functions:config:set stripe.secret="STRIPE_SECRET_KEY_HERE"
// BASH
mkdir src
mkdir build
touch src/index.ts
touch .gitignore
touch README.md
tsc --init
npm init -y
npm install express # runtime dependency for the import in src/index.ts
npm install nodemon concurrently @types/express --save-dev

// package.json
...
"scripts": {
  "start:build": "tsc -w",
  "start:run": "nodemon ./build/index.js",
  "start": "concurrently npm:start:*"
},
...

// tsconfig.json
...
"outDir": "./build",
"rootDir": "./src",
...

// .gitignore
node_modules
*.env

// README.md
### Start
```bash
npm run start
```

// src/index.ts
import express from 'express'
const port = 3000
const app = express()

console.log("Hello, World!!!")

logSomething("This is a string that I'm logging")

app.listen(port, () => {
  console.log(`Listening on port ${port}`)
})
touch index.ts
mkdir src
mv index.ts ./src
//Error

PHP Fatal error: Uncaught exception 'ErrorException' with message 'proc_open(): fork failed - Cannot allocate memory' in phar

//Fix

/bin/dd if=/dev/zero of=/var/swap.1 bs=1M count=1024
/sbin/mkswap /var/swap.1
/bin/chmod 0600 /var/swap.1
/sbin/swapon /var/swap.1
hello@nobo-prod-server:~$ sudo lsof -i:8082
COMMAND    PID USER   FD   TYPE DEVICE SIZE/OFF NODE NAME
node\x20/ 2993 root   20u  IPv6  30271      0t0  TCP *:8082 (LISTEN)
hello@nobo-prod-server:~$ ps 2993
  PID TTY      STAT   TIME COMMAND
 2993 ?        Ssl   28:43 node /home/yasas/nobo-angular/server.js
hello@nobo-prod-server:~$
Restricted: no scripts may be executed.

AllSigned: only signed scripts may be executed.

RemoteSigned: scripts downloaded from the Internet must be signed to be executed.
	Scripts created locally on your workstation are not affected and may be executed.

Unrestricted: no restrictions. All scripts may be executed.

show the current execution policy
	Get-ExecutionPolicy
   
change the execution policy
	Set-ExecutionPolicy <mode>

create a user
	New-ADUser 
		-Name <login> 
		-SamAccountName <login> 
		-UserPrincipalName <mail> 
		-AccountPassword (ConvertTo-SecureString -AsPlainText <password> -Force ) 
		-PasswordNeverExpires $true
		-CannotChangePassword $true

enable/disable a user account
	Enable-ADAccount <samaccountname>
	Disable-ADAccount <samaccountname>

run a script
	powershell .\PATH

search for users
	Get-ADUser -Filter *
	Get-ADUser -Filter * | select samAccountName, Name, UserPrincipalName | Export-Csv UserAdUTF8.csv -Encoding UTF8


create a group
	New-ADGroup $groupe -GroupScope Global

add users to a group
	Add-ADGroupMember -identity $groupe -Members <samAccountName>

search for groups
	Get-ADGroup -Filter *

list the users in a group
	Get-ADGroupMember $group | Select-Object name | Export-Csv SEC.csv -Encoding UTF8

IDE for PowerShell
	Notepad.exe
git rev-parse --show-toplevel

This is enough when executed inside a git repo.
From the git rev-parse man page:

--show-toplevel
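# Example: jump to the repository root from anywhere inside the working tree
cd "$(git rev-parse --show-toplevel)"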
# WARNING: This will LOCK ALL PUBLIC REPOSITORIES ON YOUR GITHUB ACCOUNT
#
# dependencies hub and jq
# - hub: hub.github.com
# - jq: https://stedolan.github.io/jq/
#
# A better alternative would be to pipe the repos into a temporary file:
# $ hub api --paginate users/amingilani/repos | jq -r '.[]."full_name"' > repos.txt
# Then manually remove your active repositories
# And archive the remaining:
# $ cat repos.txt | xargs -I {} -n 1 hub api -X PATCH -F archived=true /repos/{}
#
# Anyway, to archive all public repositories in your GitHub account:
#
hub api --paginate users/amingilani/repos | jq -r '.[]."full_name"' | xargs -I {} -n 1 hub api -X PATCH -F archived=true /repos/{}
 https://cronhub.io/ping/1f5e3410-254c-11e8-b61d-55875966d031

#bash
star

Sun Aug 28 2022 23:04:48 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 23:04:03 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 23:00:14 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 22:59:22 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 22:49:43 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 22:44:18 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 22:18:41 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 22:16:57 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 22:11:28 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 12:59:51 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 12:58:32 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 12:52:40 GMT+0000 (Coordinated Universal Time)

#bash #php
star

Sun Aug 28 2022 12:50:20 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 12:24:03 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 12:16:48 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 12:06:35 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 12:01:27 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 12:00:14 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 11:58:58 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 11:52:27 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 11:52:27 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 11:52:27 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 02:09:20 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 02:08:03 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 02:06:51 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 02:04:12 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 02:02:49 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 02:01:23 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 02:00:16 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 01:56:21 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 01:54:33 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 01:52:19 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 01:47:38 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 01:46:30 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 28 2022 01:44:59 GMT+0000 (Coordinated Universal Time)

#bash
star

Sat Aug 20 2022 20:07:50 GMT+0000 (Coordinated Universal Time)

#bash
star

Sat Aug 20 2022 19:57:16 GMT+0000 (Coordinated Universal Time) https://www.computerhope.com/unix/test.htm

#bash
star

Wed Aug 17 2022 23:25:31 GMT+0000 (Coordinated Universal Time)

#bash #cc #aws-cli
star

Wed Aug 17 2022 05:42:53 GMT+0000 (Coordinated Universal Time)

#bash #mysql #slack
star

Wed Aug 17 2022 05:04:20 GMT+0000 (Coordinated Universal Time)

#bash
star

Tue Aug 16 2022 15:21:47 GMT+0000 (Coordinated Universal Time)

#bash #mysql
star

Tue Aug 16 2022 15:21:46 GMT+0000 (Coordinated Universal Time)

#bash #mysql
star

Wed Aug 10 2022 12:25:23 GMT+0000 (Coordinated Universal Time)

#bash
star

Sun Aug 07 2022 02:59:56 GMT+0000 (Coordinated Universal Time) https://stackoverflow.com/questions/4998290/how-to-find-all-file-extensions-recursively-from-a-directory

#bash
star

Sun Aug 07 2022 02:59:42 GMT+0000 (Coordinated Universal Time) https://stackoverflow.com/questions/4998290/how-to-find-all-file-extensions-recursively-from-a-directory

#bash
star
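The code for the two entries above was not captured by the export; a plausible sketch of the standard answer from that Stack Overflow question (GNU userland assumed):

# List every distinct file extension under the current directory
find . -type f -name '*.*' | sed 's|.*\.||' | sort -u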

Sat Aug 06 2022 22:54:33 UTC | #bash
Thu Aug 04 2022 03:44:23 UTC | #bash
Wed Aug 03 2022 14:22-15:12 UTC | 5 x #bash (no source URL)
Fri Jul 29 2022 21:46-21:50 UTC | 2 x #bash (no source URL)
Thu Jul 28 2022 04:40:07 UTC | https://www.baeldung.com/linux/run-shell-script-remote-ssh | #bash #expect
Wed Jul 20 2022 02:02:26 UTC | https://github.com/ickc/bootstrapping-os-environments/tree/master/debian | #bash #apt
Wed Jul 20 2022 02:00:25 UTC | https://github.com/ickc/bootstrapping-os-environments/tree/master/debian | #bash #apt
Wed Jul 20 2022 01:57:28 UTC | https://github.com/Josef-Friedrich/shell-scripts/blob/70bb8c181d748fb9532e7032bb8fd0ed3c754b80/figlet-fonts.sh | #figlet #bash
Wed Jul 20 2022 01:56:59 UTC | https://github.com/Josef-Friedrich/shell-scripts/blob/70bb8c181d748fb9532e7032bb8fd0ed3c754b80/figlet-comment.sh | #figlet #bash
Mon Jul 18 2022 06:52:51 UTC | https://www.digitalocean.com/community/tutorials/how-to-install-the-deno-javascript-runtime-on-ubuntu-20-04 | #bash
Thu Jul 14 2022 06:56:02 UTC | https://stackoverflow.com/questions/20584267/git-remove-in-between-commit | #bash
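The linked question (removing a commit from the middle of history) is normally solved with an interactive rebase; a sketch, assuming the unwanted commit is among the last three:

# Open the last three commits for editing; change the unwanted commit's
# line from "pick" to "drop" (or delete the line), then save
git rebase -i HEAD~3
# If the rebase stops on a conflict, resolve it and continue
git rebase --continue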

Tue Jul 12 2022 07:46:12 UTC | #bash
Mon Jul 11 2022 07:15:33 UTC | https://npm.io/package/@babel/plugin-transform-exponentiation-operator | #bash
Mon Jul 11 2022 07:15:00 UTC | https://react-icons.github.io/react-icons/ | #bash
Sun Jul 10 2022 13:14:59 UTC | https://unix.stackexchange.com/questions/1316/convert-ascii-code-to-hexadecimal-in-unix-shell-script | #bash
Tue Jul 05 2022 10:29:24 UTC | #bash
Sun Jun 26 2022 08:40:16 UTC | #php #laravel #bash #apache #server
Fri Jun 24 2022 14:41:35 UTC | #bash #ionic #angular #capacitor #qrcode #reader #css
Fri Jun 24 2022 14:39:58 UTC | #bash #ionic #angular #capacitor #qrcode #reader #javascript
Fri Jun 24 2022 14:38:23 UTC | #bash #ionic #angular #capacitor
Fri Jun 24 2022 14:35:07 UTC | #python #bash #variables #variáveis
Fri Jun 24 2022 14:33:49 UTC | #python #bash #variables #variáveis
Fri Jun 24 2022 14:31:24 UTC | #python #hello #world #mundo #olá #bash
Mon Jun 20 2022 03:31-03:35 UTC | 6 x #bash (no source URL)
Sat Jun 18 2022 07:30:02 UTC | #bash #shell
Wed Jun 15 2022 07:35:08 UTC | #bash
Fri Jun 10 2022 00:38:38 UTC | #bash #shell #cli
Sun Jun 05 2022 23:43:21 UTC | https://linuxhint.com/comment-multiple-lines-vim/ | #shell #bash #zsh #vim #nvim
Sat Jun 04 2022 16:02:01 UTC | https://www.digitalocean.com/community/tutorials/how-to-set-up-django-with-postgres-nginx-and-gunicorn-on-ubuntu-22-04 | #bash
Tue May 31 2022 18:16:39 UTC | https://unix.stackexchange.com/questions/813/how-to-determine-where-an-environment-variable-came-from | #zsh #bash
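For the environment-variable question above, the usual trick is to replay shell startup with tracing on and grep for the assignment; a sketch (MY_VAR is a placeholder name):

# Trace an interactive login shell's startup files and look for the
# variable's assignment; the same idea works with zsh -xl
bash -lixc : 2>&1 | grep MY_VAR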

Mon May 30 2022 18:16:52 UTC | https://github.com/microsoft/vscode/issues/113869 | #shell #bash #zsh
Sun May 29 2022 17:09:50 UTC | https://semaphoreci.com/community/tutorials/testing-python-applications-with-pytest | #bash
Sun May 29 2022 17:09:47 UTC | https://semaphoreci.com/community/tutorials/testing-python-applications-with-pytest | #bash
Sat May 28 2022 14:42:18 UTC | https://github.com/TousssaintThomas/wren.v1.0.0/settings/actions/runners/new?arch=x64&os=linux | #bash #linux #ubuntu
Fri May 27 2022 13:38:20 UTC | #bash
Mon May 23 2022 07:02:00 UTC | https://stackoverflow.com/questions/10929453/read-a-file-line-by-line-assigning-the-value-to-a-variable | #bash
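The linked answer is the canonical while-read loop; a minimal sketch, with input.txt as a placeholder file:

# Read a file line by line without mangling whitespace or backslashes
while IFS= read -r line; do
  printf '%s\n' "$line"
done < input.txt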

Fri May 20 2022 20:16:08 UTC | https://developer.paypal.com/docs/api/catalog-products/v1/ | #bash
Fri May 20 2022 20:14:26 UTC | https://developer.paypal.com/docs/api/catalog-products/v1/ | #bash
Thu May 19 2022 21:40:31 UTC | https://developers.cloudflare.com/workers/wrangler/get-started/ | #bash
Thu May 19 2022 21:40:25 UTC | https://developers.cloudflare.com/workers/wrangler/get-started/ | #bash
Thu May 19 2022 04:35:21 UTC | https://www.11ty.dev/docs/getting-started/ | #bash
Wed May 18 2022 02:55:41 UTC | https://developers.cloudflare.com/workers/wrangler/get-started/ | #bash
Sun May 15 2022 17:04:29 UTC | #ubuntu #linux #bash #sariohara
Wed May 11 2022 21:47:41 UTC | https://developers.cloudflare.com/workers/wrangler/get-started/ | #bash
Wed May 11 2022 07:05:32 UTC | https://stackoverflow.com/questions/5566310/how-to-recursively-find-and-list-the-latest-modified-files-in-a-directory-with-s | #bash
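A plausible shape of the saved command, per the top answers on that question (GNU find assumed):

# Newest files last: print epoch mtime + path, sort numerically
find . -type f -printf '%T@ %p\n' | sort -n | tail -n 10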

Tue May 10 2022 10:13:05 UTC | #bash
Mon May 09 2022 06:00:24 UTC | #bash
Thu May 05 2022 09:35:21 UTC | https://linuxize.com/post/bash-increment-decrement-variable/ | #bash
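The linked post covers the standard arithmetic idioms; a quick sketch:

i=0
((i++))        # post-increment; exits non-zero when the old value is 0 (beware set -e)
((i--))        # decrement
i=$((i + 5))   # arithmetic expansion
let "i+=1"     # let builtin alternative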

Wed May 04 2022 07:22:25 UTC | https://typeofnan.dev/how-to-stop-all-docker-containers/ | #bash
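The saved one-liner is almost certainly some variant of:

# Stop every running container
docker stop $(docker ps -q)
# Then remove all stopped containers, if desired
docker rm $(docker ps -aq)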

Mon May 02 2022 09:18:08 UTC | #bash
Mon May 02 2022 08:09:04 UTC | https://stackoverflow.com/a/41991368 | #bash
Thu Apr 28 2022 10:07:14 UTC | https://www.freecodecamp.org/news/how-to-delete-a-git-branch-both-locally-and-remotely/ | #bash
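A sketch of the recipe that article describes (my-branch is a placeholder):

# Delete locally (-D force-deletes an unmerged branch)
git branch -d my-branch
# Delete the branch on the remote
git push origin --delete my-branch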

Wed Apr 27 2022 10:51:53 UTC | https://axios-http.com/docs/intro | #bash
Mon Apr 25 2022 22:53:59 UTC | https://blog.jongallant.com/2020/05/azure-roles/ | #bash
Sat Apr 23 2022 20:14:19 UTC | https://simplernerd.com/docker-jupyter-notebook/ | #bash
Fri Apr 22 2022 17:12:36 UTC | https://www.ssls.com/knowledgebase/how-to-install-an-ssl-certificate-on-apache/?gclid | #bash #ssl #linux #apache #certificates
Fri Apr 22 2022 16:54:00 UTC | https://devcenter.heroku.com/articles/ssl-certificate-self | #bash #ssl #linux #unix #certificate
Mon Apr 11 2022 20:00:49 UTC | https://www.tecmint.com/clear-ram-memory-cache-buffer-and-swap-space-on-linux/ | #bash
Tue Apr 05 2022 19:22:41 UTC | https://raspberrytips.nl/dht11-temperatuursensor-aansluiten/ | #bash #raspberrypi
Mon Mar 28 2022 15:24:17 UTC | #bash #wtfutil #slack
Mon Mar 21 2022 01:29:18 UTC | https://medium.com/google-cloud/cloud-functions-to-cloud-run-e297aac28eb8 | #bash
Wed Mar 16 2022 02:14:12 UTC | https://askubuntu.com/questions/519/how-do-i-write-a-shell-script-to-install-a-list-of-applications | #bash #shell
Tue Mar 15 2022 04:10:40 UTC | #ubuntu #bash #ssh
Tue Mar 15 2022 03:32:29 UTC | https://serverfault.com/questions/854208/ssh-suddenly-returning-invalid-format | #ubuntu #bash #ssh
Thu Mar 10 2022 23:49:22 UTC | https://ohmyz.sh/ | #bash
Thu Mar 10 2022 23:45-23:48 UTC | 6 snippets | https://medium.com/carvago-development/my-docker-on-macos-part-1-setup-ubuntu-virtual-machine-both-intel-and-apple-silicon-cpu-5d886af0ebba | #bash
Thu Mar 10 2022 22:01:06 UTC | https://apple.stackexchange.com/questions/58234/override-itunes-media-keys-play-pause-etc-for-spotify | #bash
Wed Mar 09 2022 19:53:36 UTC | https://auth0.com/docs/quickstart/backend/nodejs/01-authorization | #bash
Tue Mar 08 2022 11:33:18 UTC | https://superuser.com/questions/1527402/youtube-dl-download-best-audio-video-embed-thumbnail-and-convert-to-mp4 | #bash
Fri Mar 04 2022 21:20:17 UTC | https://stackoverflow.com/questions/3869072/test-for-non-zero-length-string-in-bash-n-var-or-var | #sh #bash
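The linked question boils down to these tests; a minimal sketch:

[ -n "$var" ] && echo "non-empty"   # true when length > 0
[ -z "$var" ] && echo "empty"       # true when length == 0
[[ -n $var ]] && echo "non-empty"   # bash [[ ]]: quoting is optional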

Thu Mar 03 2022 23:37:21 UTC | #cli #bash
Thu Mar 03 2022 07:16-07:17 UTC | 3 snippets | https://auth0.com/blog/developing-restful-apis-with-python-and-flask/ | #bash
Sat Feb 26 2022 04:37:33 UTC | web.Roblox.com | #java #actionscript3 #dart #css #bash
Tue Feb 15 2022 21:14:57 UTC | https://maltego-downloads.s3.us-east-2.amazonaws.com/linux/Maltego.v4.3.0.linux.zip | #bash #hax
Tue Feb 15 2022 21:14:13 UTC | #bash
Mon Feb 14 2022 20:56:44 UTC | https://askubuntu.com/questions/410244/is-there-a-command-to-list-all-users-also-to-add-delete-modify-users-in-the | #bash
Mon Feb 14 2022 13:26:23 UTC | #bash #git
Mon Feb 14 2022 10:04:08 UTC | https://khalilstemmler.com/blogs/typescript/node-starter-project/ | #bash
Sun Feb 13 2022 10:23:02 UTC | #bash
Fri Feb 11 2022 23:22:04 UTC | #url #zsh #ohmyzsh #terminal #ubuntu #git #shell #bash
Mon Feb 07 2022 09:00-09:48 UTC | 3 x #bash (no source URL)
Sun Feb 06 2022 11:34:48 UTC | https://www.liquidweb.com/kb/how-to-add-a-user-and-grant-root-privileges-on-ubuntu-16-04/ | #linux #commandline #bash
Fri Feb 04 2022 19:41:26 UTC | https://cli.vuejs.org/ | #bash
Thu Feb 03 2022 18:39:15 UTC | https://github.com/h5bp/create-html5-boilerplate | #bash
Wed Feb 02 2022 22:16:48 UTC | https://github.com/jimbrig/dotfiles-wsl/blob/main/scripts/dev/scripts/install-zsh.sh | #installation #linux #bash #wsl
Wed Feb 02 2022 22:16:16 UTC | https://github.com/jimbrig/dotfiles-wsl/blob/main/scripts/dev/scripts/install-rust.sh | #installation #linux #bash #wsl
Wed Feb 02 2022 22:15:45 UTC | https://github.com/jimbrig/dotfiles-wsl/blob/main/scripts/dev/scripts/install-pwsh.sh | #installation #linux #bash #wsl #powershell
Wed Feb 02 2022 22:15:12 UTC | https://github.com/jimbrig/dotfiles-wsl/blob/main/scripts/dev/scripts/install-nvm-node-npm.sh | #installation #linux #bash #wsl
Wed Feb 02 2022 22:14:32 UTC | https://github.com/jimbrig/dotfiles-wsl/blob/main/scripts/dev/scripts/install-nvidia-cuda.sh | #installation #linux #bash #wsl
Wed Feb 02 2022 22:14:04 UTC | https://github.com/jimbrig/dotfiles-wsl/blob/main/scripts/dev/scripts/install-nodesrc.sh | #installation #linux #bash #wsl
Wed Feb 02 2022 22:13:33 UTC | https://github.com/jimbrig/dotfiles-wsl/blob/main/scripts/dev/scripts/install-neovim.sh | #installation #linux #bash #wsl
Wed Feb 02 2022 22:12:47 UTC | https://github.com/jimbrig/dotfiles-wsl/blob/main/scripts/dev/scripts/install-homebrew.sh | #installation #linux #bash #wsl
Wed Feb 02 2022 22:09:23 UTC | https://github.com/jimbrig/dotfiles-wsl/blob/main/scripts/dev/scripts/install-gitkraken.sh | #installation #linux #bash #wsl #git
Wed Feb 02 2022 22:08:42 UTC | https://github.com/jimbrig/dotfiles-wsl/blob/main/scripts/dev/scripts/install-gh-cli.sh | #installation #linux #bash #wsl #github #cli
Wed Feb 02 2022 22:07:52 UTC | https://github.com/jimbrig/dotfiles-wsl/blob/main/scripts/dev/scripts/install-cargo.sh | #installation #linux #bash #wsl #cargo #rust
Wed Feb 02 2022 22:07:13 UTC | https://github.com/jimbrig/dotfiles-wsl/blob/main/scripts/dev/scripts/install-az-cli.sh | #installation #linux #bash #wsl #azure
Wed Feb 02 2022 22:06:40 UTC | https://github.com/jimbrig/dotfiles-wsl/blob/main/scripts/dev/scripts/install-R.sh | #r #installation #linux #bash #wsl
Tue Feb 01 2022 11:05:50 UTC | #bash
Thu Jan 27 2022 17:41:20 UTC | #bash
Mon Jan 24 2022 19:53:09 UTC | #bash #depthai
Sat Jan 22 2022 07:34:20 UTC | #bash
Sat Jan 08 2022 06:07:58 UTC | https://gist.github.com/juliyvchirkov/2c5c8d54b182528e39d4565d1632f8ee#file-magentodocumentroot-sh | #bash #magento #document_root #resolve #shell
Tue Jan 04 2022 01:24:10 UTC | https://xwordpress.org | #bash #windows #wsl #wsl2 #gui
Mon Jan 03 2022 23:14:30 UTC | https://spin.atomicobject.com/2019/05/18/smb-self-hosting-rsync/ | #bash #windows
Mon Dec 27 2021 12:25:33 UTC | https://github.com/microsoft/WSL/issues/4699#issuecomment-627133168 | #bash #powershell
Tue Dec 21 2021 14:38:32 UTC | https://stackoverflow.com/questions/4157189/how-to-git-pull-while-ignoring-local-changes | #bash #git
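The usual answer to that question is fetch-and-reset rather than pull; a sketch with origin/main as a placeholder branch:

# Discard local changes and match the remote branch exactly
git fetch origin
git reset --hard origin/main
git clean -fd   # optionally drop untracked files too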

Mon Dec 20 2021 14:38:13 UTC | https://nodejs.dev/learn/update-all-the-nodejs-dependencies-to-their-latest-version | #bash #npm
Fri Dec 17 2021 17:26:37 UTC | https://www.zigbee2mqtt.io/guide/installation/01_linux.html#determine-location-of-the-adapter-and-checking-user-permissions | #bash #mqtt
Thu Dec 16 2021 17:14:02 UTC | #bash #colors
Thu Dec 16 2021 17:13:34 UTC | #bash #colors
Mon Dec 06 2021 16:51:34 UTC | https://stackoverflow.com/questions/2941517/how-to-fix-committing-to-the-wrong-git-branch | #bash
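One common recipe from that thread, sketched with correct-branch as a placeholder:

git reset --soft HEAD~1     # undo the commit but keep its changes
git stash                   # park the changes
git checkout correct-branch
git stash pop               # bring them back on the right branch
git add .
git commit -m "message"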

Fri Dec 03 2021 12:28:53 UTC | https://developer.twitter.com/en/docs/twitter-for-websites/tweet-button/guides/parameter-reference1 | #bash
Mon Nov 15 2021 04:01:22 UTC | https://raspberrypi.stackexchange.com/questions/46225/adjusting-the-brightness-of-the-official-touchscreen-display | #bash
Sun Nov 07 2021 22:22:10 UTC | #bash
Mon Oct 18 2021 07:57:50 UTC | https://askubuntu.com/questions/1705/how-can-i-create-a-select-menu-in-a-shell-script | #bash #sh #script #shell #shellscript
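The linked answer builds the menu with bash's select builtin; a minimal sketch:

PS3='Please choose: '            # prompt printed by select
select opt in start stop quit; do
  case $opt in
    start) echo "starting...";;
    stop)  echo "stopping...";;
    quit)  break;;
    *)     echo "invalid option";;
  esac
done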

Wed Oct 06 2021 00:42:00 UTC | https://garrett.dev/2018/12/11/docker-sandbox-script/ | #dockerfile #bash
Sun Oct 03 2021 18:44:44 UTC | #bash
Sat Sep 04 2021 08:05:25 UTC | #bash #powershell
Thu Aug 26 2021 17:41:12 UTC | #bash
Tue Aug 17 2021 07:33-07:54 UTC | 9 x #bash (no source URL)
Mon Aug 16 2021 03:55:49 UTC | https://github.com/jbarlow83/OCRmyPDF | #bash
Wed Aug 04 2021 07:39:33 UTC | https://www.linuxshelltips.com/exclude-directory-while-finding-files-in-linux/ | #bash
Fri Jul 30 2021 08:44:21 UTC | https://medium.com/@steveliles/debugging-bitbucket-pipelines-locally-35d13f1adcd5 | #bash
Tue Jul 20 2021 05:02:34 UTC | #bash
Sun Jul 18 2021 19:18:06 UTC | #bash #shell
Thu Jun 24 2021 04:55 UTC | 2 x #bash (no source URL)
Sun Jun 20 2021 10:59:15 UTC | https://vueschool.io/lessons/sign-up-in-or-out-with-firebase-authentication | #bash
Tue Jun 15 2021 02:34:39 UTC | #bash
Sun Jun 13 2021 00:52:05 UTC | #bash
Sat Jun 12 2021 22:26:48 UTC | https://wrds-www.wharton.upenn.edu/pages/support/the-wrds-cloud/using-ssh-connect-wrds-cloud/ | #bash
Fri Jun 11 2021 20:57-21:10 UTC | 9 x #bash (no source URL)
Fri Jun 11 2021 20:52:59 UTC | #bash #php
Fri Jun 11 2021 20:49-20:50 UTC | 2 x #bash (no source URL)
Sun Jun 06 2021 07:32-08:22 UTC | 17 x #bash (no source URL)
Thu May 27 2021 20:17:23 UTC | https://stackoverflow.com/questions/22222666/error-while-loading-shared-libraries-libgsl-so-0-cannot-open-shared-object-fil | #bash
Mon May 24 2021 06:22:23 UTC | https://www.cyberciti.biz/faq/unix-linux-check-if-port-is-in-use-command/ | #bash
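A plausible reconstruction of the commands that page lists (port 8080 is a placeholder):

sudo lsof -i :8080                 # what is listening on the port
sudo ss -tulpn | grep :8080        # modern replacement for netstat
sudo netstat -tulpn | grep :8080   # older systems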

Sun May 23 2021 15:11:19 UTC | https://www.biostars.org/p/56246/ | #bash
Fri May 21 2021 20:40:46 UTC | https://www.biostars.org/p/131837/ | #bash
Sat May 08 2021 18:59:44 UTC | https://www.biostars.org/p/67268/ | #bash
Sat Apr 24 2021 21:04:50 UTC | #bash
Sat Apr 24 2021 21:00:41 UTC | https://askubuntu.com/questions/1705/how-can-i-create-a-select-menu-in-a-shell-script | #bash
Tue Apr 20 2021 17:12:01 UTC | https://maximorlov.com/4-reasons-why-your-docker-containers-cant-talk-to-each-other/ | #bash
Tue Apr 20 2021 17:11:50 UTC | https://maximorlov.com/4-reasons-why-your-docker-containers-cant-talk-to-each-other/ | #bash
Sun Apr 18 2021 16:35:19 UTC | #ubuntu #bash #linux
Tue Apr 06 2021 15:27:19 UTC | #bash
Tue Apr 06 2021 08:53:58 UTC | #bash
Wed Mar 31 2021 14:58:05 UTC | #bash
Wed Mar 31 2021 06:35:25 UTC | https://github.com/laravel/laravel.com-next/blob/master/bin/setup.sh | #bash
Sun Mar 28 2021 12:54:21 UTC | https://www.notion.so/rezaeir/Code-Snippets-f347e2e7bc2d4217854f17cb0a5e0746 | #bash #gzip
Sun Mar 28 2021 12:53:33 UTC | https://www.notion.so/rezaeir/Code-Snippets-f347e2e7bc2d4217854f17cb0a5e0746 | #bash #assembly #canu
Sun Mar 28 2021 12:52:57 UTC | https://www.notion.so/rezaeir/Code-Snippets-f347e2e7bc2d4217854f17cb0a5e0746 | #bash #wgs #subsample #rasusa
Sun Mar 28 2021 12:51:52 UTC | https://www.notion.so/rezaeir/Code-Snippets-f347e2e7bc2d4217854f17cb0a5e0746 | #bash #fastq #join
Sun Mar 28 2021 12:51:14 UTC | https://www.notion.so/rezaeir/Code-Snippets-f347e2e7bc2d4217854f17cb0a5e0746 | #bash #guppy #nanopore #basecall
Sun Mar 28 2021 12:49:54 UTC | https://www.notion.so/rezaeir/Code-Snippets-f347e2e7bc2d4217854f17cb0a5e0746 | #bash #samtools #bam #count
Sun Mar 28 2021 09:18-11:12 UTC | 8 x #bash (no source URL)
Sun Mar 21 2021 04:34-04:37 UTC | 2 x #bash (no source URL)
Sat Mar 20 2021 07:37:59 UTC | https://ostechnix.com/find-oldest-file-directory-tree-linux/ | #bash
Thu Mar 18 2021 08:35:27 UTC | #bash
Tue Mar 16 2021 09:49:01 UTC | https://stackoverflow.com/questions/4181703/how-to-concatenate-string-variables-in-bash | #bash
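The linked question comes down to plain juxtaposition; a minimal sketch:

a="Hello"
b=" World"
c="${a}${b}"   # concatenate by juxtaposition
a+=" World"    # append in place
echo "$c"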

Mon Mar 15 2021 13:38:05 UTC | https://engineering.giphy.com/how-to-make-gifs-with-ffmpeg/ | #bash
Fri Mar 12 2021 21:07:11 UTC | https://stackoverflow.com/a/17068891 | #bash
Fri Mar 12 2021 11:02:42 UTC | #python #bash
Thu Mar 11 2021 12:17:01 UTC | https://stackoverflow.com/questions/54066437/date-month-in-bash-without-leading-0-or-space | #bash
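A sketch of the likely snippet (the padding modifier is GNU date specific):

date +%-m                   # month without the leading zero
echo $((10#$(date +%m)))    # portable fallback; 10# avoids octal parsing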

Sat Mar 06 2021 23:08:44 UTC | https://jamesachambers.com/raspberry-pi-4-usb-boot-config-guide-for-ssd-flash-drives/ | #bash
Wed Mar 03 2021 04:09:21 UTC | https://thebroken.link/managing-dotfiles-with-ansible/ | #bash #ansible
Wed Mar 03 2021 03:43:25 UTC | https://freckles.io/ | #bash #freckles
Wed Mar 03 2021 03:43:09 UTC | https://freckles.io/ | #bash #freckles
Mon Feb 15 2021 16:00:50 UTC | #bash #python
Mon Feb 15 2021 09:17:53 UTC | #bash
Mon Feb 08 2021 16:11:59 UTC | #bash
Wed Jan 13 2021 06:38:23 UTC | #bash
Tue Dec 29 2020 20:45:20 UTC | #bash
Sat Dec 12 2020 01:14:26 UTC | https://macos-defaults.com/xcode/ideadditionalcounterpartsuffixes.html | #bash
Sat Dec 12 2020 01:13:52 UTC | https://macos-defaults.com/screenshots/include-date.html | #bash
Wed Dec 09 2020 00:38:46 UTC | https://getcomposer.org/doc/faqs/how-to-install-composer-programmatically.md | #bash
Sun Dec 06 2020 21:15:55 UTC | https://askubuntu.com/questions/761706/ubuntu-15-10-and-16-04-keep-freezing-randomly | #bash #ubuntu
Wed Dec 02 2020 19:20:04 UTC | #bash
Sun Nov 29 2020 16:44:07 UTC | https://forums.docker.com/t/start-a-gui-application-as-root-in-a-ubuntu-container/17069 | #bash #docker #ubuntu
Wed Nov 18 2020 12:29:43 UTC | https://blog.sodifrance.fr/nmap-commandes-utiles/ | #bash
Wed Nov 18 2020 00:08:27 UTC | https://youtu.be/1__Leh5MBEM | #bash
Sun Nov 08 2020 17:58:52 UTC | #bash
Tue Oct 27 2020 05:59:09 UTC | https://getcomposer.org/doc/articles/troubleshooting.md#proc-open-fork-failed-errors | #bash
Thu Oct 08 2020 05:35:47 UTC | #bash
Tue Oct 06 2020 09:30:19 UTC | #bash
Sun Oct 04 2020 00:52:52 UTC | #shell #bash
Sat Jun 13 2020 08:20:55 UTC | https://www.computerhope.com/unix/signals.htm | #bash #linux
Sat Jun 06 2020 16:35:29 UTC | #bash
Thu Jun 04 2020 19:50:21 UTC | https://stackoverflow.com/questions/12293944/how-to-find-the-path-of-the-local-git-repository-when-i-am-possibly-in-a-subdire | #bash
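The accepted answer there is a one-liner, and almost certainly what was saved:

git rev-parse --show-toplevel   # absolute path of the repo's top-level directory
git rev-parse --git-dir         # location of the .git directory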

Wed Jun 03 2020 13:35:00 UTC | #bash
Fri May 22 2020 13:55:18 UTC | https://docs.cronhub.io/ | #bash
