Snippets Collections
Run df -h to verify your root partition is full (100%)
Run lsblk and then lsblk -f to get block device details
sudo mount -o size=10M,rw,nodev,nosuid -t tmpfs tmpfs /tmp
sudo growpart /dev/DEVICE_ID PARTITION_NUMBER   # e.g. sudo growpart /dev/nvme0n1 1
Run lsblk to verify the partition has expanded
sudo resize2fs /dev/PARTITION_ID   # e.g. sudo resize2fs /dev/nvme0n1p1
Run df -h to verify your resized disk
sudo umount /tmp
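Note: resize2fs only applies to ext2/ext3/ext4 filesystems. If df -T shows the root filesystem is XFS (the Amazon Linux 2 default, for example), grow it with xfs_growfs instead:
sudo xfs_growfs -d /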
# AWS : aws-cli > 2.0

# CREATE AUTHORIZER !!!
# authorizer-name: the authorizer name to assign
# api-id: HTTP API Gateway ID
# audience: the audience accepted by the authorizer
# issuer: the token issuer URL (here, a Cognito user pool)
aws apigatewayv2 create-authorizer \
    --name authorizer-name \
    --api-id api-id \
    --authorizer-type JWT \
    --identity-source '$request.header.Authorization' \
    --jwt-configuration Audience=audience,Issuer=https://cognito-idp.us-east-2.amazonaws.com/userPoolID


# ADD LAMBDA (CUSTOM) AUTHORIZER TO ROUTE !!!
# api-id: HTTP API Gateway ID
# route-id: Gateway Route ID
# authorizer-id: Your authorizer ID created by the command above
aws apigatewayv2 update-route \
   --api-id $api_id  \
   --route-id $route_id  \
   --authorization-type "CUSTOM" \
   --authorizer-id $auth_id    

   
# ADD JWT AUTHORIZER TO ROUTE !!!
# api-id: HTTP API Gateway ID
# route-id: Gateway Route ID
# authorizer-id: Your authorizer ID created by the command above
# authorization-scopes: extra scopes required by the authorizer (optional)
aws apigatewayv2 update-route \
   --api-id api-id  \
   --route-id route-id  \
   --authorization-type JWT \
   --authorizer-id authorizer-id \
   --authorization-scopes user.email   
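To confirm the authorizer is attached, inspect the route afterwards (same api-id and route-id placeholders as above) and check the AuthorizationType and AuthorizerId fields in the output:
aws apigatewayv2 get-route \
   --api-id api-id \
   --route-id route-id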
Resources:
  HelloWorldFunction:
    Type: AWS::Serverless::Function
    Properties:
      PackageType: Image
      Architectures:
        - x86_64
      Policies:
        - AWSSecretsManagerGetSecretValuePolicy:
            SecretArn: !Sub "arn:aws:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:openweatherapi-mSlPys"
      Events:
        HelloWorld:
          Type: Api
          Properties:
            Path: /hello
            Method: get
            
            

  FunctionName:
    Type: AWS::Serverless::Function
    Properties:
      CodeUri: function_name/
      Handler: app.lambda_handler
      Runtime: python3.9
      Architectures:
        - x86_64
      Policies:
        - AWSSecretsManagerGetSecretValuePolicy:
            SecretArn: !Sub "arn:aws:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:openweatherapi-[xxxxxx]"

## AWS SAM CloudWatch Alarm ######################
LambdaFunctionAlarm:
  Type: 'AWS::CloudWatch::Alarm'
  Properties:
    ActionsEnabled: true
    AlarmName: !Sub '${FunctionName}-alarm'
    AlarmActions:
      - !Sub 'arn:aws:sns:${AWS::Region}:${AWS::AccountId}:${SnsTopicName}'
    Namespace: 'AWS/Lambda'
    MetricName: Errors
    Dimensions:
        - Name: FunctionName
          Value: !Sub '${FunctionName}'
    Statistic: Average
    ComparisonOperator: GreaterThanOrEqualToThreshold
    Threshold: 1
    DatapointsToAlarm: 1
    Period: 60
    EvaluationPeriods: 1
phases:
  build:
    commands:
       - composer update --no-interaction --ignore-platform-req=ext-redis
       - bin/console assets:install
       - bin/console pimcore:deployment:classes-rebuild -v -c
       - bin/console cache:clear
artifacts:
  files:
    - '**/*'
import boto3

# Create an S3 client
s3 = boto3.client('s3')

# Set the name of the S3 bucket and the file key
bucket_name = 'my-bucket'
file_key = 'path/to/file.txt'

# Use the S3 client to get the object
response = s3.get_object(Bucket=bucket_name, Key=file_key)

# Extract the file content from the response
file_content = response['Body'].read()

# Print the file content
print(file_content)
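The Body field is a streaming object that returns raw bytes; if the object is text you would typically decode it, for example:
# Decode the bytes, assuming the object is UTF-8 text (adjust the codec as needed)
print(file_content.decode('utf-8'))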
import boto3

# Create an S3 client
s3 = boto3.client('s3')

# Set the name of the S3 bucket and the folder prefix
bucket_name = 'my-bucket'
folder_prefix = 'path/to/folder/'

# Use the S3 client to list the objects in the bucket
result = s3.list_objects_v2(Bucket=bucket_name, Prefix=folder_prefix)

# Extract the list of files from the response
files = [content['Key'] for content in result.get('Contents', [])]

# Print the list of files
print(files)
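list_objects_v2 returns at most 1000 keys per call, so for larger prefixes a paginator is the usual approach; a minimal sketch using the same bucket_name and folder_prefix:
# Paginate through all keys under the prefix (handles more than 1000 objects)
paginator = s3.get_paginator('list_objects_v2')
all_files = []
for page in paginator.paginate(Bucket=bucket_name, Prefix=folder_prefix):
    all_files.extend(content['Key'] for content in page.get('Contents', []))
print(len(all_files))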
git config --local credential.helper '!aws codecommit credential-helper --profile _profile_name_ $@'
git config --local credential.UseHttpPath true
import logging
import boto3
from botocore.exceptions import ClientError
import os


def upload_file(file_name, bucket, object_name=None):
    """Upload a file to an S3 bucket

    :param file_name: File to upload
    :param bucket: Bucket to upload to
    :param object_name: S3 object name. If not specified then file_name is used
    :return: True if file was uploaded, else False
    """

    # If S3 object_name was not specified, use file_name
    if object_name is None:
        object_name = os.path.basename(file_name)

    # Upload the file
    s3_client = boto3.client('s3')
    try:
        response = s3_client.upload_file(file_name, bucket, object_name)
    except ClientError as e:
        logging.error(e)
        return False
    return True
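A minimal usage sketch, with a hypothetical local file and bucket name for illustration:
if __name__ == '__main__':
    # Hypothetical file and bucket names -- replace with your own
    ok = upload_file('report.csv', 'my-example-bucket', 'uploads/report.csv')
    print('Uploaded' if ok else 'Upload failed')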
#!/usr/bin/env bash
# -*- coding: utf-8 -*-
#
#~@filename: install-aws-cli.sh
#~@source: https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html


curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
unzip awscliv2.zip
./aws/install --install-dir /usr/local/aws-cli --bin-dir /usr/local/bin

echo "#-------------------#
#        AWS        #
#-------------------#
BIN: $(which aws)
VERSION:  $(aws --version)
"
import * as nodemailer from "nodemailer";

//config nodemailer transporter using environment variables
const transporter = nodemailer.createTransport({
	host: `${process.env.AWS_SMTP_HOST}` ,
	auth: {
		user: `${process.env.AWS_ACCESS_KEY_ID}`,
		pass: `${process.env.AWS_SECRET_ACCESS_KEY}`
	}
});

export const emailSender = async (data) => {
  try {
    const response = await transporter.sendMail({
      from: process.env.EMAIL_FROM,
      to: process.env.EMAIL_TO || data.email,
      subject: process.env.EMAIL_SUBJECT || data.subject,
      html: `<!DOCTYPE html>
            <html>
            <body>
              ${data.message}
            </body>
            </html>
        `
    });
    return response?.messageId
      	? { success: true }
  		: { success: false, error: "Failed to send email" };
  } catch ( error ){
    console.log("ERROR", error.message);
    return { success: false, error: error.message };
  }
}
import logging

from botocore.exceptions import ClientError
import boto3


def create_bucket(bucket_name, region=None):
    """Create an S3 bucket in a specified region
    If a region is not specified, the bucket is created in the S3 default
    region (us-east-1).
    :param bucket_name: Bucket to create
    :param region: String region to create bucket in, e.g., 'us-west-2'
    :return: True if bucket created, else False
    """

    # Create bucket
    try:
        if region is None:
            s3_client = boto3.client('s3')
            s3_client.create_bucket(Bucket=bucket_name)
        else:
            s3_client = boto3.client('s3', region_name=region)
            location = {'LocationConstraint': region}
            s3_client.create_bucket(Bucket=bucket_name,
                                    CreateBucketConfiguration=location)
    except ClientError as e:
        logging.error(e)
        return False
    return True
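A minimal usage sketch, assuming a hypothetical bucket name (bucket names must be globally unique):
if __name__ == '__main__':
    # Hypothetical bucket name and region -- replace with your own
    if create_bucket('my-example-bucket-123456', region='us-west-2'):
        print('Bucket created')
    else:
        print('Bucket creation failed')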
AWS_ACCESS_KEY_ID = 'your_access_key_id'
AWS_SECRET_ACCESS_KEY = 'your_secret_access_key'
AWS_STORAGE_BUCKET_NAME = 'sibtc-static'
AWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME
AWS_S3_OBJECT_PARAMETERS = {
    'CacheControl': 'max-age=86400',
}
AWS_LOCATION = 'static'
  
STATIC_URL = 'https://%s/%s/' % (AWS_S3_CUSTOM_DOMAIN, AWS_LOCATION)
STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
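These settings rely on the django-storages backend; it typically also needs to be installed and registered in INSTALLED_APPS, roughly like this:
# Assumes: pip install django-storages boto3
INSTALLED_APPS = [
    # ... your existing apps ...
    'storages',
]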
# Standard
import json  # Not jason.
import random  # Fair dice roll.
import time  # As if we could.
import uuid  # Not GUID!

# Boto3/Moto
import boto3  # Client factory of typing doom.
import moto  # Love and sanity for the masses.

queue_name = "EventQueue"
fifo_queue_name = "EventQueueSorted.fifo"
event_types = ["solid", "liquid", "gas"]

# Mock me if you must!
sqs_mocker = moto.mock_sqs()
sqs_mocker.start()

# Set up client.
sqs_client = boto3.client("sqs", region_name="us-east-1")

# Create standard queue for incoming events so that we can batch it up.
sqs_create_queue_result = sqs_client.create_queue(
    QueueName=queue_name,
)

queue_url = sqs_create_queue_result["QueueUrl"]

# Create FIFO queue and allow for deduplication using event ID from
# event provider.
sqs_create_fifo_queue_result = sqs_client.create_queue(
    QueueName=fifo_queue_name,
    Attributes={
        "FifoQueue": "true",  # Stringy boolean!
        "DeduplicationScope": "messageGroup",  # No comment!
        "FifoThroughputLimit": "perMessageGroupId",  # I said no comment!
    },
)

fifo_queue_url = sqs_create_fifo_queue_result["QueueUrl"]

# Make some fake records with jittered timestamps. Ignore how awful this is.
records = [
    {
        "timestamp": int(
            (time.time() * 1000) + random.randint(-10000, 10000),
        ),
        "event_type": random.choice(event_types),
        "event_id": str(uuid.uuid1()),
    }
    for _ in range(10)
]

# Do the hokey pokey
random.shuffle(records)

for record in records:
    print(record)

# {'timestamp': 1628905019640, 'event_type': 'liquid', 'event_id': '1d6d987b-fca0-11eb-9b68-cbb48e88eb48'}
# {'timestamp': 1628905015766, 'event_type': 'liquid', 'event_id': '1d6d987f-fca0-11eb-9b68-cbb48e88eb48'}
# {'timestamp': 1628905019341, 'event_type': 'gas', 'event_id': '1d6d9881-fca0-11eb-9b68-cbb48e88eb48'}
# {'timestamp': 1628905012019, 'event_type': 'solid', 'event_id': '1d6d9880-fca0-11eb-9b68-cbb48e88eb48'}
# {'timestamp': 1628905026503, 'event_type': 'liquid', 'event_id': '1d6d987c-fca0-11eb-9b68-cbb48e88eb48'}
# {'timestamp': 1628905024388, 'event_type': 'gas', 'event_id': '1d6d9882-fca0-11eb-9b68-cbb48e88eb48'}
# {'timestamp': 1628905017491, 'event_type': 'gas', 'event_id': '1d6d9883-fca0-11eb-9b68-cbb48e88eb48'}
# {'timestamp': 1628905013437, 'event_type': 'solid', 'event_id': '1d6d987e-fca0-11eb-9b68-cbb48e88eb48'}
# {'timestamp': 1628905012744, 'event_type': 'solid', 'event_id': '1d6d987d-fca0-11eb-9b68-cbb48e88eb48'}
# {'timestamp': 1628905010779, 'event_type': 'gas', 'event_id': '1d6d987a-fca0-11eb-9b68-cbb48e88eb48'}

# Pretend we are uberkitten.io and we are sending you 10 out-of-order
# records.

entries = []

for record in records:
    sqs_send_message_result = sqs_client.send_message(
        QueueUrl=queue_url,
        MessageBody=json.dumps(record),
    )

# ... Pretend we pooled up a bunch of messages over 300 seconds.

# With a Lambda-based subscription we can pool thousands of messages
# over a very long period, up to a 1MB payload... here in moto land
# we don't have that luxury, but we can imitate it.

sqs_receiver_message_result = sqs_client.receive_message(
    QueueUrl=queue_url,
    MaxNumberOfMessages=10,
)

# ... Pretend we are a LAMBDA that is now receiving a ton of messages.

# Note: This would usually be a "Records" event and use different key
# casing just because.. yeh.

messages = sqs_receiver_message_result["Messages"]

# Convert body back to JSON for all the messages before we go on.
for message in messages:
    message["DecodedBody"] = json.loads(message["Body"])

# I'm still undecided about MSG
messages.sort(key=lambda message: message["DecodedBody"]["timestamp"])

entries = []

# Iterate through messages and create a new bulk of entries from the
# newly sorted list.
for message in messages:
    record = message["DecodedBody"]
    entries.append(
        {
            "Id": str(uuid.uuid1()),
            "MessageBody": message["Body"],
            "MessageGroupId": record["event_type"],
            "MessageDeduplicationId": record["event_id"],
        }
    )

# Enqueue FiFoables.
sqs_send_message_batch_results = sqs_client.send_message_batch(
    QueueUrl=fifo_queue_url,
    Entries=entries,
)

# ... Pretend we pooled up the maximum batch size for a FIFO queue..
# which is not hard to pretend at all.

sqs_receiver_message_result = sqs_client.receive_message(
    QueueUrl=fifo_queue_url,
    MaxNumberOfMessages=10,
)

# ... Pretend we are ANOTHER LAMBDA that will be processing the "sorted"
# messages.

messages = sqs_receiver_message_result["Messages"]

records_by_event_type = {}

for message in messages:
    message["DecodedBody"] = json.loads(message["Body"])
    record = message["DecodedBody"]

    records_by_event_type.setdefault(record["event_type"], [])
    records_by_event_type[record["event_type"]].append(record)


# And now demonstrate how out of control an article script can get.
for event_type, records in records_by_event_type.items():
    print(event_type)
    for record in records:
        print(record)

# gas
# {'timestamp': 1628905010779, 'event_type': 'gas', 'event_id': '1d6d987a-fca0-11eb-9b68-cbb48e88eb48'}
# {'timestamp': 1628905017491, 'event_type': 'gas', 'event_id': '1d6d9883-fca0-11eb-9b68-cbb48e88eb48'}
# {'timestamp': 1628905019341, 'event_type': 'gas', 'event_id': '1d6d9881-fca0-11eb-9b68-cbb48e88eb48'}
# {'timestamp': 1628905024388, 'event_type': 'gas', 'event_id': '1d6d9882-fca0-11eb-9b68-cbb48e88eb48'}
# solid
# {'timestamp': 1628905012019, 'event_type': 'solid', 'event_id': '1d6d9880-fca0-11eb-9b68-cbb48e88eb48'}
# {'timestamp': 1628905012744, 'event_type': 'solid', 'event_id': '1d6d987d-fca0-11eb-9b68-cbb48e88eb48'}
# {'timestamp': 1628905013437, 'event_type': 'solid', 'event_id': '1d6d987e-fca0-11eb-9b68-cbb48e88eb48'}
# liquid
# {'timestamp': 1628905015766, 'event_type': 'liquid', 'event_id': '1d6d987f-fca0-11eb-9b68-cbb48e88eb48'}
# {'timestamp': 1628905019640, 'event_type': 'liquid', 'event_id': '1d6d987b-fca0-11eb-9b68-cbb48e88eb48'}
# {'timestamp': 1628905026503, 'event_type': 'liquid', 'event_id': '1d6d987c-fca0-11eb-9b68-cbb48e88eb48'}

# How dare you mock me!
sqs_mocker.stop()
docker login -u AWS -p <password> -e none https://<aws_account_id>.dkr.ecr.<region>.amazonaws.com
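Note: newer Docker releases no longer accept the -e flag; with AWS CLI v2 the ECR login is typically done by piping a token instead:
aws ecr get-login-password --region <region> | docker login --username AWS --password-stdin <aws_account_id>.dkr.ecr.<region>.amazonaws.com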
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "PublicReadGetObject",
            "Effect": "Allow",
            "Principal": "*",
            "Action": [
                "s3:GetObject"
            ],
            "Resource": [
                "arn:aws:s3:::Bucket-Name/*"
            ]
        }
    ]
}
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

class hadoop_hive {

  class deploy ($roles) {

    if ('hive-client' in $roles) {
      include hadoop_hive::client
    }

    if ('hive-metastore-server' in $roles) {
      include hadoop_hive::metastore
    }

    if ('hive-server2' in $roles) {
      include hadoop_hive::server2
      if ('hive-metastore-server' in $roles) {
        Class['Hadoop_hive::Metastore'] -> Class['Hadoop_hive::Server2']
      }
    }

    if ('hive-hbase' in $roles) {
      include hadoop_hive::hbase
    }

    # Need to make sure local mysql server is setup correctly (in case hive is
    # using it) before initializing the schema
    if ('hive-client' in $roles or 'hive-metastore-server' in $roles or 'hive-server2' in $roles) {
      if ('mysql-server' in $roles) {
        Class['Bigtop_mysql::Server'] -> Exec<| title == 'init hive-metastore schema' |>
      }
    }
  }

  class client_package {
    package { "hive":
      ensure => latest,
    }
  }

  class hive_keytab {
    include hadoop_hive::client_package
    require kerberos::client
    kerberos::host_keytab { "hive":
      spnego => true,
      require => Package["hive"],
    }
  }

  class common_config ($hbase_master = "",
                       $hbase_zookeeper_quorum = "",
                       $kerberos_realm = "",
                       $server2_thrift_port = "10000",
                       $server2_thrift_http_port = "10001",
                       $hive_execution_engine = "mr",
                       $metastore_server_uris = [],
                       $metastore_database_type = 'postgres',
                       $metastore_database_host = $fqdn,
                       $metastore_database_port = '5432',
                       $metastore_database_name = 'hive',
                       $metastore_database_user = 'postgres',
                       $metastore_database_password = 'root1234',
                       $hdfs_uri = undef,
                       $hive_env_overrides = {},
                       $hive_site_overrides = {},
                       $hive_log4j2_overrides = {},
                       $hive_exec_log4j2_overrides = {},
                       $hive_beeline_log4j2_overrides = {},
                       $hive_parquet_logging_overrides = {},
                       $hiveserver2_site_overrides = {},
                       $hive_llap_daemon_log4j2_overrides = {},
                       $user_log_dir = undef,
                       $java_tmp_dir = undef,
                       $use_dynamodb = false,
                       $use_aws_hm_client = false,
                       $use_emr_goodies = false,
                       $use_emr_s3_select = false,
                       $use_kinesis = false,
                       $use_hudi = false) {
    include hadoop_hive::client_package
    if ($kerberos_realm and $kerberos_realm != "") {
      include hadoop_hive::hive_keytab
    }

    $sticky_dirs = delete_undef_values([$java_tmp_dir, $user_log_dir])

    file { $sticky_dirs :
      ensure => "directory",
      owner  => "root",
      group  => "root",
      mode   => "1777",
      require => Package['hive']
    }

    if ($use_dynamodb) {
      include emr_ddb::library

      file { '/usr/lib/hive/auxlib/emr-ddb-hive.jar':
        ensure  => link,
        target  => '/usr/share/aws/emr/ddb/lib/emr-ddb-hive.jar',
        tag     => 'hive-aux-jar',
        require => [Package['emr-ddb'], Package['hive']]
      }
    }

    if ($use_aws_hm_client) {
      include aws_hm_client::library

      file { '/usr/lib/hive/auxlib/aws-glue-datacatalog-hive2-client.jar':
        ensure  => link,
        target  => '/usr/share/aws/hmclient/lib/aws-glue-datacatalog-hive2-client.jar',
        tag     => 'hive-aux-jar',
        require => [Package['aws-hm-client'], Package['hive']]
      }

      file { '/usr/lib/hive/auxlib/hive-openx-serde.jar':
        ensure  => link,
        target  => '/usr/share/java/Hive-JSON-Serde/hive-openx-serde.jar',
        tag     => 'hive-aux-jar',
        require => [Package['aws-hm-client'], Package['hive']]
      }
    }

    if ($use_emr_s3_select) {
      include emr_s3_select::library

      file { '/usr/lib/hive/auxlib/emr-s3-select-hive-connector.jar':
        ensure  => link,
        target  => '/usr/share/aws/emr/s3select/lib/emr-s3-select-hive-connector.jar',
        tag     => 'hive-aux-jar',
        require => [Package['emr-s3-select'], Package['hive']]
      }
    }

    if ($use_emr_goodies) {
      include emr_goodies::library

      file { '/usr/lib/hive/auxlib/emr-hive-goodies.jar':
        ensure  => link,
        target  => '/usr/share/aws/emr/goodies/lib/emr-hive-goodies.jar',
        tag     => 'hive-aux-jar',
        require => [Package['emr-goodies'], Package['hive']]
      }
    }

    if ($use_kinesis) {
      include emr_kinesis::library

      file { '/usr/lib/hive/auxlib/emr-kinesis-hive.jar':
        ensure  => link,
        target  => '/usr/share/aws/emr/kinesis/lib/emr-kinesis-hive.jar',
        tag     => 'hive-aux-jar',
        require => [Package['emr-kinesis'], Package['hive']]
      }
    }

    if ($use_hudi) {
      include hudi::library

      file { '/usr/lib/hive/auxlib/hudi-hadoop-mr-bundle.jar':
        ensure  => link,
        target  => '/usr/lib/hudi/hudi-hadoop-mr-bundle.jar',
        tag     => 'hive-aux-jar',
        require => [Package['hudi'], Package['hive']]
      }
    }

    $metastore_database_url = generate_metastore_url(
      $metastore_database_type,
      $metastore_database_host,
      $metastore_database_port,
      $metastore_database_name
    )
    $metastore_database_driver_class = get_metastore_driver_class($metastore_database_type)
    $metastore_database_schema_type = get_metastore_schema_type($metastore_database_type)

    bigtop_file::site { '/etc/hive/conf/hive-site.xml':
      content => template('hadoop_hive/hive-site.xml'),
      overrides => $hive_site_overrides,
      require => Package['hive'],
    }

    bigtop_file::site { '/etc/hive/conf/hiveserver2-site.xml':
      content => template('hadoop_hive/hiveserver2-site.xml'),
      overrides => $hiveserver2_site_overrides,
      require => Package['hive'],
    }

    bigtop_file::properties { '/etc/hive/conf/hive-log4j2.properties':
      content => template('hadoop_hive/hive-log4j2.properties'),
      overrides => $hive_log4j2_overrides,
      require => Package['hive'],
    }

    bigtop_file::properties { '/etc/hive/conf/hive-exec-log4j2.properties':
      source => '/etc/hive/conf.dist/hive-exec-log4j2.properties.default',
      overrides => $hive_exec_log4j2_overrides,
      require => Package['hive'],
    }

    bigtop_file::properties { '/etc/hive/conf/beeline-log4j2.properties':
      source => '/etc/hive/conf.dist/beeline-log4j2.properties.default',
      overrides => $hive_beeline_log4j2_overrides,
      require => Package['hive'],
    }

    bigtop_file::properties { '/etc/hive/conf/parquet-logging.properties':
      source => '/etc/hive/conf.dist/parquet-logging.properties.default',
      overrides => $hive_parquet_logging_overrides,
      require => Package['hive'],
    }

    bigtop_file::properties { '/etc/hive/conf/llap-daemon-log4j2.properties':
      source => '/etc/hive/conf.dist/llap-daemon-log4j2.properties.default',
      overrides => $hive_llap_daemon_log4j2_overrides,
      require => Package['hive'],
    }

    bigtop_file::env { '/etc/hive/conf/hive-env.sh':
      overrides => $hive_env_overrides,
      content => template('hadoop_hive/hive-env.sh'),
      require => Package['hive'],
    }
    

    include hadoop_hive::init_metastore_schema
  }

  class client($hbase_master = "",
      $hbase_zookeeper_quorum = "",
      $hive_execution_engine = "mr") {

      include hadoop_hive::common_config
  }

  class server2 {
    include hadoop_hive::common_config

    package { 'hive-server2':
      ensure => latest,
    }

    service { 'hive-server2':
      ensure    => running,
      require   => [Package['hive'], Package['hive-server2'], Class['Hadoop_hive::Init_metastore_schema']],
      subscribe => [Bigtop_file::Site['/etc/hive/conf/hive-site.xml'], Bigtop_file::Env['/etc/hive/conf/hive-env.sh']],
      hasrestart => true,
      hasstatus => true,
    }
    Kerberos::Host_keytab <| title == "hive" |> -> Service["hive-server2"]
    Service <| title == "hive-metastore" |> -> Service["hive-server2"]
    File <| tag == 'hive-aux-jar' |> -> Service['hive-server2']
    Bigtop_file::Env <| title == '/etc/hadoop/conf/hadoop-env.sh' |> ~> Service['hive-server2']
    Bigtop_file::Site <| tag == 'hadoop-plugin' or title == '/etc/hadoop/conf/core-site.xml' |> ~> Service['hive-server2']
  }

  class metastore {
    include hadoop_hive::common_config

    package { 'hive-metastore':
      ensure => latest,
    }

    service { 'hive-metastore':
      ensure    => running,
      require   => [Package['hive'], Package['hive-metastore'], Class['Hadoop_hive::Init_metastore_schema']],
      subscribe => [Bigtop_file::Site['/etc/hive/conf/hive-site.xml'], Bigtop_file::Env['/etc/hive/conf/hive-env.sh']],
      hasrestart => true,
      hasstatus => true,
    }
    Kerberos::Host_keytab <| title == "hive" |> -> Service["hive-metastore"]
    File <| title == "/etc/hadoop/conf/core-site.xml" |> -> Service["hive-metastore"]
    File <| tag == 'hive-aux-jar' |> -> Service['hive-metastore']
    Bigtop_file::Env <| title == '/etc/hadoop/conf/hadoop-env.sh' |> ~> Service['hive-metastore']
    Bigtop_file::Site <| tag == 'hadoop-plugin' or title == '/etc/hadoop/conf/core-site.xml' |> ~> Service['hive-metastore']
  }

  class database_connector {
    include hadoop_hive::common_config

    case $common_config::metastore_database_type {
      'mysql': {
        mysql_connector::link {'/usr/lib/hive/lib/mysql-connector-java.jar':
          require => Package['hive'],
        }
      }
      'mariadb': {
        mariadb_connector::link {'/usr/lib/hive/lib/mariadb-connector-java.jar':
          require => Package['hive'],
        }
      }
      'postgres': {
        postgresql_connector::link {'/usr/lib/hive/lib/postgresql-9.4.1208.jre7.jar':
          require => Package['hive'],
        }
      }
      'derby': {
        # do nothing
      }
      default: {
        fail("$common_config::metastore_database_type is not supported. Supported database types are ", $common_config::supported_database_types)
      }
    }
  }

  class init_metastore_schema($init_schema = true, $skip_init_schema = false) {

    include hadoop_hive::common_config
    include hadoop_hive::database_connector
    
    if (! $skip_init_schema) {
      if ($init_schema) {
        exec { 'init hive-metastore schema':
          command   => "/usr/lib/hive/bin/schematool -dbType postgres -initSchema -verbose",
          require   => [Package['hive'], Class['Hadoop_hive::Database_connector']],
          subscribe => [Bigtop_file::Site['/etc/hive/conf/hive-site.xml'], Bigtop_file::Env['/etc/hive/conf/hive-env.sh']],
          logoutput => true,
          unless    => "/usr/lib/hive/bin/schematool -dbType postgres -info",
          tries     => hiera('hadoop::ha', 'disabled') ? {"auto" => 10, default => 1},
          try_sleep => 5,
        }
      } else {
        exec { 'get hive-metastore info':
          command   => "/usr/lib/hive/bin/schematool -dbType postgres -info",
          require   => [Package['hive'], Class['Hadoop_hive::Database_connector']],
          subscribe => [Bigtop_file::Site['/etc/hive/conf/hive-site.xml'], Bigtop_file::Env['/etc/hive/conf/hive-env.sh']],
          logoutput => true,
          tries     => 120,
          try_sleep => 5,
        }
      }
    }
  }

  class hbase {
    package { 'hive-hbase':
      ensure => latest,
    }
  }
}
[
    {
        "Classification": "hive-site",
        "Properties": {
            "javax.jdo.option.ConnectionUserName": "postgres",
            "javax.jdo.option.ConnectionDriverName": "org.postgresql.Driver",
            "javax.jdo.option.ConnectionPassword": "root1234",
            "javax.jdo.option.ConnectionURL": "jdbc:postgresql://database-1.cxv0eh6uhsan.us-east-1.rds.amazonaws.com:5432/hive_db"
        }
    },
    {
        "Classification": "hue-ini",
        "Properties": {},
        "Configurations": [
            {
                "Classification": "desktop",
                "Properties": {},
                "Configurations": [
                    {
                        "Classification": "database",
                        "Properties": {
                            "password": "root1234",
                            "engine": "postgres",
                            "port": "5432",
                            "host": "database-1.cxv0eh6uhsan.us-east-1.rds.amazonaws.com",
                            "name": "hue_db",
                            "user": "postgres"
                        }
                    }
                ]
            }
        ]
    },
    {
        "Classification": "oozie-site",
        "Properties": {
            "oozie.service.JPAService.jdbc.password": "root1234",
            "oozie.service.JPAService.jdbc.url": "jdbc:postgresql://database-1.cxv0eh6uhsan.us-east-1.rds.amazonaws.com:5432/oozie_db",
            "oozie.service.JPAService.jdbc.driver": "org.postgresql.Driver",
            "oozie.service.JPAService.jdbc.username": "postgres"
        }
    }
]
#!/bin/bash
sudo yum install -y gcc python-setuptools python-devel postgresql-devel
sudo easy_install psycopg2
sudo python -m pip install psycopg2-binary
sudo yum install postgresql -y
PGPASSWORD=root1234 psql --username=postgres --host=database-1.cxv0eh6uhsan.us-east-1.rds.amazonaws.com --command='CREATE DATABASE hive_db'
PGPASSWORD=root1234 psql --username=postgres --host=database-1.cxv0eh6uhsan.us-east-1.rds.amazonaws.com --command='CREATE DATABASE oozie_db'
PGPASSWORD=root1234 psql --username=postgres --host=database-1.cxv0eh6uhsan.us-east-1.rds.amazonaws.com --command='create database hue_db with lc_collate="en_US.UTF-8"'
cd /var/aws/emr/bigtop-deploy/puppet/modules/hadoop_hive/manifests
File="init.pp"
if [ -f "$File" ]; then
  mkdir /home/hadoop/code
  sudo mv "$File" /home/hadoop/code/
  aws s3 cp s3://nkityd-bucket/init.pp /home/hadoop/
  sudo cp /home/hadoop/init.pp "$File"
fi
echo "Successful"
package com.amazonaws.emr.knet;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WordCount {

  public static class TokenizerMapper
       extends Mapper<Object, Text, Text, IntWritable>{

    private final static IntWritable one = new IntWritable(1);
    private Text word = new Text();

    public void map(Object key, Text value, Context context
                    ) throws IOException, InterruptedException {
    	
      StringTokenizer itr = new StringTokenizer(value.toString());
      while (itr.hasMoreTokens()) {
        word.set(itr.nextToken());
        context.write(word, one);
      }
    }
  }

  public static class IntSumReducer
       extends Reducer<Text,IntWritable,Text,IntWritable> {
    private IntWritable result = new IntWritable();

    public void reduce(Text key, Iterable<IntWritable> values,
                       Context context
                       ) throws IOException, InterruptedException {
      int sum = 0;
      for (IntWritable val : values) {
        sum += val.get();
      }
      result.set(sum);
      context.write(key, result);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Job job = Job.getInstance(conf, "word count");
    job.setJarByClass(WordCount.class);
    job.setMapperClass(TokenizerMapper.class);
    job.setCombinerClass(IntSumReducer.class);
    job.setReducerClass(IntSumReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);
    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <groupId>com.amazonaws.emr</groupId>
  <artifactId>knet</artifactId>
  <version>0.0.1-SNAPSHOT</version>
  <name>firstMapReduce</name>
  
   <properties>
    <maven-compiler-plugin.version>3.1</maven-compiler-plugin.version>
    <java.version>1.8</java.version>
    <hadoop.version>2.8.3</hadoop.version>
  </properties>
  <dependencies>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-common</artifactId>
      <version>${hadoop.version}</version>
      <scope>provided</scope>
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-mapreduce-client-common</artifactId>
      <version>${hadoop.version}</version>
      <scope>provided</scope>
    </dependency>
  </dependencies>
  <build>
    <plugins>
      <plugin>
        <artifactId>maven-compiler-plugin</artifactId>
        <version>${maven-compiler-plugin.version}</version>
        <configuration>
          <source>${java.version}</source>
          <target>${java.version}</target>
        </configuration>
      </plugin>
    </plugins>
  </build>
  
</project>
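A possible way to run the packaged job (artifact name inferred from the pom above; the input and output paths are placeholders):
hadoop jar target/knet-0.0.1-SNAPSHOT.jar com.amazonaws.emr.knet.WordCount s3://your-bucket/input/ s3://your-bucket/output/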
scp -i /directory/to/abc.pem /your/local/file/to/copy  user@ec2-xx-xx-xxx-xxx.compute-1.amazonaws.com:path/to/file
scp -i /directory/to/abc.pem user@ec2-xx-xx-xxx-xxx.compute-1.amazonaws.com:path/to/file  /your/local/directory/files/to/download
yarn node -list -showDetails
yarn application -kill application_id
yarn jar /usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar pi 5 10
hadoop jar /usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar terasort /nkityd/teragendata /nkityd/teragensorteddata/
hadoop jar /usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar teragen 10000000 /nkityd/teragendata
mysql -h ip-172-31-39-192.ec2.internal  -u hive -p 
beeline -u "jdbc:hive2://localhost:10000/default" -n hdfs
"""
A simple example demonstrating basic Spark SQL features using fictional
data inspired by a paper on determining the optimum length of chopsticks.
https://www.ncbi.nlm.nih.gov/pubmed/15676839
Run with:
  ./bin/spark-submit OptimumChopstick.py
"""
from __future__ import print_function
from pyspark.sql import SparkSession
from pyspark.sql.types import *
from pyspark.storagelevel import StorageLevel
# rdd.persist(StorageLevel.MEMORY_ONLY_SER)

# Get avg food pinching efficiency by length
def AvgEffeciencyByLength(df):
    meansDf = df.groupby('ChopstickLength').mean('FoodPinchingEffeciency').orderBy('avg(FoodPinchingEffeciency)', ascending=0)
    return meansDf

# init
spark = SparkSession.builder.appName("Optimum Chopstick").getOrCreate()
sc = spark.sparkContext
input_loc = "s3://llubbe-gdelt-open-data/ChopstickEffeciency/"

# Read input by line
lines = sc.textFile(input_loc)
parts = lines.map(lambda l: l.split(","))
parts.persist(StorageLevel.MEMORY_ONLY_SER)
# Each line is converted to a tuple.
chopstickItems = parts.map(lambda p: (str(p[0]), float(p[1]), int(p[2]), int(p[3].strip())))

# Define a schema
fields = [StructField("TestID", StringType()),
          StructField("FoodPinchingEffeciency", DoubleType()),
          StructField("Individual", IntegerType()),
          StructField("ChopstickLength", IntegerType())]
schema = StructType(fields)

# Apply the schema to the RDD
chopsticksDF = spark.createDataFrame(chopstickItems, schema)

effeciencyByLength = AvgEffeciencyByLength(chopsticksDF)
effeciencyByLength.distinct().count()

moar_chopsticksDF = spark.read.load(input_loc, format="csv", schema=schema)
moar_effeciencyByLength = AvgEffeciencyByLength(moar_chopsticksDF)
moar_effeciencyByLength.distinct().count()
spark.stop()
ssh -i ~/Documents/nkityd.pem hadoop@ec2-54-236-239-19.compute-1.amazonaws.com cat /etc/hive/conf/hive-site.xml | grep "thrift://" | sed 's/\<value>//g' | sed 's/\<\/value>//g' | awk '{print "HMS URI: " $1 }'
aws --profile default ec2 describe-images --owners self --query 'Images[*].[Tags[?Key==`ImageType`] | [0].Value]'
kubectl describe configmap -n kube-system aws-auth
cat << EoF > username_cred.sh
export AWS_SECRET_ACCESS_KEY=$(jq -r .AccessKey.SecretAccessKey /tmp/create_output.json)
export AWS_ACCESS_KEY_ID=$(jq -r .AccessKey.AccessKeyId /tmp/create_output.json)
EoF
aws iam create-access-key --user-name username | tee /tmp/create_output.json
aws iam create-user --user-name username
kubectl run frontend --image=nginx --port=80
kubectl get pods -o yaml | grep -C 5 labels:
aws sts get-caller-identity