Snippets Collections
import random
import numpy as np
def create_square_array(n):
    return np.array([random.randint(0, n**2) for _ in range(n * n)]).reshape(n, n)
create_square_array(8)
from google.colab import drive
drive.mount('/content/drive')
from PIL import Image  
import matplotlib.pyplot as plt
%matplotlib inline

img_path = "/content/drive/MyDrive/Imagenes/"
file = img_path + '6.jpg'

I = Image.open(file)
plt.imshow(I)
plt.title('Image_1')
plt.axis('off')
plt.show()
from google.colab import drive
drive.mount('/content/drive', force_remount=True)
root_dir = "/content/drive/My Drive/"
base_dir = root_dir + '[specific_directory_name]'
print(base_dir)
# Build a Generative Adversarial Network (GAN) - Deep Learning with PyTorch

import torch
import numpy as np
import matplotlib.pyplot as plt

## Configurations

device = 'cuda' # image = image.to(device)

batch_size = 128 # trainloader, training loop

noise_dim = 64 # generator model

# optimizer parameters
lr = 0.0002
beta_1 = 0.5
beta_2 = 0.99

# training variables
epochs = 50

## Load MNIST dataset

Load the MNIST dataset from torchvision.

from torchvision import datasets, transforms as T

train_augs = T.Compose([
                        T.RandomRotation((-20, +20)),
                        T.ToTensor()
])

# image format -> (height, width, channel)
# tensor format of the image -> (channel, height, width)

trainset = datasets.MNIST('MNIST/', download = True, train=True, transform = train_augs)

trainset

image, label = trainset[50]

plt.imshow(image.squeeze(), cmap='gray')
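As a quick check (assuming the image and label loaded above), printing the shape confirms the (channel, height, width) tensor format noted in the comments above:

print(image.shape)   # expected: torch.Size([1, 28, 28]) -> (channel, height, width)
print('label:', label)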

## Load dataset into batches

from torch.utils.data import DataLoader
from torchvision.utils import make_grid

trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True)

len(trainloader) # number of batches -> ceil(60000/128) = 469

### Python iter()

The Python iter() function returns an iterator for the given object.

The iter() function creates an object whose elements can be consumed one at a time.

Iterators are useful when combined with loops such as for and while loops.

The syntax of the iter() function is:

iter(object[, sentinel])
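A minimal illustration of both call forms (toy values, not part of the snippet above):

nums = iter([1, 2, 3])
print(next(nums))   # 1
print(next(nums))   # 2

import random
random.seed(0)
# two-argument form: call the lambda repeatedly until it returns the sentinel value 5
for value in iter(lambda: random.randint(0, 9), 5):
    print(value)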

dataiter = iter(trainloader)

images, _ = next(dataiter)  # dataiter.next() was removed in recent PyTorch versions

print(images.shape)

### Function to plot some images from a batch

def show_tensor_images(tensor_img, num_images=16, nrow=4, size=(1,28,28)):
  unflat_img = tensor_img.detach().cpu()
  img_grid = make_grid(unflat_img[:num_images], nrow=nrow)
  plt.imshow(img_grid.permute(1,2,0).squeeze())
  plt.show()

show_tensor_images(images, num_images=32, nrow=8)

# if we don't pass these arguments, the function runs with its default values
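For instance, calling it without extra arguments (reusing the images batch drawn above) falls back to the defaults:

show_tensor_images(images)   # default num_images=16, nrow=4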

## Create Discriminator Network

from torch import nn
from torchsummary import summary

from torch.nn.modules.activation import LeakyReLU
from torch.nn.modules.batchnorm import BatchNorm2d

def get_discriminator_block(in_channels,  out_channels, kernel_size, stride):
  return nn.Sequential(
      nn.Conv2d(in_channels, out_channels, kernel_size, stride),
      nn.BatchNorm2d(out_channels),
      nn.LeakyReLU(0.2)
  )


We're not adding a sigmoid layer because we'll use the binary cross-entropy with logits loss (nn.BCEWithLogitsLoss), which takes the raw logits as input.
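As a quick sketch (toy tensors, not part of the notebook), BCEWithLogitsLoss applied to raw logits gives the same result as a sigmoid followed by BCELoss, but is more numerically stable:

import torch
from torch import nn

logits = torch.tensor([1.5, -0.3, 0.8])
targets = torch.ones(3)
loss_a = nn.BCEWithLogitsLoss()(logits, targets)
loss_b = nn.BCELoss()(torch.sigmoid(logits), targets)
print(loss_a.item(), loss_b.item())   # the two values agree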


class Discriminator(nn.Module): # inheriting from nn.Module class

  def __init__(self):

    # call super() in inheritance to access the parent class.
    # without calling super(), the child class __init__() would override nn.Module's setup
    super(Discriminator, self).__init__() 

    self.block_1 = get_discriminator_block(1,16,(3,3),2)
    self.block_2 = get_discriminator_block(16,32,(5,5),2)
    self.block_3 = get_discriminator_block(32,64,(5,5),2)

    self.flatten = nn.Flatten()
    self.linear = nn.Linear(in_features=64, out_features=1)
    
  def forward(self, images):
    
    x1 = self.block_1(images)
    x2 = self.block_2(x1)
    x3 = self.block_3(x2)

    x4 = self.flatten(x3)
    x5 = self.linear(x4)

    return x5

D = Discriminator()
D.to(device)

summary(D, input_size=(1,28,28))

## Create Generator Network

def get_generator_block(in_channels, out_channels, kernel_size, stride, final_block=False):

  if final_block:
    return nn.Sequential(
        nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride),
        nn.Tanh()
    )
    
  return nn.Sequential(
      nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride),
      nn.BatchNorm2d(out_channels),
      nn.ReLU()
  )

class Generator(nn.Module):

  def __init__(self, noise_dim):
    super(Generator, self).__init__()

    self.noise_dim = noise_dim
    self.block_1 = get_generator_block(noise_dim, 256, (3,3), 2)
    self.block_2 = get_generator_block(256, 128, (4,4), 1)
    self.block_3 = get_generator_block(128, 64, (3,3), 2)
    
    self.block_4 = get_generator_block(64, 1, (4,4), 2, final_block=True)

  def forward(self, random_noise_vector):

    x = random_noise_vector.view(-1, self.noise_dim, 1, 1)

    x1 = self.block_1(x)
    x2 = self.block_2(x1)
    x3 = self.block_3(x2)
    x4 = self.block_4(x3)

    return x4


G = Generator(noise_dim)
G.to(device)

summary(G, input_size=(1, noise_dim))

### Reinitialize the randomly initialized weights from a normal distribution for more robust training


def weights_init(m):

  if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
    nn.init.normal_(m.weight, 0.0, 0.02)

  if isinstance(m, nn.BatchNorm2d):
    nn.init.normal_(m.weight, 0.0, 0.02)
    nn.init.constant_(m.bias, 0)

D = D.apply(weights_init)
G = G.apply(weights_init)

## Create Loss function and Optimizer

def real_loss(discriminator_prediction):
  criterion = nn.BCEWithLogitsLoss()
  ground_truth = torch.ones_like(discriminator_prediction)
  loss = criterion(discriminator_prediction, ground_truth)

  return loss

def fake_loss(discriminator_prediction):
  criterion = nn.BCEWithLogitsLoss()
  ground_truth = torch.zeros_like(discriminator_prediction)
  loss = criterion(discriminator_prediction, ground_truth)

  return loss

D_opt = torch.optim.Adam(D.parameters(), lr=lr, betas=(beta_1, beta_2))
G_opt = torch.optim.Adam(G.parameters(), lr=lr, betas=(beta_1, beta_2))

## Training Loop

from tqdm import tqdm

for i in range(epochs):

  total_d_loss = 0.0
  total_g_loss = 0.0

  for real_image, _ in tqdm(trainloader):

    real_image = real_image.to(device)
    noise = torch.randn(batch_size, noise_dim, device=device)

    # find loss and update weights for Discriminator

    D_opt.zero_grad()

    fake_image = G(noise)
    D_pred = D(fake_image)
    D_fake_loss = fake_loss(D_pred)

    D_pred = D(real_image)
    D_real_loss = real_loss(D_pred)

    D_loss = (D_fake_loss + D_real_loss) / 2

    total_d_loss += D_loss.item()

    D_loss.backward()
    D_opt.step()

    # find loss and update weights for Generator

    G_opt.zero_grad()
    noise = torch.randn(batch_size, noise_dim, device=device)

    fake_image = G(noise)
    D_pred = D(fake_image)
    G_loss = real_loss(D_pred)

    total_g_loss += G_loss.item()

    G_loss.backward()
    G_opt.step()


  avg_d_loss = total_d_loss / len(trainloader)
  avg_g_loss = total_g_loss / len(trainloader)

  print(f'Epoch: {i+1} | D_loss: {avg_d_loss} | G_loss: {avg_g_loss}')

  show_tensor_images(fake_image)



import transformers 
from transformers import BertModel, BertTokenizer
import torch

import copy
import pandas as pd
import numpy as np
import seaborn as sns
from pylab import rcParams
import matplotlib.pyplot as plt
from matplotlib import rc
from tqdm import tqdm

from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report

from collections import defaultdict
from textwrap import wrap

from torch import nn, optim
from torch.utils import data

%matplotlib inline
%config InlineBackend.figure_format='retina'

sns.set(style='whitegrid', palette='muted', font_scale=1.2)

HAPPY_COLORS_PALETTE = ["#01BEFE", "#FFDD00", "#FF7D00", "#FF006D", "#ADFF02", "#8F00FF"]

sns.set_palette(sns.color_palette(HAPPY_COLORS_PALETTE))

rcParams['figure.figsize'] = 6, 4

RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)
torch.manual_seed(RANDOM_SEED)

import torch
torch.cuda.is_available()

print('cuda available: ', torch.cuda.is_available())
print('device count: ', torch.cuda.device_count())
print('cuda current device: ', torch.cuda.current_device())
print('cuda device name: ', torch.cuda.get_device_name(0))
print('allocated memory: ', torch.cuda.memory_allocated())
print('reserved (cached) memory: ', torch.cuda.memory_reserved())  # memory_cached() is deprecated

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device
#Run this from cmd.
$ rclone authorize "onedrive"

# Download & Install Latest Setup
!curl https://rclone.org/install.sh | sudo bash

# Authenticate One Drive
!rclone config

# Mount One Drive
#To stream files we need to mount One Drive.
!sudo mkdir /content/onedrive
!nohup rclone --vfs-cache-mode writes mount onedrive: /content/onedrive &
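# Once mounted, the remote can be browsed like any local path
# (a minimal check, assuming the remote was named "onedrive" during rclone config)
!ls /content/onedrive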
from IPython.display import HTML, display

def set_css():
  display(HTML('''
  <style>
    pre {
        white-space: pre-wrap;
    }
  </style>
  '''))
get_ipython().events.register('pre_run_cell', set_css)
#--------------install pytorch geometric
!python -c "import torch; print(torch.version.cuda)"
!python -c "import torch; print(torch.__version__)"
# check above version and edit below accordingly

!pip install torch==1.9.0
!pip uninstall -y torch-scatter
!pip uninstall -y torch-sparse
!pip uninstall -y torch-cluster
!pip uninstall -y torch-geometric
!pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.9.0+cu102.html
!pip install torch-sparse -f https://pytorch-geometric.com/whl/torch-1.9.0+cu102.html
!pip install torch-cluster -f https://pytorch-geometric.com/whl/torch-1.9.0+cu102.html
!pip install torch-spline-conv -f https://pytorch-geometric.com/whl/torch-1.9.0+cu102.html
!pip install torch-geometric
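# A small helper (not in the original snippet): build the matching wheel index URL
# from the detected versions instead of editing the lines above by hand.
# Assumes a CUDA build of torch (torch.version.cuda is not None).
import torch
torch_version = torch.__version__.split('+')[0]            # e.g. '1.9.0'
cuda_suffix = 'cu' + torch.version.cuda.replace('.', '')   # e.g. 'cu102'
print(f"https://pytorch-geometric.com/whl/torch-{torch_version}+{cuda_suffix}.html")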

#--------------mount drive-------------------
from google.colab import drive
drive.mount('/content/drive')
### File path
TRAIN_ID_PATH = '/content/drive/MyDrive/folder/pytorch/train.csv'
from google.colab import drive
drive.mount('/content/drive/')
import sys
sys.path.insert(0,'/content/drive/MyDrive/')
import pandas as pd
train_df = pd.read_csv("/content/drive/MyDrive/train.csv")