from layoutlm import LayoutlmConfig, LayoutlmForTokenClassification 
from transformers import BertTokenizer, AdamW
from torch.utils.data import DataLoader, RandomSampler 
import torch 
from tqdm import tqdm, trange

MODEL_CLASSES = {
    "layoutlm": (LayoutlmConfig, LayoutlmForTokenClassification, BertTokenizer),
}
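
# A minimal sketch (not part of the original snippet) of how MODEL_CLASSES is typically
# unpacked to load a checkpoint. The checkpoint name "microsoft/layoutlm-base-uncased"
# and the label list below are assumptions; substitute your own checkpoint path and labels.
config_class, model_class, tokenizer_class = MODEL_CLASSES["layoutlm"]
labels = ["O", "B-HEADER", "I-HEADER", "B-QUESTION", "I-QUESTION", "B-ANSWER", "I-ANSWER"]
config = config_class.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=len(labels))
tokenizer = tokenizer_class.from_pretrained("microsoft/layoutlm-base-uncased")
model = model_class.from_pretrained("microsoft/layoutlm-base-uncased", config=config)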

def train(train_dataset, model, tokenizer, labels, pad_token_label_id,
          train_batch_size=2, learning_rate=5e-5, adam_epsilon=1e-8,
          weight_decay=0.0, num_train_epochs=3):
    """Train the model. Hyperparameter defaults are illustrative; tune them for your dataset."""
    if torch.cuda.is_available():
        device = torch.device("cuda")
        print("GPU is available")
    else:
        device = torch.device("cpu")
        print("GPU is not available, using CPU instead")

    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler,
                                  batch_size=train_batch_size, collate_fn=None)

    # Apply weight decay to all parameters except biases and LayerNorm weights
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
         "weight_decay": weight_decay},
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
         "weight_decay": 0.0},
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=learning_rate, eps=adam_epsilon)
    model.to(device)

    global_step, tr_loss = 0, 0.0
    model.zero_grad()
    train_iterator = trange(num_train_epochs, desc="Epoch")
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration")
        for step, batch in enumerate(epoch_iterator):
            model.train()
            inputs = {
                "input_ids": batch[0].to(device),
                "attention_mask": batch[1].to(device),
                "labels": batch[3].to(device),
            }
            inputs["bbox"] = batch[4].to(device)
            inputs["token_type_ids"] = batch[2].to(device)

            outputs = model(**inputs)
            loss = outputs[0]
            loss.backward()
            tr_loss += loss.item()

            optimizer.step()
            # scheduler.step()  # Update learning rate schedule (no scheduler defined in this snippet)
            model.zero_grad()
            global_step += 1

    return global_step, tr_loss / global_step
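
# Example usage (a sketch with synthetic data, not the original preprocessing pipeline):
# the dataset tuples must match the batch indexing used inside train(), i.e.
# (input_ids, attention_mask, token_type_ids, label_ids, bbox). All shapes and values
# below are placeholders for illustration only.
from torch.utils.data import TensorDataset

pad_token_label_id = torch.nn.CrossEntropyLoss().ignore_index  # -100, ignored by the loss

num_examples, seq_len = 8, 32
input_ids = torch.randint(0, tokenizer.vocab_size, (num_examples, seq_len))
attention_mask = torch.ones(num_examples, seq_len, dtype=torch.long)
token_type_ids = torch.zeros(num_examples, seq_len, dtype=torch.long)
label_ids = torch.randint(0, len(labels), (num_examples, seq_len))
# LayoutLM expects boxes as (x0, y0, x1, y1) on a 0-1000 scale with x0 <= x1 and y0 <= y1
x0 = torch.randint(0, 500, (num_examples, seq_len, 1))
y0 = torch.randint(0, 500, (num_examples, seq_len, 1))
bboxes = torch.cat([x0, y0, x0 + 10, y0 + 10], dim=-1)

train_dataset = TensorDataset(input_ids, attention_mask, token_type_ids, label_ids, bboxes)
global_step, avg_loss = train(train_dataset, model, tokenizer, labels, pad_token_label_id)
print(f"Finished {global_step} steps, average training loss {avg_loss:.4f}")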