from token_classifcation import LayoutLMv2ForTokenClassification
import torch
from configs.config import _C as config
from torch.optim import AdamW
from examples.dataloder import dataloder
from dataset.vocab import WordVocab
from seqeval.metrics import (classification_report,
                             f1_score,
                             precision_score,
                             recall_score)
import tqdm
import numpy as np
from handle_train_data import label_to_id
# Run on GPU when available; the model and every batch tensor are moved here.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# device = torch.device("cpu")
model = LayoutLMv2ForTokenClassification(config)
# strict=False tolerates missing/unexpected keys in the state dict, e.g. a
# freshly initialised classification head on top of pretrained backbone weights.
# NOTE(review): hard-coded absolute checkpoint path — fails outside this machine.
model.load_state_dict(torch.load("/mnt/myproject/pretrain/my-lm/saved/lmv2_trained.model19"), strict=False)

# Invert label_to_id into an id -> label mapping used when decoding predictions
# (val() indexes label_map with integer label ids). The loop variable names are
# swapped relative to what .items() yields: `ix` is the label string and
# `label` is the integer id — presumably label_to_id maps label -> id; verify.
label_map = {label:ix for ix,label in label_to_id.items()}
def train():
    """Fine-tune the token-classification model for a fixed number of epochs.

    Uses the module-level ``model``, ``config`` and ``device``; updates the
    model weights in place and runs ``val(epoch)`` after every epoch.
    Progress and the loss (every 100 steps) are printed to stdout.
    """
    model.to(device)

    optimizer = AdamW(model.parameters(), lr=5e-5)
    train_dataloader = dataloder(config)
    global_step = 0
    num_train_epochs = 5

    # put the model in training mode
    model.train()

    for epoch in range(num_train_epochs):
        data_iter = tqdm.tqdm(enumerate(train_dataloader),
                              desc="EP_%s:%d" % ("train", epoch),
                              total=len(train_dataloader),
                              bar_format="{l_bar}{r_bar}")
        for _, batch in data_iter:
            input_ids = batch["curpus_input"].to(device)
            bbox = batch["box_input"].to(device)
            image = batch['img_input'].to(device)
            labels = batch["curpus_label"].to(device)

            # forward pass; the model computes the loss itself when labels
            # are supplied
            outputs = model(input_ids=input_ids, bbox=bbox, image=image,
                            labels=labels)
            loss = outputs.loss

            # print loss every 100 steps
            if global_step % 100 == 0:
                print(f"Loss after {global_step} steps: {loss.item()}")

            # backward pass, parameter update, then clear the gradients
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            global_step += 1

        val(epoch)

def val(epoch):
    """Evaluate the model and print average loss plus seqeval metrics.

    Args:
        epoch: current epoch number, used only for the progress-bar label.

    Tokens whose gold label equals the CrossEntropyLoss ignore index (-100)
    are excluded from the precision/recall/F1 computation.
    """
    eval_loss = 0.0
    nb_eval_steps = 0
    preds = None
    out_label_ids = None
    # -100 by default: padding positions carry this label id and are skipped.
    pad_token_label_id = torch.nn.CrossEntropyLoss().ignore_index
    model.eval()
    # NOTE(review): this builds the same dataloader as training — presumably a
    # dedicated validation split should be used instead; confirm with dataloder().
    val_dataloder = dataloder(config)
    val_iter = tqdm.tqdm(enumerate(val_dataloder), desc="EP_%s:%d" % ("val", epoch))
    with torch.no_grad():
        for i, batch in val_iter:
            input_ids = batch["curpus_input"].to(device)
            bbox = batch["box_input"].to(device)
            image = batch['img_input'].to(device)
            labels = batch["curpus_label"].to(device)
            outputs = model(input_ids=input_ids, bbox=bbox, image=image,
                            labels=labels)
            eval_loss += outputs.loss.item()
            nb_eval_steps += 1

            # Accumulate logits and gold labels over the whole set.
            logits = outputs.logits
            if preds is None:
                preds = logits.detach().cpu().numpy()
                out_label_ids = labels.detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(
                    out_label_ids, labels.detach().cpu().numpy(), axis=0
                )
    # Guard against an empty dataloader instead of dividing by zero.
    eval_loss = eval_loss / max(nb_eval_steps, 1)

    # BUG FIX: preds is (num_examples, seq_len, num_labels); the class scores
    # live on axis 2, not axis 1. The original argmax(axis=1) collapsed the
    # sequence axis and then indexed label_map with a whole row (preds[i]),
    # which raises TypeError. Take the per-token argmax and index preds[i][j].
    preds = np.argmax(preds, axis=2)
    out_label_list = [[] for _ in range(out_label_ids.shape[0])]
    preds_list = [[] for _ in range(out_label_ids.shape[0])]
    for i in range(out_label_ids.shape[0]):
        for j in range(out_label_ids.shape[1]):
            if out_label_ids[i, j] != pad_token_label_id:
                out_label_list[i].append(label_map[out_label_ids[i][j]])
                preds_list[i].append(label_map[preds[i][j]])
    results = {
        'loss': eval_loss,
        "precision": precision_score(out_label_list, preds_list),
        "recall": recall_score(out_label_list, preds_list),
        "f1": f1_score(out_label_list, preds_list),
    }
    print(results)


# Script entry point: kick off training (which also runs per-epoch validation).
if __name__ == "__main__":
    train()


