import torch
from tqdm import tqdm

class CrfTrainer:
    """Trainer/evaluator for a BERT+CRF sequence-labeling model.

    The BERT backbone is frozen at construction time; only the remaining
    task-specific parameters (e.g. classification head and CRF) are
    optimized. Metrics are micro-averaged entity-level P/R/F1.
    """

    def __init__(self, model, id2type, train_dataloader, val_dataloader) -> None:
        """
        Args:
            model: module whose forward returns ``(loss, emissions)`` given
                ``(input_ids, attention_mask, token_type_ids, labels)`` and
                which exposes ``crf.decode`` and ``decode`` for turning tag
                sequences into entities.
            id2type: mapping from label id to entity-type string.
            train_dataloader: iterable of training batches (dicts of tensors
                plus a "golds" entry of gold entities).
            val_dataloader: iterable of validation batches, same schema.
        """
        cuda_condition = torch.cuda.is_available()
        self._device = torch.device("cuda:0" if cuda_condition else "cpu")

        self._model = model.to(self._device)
        # Freeze the BERT backbone; only task-specific layers stay trainable.
        for name, param in self._model.named_parameters():
            if "bert" in name:
                param.requires_grad = False
        # NOTE: the per-group lr (1e-4) overrides the optimizer-level default
        # (1e-5); the default would only apply to groups without an "lr" key,
        # and every trainable parameter is in this one group.
        params_group = [{"params": [p for p in self._model.parameters() if p.requires_grad], "lr": 1e-4}]
        self._optimizer = torch.optim.Adam(params_group, lr=1e-5)

        self._id2type = id2type

        self._train_dataloader = train_dataloader
        self._val_dataloader = val_dataloader
        self._global_step = 0

    def train(self, epoch):
        """Run one training epoch and return its metrics dict."""
        return self.iterate(self._train_dataloader, epoch, True)

    def validate(self, epoch):
        """Run one validation pass (no optimizer steps) and return metrics."""
        return self.iterate(self._val_dataloader, epoch, False)

    def iterate(self, dataloader, epoch, train=True):
        """Iterate once over ``dataloader``, optionally taking optimizer steps.

        Args:
            dataloader: iterable of batch dicts.
            epoch: epoch index, used only for the progress-bar label.
            train: when True, backpropagate and step the optimizer per batch.

        Returns:
            dict with keys "precision", "recall", "f1-score", "loss"
            (micro-averaged entity metrics and mean per-batch loss).
        """
        total_correct = 0
        total_pred = 0
        total_gold = 0
        total_loss = 0.0
        num_batches = 0

        data_iterator = tqdm(dataloader, f"Epoch-{epoch}: ", total=len(dataloader))
        for data in data_iterator:
            golds = data["golds"]
            # Move tensors to the device; gold entities stay on the host
            # (anything whose key contains "gold" is not a tensor).
            data = {key: value.to(self._device) for key, value in data.items() if "gold" not in key}

            if train:
                self._model.train()
                loss, emmi = self._model(data["input_ids"], data["attention_mask"], data["token_type_ids"], data["labels"])
                self._optimizer.zero_grad()
                loss.backward()
                self._optimizer.step()
                self._global_step += 1
            else:
                self._model.eval()
                # BUG FIX: the eval forward previously ran with autograd
                # enabled, building an unused graph every batch; no_grad()
                # saves memory and time without changing any output value.
                with torch.no_grad():
                    loss, emmi = self._model(data["input_ids"], data["attention_mask"], data["token_type_ids"], data["labels"])
            total_loss += loss.item()
            num_batches += 1

            # Viterbi-decode tag ids, then map them to entity spans/types.
            pred_tags = self._model.crf.decode(emmi, data["attention_mask"], pad_tag=0)[0]
            pred_entities = self._model.decode(pred_tags, self._id2type)

            for gold_ent, pred_ent in zip(golds, pred_entities):
                pred_set = set(pred_ent)
                gold_set = set(gold_ent)
                total_correct += len(pred_set & gold_set)
                total_pred += len(pred_set)
                total_gold += len(gold_set)

        precision = 0.0 if total_pred == 0 else total_correct / total_pred
        recall = 0.0 if total_gold == 0 else total_correct / total_gold
        # Guard the actual divisor (P + R); equivalent to the old P-only
        # guard but protects the division directly.
        f1_score = 0.0 if precision + recall == 0 else 2 * precision * recall / (precision + recall)
        # BUG FIX: an empty dataloader previously raised ZeroDivisionError.
        loss = total_loss / num_batches if num_batches else 0.0
        return {"precision": precision, "recall": recall, "f1-score": f1_score, "loss": loss}

    def dump_checkpoint(self, epoch, score, filename):
        """Serialize epoch, score, model and optimizer state to ``filename``.

        NOTE(review): "checkpoint" pickles the whole model object, which ties
        the file to the exact class/module layout at save time;
        ``self._model.state_dict()`` would be more portable. Kept as-is so
        existing loading code that expects a full model keeps working.
        """
        dump_obj = {
            "epoch": epoch,
            "score": score,
            "checkpoint": self._model,
            "optimizer_status": self._optimizer.state_dict(),
        }

        torch.save(dump_obj, filename)
        return None
