

from numpy import record
import torch
from torch import nn
from torch.utils.data import DataLoader
from torch.nn.functional import cross_entropy
from transformers import AdamW, get_constant_schedule_with_warmup, get_linear_schedule_with_warmup
import wandb
from tqdm import tqdm


from utils import to_cuda, compute_acc, compute_measures, print_measures, print_measures_simple


class Trainer():
    """Drives supervised training and evaluation of a classifier.

    Responsibilities:
      * builds an AdamW optimizer plus a linear-warmup LR schedule sized from
        the dataset/batch/epoch configuration,
      * runs the training loop with optional fp16 (AMP via GradScaler),
        gradient clipping, periodic train-metric printing, step- or
        epoch-level validation, early stopping, and optional wandb logging,
      * evaluates on a validation iterator and optionally appends a CSV row
        of results to a file.
    """

    def __init__(self, config, model, foward_calculator, trainset_size, batch_size, warmup_ratio, report_to_wandb=True) -> None:
        """Set up loss/metrics hooks, optimizer, LR schedule and AMP scaler.

        Args:
            config: dict-like with at least 'lr', 'weight_decay', 'epochs',
                'fp16', 'cuda', 'clip', 'print_every', 'eval_strategy',
                'eval_every', 'train_by_dist'; optionally 'early_stop'.
            model: the nn.Module whose parameters are optimized.
            foward_calculator: callable(batch, model, cuda=..., ...) running
                the forward pass; returns (logit, loss) in training mode and
                just logit when called with evaluate=True. (The misspelled
                name is the established external interface — kept as-is.)
            trainset_size: number of training examples (sizes the schedule).
            batch_size: training batch size (sizes the schedule).
            warmup_ratio: fraction of total steps used for LR warmup.
            report_to_wandb: when True, log validation metrics to wandb.
        """
        self.config = config
        self.loss_fn = cross_entropy
        self.metrics_fn = compute_measures

        self.foward_calculator = foward_calculator

        self.trainset_size = trainset_size
        self.batch_size = batch_size

        self.report_to_wandb = report_to_wandb

        # NOTE(review): transformers.AdamW is deprecated upstream in favor of
        # torch.optim.AdamW; kept because the two differ in defaults
        # (correct_bias) and swapping would silently change training behavior.
        self.optim = AdamW(model.parameters(),
                           lr=self.config['lr'],
                           weight_decay=self.config['weight_decay'])

        # Total optimizer steps = epochs * batches-per-epoch; warmup covers a
        # fixed fraction of them.
        training_step = int(self.config['epochs'] * (trainset_size / batch_size))
        warmup_step = int(warmup_ratio * training_step)
        self.optim_schedule = get_linear_schedule_with_warmup(optimizer=self.optim,
                                                              num_warmup_steps=warmup_step,
                                                              num_training_steps=training_step)
        # GradScaler is a transparent no-op when fp16 is disabled.
        self.scaler = torch.cuda.amp.GradScaler(enabled=config['fp16'])

    def train(self,
              model: nn.Module,
              train_iter: DataLoader,
              val_iter: DataLoader,
              class_balance: bool = False):
        """Train `model`, periodically evaluating on `val_iter`.

        Returns:
            (best_res, state_dict): best_res is [val_loss, metrics_dict] for
            the best checkpoint by macro-F1; state_dict is that checkpoint's
            weights, or the final weights if no checkpoint was saved.
        """
        use_cuda = self.config["cuda"] and torch.cuda.is_available()
        model.train()
        if use_cuda:
            model.cuda()

        best_res = [0, {"accuracy": 0,
                        "bi_precision": 0, "bi_recall": 0, "bi_f1": 0,
                        "micro_precision": 0, "micro_recall": 0, "micro_f1": 0,
                        "macro_precision": 0, "macro_recall": 0, "macro_f1": 0,
                        "weighted_precision": 0, "weighted_recall": 0, "weighted_f1": 0,
                        "auc": 0}]
        best_model = None
        step, early_stop_cnt = 0, 0
        logits, labels = [], []  # rolling buffers for periodic train-metric printing
        for epoch in tqdm(range(self.config["epochs"])):
            # BUGFIX: DistributedSampler.set_epoch must run BEFORE iterating so
            # each epoch reshuffles differently; it was previously called at
            # the end of the epoch (off by one — epochs 0 and 1 shared a shuffle).
            if self.config['train_by_dist']:
                train_iter.sampler.set_epoch(epoch)  # type: ignore

            for batch in tqdm(train_iter):
                logit, loss = self.foward_calculator(batch, model, cuda=self.config['cuda'], class_balance=class_balance)

                self.optim.zero_grad()
                if self.config['fp16']:
                    self.scaler.scale(loss).backward()
                    # Unscale before clipping so the clip threshold applies to
                    # true gradient magnitudes.
                    self.scaler.unscale_(self.optim)
                else:
                    loss.backward()

                nn.utils.clip_grad_norm_(filter(lambda p: p.requires_grad, model.parameters()), max_norm=self.config['clip'])

                if self.config['fp16']:
                    self.scaler.step(self.optim)
                    self.scaler.update()
                else:
                    self.optim.step()
                self.optim_schedule.step()

                # BUGFIX: detach before buffering — keeping live autograd
                # graphs across steps leaks memory until the next print flush.
                logits.append(logit.detach())  # type: ignore
                labels.append(batch[2])  # assumes batch[2] holds the labels — TODO confirm against the collate fn

                if step > 0 and step % self.config['print_every'] == 0:
                    print_logits = torch.cat(logits, dim=0)  # type: ignore
                    # Move labels to wherever the logits live (the previous
                    # unconditional .cuda() crashed CPU-only runs).
                    print_labels = torch.cat(labels, dim=0).to(print_logits.device)  # type: ignore
                    print_loss, print_metrics = self.loss_fn(print_logits, print_labels), self.metrics_fn(print_logits, print_labels)  # type: ignore
                    print(f"--Epoch {epoch}, Step {step}, Loss {print_loss}")
                    print_measures_simple(print_loss, print_metrics)
                    logits, labels = [], []

                if self.config['eval_strategy'] == 'step' and epoch > 0 and step % self.config['eval_every'] == 0:
                    avg_loss, avg_metrics = self.evaluate(model, val_iter)
                    if self.report_to_wandb:
                        wandb.log({"val_loss": avg_loss, "val_macro_f1": avg_metrics['macro_f1'], "val_micro_f1": avg_metrics['micro_f1']})
                    res = [avg_loss, avg_metrics]
                    if avg_metrics['macro_f1'] >= best_res[1]['macro_f1']:   # type: ignore
                        best_res = res
                        # Checkpoint on CPU, then move back for training.
                        best_model = model.cpu().state_dict()
                        if use_cuda:
                            model.cuda()
                        early_stop_cnt = 0
                    else:
                        early_stop_cnt += 1
                    print("--Best Evaluation: ")
                    print_measures_simple(best_res[0], best_res[1])
                    model.train()  # evaluate() left the model in eval mode

                # NOTE(review): early stopping is hard-gated to epoch > 10 —
                # presumably to skip the noisy warmup phase; confirm intent.
                if epoch > 10 and self.config.get('early_stop', None) and early_stop_cnt >= self.config['early_stop']:
                    print("--early stopping, training finished.")
                    return best_res, best_model

                step += 1

            if self.config['eval_strategy'] == 'epoch':
                avg_loss, avg_metrics = self.evaluate(model, val_iter)
                if self.report_to_wandb:
                    wandb.log({"epoch": epoch,
                               "val_loss": avg_loss,
                               "val_accuracy": avg_metrics['accuracy'],
                               "val_macro_f1": avg_metrics['macro_f1'],
                               "val_micro_f1": avg_metrics['micro_f1']})
                res = [avg_loss, avg_metrics]
                if avg_metrics['macro_f1'] >= best_res[1]['macro_f1']:   # type: ignore
                    best_res = res
                    # NOTE(review): checkpointing is deliberately disabled for
                    # the epoch strategy (training returns the FINAL weights,
                    # not the best) — confirm before re-enabling:
                    # best_model = model.cpu().state_dict(); model.cuda()
                    early_stop_cnt = 0
                else:
                    early_stop_cnt += 1
                print("--Best Evaluation: ")
                print_measures_simple(best_res[0], best_res[1])
                model.train()  # evaluate() left the model in eval mode

        if best_model is not None:  # type: ignore
            return best_res, best_model
        return best_res, model.cpu().state_dict()  # type: ignore

    # eval func
    def evaluate(self, model: nn.Module, eval_iter: DataLoader, save_file: str = "", save_title: str = ""):
        """Run one full pass over `eval_iter` and return (loss, metrics).

        When `save_file` is non-empty, append one CSV row
        "<save_title>,<loss>,<metric values...>" to that file.
        Leaves the model in eval mode; the caller restores train mode.
        """
        model.eval()

        logits, labels = [], []
        with torch.no_grad():  # inference only — do not build autograd graphs
            for batch in tqdm(eval_iter):
                logit = self.foward_calculator(batch, model, cuda=self.config['cuda'], evaluate=True)
                logits.append(logit)
                labels.append(batch[2])

        logits = torch.cat(logits, dim=0)  # type: ignore
        # Keep labels on the logits' device (the previous unconditional
        # .cuda() crashed CPU-only runs).
        labels = torch.cat(labels, dim=0).to(logits.device)  # type: ignore
        loss, metrics = self.loss_fn(logits, labels), self.metrics_fn(logits, labels)  # type: ignore
        print("--Evaluation:")
        print_measures_simple(loss, metrics)

        if save_file != "":
            # BUGFIX: previously referenced undefined `avg_loss`/`avg_metrics`
            # (NameError whenever save_file was set) and wrote the repr of a
            # dict_values object; now unpack the metric values into real
            # CSV columns.
            results = [save_title, loss.item(), *metrics.values()]
            with open(save_file, "a+") as f:
                f.write(",".join(str(x) for x in results) + "\n")

        return loss, metrics  # type: ignore