from datetime import datetime

import torch
import torch.nn.functional as F
from torch.cuda.amp import autocast as autocast

from tqdm import tqdm
import wandb

from util.metrics import Metrics
from .optim_schedule import TPLinkerOptimScheduler

class TPlinkerTrainer:
    """Training/validation driver for a TPLinker joint-extraction model.

    Handles device placement, mixed-precision forward passes, cross-entropy
    loss over the three TPLinker link matrices (h2t / h2h / t2t), metric
    aggregation, and logging to Weights & Biases.
    """

    def __init__(self, model, optim_scheduler:TPLinkerOptimScheduler, train_dataloader, 
                 label_weights=None, val_dataloader=None, logfreq=10) -> None:
        """
        Args:
            model: model whose forward returns ``{"logits": {"h2t","h2h","t2t"}}``.
            optim_scheduler: wrapper bundling the optimizer(s) and LR schedule.
            train_dataloader: yields dicts of input tensors plus a ``"labels"``
                dict keyed by "h2t"/"h2h"/"t2t".
            label_weights: optional dict of per-class weight tensors
                (keys "h2t"/"h2h"/"t2t") for the cross-entropy losses.
            val_dataloader: optional validation dataloader.
            logfreq: console log interval, in steps.
        """
        self._optim_scheduler = optim_scheduler
        self._train_dl = train_dataloader
        self._val_dl = val_dataloader
        self._logfreq = logfreq

        # h2t matrices have 2 classes; h2h/t2t have 3 (see calc_metrics).
        self._h2t_metrics_fn = Metrics(2)
        self._h2h_t2t_metrics_fn = Metrics(3)

        time_postfix = datetime.now().strftime(r"%Y%m%d-%H%M%S")
        self._logger_db = wandb.init(project="TPLinker", name=f"TPlinker@{time_postfix}")

        cuda_condition = torch.cuda.is_available()
        self._device = torch.device("cuda:0" if cuda_condition else "cpu")
        self._model = model.to(self._device)

        # Fix: move the class-weight tensors to the target device once here,
        # instead of re-copying them on every training step.
        if label_weights is not None:
            label_weights = {key: w.to(self._device) for key, w in label_weights.items()}
        self._label_weights = label_weights

        self.global_epoch_num = 0
        self.global_step_num = 0

    def train(self):
        """Run one training epoch and return its aggregated metrics."""
        return self.iterate(self._train_dl)

    def validate(self):
        """Run one validation epoch (no optimization) and return its metrics."""
        return self.iterate(self._val_dl, train=False)

    def save_checkpoint(self, filename):
        """Serialize epoch counter, scheduler state and model to ``filename``.

        The model is temporarily moved to CPU so the checkpoint can be
        loaded on machines without a GPU, then moved back afterwards.
        """
        target = {
            "epoch_num": self.global_epoch_num,
            "scheduler_state": self._optim_scheduler.scheduler.state_dict(),
            "model": self._model.to("cpu"),
        }
        torch.save(target, filename)
        self._model.to(self._device)
        return None

    def _compute_loss(self, output_logits, labels):
        """Average the cross-entropy losses of the three link matrices."""
        weights = self._label_weights
        losses = [
            self.cross_entroy_loss(output_logits[key], labels[key],
                                   None if weights is None else weights[key])
            for key in ("h2t", "h2h", "t2t")
        ]
        return sum(losses) / 3

    def iterate(self, dataloader, train=True):
        """Run one full pass over ``dataloader``.

        In training mode the optimizer is stepped and the global epoch/step
        counters and learning rate are updated; in validation mode the model
        is only evaluated.

        Returns:
            dict of epoch-averaged precision/recall/accuracy/f1/loss
            (keys prefixed with ``train_`` or ``validation_``).
        """
        # Snapshot the global counters; they are written back only on a
        # completed training epoch.
        current_epoch_num = self.global_epoch_num
        current_step_num = self.global_step_num

        if train:
            current_epoch_num += 1

        self._logger_db.log({"epoch_num":current_epoch_num, 
                             "tplinker_lr":self._optim_scheduler.get_lr()[0],
                             "bert_lr":self._optim_scheduler.get_lr()[1]})

        # Progress bar over the epoch.
        epoch_step_num = len(dataloader)
        data_iterator = tqdm(dataloader, f"Epoch-{current_epoch_num}", total=epoch_step_num)

        # Running sums over all steps of this epoch.
        total_loss = 0.0
        total_precision = 0.0
        total_recall = 0.0
        total_accuracy = 0.0
        total_f1 = 0.0

        for batch_idx, data in enumerate(data_iterator, start=1):
            if train:
                current_step_num += 1

            labels = data.pop("labels")

            # Move inputs and labels to the training device.
            data = {key: value.to(self._device) for key, value in data.items()}
            labels = {key: value.to(self._device) for key, value in labels.items()}

            # NOTE(review): autocast() forward with a plain backward() — no
            # GradScaler. fp16 gradients may underflow; confirm whether the
            # optim scheduler handles scaling, otherwise add a GradScaler.
            with autocast():
                model_outputs = self._model(**data)
            output_logits = model_outputs["logits"]

            loss = self._compute_loss(output_logits, labels)

            # Optimize only in training mode.
            if train:
                self._optim_scheduler.zero_grad()
                loss.backward()
                self._optim_scheduler.step()

            # Class index lives on the last logits dimension:
            # h2t -> dim 2, h2h/t2t -> dim 3.
            predictions = {
                "h2t":torch.argmax(output_logits["h2t"], 2),
                "h2h":torch.argmax(output_logits["h2h"], 3),
                "t2t":torch.argmax(output_logits["t2t"], 3),
            }

            # Metrics of the current step, accumulated into the epoch totals.
            metrics_output = self.calc_metrics(predictions, labels)
            total_precision += metrics_output["precision"]
            total_recall += metrics_output["recall"]
            total_accuracy += metrics_output["accuracy"]
            total_f1 += metrics_output["f1"]
            step_loss = loss.item()
            total_loss += step_loss

            # Fix: in validation mode the global step counter never advances,
            # so key the periodic console log off the in-epoch batch index.
            log_step = current_step_num if train else batch_idx
            if log_step % self._logfreq == 0:
                data_iterator.write(f"\n--------------------------------------------------------------------------------------------------\n\
                    Epoch-{current_epoch_num}||Step-{current_step_num}\n\
                    Precision: {metrics_output['precision']*100:.3f}%,  Recall: {metrics_output['recall']*100:.3f}%\n\
                    Accuracy: {metrics_output['accuracy']*100:.3f}%,  F1-Score: {metrics_output['f1']*100:.3f}%\n\
                    Loss:{step_loss:.8f}\n\
                    --------------------------------------------------------------------------------------------------\n")
            # Per-step wandb logging (training only).
            if train:
                step_logging_data = {
                    "step_num":current_step_num,
                    "train_step_precision":metrics_output["precision"],
                    "train_step_recall":metrics_output["recall"],
                    "train_step_accuracy":metrics_output["accuracy"],
                    "train_step_f1":metrics_output["f1"],
                    # Fix: log a Python float, not a (possibly CUDA) tensor.
                    "train_step_loss":step_loss,
                }
                self._logger_db.log(step_logging_data)

        # Guard against an empty dataloader (avoids ZeroDivisionError).
        divisor = max(epoch_step_num, 1)

        # Per-epoch wandb logging.
        if train:
            epoch_logging_data = {
                "epoch_num":current_epoch_num,
                "train_epoch_precision":total_precision/divisor,
                "train_epoch_recall":total_recall/divisor,
                "train_epoch_accuracy":total_accuracy/divisor,
                "train_epoch_f1":total_f1/divisor,
                "train_epoch_loss":total_loss/divisor,
            }
            # Commit the counters and advance the LR schedule only after a
            # completed training epoch.
            self.global_step_num = current_step_num
            self.global_epoch_num = current_epoch_num
            self._optim_scheduler.update_lr()
        else:
            epoch_logging_data = {
                "epoch_num":current_epoch_num,
                "validation_epoch_precision":total_precision/divisor,
                "validation_epoch_recall":total_recall/divisor,
                "validation_epoch_accuracy":total_accuracy/divisor,
                "validation_epoch_f1":total_f1/divisor,
                "validation_epoch_loss":total_loss/divisor,
            }

        self._logger_db.log(epoch_logging_data)

        return epoch_logging_data

    @staticmethod
    def cross_entroy_loss(pred_y, true_y, weights=None):
        """Flatten logits/targets and compute (optionally class-weighted)
        cross entropy.

        NOTE: the name keeps its historical misspelling ("entroy") so
        existing callers do not break.

        Args:
            pred_y: logits of shape ``(..., num_classes)``.
            true_y: integer class labels broadcastable to ``pred_y``'s
                leading dimensions.
            weights: optional per-class weight tensor of length num_classes.
        """
        pred_y = pred_y.view(-1, pred_y.size(-1))
        true_y = true_y.view(-1)
        return F.cross_entropy(pred_y, true_y, weight=weights)

    def calc_metrics(self, predictions, labels):
        """Compute metrics per link matrix and macro-average across the three.

        Args:
            predictions / labels: dicts keyed by "h2t"/"h2h"/"t2t" holding
                predicted / true class-index tensors.

        Returns:
            dict with mean "precision", "recall", "accuracy", "f1".
        """
        per_matrix = [
            self._h2t_metrics_fn(predictions["h2t"], labels["h2t"]),
            self._h2h_t2t_metrics_fn(predictions["h2h"], labels["h2h"]),
            self._h2h_t2t_metrics_fn(predictions["t2t"], labels["t2t"]),
        ]
        return {
            name: sum(m[name] for m in per_matrix) / 3
            for name in ("precision", "recall", "accuracy", "f1")
        }
