import os
import random
import torch
from torch.utils.data import DataLoader
import numpy as np

from ..models.modeling_utils import ModelConfig, ModelBase
from ..dataprocess.textprocess_utils import TextProcess
from ..utils import path_utils
from ..utils import log_utils
from ..optimizer.optimizer_utils import get_linear_schedule_with_warmup
from ..utils.adversarial_utils import FGM, PGD


def _setup_seed_(seed):
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
    torch.backends.cudnn.deterministic = True


_setup_seed_(2022)


class TrainerConfig(ModelConfig):
    """Configuration for :class:`Trainer`.

    Extends ``ModelConfig``; unrecognized keyword arguments are forwarded to
    the base class.  Relative ``dataset_dir`` / ``vocab_file`` paths are
    resolved against the project root on construction.
    """
    def __init__(self,
                 train_flag,  # identifier of this training run (used to build log/model paths)
                 dataset_dir=None,  # dataset directory; a relative path is resolved against project_dir
                 vocab_file=None,  # vocabulary file path; a relative path is resolved against project_dir
                 model_config_file=None,  # model configuration file path
                 continue_train=True,  # True: resume training from the last saved model
                 n_epochs=100,  # number of training epochs
                 early_stopping_patience=5,  # epochs without val-loss improvement before stopping early
                 batch_size=16,  # mini-batch size
                 warmup_steps=4000,  # warmup steps for the linear LR scheduler
                 gradient_accumulation_steps=4,  # run the optimizer once every N batches
                 max_grad_norm=2,  # gradient-clipping threshold
                 device='cuda',  # target device; falls back to CPU if CUDA is unavailable
                 log_n_step=100,  # log training metrics every N steps
                 save_model_n_step=-1,  # save the model every N steps (<= 0 disables step-level saving)
                 only_save_best_model=True,  # True: keep only the best (lowest val loss) checkpoint
                 adversarial_train=None,  # adversarial training mode: None, 'FGM' or 'PGD'
                 adversarial_embed='word_embeddings',  # name of the embedding parameter to perturb
                 adversarial_epsilon=1.0,  # adversarial perturbation magnitude
                 **kwargs):
        super(TrainerConfig, self).__init__(**kwargs)
        self.train_flag = train_flag
        self.dataset_dir = dataset_dir
        self.vocab_file = vocab_file
        self.model_config_file = model_config_file

        self.continue_train = continue_train
        self.n_epochs = n_epochs
        self.early_stopping_patience = early_stopping_patience
        self.batch_size = batch_size

        self.warmup_steps = warmup_steps
        self.gradient_accumulation_steps = gradient_accumulation_steps
        self.max_grad_norm = max_grad_norm
        self.device = device

        self.log_n_step = log_n_step
        self.save_model_n_step = save_model_n_step
        self.only_save_best_model = only_save_best_model

        self.adversarial_train = adversarial_train
        self.adversarial_embed = adversarial_embed
        self.adversarial_epsilon = adversarial_epsilon

        self._load_finish_()

    def _load_finish_(self):
        """Resolve relative dataset/vocab paths against the project root."""
        if (self.dataset_dir is not None) and (not os.path.isabs(self.dataset_dir)):
            self.dataset_dir = os.path.join(path_utils.project_dir, self.dataset_dir)
        if (self.vocab_file is not None) and (not os.path.isabs(self.vocab_file)):
            self.vocab_file = os.path.join(path_utils.project_dir, self.vocab_file)

class Trainer(object):
    """Generic training driver.

    Wires a model, a data processor and a :class:`TrainerConfig` together and
    runs the train / validate / test / checkpoint loop.  Subclasses must
    implement ``batch_acc``, ``model_optimizer``, ``test`` and
    ``model_forward``.
    """
    def __init__(self,
                 trainer_config: TrainerConfig,
                 model: ModelBase,
                 data_process: TextProcess,
                 **kwargs
                 ):
        self.config = trainer_config
        self.model = model
        self.data_process = data_process

        # One log file per training run, keyed by train_flag.
        log_file_path = path_utils.get_train_log_path(train_flag=self.config.train_flag)
        self.logger = log_utils.get_logger(file_path=log_file_path)

    def get_last_save_model(self) -> ModelBase:
        """Load the most recent checkpoint into ``self.model`` (if one exists) and return it."""
        last_save_model_path = path_utils.get_last_save_model_path(train_flag=self.config.train_flag)
        if last_save_model_path:
            self.logger.info(f"加载存储模型:{last_save_model_path}")
            # strict=False tolerates missing/unexpected keys in the checkpoint.
            self.model.load_state_dict(torch.load(last_save_model_path), strict=False)
        return self.model

    def _save_model_(self, model: ModelBase, epoch: int = None, best_model=False):
        """Persist model weights.

        ``best_model=True`` writes to the run's best-model path; otherwise the
        per-epoch path (``epoch`` is then expected to be set) is used.
        """
        if best_model:
            save_model_path = path_utils.get_train_best_model_path(train_flag=self.config.train_flag)
        else:
            save_model_path = path_utils.get_train_model_path(train_flag=self.config.train_flag, epoch=epoch)
        self.logger.info(f"保存模型参数:{save_model_path}")
        torch.save(model.state_dict(), save_model_path)

    def get_last_save_model_epoch_no(self):
        """Return the epoch number of the last saved model (used to resume training)."""
        return path_utils.get_last_save_model_epoch_no(train_flag=self.config.train_flag)

    def batch_acc(self, output: torch.Tensor, batch: any, device: str) -> float:
        """Subclass hook: compute the accuracy of one batch."""
        raise NotImplementedError(f"子类实现 batch_acc 方法")

    def model_optimizer(self, model: ModelBase) -> torch.optim.Optimizer:
        """Subclass hook: build and return the optimizer for ``model``."""
        raise NotImplementedError(f"子类实现 model_optimizer 方法")

    def test(self, model: ModelBase, device: str) -> None:
        """Subclass hook: run the test set and write results to the logger."""
        raise NotImplementedError(f"子类实现 test 方法")

    def model_forward(self, model: ModelBase, batch: any, device: str) -> (torch.Tensor, torch.Tensor):
        """Subclass hook: run one forward pass, returning ``(loss, output)``."""
        raise NotImplementedError(f"子类实现 model_forward 方法")

    def valid(self, model: ModelBase, val_loader: DataLoader, device: str) -> (float, float):
        """Run validation; returns ``(mean loss, mean accuracy)`` over completed batches.

        Callers should wrap this in ``torch.no_grad``-safe eval mode; gradients
        are disabled here.
        NOTE(review): on CUDA OOM the partial means are returned — if the very
        first batch OOMs, ``np.mean([])`` yields nan; confirm callers tolerate that.
        """
        loss_list, acc_list = [], []
        try:
            with torch.no_grad():
                for batch in val_loader:
                    loss, output = self.model_forward(model=model, batch=batch, device=device)
                    acc = self.batch_acc(output=output, batch=batch, device=device)
                    loss_list.append(loss.item())
                    acc_list.append(acc)
        except RuntimeError as exception:
            if "out of memory" in str(exception):
                self.logger.info("WARNING: val out of memory")
                if hasattr(torch.cuda, 'empty_cache'):
                    torch.cuda.empty_cache()
            else:
                self.logger.info(str(exception))
                raise exception
        return np.mean(loss_list), np.mean(acc_list)

    def train_a_epoch(self,
                      model: ModelBase,
                      optimizer: torch.optim.Optimizer,
                      train_loader: DataLoader,
                      device: str,
                      epoch: int,
                      scheduler=None):
        """Train ``model`` for one epoch with gradient accumulation and optional
        FGM/PGD adversarial training; logs loss/acc every ``log_n_step`` steps.
        """
        self.logger.info(f"epoch:{epoch} 开始训练")
        loss_list, acc_list = [], []

        # ------------ adversarial-training setup begin ------------- #
        # Modes: FGM (single-step) or PGD (multi-step projected).
        # NOTE(review): the wrapper is built around self.model while the rest of
        # the loop uses the `model` argument — identical for the only caller
        # (train), but confirm before calling this with a different model.
        if self.config.adversarial_train == 'FGM':
            adversarial = FGM(model=self.model,
                              emb_name=self.config.adversarial_embed,
                              epsilon=self.config.adversarial_epsilon)
        elif self.config.adversarial_train == 'PGD':
            adversarial = PGD(model=self.model,
                              emb_name=self.config.adversarial_embed,
                              epsilon=self.config.adversarial_epsilon)
        else:
            adversarial = None
        # ------------ adversarial-training setup end ------------- #

        try:
            for i, batch in enumerate(train_loader):
                loss, output = self.model_forward(model=model, batch=batch, device=device)
                batch_acc = self.batch_acc(output=output, batch=batch, device=device)

                # Scale so accumulated gradients match a full-batch update.
                accumulation_loss = loss/self.config.gradient_accumulation_steps
                accumulation_loss.backward()

                # ------------ adversarial training begin ------------- #
                if self.config.adversarial_train == 'FGM':
                    adversarial.attack()  # add adversarial perturbation to the embeddings
                    loss_adv, _ = self.model_forward(model=model, batch=batch, device=device)
                    loss_adv.backward()  # accumulate the adversarial gradient on top of the normal one
                    adversarial.restore()  # restore the original embedding parameters

                if self.config.adversarial_train == 'PGD':
                    adversarial.backup_grad()
                    adv_k = 3
                    for t in range(adv_k):
                        adversarial.attack(is_first_attack=(t == 0))  # perturb embeddings; first attack backs up param.data
                        if t != adv_k - 1:
                            model.zero_grad()
                        else:
                            adversarial.restore_grad()
                        loss_adv, _ = self.model_forward(model=model, batch=batch, device=device)
                        loss_adv.backward()  # accumulate the adversarial gradient on top of the normal one
                    adversarial.restore()  # restore the original embedding parameters
                # ------------ adversarial training end ------------- #

                # NOTE(review): clipping runs every micro-batch, so partially
                # accumulated gradients are clipped repeatedly before the
                # optimizer step — confirm this is intended.
                torch.nn.utils.clip_grad_norm_(model.parameters(), self.config.max_grad_norm)

                if (i + 1) % self.config.gradient_accumulation_steps == 0:
                    optimizer.step()
                    if scheduler:
                        scheduler.step()
                    optimizer.zero_grad()

                # Running log window: means are reset after each log line.
                loss_list.append(loss.item())
                acc_list.append(batch_acc)
                if (i + 1) % self.config.log_n_step == 0:
                    # Emit one summary line for the window.
                    info_str = f"epoch:{epoch} n step:{i+1}/{len(train_loader)}"
                    info_str = info_str + f" loss:{np.mean(loss_list):.4f} acc:{np.mean(acc_list):.4f}"
                    self.logger.info(info_str)
                    loss_list, acc_list = [], []

                # Optional step-level checkpointing (disabled when <= 0).
                if (self.config.save_model_n_step > 0) and ((i + 1) % self.config.save_model_n_step == 0):
                    if not self.config.only_save_best_model:
                        self.logger.info(f"epoch:{epoch} n step:{i+1} 保存模型")
                        self._save_model_(model=model, epoch=epoch)

        except RuntimeError as exception:
            if "out of memory" in str(exception):
                self.logger.info("WARNING: train out of memory")
                if hasattr(torch.cuda, 'empty_cache'):
                    torch.cuda.empty_cache()
            else:
                self.logger.info(str(exception))
                raise exception
        self.logger.info(f"epoch:{epoch} 完成训练")

    def train(self):
        """Full training entry point: resume (optional), loop over epochs,
        validate, test, checkpoint and early-stop on validation loss.
        """
        self.logger.info(f"开始训练...")
        if hasattr(self.model, "config_json_string"):
            self.logger.info(f"模型配置: {self.model.config_json_string()}")
        self.logger.info(f"训练配置: {self.config.to_json_string()}")

        if self.config.continue_train:
            self.model = self.get_last_save_model()
            start_epoch = self.get_last_save_model_epoch_no()
        else:
            start_epoch = 0

        self.logger.info(f"加载数据...")
        train_loader = self.data_process.dataloader(mode='train', batch_size=self.config.batch_size)
        val_loader = self.data_process.dataloader(mode='val', batch_size=self.config.batch_size)
        self.logger.info(f"加载数据完成.")

        # Persist the model configuration next to the checkpoints.
        config_path = path_utils.get_train_model_config_path(train_flag=self.config.train_flag)
        self.logger.info(f"保存模型模型配置:{config_path}")
        self.model.config.to_json_file(config_path)

        device = self.config.device
        if 'cuda' in device:
            # Fall back to CPU when CUDA is unavailable.
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = self.model.to(device)

        self.logger.info(f"设备: {device}")
        self.logger.info(f"开始周期: {start_epoch}")

        optimizer = self.model_optimizer(self.model)

        # NOTE(review): t_total counts batches, but scheduler.step() fires only
        # once per gradient_accumulation_steps batches, so the schedule horizon
        # is gradient_accumulation_steps times longer than one might expect —
        # confirm whether t_total should be divided by that factor.
        t_total = self.config.n_epochs * len(train_loader)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=self.config.warmup_steps, num_training_steps=t_total
        )

        val_loss_list = []
        for epoch in range(start_epoch, self.config.n_epochs):

            self.model.train()
            self.train_a_epoch(model=self.model,
                               optimizer=optimizer,
                               train_loader=train_loader,
                               device=device,
                               epoch=epoch+1,
                               scheduler=scheduler)

            # Validation pass.
            self.logger.info(f"验证数据开始...")
            self.model.eval()
            val_loss, val_acc = self.valid(model=self.model, val_loader=val_loader, device=device)
            self.logger.info(f"验证数据 loss:{val_loss:.4f}  acc:{val_acc:.4f}")

            # Test pass (subclass logs its own results).
            self.logger.info(f"测试数据开始...")
            self.model.eval()
            self.test(model=self.model, device=device)
            self.logger.info(f"测试数据完成")

            # Save best model when validation loss improves on all prior epochs.
            if (len(val_loss_list) <= 0) or (np.min(val_loss_list) > val_loss):
                self._save_model_(model=self.model, best_model=True)

            if not self.config.only_save_best_model:
                self._save_model_(model=self.model, epoch=epoch+1)

            # Early stopping: current loss is appended only when training continues.
            stop_train = _early_stopping_(cur_loss=val_loss,
                                          loss_list=val_loss_list,
                                          patience=self.config.early_stopping_patience)
            if stop_train:
                self.logger.info(f"提前停止训练  patience:{self.config.early_stopping_patience}")
                break
            else:
                val_loss_list.append(val_loss)
        self.logger.info(f"完成训练.")


def _early_stopping_(cur_loss, loss_list: [float], patience: int) -> bool:
    """ 判断是否停止训练 """
    if len(loss_list) < patience:
        return False
    if cur_loss < np.min(loss_list):
        return False
    if (len(loss_list) - np.argmin(loss_list)) > patience:
        return True
    return False



