import os
from abc import abstractmethod
from argparse import ArgumentParser
from tqdm.auto import tqdm
import numpy as np
import pandas as pd
import random
import warnings
# Globally silence warnings (e.g. torch/transformers deprecation noise) so the
# tqdm progress bars stay readable. NOTE(review): this also hides warnings from
# user code — consider narrowing the filter if debugging.
warnings.filterwarnings("ignore")

import torch
from torch import nn
from torch.utils.data import DataLoader, DistributedSampler, RandomSampler, SequentialSampler
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from transformers import get_cosine_schedule_with_warmup
from tensorboardX import SummaryWriter

from ..model import ModelEMA, FGM
from ..utils import check_version, worker_init_reset_seed


class Trainer:
    '''
        Generic training harness: single-GPU or DDP multi-GPU training with
        optional model EMA, mixed precision (AMP), FGM adversarial training,
        gradient accumulation, cosine LR schedule with warmup, TensorBoard
        logging and best/last checkpointing.

        Usage:

        import pandas as pd
        import numpy as np
        import matplotlib.pyplot as plt
        plt.style.use("ggplot")

        import torch
        from torch import nn
        from torch.utils.data import Dataset, DataLoader

        import os
        import sys
        sys.path.insert(0, os.path.abspath("../../../.."))
        import fireoil
        from fireoil.data import MyFoldData
        from fireoil.trainer import Trainer

        class MyTrainer(Trainer):
            def compute_loss(self, model, batch, device, return_preds=False):
                return {"loss": loss, "preds": preds} if return_preds else {"loss": loss}

            def compute_metrics(self, preds, batch):
                # "metric" must already be averaged over the batch
                return {"metric": metric, "other": other}

        trainer = MyTrainer(args, model, train_dl, val_dl)
        trainer.train()

        # Default argument values (see set_args)
        'debug': True,
        'seed': 402,
        'deterministic': True,
        'gpus': 1,
        'batch_size': 16,
        'num_workers': 16,
        'use_model_ema': True,
        'sync_bn': False,
        'resume': None,
        'model_architeture': 'transformer',
        'learning_rate': 2e-05,
        'weight_decay': 0.01,
        'momentum': 0.9,
        'warmup_ratio': 0.2,
        'gradient_accumulate_steps': 1,
        'num_epochs': 6,
        'use_clip_norm': True,
        'max_norm': 10.0,
        'use_mixed_precision_training': False,
        'use_fgm_training': False,
        'fgm_emb_str': 'emb.',
        'logdir': None,
        'checkpoint_style': 'max',
        'saved_model_path': './trained_weights'

        # Use FGM with caution: empirically it converges slowly and often hurts results.
        # Use gradient accumulation with caution as well: empirically it often hurts results.
        # For transformer-style models the default lr of 2e-5 is fine; for CNNs use 1e-3 to 1e-1.
    '''
    def __init__(self, args, model, train_dl, val_dl):
        """Store args/model/dataloaders and set up every training component."""
        self.args = self.set_args(args)
        self.model = model
        self.train_dl = train_dl
        self.val_dl = val_dl
        self.set_training_factors()

    @classmethod
    def get_default_args(cls):
        """Return a Namespace filled with the default (transformer) hyperparameters."""
        parser = ArgumentParser(description="get default arguments")
        args = parser.parse_args([])  # empty list so this also works inside notebooks
        args = cls.set_args(cls, args)
        return args

    @classmethod
    def get_cnn_default_args(cls):
        """Return a Namespace with CNN-oriented defaults (more epochs, larger lr, SGD path)."""
        parser = ArgumentParser(description="get cnn default arguments")
        args = parser.parse_args([])  # empty list so this also works inside notebooks
        args = cls.set_args(cls, args)
        args.num_epochs = 30
        args.learning_rate = 1e-2
        args.model_architeture = "cnn"
        return args

    def set_args(self, args):
        """Fill in any missing attribute on ``args`` with its default value.

        Existing attributes are never overwritten, so callers only need to set
        the options they want to change.
        """
        # seed
        args.seed = getattr(args, "seed", 402)
        args.deterministic = getattr(args, "deterministic", True)
        # env
        args.gpus = getattr(args, "gpus", 1)
        args.debug = getattr(args, "debug", True)
        # data
        args.num_workers = getattr(args, "num_workers", 16)
        args.batch_size = getattr(args, "batch_size", 16)
        # model
        args.use_model_ema = getattr(args, "use_model_ema", True)
        args.sync_bn = getattr(args, "sync_bn", False)
        args.resume = getattr(args, "resume", None)
        # optimizer
        # NOTE: attribute name keeps the historical spelling ("architeture") for
        # backward compatibility with existing configs.
        args.model_architeture = getattr(args, "model_architeture", "transformer")
        args.learning_rate = getattr(args, "learning_rate", 2e-5)
        args.weight_decay = getattr(args, "weight_decay", 1e-2)
        args.momentum = getattr(args, "momentum", 0.9)
        args.warmup_ratio = getattr(args, "warmup_ratio", 0.2)
        args.gradient_accumulate_steps = getattr(args, "gradient_accumulate_steps", 1)
        # trainer
        args.num_epochs = getattr(args, "num_epochs", 6)
        args.use_clip_norm = getattr(args, "use_clip_norm", True)
        args.max_norm = getattr(args, "max_norm", 10.)
        args.use_mixed_precision_training = getattr(args, "use_mixed_precision_training", False)
        args.use_fgm_training = getattr(args, "use_fgm_training", False)  # FGM adversarial training
        args.fgm_emb_str = getattr(args, "fgm_emb_str", "emb.")  # attack layers whose name contains this substring
        # logger
        args.logdir = getattr(args, "logdir", None)
        # checkpoint
        # "max" means larger is better (e.g. accuracy returned by
        # evaluate_one_epoch); anything else means smaller is better (e.g. loss).
        # Empirically, transformers peak around 6 epochs and CNNs around 30;
        # language-model pretraining benefits from as many epochs as possible.
        args.checkpoint_style = getattr(args, "checkpoint_style", "max")
        args.saved_model_path = getattr(args, "saved_model_path", "./trained_weights")
        return args

    def set_training_factors(self):
        """Initialize seed, devices, model, data, optimizer, logger and checkpoint state."""
        self.set_seed()
        self.set_env()
        self.set_model()
        self.set_data()
        self.set_optimizer_scheduler()
        self.set_logger()
        self.set_checkpoint()

    def set_seed(self):
        """Seed python/numpy/torch RNGs; optionally force deterministic cuDNN."""
        seed = self.args.seed

        # torch seeds must fit in uint32; draw a fresh one if out of range
        seed_max, seed_min = np.iinfo(np.uint32).max, np.iinfo(np.uint32).min
        if seed > seed_max or seed < seed_min:
            seed = random.randint(seed_min, seed_max)

        random.seed(seed)
        np.random.seed(seed)

        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        if self.args.deterministic:
            torch.backends.cudnn.deterministic = True
            # FIX: the benchmark flag lives under cudnn, not cuda; the old code
            # set torch.backends.cuda.benchmark which has no effect.
            torch.backends.cudnn.benchmark = False

        self.args.seed = seed

    def set_env(self):
        """Read the torchrun environment variables and pick the compute device."""
        self.world_size = int(os.getenv('WORLD_SIZE', 1))
        self.local_rank = int(os.getenv('LOCAL_RANK', -1))
        self.rank = int(os.getenv('RANK', -1))
        self.is_distributed = (self.world_size > 1)
        if self.is_distributed:
            torch.cuda.set_device(self.local_rank)
            device = torch.device("cuda", self.local_rank)
            dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo")
        else:
            # fall back to CPU on machines without a GPU instead of crashing later
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.device = device

    def close_env(self):
        """Tear down the process group when running distributed."""
        if self.is_distributed:
            dist.destroy_process_group()

    def set_model(self):
        """Optionally resume weights, move to device, wrap in DDP, build EMA/FGM."""
        if self.args.resume:
            ckpt = torch.load(self.args.resume, map_location="cpu")
            self.model.load_state_dict(ckpt, strict=False)
            print("use resume model state_dict(): %s" % (self.args.resume))

        self.model.to(self.device)

        if self.is_distributed:
            # SyncBatchNorm conversion must happen BEFORE wrapping in DDP.
            if self.args.sync_bn:
                self.model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.model)
            # FIX: the wrapped model was previously assigned to a local variable
            # and dropped unless sync_bn was on, so DDP never took effect.
            if check_version(torch.__version__, '1.11.0'):
                self.model = DDP(self.model, device_ids=[self.local_rank], output_device=self.local_rank, static_graph=True)
            else:
                self.model = DDP(self.model, device_ids=[self.local_rank], output_device=self.local_rank)

        if self.rank in {-1, 0}:
            # EMA shadow model is only tracked on the main process
            self.ema = ModelEMA(self.model) if self.args.use_model_ema else None

        if self.args.use_fgm_training:
            self.fgm = FGM(self.model)

    def set_data(self):
        '''
            Expected dataloaders, e.g.:

            train_dl = DataLoader(
                self.train_ds, batch_size=args.batch_size, pin_memory=True,
                shuffle=True, num_workers=args.num_workers, collate_fn=data_collator
            )
            self.val_dl = DataLoader(
                self.val_ds, batch_size=args.batch_size*2, pin_memory=True,
                shuffle=False, num_workers=args.num_workers, collate_fn=data_collator
            )

            Under DDP the train dataloader is rebuilt with a DistributedSampler.
        '''
        if self.is_distributed:
            dataset = self.train_dl.dataset
            collate_fn = self.train_dl.collate_fn
            train_sampler = DistributedSampler(dataset, shuffle=True)
            self.train_dl = DataLoader(
                dataset, sampler=train_sampler, pin_memory=True, num_workers=self.args.num_workers,
                collate_fn=collate_fn, worker_init_fn=worker_init_reset_seed, drop_last=False,
                batch_size=self.args.batch_size
            )

        self.val_dl.worker_init_fn = worker_init_reset_seed

    def set_optimizer_scheduler(self):
        """Build the optimizer (per architecture) and a cosine schedule with warmup."""
        arch = self.args.model_architeture.lower()
        if arch == "transformer":
            # Split weights in two groups: one with weight decay, the other without
            # (biases and LayerNorm weights are conventionally not decayed).
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=self.args.learning_rate)
        elif arch == "cnn":
            g = [], [], []  # optimizer parameter groups: (decayed weights, norm weights, biases)
            bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k)  # normalization layers, i.e. BatchNorm2d()
            for v in self.model.modules():
                if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):  # bias (no decay)
                    g[2].append(v.bias)
                if isinstance(v, bn):  # norm weight (no decay)
                    g[1].append(v.weight)
                elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):  # weight (with decay)
                    g[0].append(v.weight)

            optimizer = torch.optim.SGD(g[2], lr=self.args.learning_rate, momentum=self.args.momentum, nesterov=True)
            optimizer.add_param_group({'params': g[0], 'weight_decay': self.args.weight_decay})  # add g0 with weight_decay
            optimizer.add_param_group({'params': g[1]})  # add g1 (BatchNorm2d weights)
            del g
        elif arch in ("adamw", "for adamw"):  # "for adamw" kept for backward compatibility
            optimizer = torch.optim.AdamW(self.model.parameters(), lr=self.args.learning_rate,
                                          weight_decay=self.args.weight_decay)
        else:
            optimizer = torch.optim.SGD(self.model.parameters(),
                                        lr=self.args.learning_rate,
                                        weight_decay=self.args.weight_decay,
                                        momentum=self.args.momentum, nesterov=True)

        # scheduler.step() is called once per batch (see train_one_epoch),
        # so the total step count is epochs * batches-per-epoch
        num_training_steps = self.args.num_epochs * len(self.train_dl)
        num_warmup_steps = int(self.args.warmup_ratio * num_training_steps)
        scheduler = get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps)

        self.optimizer = optimizer
        self.scheduler = scheduler

    def set_logger(self):
        """Create the TensorBoard writer (tensorboardX picks a default dir if None)."""
        self.logger = SummaryWriter(self.args.logdir)

    def set_checkpoint(self):
        """Prepare the checkpoint directory and the best-metric sentinel."""
        os.makedirs(self.args.saved_model_path, exist_ok=True)
        if self.args.checkpoint_style == "max":
            # -inf (not 0) so that negative metrics can still be checkpointed
            self.best_metric = -np.inf
        else:
            self.best_metric = np.inf

    @abstractmethod
    def compute_loss(self, model, batch, device, return_preds=False):
        """Subclasses must return {"loss": loss} (plus "preds" when return_preds)."""
        raise RuntimeError("you should implement compute_loss from class Trainer")
        # return {"loss": loss, "preds": preds} if return_preds else {"loss": loss}

    @abstractmethod
    def compute_metrics(self, preds, batch):
        """Subclasses must return {"metric": metric, ...}; "metric" must be batch-averaged."""
        raise RuntimeError("you should implement compute_metrics from class Trainer")
        # return {"metric": metric, "other": other, ...}

    def train(self):
        """Run the full loop: per epoch train + (rank 0) evaluate, log and checkpoint."""
        if self.rank in {-1, 0}:
            print(self.args)

        # Create the GradScaler ONCE: re-creating it per epoch (as before) would
        # reset its calibrated loss scale at every epoch boundary.
        self.scaler = torch.cuda.amp.GradScaler(enabled=self.args.use_mixed_precision_training)

        print_dict = {}
        print_df = pd.DataFrame()
        self.optimizer.zero_grad(set_to_none=True)
        for epoch in range(self.args.num_epochs):
            if self.rank in {-1, 0}:
                train_results = self.train_one_epoch(epoch)
                val_results = self.evaluate_one_epoch(epoch)
                self.save_checkpoint(val_results)

                print_dict.setdefault("epoch", []).append(epoch)
                print_dict.setdefault("train_loss", []).append(train_results["loss_item"])
                for k, v in val_results.items():
                    key = "val_loss" if k == "loss" else k
                    print_dict.setdefault(key, []).append(v)

                print_df = pd.DataFrame(print_dict)
                print(print_df)
                print_df.to_csv(os.path.join(self.args.saved_model_path, "training_results.csv"), index=False)
            else:
                self.train_one_epoch(epoch)

        if self.rank in {-1, 0}:
            # metrics are ignored when is_last=True, so no val_results needed here
            self.save_checkpoint(is_last=True)
            print_df.to_csv(os.path.join(self.args.saved_model_path, "training_results.csv"), index=False)

        torch.cuda.empty_cache()
        if self.is_distributed:
            self.close_env()

    def _forward_backward(self, batch, loss_divisor):
        """One forward + scaled backward pass; returns the loss dict from compute_loss."""
        with torch.cuda.amp.autocast(enabled=self.args.use_mixed_precision_training):
            loss_item = self.compute_loss(self.model, batch, self.device, return_preds=False)
        if "loss" not in loss_item:
            raise RuntimeError("loss key is not returned from compute_loss.")
        self.scaler.scale(loss_item["loss"] / loss_divisor).backward()
        return loss_item

    def training_step(self, batch, batch_idx):
        """One optimization step, honoring FGM, AMP, accumulation and clipping.

        Returns the (undivided) batch loss as a numpy scalar for logging.
        """
        accum = self.args.gradient_accumulate_steps

        if self.args.use_fgm_training:
            # clean pass; divide by 2 because two backwards accumulate into the grads
            loss_item = self._forward_backward(batch, accum * 2.)
            # perturb layers whose name contains self.args.fgm_emb_str
            self.fgm.attack(emb_name=self.args.fgm_emb_str)
            # adversarial pass on the perturbed embeddings
            loss_item = self._forward_backward(batch, accum * 2.)
            # restore the attacked parameters
            self.fgm.restore(emb_name=self.args.fgm_emb_str)
        else:
            loss_item = self._forward_backward(batch, accum)

        # step at every accumulation boundary and on the final batch of the epoch
        if (batch_idx + 1) % accum == 0 or batch_idx >= len(self.train_dl) - 1:
            if self.args.use_clip_norm:
                # FIX: gradients must be unscaled before clipping, otherwise the
                # clip threshold is applied to AMP-scaled gradients and is meaningless.
                self.scaler.unscale_(self.optimizer)
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=self.args.max_norm)
            self.scaler.step(self.optimizer)  # optimizer.step (skipped on inf/nan grads)
            self.scaler.update()
            self.optimizer.zero_grad(set_to_none=True)
            if self.args.use_model_ema:
                self.ema.update(self.model)

        return loss_item["loss"].detach().cpu().numpy()

    def train_one_epoch(self, epoch):
        """Train over the whole dataloader once; returns {"loss_item": mean loss} on rank 0."""
        if self.rank in {-1, 0}:
            loss_accumulator = 0.
            number_of_batch = 0.

        if self.is_distributed:
            # re-shuffle shards so each epoch sees a different data ordering
            self.train_dl.sampler.set_epoch(epoch)

        self.model.train()
        tbar = tqdm(self.train_dl)
        for batch_idx, batch in enumerate(tbar):
            loss_item = self.training_step(batch, batch_idx)

            self.scheduler.step()  # per-batch learning-rate update

            if self.rank in {-1, 0}:
                loss_accumulator += loss_item
                number_of_batch += 1
                tbar.set_description_str("[TRAIN Epoch %d/%d]" % (epoch, self.args.num_epochs - 1))
                tbar.set_postfix_str("loss: %.4f, lr: %.8f" % (loss_accumulator / number_of_batch, self.scheduler.get_last_lr()[0]))
                self.logger.add_scalar("train/loss_step", loss_accumulator / number_of_batch, batch_idx + epoch * len(self.train_dl))
                self.logger.add_scalar("learning rate", self.scheduler.get_last_lr()[0], batch_idx + epoch * len(self.train_dl))

            if self.args.debug:
                break  # debug mode: a single batch per epoch

        torch.cuda.empty_cache()

        if self.rank in {-1, 0}:
            self.logger.add_scalar("train/loss_epoch", loss_accumulator / number_of_batch, epoch)
            return {"loss_item": loss_accumulator / number_of_batch}

    def evaluate_one_epoch(self, epoch):
        """Evaluate (EMA model when enabled) on val_dl; returns batch-averaged metrics."""
        if self.args.use_model_ema:
            eval_model = self.ema.ema
        else:
            eval_model = self.model

        # only called from rank in {-1, 0}
        number_of_batch = 0.
        metric_accumulate_dict = {}
        eval_model.eval()
        tbar = tqdm(self.val_dl)
        for batch_idx, batch in enumerate(tbar):
            with torch.no_grad():
                loss_dict = self.compute_loss(eval_model, batch, self.device, return_preds=True)
            if "preds" not in loss_dict:
                raise RuntimeError("preds must be in the loss_dict.")

            metrics_dict = self.compute_metrics(loss_dict["preds"], batch)
            if "metric" not in metrics_dict:
                raise RuntimeError("metric must be in the metrics_dict.")

            if batch_idx == 0:
                for k, v in metrics_dict.items():
                    metric_accumulate_dict[k] = v
                metric_accumulate_dict["loss"] = loss_dict["loss"].detach().cpu().numpy()
            else:
                for k, v in metrics_dict.items():
                    metric_accumulate_dict[k] += v
                metric_accumulate_dict["loss"] += loss_dict["loss"].detach().cpu().numpy()
            number_of_batch += 1

            tbar.set_description_str("[VALID Epoch %d/%d]" % (epoch, self.args.num_epochs - 1))
            tbar.set_postfix_str("loss: %.4f, metric: %.4f" % (metric_accumulate_dict["loss"] / number_of_batch,
                metric_accumulate_dict["metric"] / number_of_batch))
            for k, v in metric_accumulate_dict.items():
                self.logger.add_scalar("valid/%s_step" % k, v / number_of_batch, batch_idx + epoch * len(self.val_dl))

            if self.args.debug:
                break  # debug mode: a single batch per epoch

        for k, v in metric_accumulate_dict.items():
            metric_accumulate_dict[k] = v / number_of_batch

        for k, v in metric_accumulate_dict.items():
            self.logger.add_scalar("valid/%s_epoch" % k, v, epoch)
        torch.cuda.empty_cache()
        return metric_accumulate_dict

    def save_checkpoint(self, metrics=None, is_last=False):
        """Save "last.pt" unconditionally or "best.pt" when metrics["metric"] improves."""
        if self.args.use_model_ema:
            save_model = self.ema.ema
        else:
            save_model = self.model

        # unwrap the DDP container so weights are saved without the "module." prefix
        state_dict = save_model.module.state_dict() if hasattr(save_model, "module") \
                     else save_model.state_dict()

        os.makedirs(self.args.saved_model_path, exist_ok=True)

        if is_last:
            torch.save(state_dict, os.path.join(self.args.saved_model_path, "last.pt"))
            return

        metric = metrics["metric"]
        if self.args.checkpoint_style == "max":
            improved = self.best_metric < metric
        else:
            improved = self.best_metric > metric
        if improved:
            self.best_metric = metric
            torch.save(state_dict, os.path.join(self.args.saved_model_path, "best.pt"))