import shutil
from collections import OrderedDict
from data_provider import build_data_provider
from models import build_model
from utils import build_record, move_to_device, log_ddp_batch, log_ddp_epoch
from utils import coloring
from .exp_basic import Exp_Basic
from solver import build_lr, build_optimizer, get_lr
from metrics import build_metric
import os.path as osp
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
import torch.distributed as dist
from torchmetrics import MeanMetric


class Processor_ddp(Exp_Basic):
    """DistributedDataParallel (DDP) experiment processor.

    Builds the model / dataloaders / solver through the ``Exp_Basic`` hooks,
    then drives distributed training, rank-0 validation and test, with
    optional AMP mixed precision and checkpoint save/resume support.
    """

    def __init__(self, cfg):
        self.rank = cfg.rank
        super(Processor_ddp, self).__init__(cfg)
        assert self.rank == dist.get_rank(), "The rank should be identical"
        self.train_records = self.init_records()

    def init_model(self):
        """Build the model, convert BatchNorm to SyncBatchNorm and wrap in DDP."""
        model = build_model(self.cfg.MODEL).to(self.device)
        # Replace every plain BN layer with SyncBatchNorm so batch statistics
        # are synchronized across all processes.
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
        # find_unused_parameters=True tolerates forward passes that do not
        # touch every registered parameter.
        self.model = DDP(model, device_ids=[self.rank], find_unused_parameters=True)

    def init_dataloader(self):
        """Create the train loader and, when configured, the valid loader."""
        self.batch_size = self.cfg.DATASET.get('batch_size', 8)
        train_dataset, train_loader = build_data_provider(self.cfg.DATASET,
                                                          self.cfg.PIPELINE,
                                                          flag='train')
        self.train_loader = train_loader
        self.validate = self.cfg.DATASET.get('valid', None)
        if self.validate:
            valid_dataset, valid_loader = build_data_provider(self.cfg.DATASET,
                                                              self.cfg.PIPELINE,
                                                              flag='valid')
            self.valid_loader = valid_loader

        # Whether to run the test/prediction phase after training finishes.
        self.inference = self.cfg.DATASET.get('test', None)

    def init_solver(self):
        """Construct the optimizer and learning-rate scheduler."""
        optimizer = build_optimizer(self.cfg.SOLVER.Optimizer,
                                    parameter_list=self.model.parameters())
        self.optimizer = optimizer
        self.lr = build_lr(self.cfg.SOLVER.Scheduler, optimizer,
                           len(self.train_loader), self.cfg.epochs)

    def init_records(self):
        """Create the per-epoch training meters, one MeanMetric per tracked value.

        Returns:
            OrderedDict[str, MeanMetric]: meters keyed by record name.
        """
        record_list = [("loss", MeanMetric().to(self.device))]
        if 'Recognizer1D' in self.cfg.framework:  # TODO: required specify str in framework
            record_list.append(("hit_at_one", MeanMetric().to(self.device)))
            record_list.append(("perr", MeanMetric().to(self.device)))
            record_list.append(("gap", MeanMetric().to(self.device)))
        elif 'Recognizer' in self.cfg.framework:
            record_list.append(("Acc@1", MeanMetric().to(self.device)))
            record_list.append(("Acc@5", MeanMetric().to(self.device)))

        record_list = OrderedDict(record_list)
        return record_list

    def train_epoch(self, epoch):
        """Run one training epoch: forward/backward/step plus logging."""
        self.model.train()
        # Re-seed the DistributedSampler so each epoch gets a fresh shuffle.
        self.train_loader.sampler.set_epoch(epoch)
        for i, data in enumerate(self.train_loader):
            data = move_to_device(data, self.device)
            if self.amp:
                with torch.cuda.amp.autocast():
                    outputs = self.model(data, mode='train')
                avg_loss = outputs['loss']
                # 1. Scale the loss before backward to avoid gradient underflow.
                self.scaler.scale(avg_loss).backward()
                # 2. scaler.step() first unscales the gradients; if they contain
                #    no infs/NaNs it calls optimizer.step(), otherwise the step
                #    is skipped so the weights are not corrupted.
                self.scaler.step(self.optimizer)
                # 3. Adjust the scale factor for the next iteration.
                self.scaler.update()
                self.optimizer.zero_grad()
            else:
                outputs = self.model(data, mode='train')
                # backward
                avg_loss = outputs['loss']
                avg_loss.backward()
                # minimize
                self.optimizer.step()
                self.optimizer.zero_grad()

            # Accumulate loss and metrics into the epoch meters.
            for name, value in outputs.items():
                self.train_records[name].update(value)

            # Batch-level logging on rank 0 only.
            if self.rank == 0 and (i + 1) % self.cfg.get("log_interval", 10) == 0:
                cur_record = OrderedDict()
                cur_record.update({'lr': get_lr(self.optimizer)})
                for name, value in outputs.items():
                    cur_record.update({name: value.cpu().detach().numpy()})
                log_ddp_batch(cur_record, i + 1, epoch, self.cfg.epochs, "train")

            # learning rate iter step
            if self.cfg.SOLVER.Scheduler.get("iter_step"):
                self.lr.step()

        # learning rate epoch step
        if not self.cfg.SOLVER.Scheduler.get("iter_step"):
            self.lr.step()

        # Aggregate the epoch statistics, then reset the meters.
        epoch_record = OrderedDict()
        for name in self.train_records.keys():
            avg_val = self.train_records[name].compute()
            epoch_record.update({name: avg_val.cpu().detach().numpy()})
            self.train_records[name].reset()

        if self.rank == 0:
            log_ddp_epoch(epoch_record, epoch, "train")

    def vali_epoch(self, epoch, best):
        """Run one validation epoch and track the best score so far.

        Returns:
            tuple: ``(best, best_flag)`` where ``best_flag`` is True when a
            new best score was reached this epoch.
        """
        self.model.eval()
        record_list = build_record(self.cfg.MODEL, ddp_used=False)
        for i, data in enumerate(self.valid_loader):
            data = move_to_device(data, self.device)
            outputs = self.model(data, mode='valid')
            for name, value in outputs.items():  # record loss and metric
                record_list[name].update(value.cpu().detach())

        epoch_record = OrderedDict()
        for name in record_list.keys():
            avg_val = record_list[name].avg
            epoch_record.update({name: avg_val})
        # BUGFIX: log the aggregated averages (epoch_record), not the raw
        # meter objects — mirrors what train_epoch logs.
        log_ddp_epoch(epoch_record, epoch, "valid")

        best_flag = False
        # The following code needs to be adjusted for the specific model
        for top_flag in ['hit_at_one', 'top1']:
            if record_list.get(top_flag) and record_list[top_flag].avg > best:
                best = record_list[top_flag].avg
                best_flag = True

        return best, best_flag

    def test(self, weights_path):
        """Evaluate the model on the test set using the given weights file."""
        self.model.eval()
        assert osp.exists(weights_path), " The weights path to load can't be found"

        self._load_checkpoint(weights_path, resume=False)
        test_batch_size = self.cfg.DATASET.get('test_batch_size', self.batch_size)
        test_dataset, test_loader = build_data_provider(self.cfg.DATASET,
                                                        self.cfg.PIPELINE,
                                                        flag='test')
        # Metrics need the dataset size / batch size to aggregate correctly.
        self.cfg.METRIC.data_size = len(test_dataset)
        self.cfg.METRIC.batch_size = test_batch_size
        Metric = build_metric(self.cfg.METRIC)

        # Inference only: disable autograd to save memory and time.
        with torch.no_grad():
            for batch_id, data in enumerate(test_loader):
                data = move_to_device(data, self.device)
                outputs = self.model(data, mode='test')
                Metric.update(batch_id, data, outputs)
        Metric.accumulate()

    def train(self):
        """Full training entry point: resume/finetune, train, validate, test."""
        resume_epoch = 0
        resume_path = self.cfg.get("resumes")
        if resume_path is not None:
            if not osp.isfile(resume_path):
                raise IOError(f'{resume_path} not exist')
            resume_epoch = self._load_checkpoint(resume_path, resume=True)
            dist.barrier()

        # Finetune: load pretrained weights (mutually exclusive with resume).
        weights_path = self.cfg.get("weights")
        if weights_path is not None:
            assert resume_epoch == 0, f"Conflict occurs when finetuning, please switch resume function off by setting resume_epoch to 0 or not indicating it."
            if not osp.isfile(weights_path):
                raise IOError(f'{weights_path} not exist')
            # map_location keeps every rank from deserializing the weights
            # onto the checkpoint's original device (typically cuda:0).
            checkpoint = torch.load(weights_path, map_location=self.device)
            # Accept either a full checkpoint ({'model': state_dict, ...}) or
            # a bare state_dict; the original direct indexing raised KeyError
            # for the latter.
            model_dict = checkpoint['model'] if checkpoint.get('model') is not None else checkpoint
            self.model.load_state_dict(model_dict)
            dist.barrier()

        # Train Model
        max_epoch = self.cfg.epochs
        best = 0.
        # Validate every 5 epochs, then every epoch over the last 10.
        eval_interval = lambda epoch: 1 if epoch > max_epoch - 10 else 5
        for epoch in range(1, max_epoch + 1):
            if epoch < resume_epoch:
                self.logger.info(
                    f"| epoch: [{epoch}] <= resume_epoch: [{resume_epoch}], continue... "
                )
                continue
            # let all processes sync up before starting with a new epoch of training
            # dist.barrier()
            # Training
            self.train_epoch(epoch)

            # Validation only on GPU: rank 0
            if self.rank == 0:
                if self.validate and epoch % eval_interval(epoch) == 0:
                    with torch.no_grad():
                        best, save_best_flag = self.vali_epoch(epoch, best)
                    # Save the best model (weights only, no optimizer state).
                    if save_best_flag:
                        self._save_checkpoint(osp.join(self.output_dir,
                                                       self.model_name + "_best.pth"),
                                              epoch, best_res=best, resume=False)

                        if self.model_name == "AttentionLstm":
                            self.logger.info(
                                f"Already save the best model (hit_at_one){best}")
                        else:
                            msg = "Already save the best model (top1 acc) {}".format(int(best * 10000) / 10000)
                            self.logger.info(coloring(msg, "RED"))

                # Save model and optimizer regularly for resume
                if epoch % 10 == 0:
                    self._save_checkpoint(osp.join(self.temp_dir,
                                                   self.model_name + "_resume.pth"),
                                          epoch, resume=True)
        # Training stop
        if self.rank == 0:
            self.logger.info(f'training {self.model_name} finished')
            self.logger.info('start prediction process')

            if self.inference:
                weights_file = osp.join(self.output_dir, self.model_name + "_best.pth")
                self.test(weights_file)

            self.logger.info(f'The valid dataset best top1 acc {int(best * 10000) / 10000}')

        # finally, may delete the resume dir
        # shutil.rmtree(self.temp_dir)

    def _save_checkpoint(self, path, epoch, best_res=None, resume=False):
        """Save weights; with resume=True also save optimizer/scheduler/scaler."""
        checkpoint = {
            'model': self.model.state_dict(),
        }
        if best_res is not None:
            checkpoint.update({'best': best_res})
        # Best-model checkpoints keep only the weights; resume checkpoints
        # carry the full training state.
        if resume:
            checkpoint.update({
                'epoch': epoch,
                'optimizer': self.optimizer.state_dict(),
                'scheduler': self.lr.state_dict()
            })
            if self.amp:
                checkpoint.update({'ampscaler': self.scaler.state_dict()})
        torch.save(checkpoint, path)

    def _load_checkpoint(self, path, resume=False):
        """Load a checkpoint; with resume=True also restore training state.

        Returns:
            int: the epoch stored in the checkpoint (0 when not resuming).
        """
        # Map tensors onto this process's own device; without map_location
        # every rank would deserialize onto the device the checkpoint was
        # saved from (typically cuda:0), piling memory onto that single GPU.
        checkpoint = torch.load(path, map_location=self.device)
        self.model.load_state_dict(checkpoint['model'])
        save_epoch = 0
        if resume:
            save_epoch = checkpoint['epoch']
            self.optimizer.load_state_dict(checkpoint['optimizer'])
            self.lr.load_state_dict(checkpoint['scheduler'])
            if self.amp:
                self.scaler.load_state_dict(checkpoint['ampscaler'])
        return save_epoch
