import shutil
from data_provider import build_data_provider
from models import build_model
from utils import build_record, log_batch, log_epoch, move_to_device, do_preciseBN
from utils import coloring, EarlyStopping
from .exp_basic import Exp_Basic
from solver import build_lr, build_optimizer, get_lr
from metrics import build_metric
import time
import os.path as osp
import torch


class Processor(Exp_Basic):
    """End-to-end experiment driver.

    Builds the model, data loaders, solver and metrics from a config object,
    then runs the train / validate / test loops with optional AMP, gradient
    accumulation, precise-BN recomputation, early stopping, checkpoint
    resume and finetuning.
    """

    def __init__(self, cfg):
        super(Processor, self).__init__(cfg)

    def init_model(self):
        # Build the network from the MODEL config and place it on the device.
        self.model = build_model(self.cfg.MODEL).to(self.device)

    def init_dataloader(self):
        """Create the train loader and, when configured, the valid loader."""
        self.batch_size = self.cfg.DATASET.get('batch_size', 8)
        train_dataset, train_loader = build_data_provider(self.cfg.DATASET,
                                                          self.cfg.PIPELINE,
                                                          flag='train')
        self.train_loader = train_loader
        self.validate = self.cfg.DATASET.get('valid', None)
        if self.validate:
            valid_dataset, valid_loader = build_data_provider(self.cfg.DATASET,
                                                              self.cfg.PIPELINE,
                                                              flag='valid')
            self.valid_loader = valid_loader

        # The test loader itself is built on demand inside `test()`;
        # here we only record whether inference is requested.
        self.inference = self.cfg.DATASET.get('test', None)

    def init_solver(self):
        """Construct the optimizer, LR scheduler and early-stopping monitor."""
        optimizer = build_optimizer(self.cfg.SOLVER.Optimizer,
                                    parameter_list=self.model.parameters())
        self.optimizer = optimizer
        self.lr = build_lr(self.cfg.SOLVER.Scheduler, optimizer,
                           len(self.train_loader), self.cfg.epochs)

        if self.cfg.SOLVER.get("Earlystop"):
            earlystop_cfg = self.cfg.SOLVER.Earlystop.copy()
            self.monitor = EarlyStopping(**earlystop_cfg)
        else:
            # Default monitor never triggers early stop (patience == epochs).
            self.monitor = EarlyStopping(patience=self.cfg.epochs, max_better=True)

    def train_epoch(self, epoch):
        """Train the model for one epoch and log per-batch / per-epoch stats.

        Supports three update modes: plain step, AMP (scaled loss), and
        gradient accumulation (step every ``num_iters`` batches).
        """
        self.model.train()
        tic = time.time()
        record_list = build_record(self.cfg.MODEL)
        for i, data in enumerate(self.train_loader):
            data = move_to_device(data, self.device)
            ###AMP###
            if self.amp:
                with torch.cuda.amp.autocast():
                    outputs = self.model(data, mode='train')
                # 1. Scale the loss so small fp16 gradients do not underflow.
                avg_loss = outputs['loss']
                self.scaler.scale(avg_loss).backward()
                # NOTE(review): gradient accumulation is not applied on the
                # AMP path — confirm whether that is intentional.
                # 2. scaler.step() first unscales the gradients; if they
                #    contain no infs/NaNs it calls optimizer.step(), otherwise
                #    the step is skipped so the weights are not corrupted.
                self.scaler.step(self.optimizer)
                # 3. Update the scale factor for the next iteration.
                self.scaler.update()
                self.optimizer.zero_grad()

            else:
                outputs = self.model(data, mode='train')
                # backward
                avg_loss = outputs['loss']
                avg_loss.backward()
                # minimize
                if self.use_gradient_accumulation:  # Use gradient accumulation strategy
                    # BUGFIX: the old code called zero_grad() right after the
                    # first backward pass (i == 0), discarding the first
                    # batch's gradients. Step/zero every `num_iters` batches;
                    # gradients accumulate in between.
                    if (i + 1) % self.cfg.GRADIENT_ACCUMULATION.num_iters == 0:
                        self.optimizer.step()
                        self.optimizer.zero_grad()
                else:  # Common case
                    self.optimizer.step()
                    self.optimizer.zero_grad()

            # log record
            record_list['lr'].update(get_lr(self.optimizer))
            for name, value in outputs.items():  # record loss and metric
                record_list[name].update(value.cpu().detach())

            record_list['batch_time'].update(time.time() - tic)
            tic = time.time()

            if (i + 1) % self.cfg.get("log_interval", 10) == 0:
                ips = "ips: {:.4f} instance/sec.".format(
                    self.batch_size / record_list["batch_time"].val)
                log_batch(record_list, i + 1, epoch, self.cfg.epochs, "train", ips)

            # learning rate iter step
            if self.cfg.SOLVER.Scheduler.get("iter_step"):
                self.lr.step()

        # learning rate epoch step
        if not self.cfg.SOLVER.Scheduler.get("iter_step"):
            self.lr.step()

        ips = "avg_ips: {:.4f} instance/sec.".format(
            self.batch_size * record_list["batch_time"].count /
            record_list["batch_time"].sum)
        log_epoch(record_list, epoch, "train", ips)

        # Periodically recompute BatchNorm running statistics for accuracy.
        if self.cfg.get("PRECISEBN") and (epoch % self.cfg.PRECISEBN.preciseBN_interval
                                          == 0 or epoch == self.cfg.epochs):
            do_preciseBN(self.model, self.train_loader,
                         min(self.cfg.PRECISEBN.num_iters_preciseBN, len(self.train_loader)))

    def vali_epoch(self, epoch):
        """Evaluate on the valid loader and return the tracked top metric.

        Returns 0.0 when no known top-metric key was recorded
        (BUGFIX: `val_metric` used to be unbound in that case).
        """
        self.model.eval()
        record_list = build_record(self.cfg.MODEL)
        record_list.pop('lr')  # delete lr, it's not necessary for validation
        tic = time.time()
        for i, data in enumerate(self.valid_loader):
            data = move_to_device(data, self.device)
            outputs = self.model(data, mode='valid')
            # log_record
            for name, value in outputs.items():  # record loss and metric
                record_list[name].update(value.cpu().detach(), self.batch_size)

            record_list['batch_time'].update(time.time() - tic)
            tic = time.time()

        ips = "avg_ips: {:.4f} instance/sec.".format(
            self.batch_size * record_list["batch_time"].count /
            record_list["batch_time"].sum)
        log_epoch(record_list, epoch, "valid", ips)

        val_metric = 0.0  # BUGFIX: default so return never hits an unbound name
        for top_flag in ['hit_at_one', 'top1']:
            if record_list.get(top_flag):
                val_metric = record_list[top_flag].avg

        return val_metric

    def test(self, weights_path):
        """Load weights from `weights_path` and evaluate on the test split."""
        self.model.eval()
        # Raise instead of assert so the check survives `python -O`.
        if not osp.exists(weights_path):
            raise IOError(" The weights path to load can't be found")

        self._load_checkpoint(weights_path, resume=False)
        test_batch_size = self.cfg.DATASET.get('test_batch_size', self.batch_size)
        test_dataset, test_loader = build_data_provider(self.cfg.DATASET,
                                                        self.cfg.PIPELINE,
                                                        flag='test')
        # add params to metrics
        self.cfg.METRIC.data_size = len(test_dataset)
        self.cfg.METRIC.batch_size = test_batch_size
        Metric = build_metric(self.cfg.METRIC)

        # Inference only: skip autograd graph construction.
        with torch.no_grad():
            for batch_id, data in enumerate(test_loader):
                data = move_to_device(data, self.device)
                outputs = self.model(data, mode='test')
                Metric.update(batch_id, data, outputs)
        Metric.accumulate()

    def train(self):
        """Full training loop with optional resume, finetune, validation,
        early stopping, periodic checkpointing and a final test pass."""
        resume_epoch = 0
        resume_path = self.cfg.get("resumes")
        if resume_path is not None:
            if not osp.isfile(resume_path):
                raise IOError(f'{resume_path} not exist')
            resume_epoch = self._load_checkpoint(resume_path, resume=True)
        # Finetune:
        weights_path = self.cfg.get("weights")
        if weights_path is not None:
            # Raise instead of assert so the check survives `python -O`.
            if resume_epoch != 0:
                raise ValueError("Conflict occurs when finetuning, please switch resume function off by setting resume_epoch to 0 or not indicating it.")
            if not osp.isfile(weights_path):
                raise IOError(f'{weights_path} not exist')
            # map_location avoids device-mismatch errors when the checkpoint
            # was saved on a different device (BUGFIX).
            checkpoint = torch.load(weights_path, map_location=self.device)
            # Accept both a full checkpoint dict and a bare state_dict
            # (BUGFIX: indexing 'model' on a bare state_dict raised KeyError).
            model_dict = checkpoint['model'] if 'model' in checkpoint else checkpoint
            self.model.load_state_dict(model_dict)

        # Train Model
        best = 0.
        max_epoch = self.cfg.epochs
        # Validate every 5 epochs, then every epoch over the last 10.
        eval_interval = lambda epoch: 1 if epoch > max_epoch - 10 else 5
        for epoch in range(1, max_epoch + 1):
            if epoch < resume_epoch:
                self.logger.info(
                    f"| epoch: [{epoch}] <= resume_epoch: [{resume_epoch}], continue... "
                )
                continue
            # Training
            self.train_epoch(epoch)

            # Validation
            if self.validate and epoch % eval_interval(epoch) == 0:
                with torch.no_grad():
                    val_metric = self.vali_epoch(epoch)
                    save_best_flag = self.monitor(val_metric)
                # save best
                if save_best_flag:
                    best = val_metric
                    self._save_checkpoint(osp.join(self.output_dir,
                                                   self.model_name + "_best.pth"),
                                          epoch, best_res=best, resume=False)

                    if self.model_name == "AttentionLstm":
                        self.logger.info(
                            f"Already save the best model (hit_at_one){best}")
                    else:
                        msg = "Already save the best model (top1 acc) {}".format(int(best * 10000) / 10000)
                        self.logger.info(coloring(msg, "RED"))
                else:
                    if self.monitor.early_stop:
                        self.logger.info("Early stopping")
                        break

            # Save model and optimizer regularly for resume
            if epoch % 10 == 0:
                self._save_checkpoint(osp.join(self.temp_dir, self.model_name + "_resume.pth"),
                                      epoch, resume=True)

        self.logger.info(f'training {self.model_name} finished')
        self.logger.info('start prediction process')

        if self.inference:
            weights_file = osp.join(self.output_dir, self.model_name + "_best.pth")
            self.test(weights_file)

        self.logger.info(f'The valid dataset best top1 acc {int(best * 10000) / 10000}')
        # finally, may delete the resume dir
        # ignore_errors: don't crash teardown if the dir was never created
        shutil.rmtree(self.temp_dir, ignore_errors=True)

    def _save_checkpoint(self, path, epoch, best_res=None, resume=False):
        """Serialize the model (and, when `resume`, the full training state:
        epoch, optimizer, scheduler and AMP scaler) to `path`."""
        checkpoint = {
            'model': self.model.state_dict(),
        }
        if best_res is not None:
            checkpoint.update({'best': best_res})
        # if save best_params only need save model, or else save all state for resume
        if resume:
            checkpoint.update({
                'epoch': epoch,
                'optimizer': self.optimizer.state_dict(),
                'scheduler': self.lr.state_dict()
            })
            if self.amp:
                checkpoint.update({'ampscaler': self.scaler.state_dict()})
        torch.save(checkpoint, path)

    def _load_checkpoint(self, path, resume=False):
        """Restore model weights from `path`.

        When `resume` is True, also restore optimizer/scheduler/scaler state
        and return the saved epoch; otherwise return 0.
        """
        # map_location avoids device-mismatch errors when the checkpoint was
        # saved on a different device (BUGFIX).
        checkpoint = torch.load(path, map_location=self.device)
        self.model.load_state_dict(checkpoint['model'])
        if resume:
            save_epoch = checkpoint['epoch']
            self.optimizer.load_state_dict(checkpoint['optimizer'])
            self.lr.load_state_dict(checkpoint['scheduler'])
            if self.amp:
                self.scaler.load_state_dict(checkpoint['ampscaler'])
            return save_epoch
        else:
            return 0
