import torch
from torch.cuda.amp import GradScaler
from torch.cuda.amp import autocast as autocast
from tqdm import tqdm
import torch.distributed as dist
from lib.dataset import DATASET
from lib.utils.record import get_logger
from lib.utils.utils import get_loader
from torch import nn
import os
import torch.multiprocessing as mp
from lib.algorithms import ALGORITHMS
from lib.sampler import NormalEpisodeSampler
import sys
from lib.backbone.conv4 import Conv4
from torch.autograd import Variable


class Tester:
    """Few-shot evaluation harness.

    Loads a trained checkpoint and evaluates it either on a single GPU
    (`_normal_test`) or with DistributedDataParallel across several
    GPUs/nodes (`_ddp_test`, launched per-process by `run`).
    """

    def __init__(self, cfg):

        self.cfg = cfg

        # The algorithm factory bundles the network, the loss, the accuracy
        # metric and the episode sampler used for distributed evaluation.
        self.algorithm = ALGORITHMS[cfg.ALGORITHM](cfg)

        self.model = self.algorithm['net']
        self.loss_fn = self.algorithm['loss']
        self.metric = self.algorithm['metric']
        self.test_sampler = self.algorithm['test_sampler']

        self.model_path = cfg.TEST.MODEL_PATH

        # map_location='cpu' keeps loading robust when the checkpoint was
        # saved on a different (or currently unavailable) CUDA device; the
        # model is moved to the proper GPU later in the test methods.
        self.check_point = torch.load(self.model_path, map_location='cpu')
        # load_state_dict restores every parameter, backbone included, so no
        # manual per-parameter copy is needed beforehand.
        self.model.load_state_dict(self.check_point['parameter'])
        print('the model has been loaded over !')

        self.test_dataset = DATASET[cfg.DATASET.NAME](cfg, 'TEST')

        self.logger = get_logger(cfg, 'test')
        self.dataset_size = len(self.test_dataset)
        self.test_batch_size = cfg.TEST.BATCH_SIZE

        # NOTE(review): no optimizer step happens at test time, so the
        # GradScaler is never used for scaling; it is kept only so that
        # `self.scaler` stays available to any external code. Autocast alone
        # drives mixed precision here.
        self.amp = cfg.TEST.AMP
        if self.amp:
            self.scaler = GradScaler()

        # DDP topology: world size = number of nodes * GPUs per node.
        self.device = cfg.TEST.DDP.DEVICES
        self.ddp_nr = cfg.TEST.DDP.NR
        self.ddp_gpus = len(self.device)
        self.ddp_backend = cfg.TEST.DDP.BACKEND
        self.ddp_nodes = cfg.TEST.DDP.NODES
        self.ddp_world_size = self.ddp_nodes * self.ddp_gpus
        self.find_unused_parameters = cfg.TEST.DDP.FIND_UNUSED_PARAMETERS

    def _ddp_test(self, gpu):
        """Per-process evaluation worker spawned by `run`.

        Args:
            gpu: local GPU index of this process (0 .. ddp_gpus-1); the
                global rank is derived from the node index `ddp_nr`.
        """
        # Only the local rank-0 process writes to the log file.
        if gpu == 0:
            logger = get_logger(self.cfg, 'test')

        rank = self.ddp_nr * self.ddp_gpus + gpu

        dist.init_process_group(
            backend=self.ddp_backend,
            init_method='env://',
            world_size=self.ddp_world_size,
            rank=rank
        )

        # Same seed in every process keeps sampling/initialisation aligned.
        torch.manual_seed(0)
        torch.cuda.set_device(gpu)

        self.model.cuda(gpu)

        ddp_model = nn.parallel.DistributedDataParallel(
            self.model,
            find_unused_parameters=self.find_unused_parameters,
            device_ids=[gpu]
        )

        test_sampler = self.test_sampler(
            dataset=self.test_dataset,
            cfg=self.cfg.TEST,
            num_replicas=self.ddp_world_size,
            rank=rank
        )

        test_loader = get_loader(self.cfg, self.test_dataset, 'TEST', test_sampler)

        aver_loss = 0.0
        aver_acc = 0.0

        ddp_model.eval()

        # NOTE(review): gradients are intentionally left enabled (no
        # torch.no_grad()) -- presumably some few-shot algorithms adapt
        # parameters in the forward pass at test time; confirm this is
        # required before wrapping the loop in no_grad().
        with tqdm(total=len(test_loader), disable=(gpu != 0)) as pbar:

            for batch, (data, labels) in enumerate(test_loader):
                data, labels = data.to(gpu), labels.to(gpu)

                if self.amp:
                    with autocast():
                        data_f = ddp_model(data, 'TEST')
                else:
                    data_f = ddp_model(data, 'TEST')

                loss = self.loss_fn(data_f, labels, 'TEST')

                pbar.set_postfix({'testing loss': f'{loss.item():0.6f}'})
                pbar.update(1)

                # Only the mean across processes is recorded (one value per
                # batch), so sum-reduce then divide by the world size.
                acc = self.metric(data_f, labels, 'TEST')

                t = torch.tensor([loss.item(), acc], dtype=torch.float64, device='cuda')
                dist.barrier()
                dist.all_reduce(t, op=dist.ReduceOp.SUM)

                # .item() keeps the accumulators as plain Python floats
                # instead of live CUDA tensors held across iterations.
                aver_loss += t[0].item() / self.ddp_world_size
                aver_acc += t[1].item() / self.ddp_world_size

        aver_loss /= len(test_loader)
        aver_acc /= len(test_loader)

        if gpu == 0:
            logger.info(f"[ TEST ] aver_loss: {aver_loss:0.6f}  aver_acc: {aver_acc:0.4f}")
            print(f"test aver_loss: {aver_loss:0.6f}  aver_acc: {aver_acc:0.4f}")

        # Release process-group resources before the worker exits.
        dist.destroy_process_group()

    def _normal_test(self):
        """Single-GPU evaluation path (runs everything on device 0)."""

        logger = get_logger(self.cfg, 'test')
        self.model.cuda()

        test_sampler = NormalEpisodeSampler(
            data_source=self.test_dataset,
            cfg=self.cfg.TEST,
        )

        test_loader = get_loader(self.cfg, self.test_dataset, 'TEST', test_sampler)

        aver_loss = 0.0
        aver_acc = 0.0

        self.model.eval()

        # NOTE(review): as in `_ddp_test`, gradients are intentionally kept
        # enabled -- confirm whether test-time adaptation needs them.
        with tqdm(total=len(test_loader)) as pbar:

            for batch, (data, labels) in enumerate(test_loader):
                data, labels = data.to(0), labels.to(0)

                if self.amp:
                    with autocast():
                        data_f = self.model(data, 'TEST')
                else:
                    data_f = self.model(data, 'TEST')

                loss = self.loss_fn(data_f, labels, 'TEST')

                pbar.set_postfix({'testing loss': f'{loss.item():0.6f}'})
                pbar.update(1)

                # Only the running mean is recorded (one row of results).
                acc = self.metric(data_f, labels, 'TEST')

                aver_loss += loss.item()
                aver_acc += acc

        aver_loss /= len(test_loader)
        aver_acc /= len(test_loader)

        logger.info(f"[ TEST ] aver_loss: {aver_loss:0.6f}  aver_acc: {aver_acc:0.4f}")
        print(f"test aver_loss: {aver_loss:0.6f}  aver_acc: {aver_acc:0.4f}")

    def run(self):
        """Entry point: set up the DDP rendezvous environment and spawn one
        evaluation process per local GPU."""
        # Explicit raise instead of `assert`: asserts are stripped under -O
        # and configs may provide a list rather than a tuple.
        if not isinstance(self.device, (tuple, list)):
            raise ValueError(f'invalid device specification: {self.device!r}')
        os.environ['MASTER_ADDR'] = self.cfg.TEST.DDP.MASTER_ADDR  # master node address
        os.environ['MASTER_PORT'] = str(self.cfg.TEST.DDP.MASTER_PORT)  # port so all nodes can reach each other
        # Restrict visible devices so local ranks 0..n-1 map onto the
        # configured physical GPUs.
        self.device = ','.join(map(str, self.device))
        os.environ["CUDA_VISIBLE_DEVICES"] = self.device
        mp.spawn(self._ddp_test, nprocs=self.ddp_gpus, args=())
