import os
from datetime import datetime
# from lib.sampler import SAMPLER
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
from torch.cuda.amp import GradScaler
from torch.cuda.amp import autocast as autocast
from tqdm import tqdm
from lib.dataset import DATASET
from lib.optimizer import OPTIMIZER
from lib.utils.record import get_logger
from lib.utils.utils import get_loader
from lib.algorithms import ALGORITHMS
from lib.sampler import EpisodeSampler
from torch.utils.data.distributed import DistributedSampler
import sys
from torch.utils.data import DataLoader
import random


class Trainer:
    """Drive training of a few-shot algorithm assembled from ``cfg``.

    Two entry points:
      * ``run``          -- spawns one process per GPU and trains with
                           DistributedDataParallel (works for 1..N GPUs).
      * ``normal_train`` -- plain single-GPU training loop.

    Both loops log/checkpoint every ``cfg.TRAIN.SAVE_BATCH`` batches,
    validate after each epoch, keep the best-accuracy model, and stop
    early after ``cfg.TRAIN.EARLY_STOP`` epochs without improvement.
    """

    def __init__(self, cfg):

        self.cfg = cfg

        # The algorithm registry returns a bundle with the network, the
        # loss/metric callables and the episode sampler classes.
        self.algorithm = ALGORITHMS[cfg.ALGORITHM](cfg)

        self.model = self.algorithm['net']
        self.loss_fn = self.algorithm['loss']
        self.metric = self.algorithm['metric']
        self.train_sampler = self.algorithm['train_sampler']
        self.validate_sampler = self.algorithm['validate_sampler']

        self.train_dataset = DATASET[cfg.DATASET.NAME](cfg, 'TRAIN')
        # validation dataset for tuning parameters and early stopping
        self.val_dataset = DATASET[cfg.DATASET.NAME](cfg, 'VALIDATE')
        self.optimizer = cfg.TRAIN.OPTIMIZER  # registry key, not an instance
        self.epochs = cfg.TRAIN.EPOCH
        self.save_path = os.path.join(cfg.ROOT, cfg.NAME, 'model')
        self.dataset_size = len(self.train_dataset)
        self.train_batch_size = self.cfg.TRAIN.BATCH_SIZE

        # Automatic mixed precision: one GradScaler shared across epochs.
        self.amp = cfg.TRAIN.AMP
        if self.amp:
            self.scaler = GradScaler()

        # DDP topology: world size = nodes * GPUs per node.
        self.device = cfg.TRAIN.DDP.DEVICES
        self.ddp_nr = cfg.TRAIN.DDP.NR  # node rank
        self.ddp_gpus = len(self.device)
        self.ddp_backend = cfg.TRAIN.DDP.BACKEND
        self.ddp_nodes = cfg.TRAIN.DDP.NODES
        self.ddp_world_size = self.ddp_nodes * self.ddp_gpus
        self.find_unused_parameters = cfg.TRAIN.DDP.FIND_UNUSED_PARAMETERS

        # Log + checkpoint every `save_batch` training batches.
        self.save_batch = cfg.TRAIN.SAVE_BATCH

    def save_model(self, model, epoch, save_path, loss=None, acc=None):
        """Serialize ``model`` plus run metadata as a ``.pth`` checkpoint.

        When both ``loss`` and ``acc`` are given, the filename carries the
        metrics (used for best-model checkpoints); otherwise it carries the
        epoch number (periodic checkpoints).
        """

        data_time = datetime.now().strftime("%m-%d-%H-%M")
        model_name = self.cfg.ALGORITHM
        dataset = self.cfg.DATASET.NAME

        check_point = {
            'model_name': model_name,
            'dataset': dataset,
            'parameter': model.state_dict(),
            'epoch': epoch,
            'loss': loss,
            'acc': acc,
            'datetime': data_time,
        }

        # exist_ok avoids an exists()/makedirs() race between processes.
        os.makedirs(save_path, exist_ok=True)

        # Test against None, not truthiness: a legitimate loss/acc of 0.0
        # must still produce the metric-tagged filename.
        if loss is not None and acc is not None:
            file_name = f"{model_name}-{data_time}-loss {loss:0.6f}-acc {acc:0.4f}.pth"
        else:
            file_name = f'{epoch}-{model_name}-{data_time}.pth'

        torch.save(check_point, os.path.join(save_path, file_name))

    # multi-gpu training, also can be used as single gpu training
    def _ddp_train(self, gpu):
        """Per-process DDP worker; ``gpu`` is the local GPU index.

        Rank 0 owns logging, the progress bars and all checkpointing.
        Loss/accuracy are averaged across the world via ``all_reduce``.
        """

        # Only rank 0 logs; other ranks keep tqdm/logging silent.
        if gpu == 0:
            logger = get_logger(self.cfg, 'train')

        rank = self.ddp_nr * self.ddp_gpus + gpu

        dist.init_process_group(
            backend=self.ddp_backend,
            init_method='env://',
            world_size=self.ddp_world_size,
            rank=rank
        )

        # Fixed seed so every rank builds identical initial weights.
        torch.manual_seed(0)
        torch.cuda.set_device(gpu)

        self.model.cuda(gpu)

        ddp_model = nn.parallel.DistributedDataParallel(self.model,
                                                        find_unused_parameters=self.find_unused_parameters,
                                                        device_ids=[gpu])

        optimizer = OPTIMIZER[self.optimizer](self.cfg.TRAIN, ddp_model)

        train_sampler = self.train_sampler(
            dataset=self.train_dataset,
            cfg=self.cfg.TRAIN,
            num_replicas=self.ddp_world_size,
            rank=rank
        )

        val_sampler = self.validate_sampler(
            dataset=self.val_dataset,
            cfg=self.cfg.VALIDATE,
            num_replicas=self.ddp_world_size,
            rank=rank
        )

        train_loader = get_loader(self.cfg, self.train_dataset, 'TRAIN', train_sampler)
        val_loader = get_loader(self.cfg, self.val_dataset, 'VALIDATE', val_sampler)

        early_stop = self.cfg.TRAIN.EARLY_STOP
        best_acc = 0

        for epoch in range(self.epochs):

            #################################################################
            ########### train the model on the training dataset #############
            #################################################################

            ddp_model.train()

            # Reshuffle the sampler's episode order for this epoch.
            train_loader.sampler.set_epoch(epoch)

            with tqdm(total=len(train_loader), disable=(gpu != 0)) as pbar:

                for batch, (data, labels) in enumerate(train_loader):
                    data, labels = data.cuda(non_blocking=True), labels.cuda(non_blocking=True)

                    if self.amp:
                        # Only the forward pass and loss run under autocast;
                        # backward and the optimizer step go through the
                        # GradScaler in full precision (torch.cuda.amp recipe).
                        with autocast():
                            data_f = ddp_model(data, 'TRAIN')
                            loss = self.loss_fn(data_f, labels, 'TRAIN')
                        optimizer.zero_grad()
                        self.scaler.scale(loss).backward()
                        self.scaler.step(optimizer)
                        self.scaler.update()
                    else:
                        data_f = ddp_model(data, 'TRAIN')
                        loss = self.loss_fn(data_f, labels, 'TRAIN')
                        optimizer.zero_grad()
                        loss.backward()
                        optimizer.step()

                    pbar.set_postfix({'training loss': f'{loss.item():0.6f}'})
                    pbar.update(1)

                    if (batch + 1) % self.save_batch == 0:

                        acc = self.metric(data_f, labels, 'TRAIN')

                        # Average loss/acc across all ranks before logging.
                        t = torch.tensor([loss.item(), acc], dtype=torch.float64, device='cuda')
                        dist.barrier()
                        dist.all_reduce(t, op=torch.distributed.ReduceOp.SUM)

                        if gpu == 0:
                            t = (t / self.ddp_world_size).tolist()
                            logger.info(f"| {epoch + 1}/{self.epochs} | "
                                        f"{(batch + 1) * self.train_batch_size * self.ddp_world_size}/{self.dataset_size}]"
                                        f"loss: {t[0]:0.6f}  acc: {t[1]:0.4f}")

                            save_path = os.path.join(self.save_path, 'epoch_model')
                            self.save_model(model=ddp_model.module,
                                            epoch=epoch,
                                            save_path=save_path)

            ######################################################################
            ########### evaluate the model on the validation dataset #############
            ######################################################################

            val_loss = 0
            val_acc = 0

            ddp_model.eval()

            # no_grad: evaluation needs no autograd graph (saves memory/time).
            with torch.no_grad(), tqdm(total=len(val_loader), disable=(gpu != 0)) as pbar:

                for batch, (data, labels) in enumerate(val_loader):
                    data, labels = data.cuda(non_blocking=True), labels.cuda(non_blocking=True)

                    if self.amp:
                        with autocast():
                            data_f = ddp_model(data, 'VALIDATE')
                    else:
                        data_f = ddp_model(data, 'VALIDATE')

                    loss = self.loss_fn(data_f, labels, 'VALIDATE')

                    pbar.set_postfix({'validating loss': f'{loss.item():0.6f}'})
                    pbar.update(1)

                    # Only the cross-rank average is recorded (one line of data).
                    acc = self.metric(data_f, labels, 'VALIDATE')

                    t = torch.tensor([loss.item(), acc], dtype=torch.float64, device='cuda')
                    dist.barrier()
                    dist.all_reduce(t, op=torch.distributed.ReduceOp.SUM)

                    # .item(): accumulate plain floats, not 0-dim CUDA tensors.
                    val_loss += t[0].item() / self.ddp_world_size
                    val_acc += t[1].item() / self.ddp_world_size

            val_loss /= len(val_loader)
            val_acc /= len(val_loader)

            if gpu == 0:
                logger.info(f"| VALIDATE ] aver_loss: {val_loss:0.6f}  aver_acc: {val_acc:0.4f}")
                print(f"epoch: {epoch}/{self.epochs} validate aver_loss: {val_loss:0.6f}  aver_acc: {val_acc:0.4f}")

            ############################################################
            ############# save the best model and early stop ###########
            ############################################################

            # val_acc is identical on all ranks (all_reduce above), so every
            # rank takes the same branch and the early-stop break is in sync.
            if val_acc > best_acc:
                early_stop = self.cfg.TRAIN.EARLY_STOP
                best_acc = val_acc
                if gpu == 0:
                    save_path = os.path.join(self.save_path, 'best_model')
                    self.save_model(model=ddp_model.module,
                                    epoch=epoch,
                                    loss=val_loss,
                                    acc=val_acc,
                                    save_path=save_path)
            else:
                early_stop -= 1
                if early_stop < 0:
                    if gpu == 0:
                        logger.info('| EARLY STOPPING!!!!!]')
                    break

            # Refresh only the train sampler's indices each epoch, so the
            # training episodes differ per epoch while the validation
            # episodes stay fixed across epochs.
            train_sampler.indices = train_sampler.update_indices()

        # Release the process-group resources before the worker exits.
        dist.destroy_process_group()

    def normal_train(self):
        """Single-GPU training loop (no DDP, no process spawning)."""

        logger = get_logger(self.cfg, 'train')

        self.model.cuda()

        optimizer = OPTIMIZER[self.optimizer](self.cfg.TRAIN, self.model)

        train_loader = DataLoader(
            dataset=self.train_dataset,
            batch_size=self.cfg['TRAIN'].BATCH_SIZE,
            shuffle=True,
            drop_last=False,
            pin_memory=self.cfg.DATASET.PIN_MEMORY,
        )

        val_loader = DataLoader(
            dataset=self.val_dataset,
            batch_size=self.cfg['VALIDATE'].BATCH_SIZE,
            shuffle=True,
            drop_last=True,
            pin_memory=self.cfg.DATASET.PIN_MEMORY,
        )

        early_stop = self.cfg.TRAIN.EARLY_STOP
        best_acc = 0

        for epoch in range(self.epochs):

            #################################################################
            ########### train the model on the training dataset #############
            #################################################################

            self.model.train()

            with tqdm(total=len(train_loader)) as pbar:
                for batch, (data, labels) in enumerate(train_loader):

                    data, labels = data.cuda(), labels.cuda()

                    if self.amp:
                        # Forward/loss under autocast; backward and step via
                        # the GradScaler outside it (torch.cuda.amp recipe).
                        with autocast():
                            data_f = self.model(data, 'TRAIN')
                            loss = self.loss_fn(data_f, labels, 'TRAIN')
                        optimizer.zero_grad()
                        self.scaler.scale(loss).backward()
                        self.scaler.step(optimizer)
                        self.scaler.update()
                    else:
                        data_f = self.model(data, 'TRAIN')
                        loss = self.loss_fn(data_f, labels, 'TRAIN')
                        optimizer.zero_grad()
                        loss.backward()
                        optimizer.step()

                    pbar.set_postfix({'training loss': f'{loss.item():0.6f}'})
                    pbar.update(1)

                    if (batch + 1) % self.save_batch == 0:
                        acc = self.metric(data_f, labels, 'TRAIN')
                        # Single process: samples seen = batches * batch size
                        # (no world-size factor here, unlike the DDP loop).
                        logger.info(f"| {epoch + 1}/{self.epochs} | "
                                    f"{(batch + 1) * self.train_batch_size}/{self.dataset_size}]"
                                    f"loss: {loss.item():0.6f}  acc: {acc:0.4f}")

                save_path = os.path.join(self.save_path, 'epoch_model')
                self.save_model(model=self.model,
                                epoch=epoch,
                                save_path=save_path)

            ######################################################################
            ########### evaluate the model on the validation dataset #############
            ######################################################################

            val_loss = 0
            val_acc = 0

            self.model.eval()

            # no_grad: evaluation needs no autograd graph.
            with torch.no_grad(), tqdm(total=len(val_loader)) as pbar:

                for batch, (data, labels) in enumerate(val_loader):
                    data, labels = data.cuda(), labels.cuda()

                    if self.amp:
                        with autocast():
                            data_f = self.model(data, 'VALIDATE')
                    else:
                        data_f = self.model(data, 'VALIDATE')

                    loss = self.loss_fn(data_f, labels, 'VALIDATE')

                    pbar.set_postfix({'validating loss': f'{loss.item():0.6f}'})
                    pbar.update(1)

                    # Only the epoch average is recorded (one line of data).
                    acc = self.metric(data_f, labels, 'VALIDATE')

                    val_loss += loss.item()
                    val_acc += acc

            val_loss /= len(val_loader)
            val_acc /= len(val_loader)

            logger.info(f"| VALIDATE ] aver_loss: {val_loss:0.6f}  aver_acc: {val_acc:0.4f}")
            print(f"epoch: {epoch+1}/{self.epochs} validate aver_loss: {val_loss:0.6f}  aver_acc: {val_acc:0.4f}")

            ############################################################
            ############# save the best model and early stop ###########
            ############################################################

            if val_acc > best_acc:
                early_stop = self.cfg.TRAIN.EARLY_STOP
                best_acc = val_acc
                save_path = os.path.join(self.save_path, 'best_model')
                self.save_model(model=self.model,
                                epoch=epoch,
                                loss=val_loss,
                                acc=val_acc,
                                save_path=save_path)
            else:
                early_stop -= 1
                if early_stop < 0:
                    logger.info('| EARLY STOPPING!!!!!]')
                    break

    def run(self):
        """Configure the DDP rendezvous and spawn one worker per GPU."""
        assert isinstance(self.device, tuple), 'invalid device'
        # Master node address/port so all processes can rendezvous.
        os.environ['MASTER_ADDR'] = self.cfg.TRAIN.DDP.MASTER_ADDR
        # Port for inter-node communication.
        os.environ['MASTER_PORT'] = str(self.cfg.TRAIN.DDP.MASTER_PORT)
        self.device = ','.join(tuple(map(str, self.device)))
        os.environ["CUDA_VISIBLE_DEVICES"] = self.device
        mp.spawn(self._ddp_train, nprocs=self.ddp_gpus, args=())
