import torch.nn as nn
import torch.optim as optim
import torch.utils.hipify.constants
from torch.nn.parallel import DistributedDataParallel as DDP
from tqdm import tqdm

from config import parser_config
from datalist import Dataset, data_transform
from model import UNet
from utils import *
from torch import distributed

'''
DDP模式，专门用于在单机多GPU的环境下训练，速度快

其加速原理是通过提高batch size 来增加并行度

官方也建议使用

使用方法：

    cmd: python -m torch.distributed.launch --nproc_per_node 4 trainDDP.py

    严格注意：使用这个命令的时候，会修改你代码中的args.local_rank,所以一定要确保local_rank写在args里面

    指定GPU的命令模式

    cmd:CUDA_VISIBLE_DEVICES="0,1" python -m torch.distributed.launch --nproc_per_node 2 trainDDP.py

'''

'''
这个项目有一个缺陷，目前始终找不到各进程之间的平衡，所以目前这个只能用于纯train
'''


# Best (lowest) test loss seen so far; updated by TrainDDP.test() before a
# checkpoint is written. float('inf') guarantees the first measured loss wins,
# unlike the previous arbitrary sentinel 100000000.
best_loss = float('inf')
# Side length in pixels used for the fixed sample image saved after each test
# epoch (original, watermarked, and restored panels are this size each).
img_size = 512


def get_image(images):
    '''
    Pull a single viewable picture out of a batch of model outputs.

    :param images: tensor batch of images in NCHW layout (normalized values)
    :return: a Pillow RGB Image built from the first image of the batch
    '''
    # NCHW -> NHWC, detach from the graph and move to host memory.
    batch = images.permute((0, 2, 3, 1)).detach().to('cpu').numpy()
    # Undo normalization and scale to 8-bit pixel values.
    pixels = (denormalize(batch) * 255).astype('uint8')
    first = pixels[0]
    return Image.fromarray(first).convert('RGB')


'''
Noise2Noise DDP model 

local_rank: 本机（单台机器）上的一个进程编号
rank: 所有机器中的全局进程编号
world_size: 表示GPU设备数量

单机情况下，这里的 local_rank 和 rank 是一样的
'''


class TrainDDP(object):
    """Noise2Noise UNet trainer for single-node multi-GPU DDP.

    Launch one process per GPU with, e.g.:

        python -m torch.distributed.launch --nproc_per_node 4 trainDDP.py

    The launcher injects ``args.local_rank`` (and WORLD_SIZE in the
    environment); ``local_rank == -1`` means non-distributed execution.
    Constructing the object runs the full training loop.
    """

    def __init__(self):
        self.args = parser_config()

        # WORLD_SIZE is set by the torch.distributed launcher; default to a
        # single process when running outside of DDP.
        self.args.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1

        # Seed differs per rank so randomness is decorrelated across processes.
        init_seeds(2 + self.args.local_rank)

        # DDP mode: remember the configured (global) batch size before it is
        # split across ranks.
        self.args.total_batch_size = self.args.train_batch_size
        self.device = select_device(self.args.device, batch_size=self.args.train_batch_size)

        if self.args.local_rank != -1:
            assert torch.cuda.device_count() > self.args.local_rank
            torch.cuda.set_device(self.args.local_rank)
            self.device = torch.device('cuda', self.args.local_rank)

            dist.init_process_group(backend='nccl', init_method="env://")
            self.args.world_size = dist.get_world_size()
            assert self.args.train_batch_size % self.args.world_size == 0, \
                '--batch_size must be multiple of CUDA device count'
            # BUGFIX: each rank processes an equal share of the global batch.
            # The original expression (total * world_size // world_size) was a
            # no-op, which is why the divisibility assert above exists.
            self.args.train_batch_size = self.args.total_batch_size // self.args.world_size

        self.train_dataset = Dataset(type='train')
        self.test_dataset = Dataset(type='test')

        # Rank 0 prepares the data first; the other ranks wait at the barrier
        # and then reuse the cached result.
        with torch_distributed_zero_first(self.args.local_rank):
            self.train_dataloader, _ = create_dataloader(self.args.world_size, self.train_dataset,
                                                         self.args.train_batch_size, self.args.local_rank, workers=0,
                                                         train=True)
        # Only the main process evaluates, so only it builds a test loader.
        if self.args.local_rank == 0:
            self.test_dataloader, _ = create_dataloader(self.args.world_size, self.test_dataset,
                                                        self.args.test_batch_size, -1, workers=0, train=False)

        self.model = UNet().to(self.device)
        self.criterion = nn.L1Loss().to(self.device)
        self.optimizer = optim.Adam(params=self.model.parameters(),
                                    lr=self.args.lr,
                                    betas=[0.9, 0.99],
                                    eps=1e-8)
        # BUGFIX: CosineAnnealingLR's second positional argument is T_max (the
        # number of scheduler steps), not a learning rate. The original passed
        # 1e-2 as T_max; anneal over the full epoch count instead, decaying to
        # eta_min=1e-5.
        self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            self.optimizer, T_max=self.args.epochs, eta_min=1e-5)

        # Optionally restore pretrained weights; only tensors whose shapes
        # match the current model are loaded.
        if self.args.resume and self.args.pretrained_model:
            model_dict = self.model.state_dict()
            # map_location keeps each rank's checkpoint off GPU 0.
            checkpoint = torch.load(self.args.pretrained_model, map_location=self.device)
            pretrained_dict = checkpoint['model_state_dict']
            pretrained_dict = {k: v for k, v in pretrained_dict.items() if
                               np.shape(model_dict[k]) == np.shape(v)}
            model_dict.update(pretrained_dict)
            self.model.load_state_dict(model_dict, strict=True)
            print("Process:", self.args.local_rank,
                  " Restoring the weight from pretrained-weight file "
                  "\nFinished to load the weight")

        # Share BatchNorm statistics across ranks.
        # BUGFIX: the converted module was previously stored into a `self.mode`
        # typo attribute, leaving `self.model` unconverted.
        self.model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.model).to(self.device)

        # BUGFIX: every distributed rank must wrap its model in DDP (the
        # original wrapped only rank 0 — and into `self.mode` — so training on
        # `self.model` never synchronized gradients between processes).
        if self.args.local_rank != -1:
            self.model = DDP(self.model, device_ids=[self.args.local_rank],
                             output_device=self.args.local_rank,
                             find_unused_parameters=False)

        self.optimizer.zero_grad()

        print(f"{self.args.local_rank} processing start training")

        for epoch in range(0, self.args.epochs):
            self.train(epoch)
            # self.test(epoch)

        # BUGFIX: tear down the process group on every distributed rank; the
        # original skipped rank 0 and would have called it on the
        # non-distributed rank -1.
        if self.args.local_rank != -1:
            dist.destroy_process_group()
        torch.cuda.empty_cache()
        Color_print("finish model training")

    def train(self, epoch):
        """Run one training epoch on this rank and step the LR scheduler.

        :param epoch: current epoch index, also used to reshuffle the
                      DistributedSampler so each epoch sees a new ordering.
        """
        self.model.train()
        average_loss = []
        reduced_loss = []

        # Required with DistributedSampler: otherwise every epoch replays the
        # same shuffle order.
        self.train_dataloader.sampler.set_epoch(epoch)
        pbar = self.train_dataloader

        for inputs, targets in pbar:
            self.optimizer.zero_grad()
            inputs, targets = inputs.to(self.device, non_blocking=True), targets.to(self.device, non_blocking=True)
            outputs = self.model(inputs)
            outputs = nn.Tanh()(outputs)
            loss = self.criterion(outputs, targets)
            average_loss.append(loss.item())
            loss.backward()
            self.optimizer.step()

            # Synchronize ranks, then average the step loss across all of them
            # for logging. detach() so the logged copy holds no autograd state.
            distributed.barrier()
            reduced_loss.append(reduce_mean(loss.detach(), self.args.world_size))

            if self.args.local_rank == 0:
                print("all loss:", np.mean(reduced_loss))
        if self.args.local_rank == 0:
            print(f'{self.args.local_rank} is finished in the {epoch} epoch, loss {np.mean(reduced_loss)}')

        self.scheduler.step()

    def test(self, epoch):
        """Evaluate on the main process, checkpoint on improvement, and save a
        visual before/after sample for this epoch.

        :param epoch: current epoch index, used for logging and the sample
                      image filename.
        """
        self.model.eval()
        average_loss = []
        if self.args.local_rank == 0:
            pbar = tqdm(self.test_dataloader)
            with torch.no_grad():
                for inputs, targets in pbar:
                    inputs, targets = inputs.to(self.device, non_blocking=True), targets.to(self.device,
                                                                                            non_blocking=True)
                    outputs = self.model(inputs)
                    outputs = nn.Tanh()(outputs)
                    loss = self.criterion(outputs, targets)
                    average_loss.append(loss.item())
                    pbar.set_description(
                        f'Test epoch:{epoch} Process:{self.args.local_rank} loss: {np.mean(average_loss)}')
                    pbar.update(1)

        global best_loss
        # Save only on the main process, and only when the loss improved.
        # BUGFIX: torch.save was previously outside this if-block, so a
        # checkpoint was written every epoch regardless of improvement.
        # The empty-list guard avoids np.mean([]) -> NaN on ranks that did
        # not evaluate.
        if self.args.save and average_loss and best_loss > np.mean(average_loss) \
                and self.args.local_rank in [-1, 0]:
            best_loss = np.mean(average_loss)
            # Unwrap DDP so the saved state_dict keys have no 'module.' prefix
            # and can be loaded straight back into a bare UNet on resume.
            model_to_save = self.model.module if isinstance(self.model, DDP) else self.model
            torch.save({
                'epoch': epoch,
                'model_state_dict': model_to_save.state_dict(),
                'optimizer_state_dict': self.optimizer.state_dict(),
                # 'loss': round(np.mean(average_loss), 2)
            }, 'weights/temp/best.pth')
            Color_print("model saved")

        # Save a qualitative sample for this epoch: original | watermarked |
        # restored, side by side.
        if self.args.local_rank == 0:
            with torch.no_grad():
                data = Image.open("./000005.jpg")
                data = data.resize((img_size, img_size))
                temp1 = data
                data = WaterMark()(data)

                temp2 = data
                data = data_transform(data)

                data = data.unsqueeze(0)
                data = data.to(self.device)
                output = self.model(data)
                output = nn.Tanh()(output)
                result = get_image(output)

                blank = Image.new(mode='RGB', size=(3 * img_size, img_size), color=(255, 255, 255))

                blank.paste(temp1, (0, 0))
                blank.paste(temp2, (img_size, 0))
                blank.paste(result, (img_size * 2, 0))
                blank.save(f'result/{epoch}.jpg')


if __name__ == '__main__':
    # Guarded entry point: each DDP process executes this; constructing
    # TrainDDP runs the full training loop. The guard prevents importing this
    # module from accidentally launching training.
    train = TrainDDP()