import os
from functools import partial

import torch
import torch.distributed as dist
import torch.nn as nn
import torch.optim as optim
from PIL import Image
from torch.utils.data import DataLoader
from torchvision import models
from tqdm import tqdm

from common import select_device, torch_distributed_zero_first, init_seeds
from config import parser
from datalist import CartoonDataset
from model import UnetGenerator, SpectNormDiscriminator
from utils import VariationLoss, LSGanLoss, GuidedFilter, ColorShift, simple_superpixel, slic, adaptive_slic, sscolor, \
    denormalize

# Lookup table mapping a superpixel algorithm name onto its implementation.
SuperPixelDict = {
    'slic': slic,
    'adaptive_slic': adaptive_slic,
    'sscolor': sscolor,
}
from torchvision import transforms


def _build_transform():
    """256x256 resize -> tensor -> per-channel normalization into [-1, 1]."""
    return transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
    ])


# Training and inference currently share the same preprocessing pipeline.
data_transform = _build_transform()
infer_transform = _build_transform()

'''
DDP mode: intended for single-node multi-GPU training; it is fast and is the
officially recommended approach.

Usage:

    cmd: python -m torch.distributed.launch --nproc_per_node 4 trainddp.py

    Important: this command sets args.local_rank in your code, so make sure
    local_rank is declared in args.

    To pin specific GPUs:

    cmd: CUDA_VISIBLE_DEVICES="1,2" python -m torch.distributed.launch --nproc_per_node 3 trainddp.py

'''


class trainDDP(object):
    """Single-node multi-GPU (DDP) trainer for the white-box cartoonization GAN.

    Constructing an instance runs the entire training loop: it parses the
    arguments, joins the NCCL process group (when launched through
    ``torch.distributed.launch``), builds the dataloader, generator, the two
    discriminators and the frozen VGG16 feature extractor, then alternates
    generator/discriminator updates for ``args.epochs`` epochs, checkpointing
    after every epoch.
    """

    def __init__(self):
        self.args = parser.parse_args()
        # torch.distributed.launch exports WORLD_SIZE / RANK into the env;
        # fall back to single-process defaults when they are absent.
        self.args.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
        self.args.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1

        print(f"-----------{self.args.project_name}-------------")
        print("using DDP model to accelerate")

        # Seed RNGs (offset by rank) so each process is reproducible.
        use_cuda = self.args.use_cuda and torch.cuda.is_available()
        init_seeds(self.args.global_rank + 2)
        # TODO: on Linux, num_workers can be raised to keep the GPU saturated
        # and speed up training.

        self.args.total_batch_size = self.args.train_batch_size
        self.device = select_device(self.args.device, batch_size=self.args.train_batch_size)
        if self.args.local_rank != -1:
            # DDP mode: bind this process to its GPU and join the NCCL group.
            assert torch.cuda.device_count() > self.args.local_rank
            torch.cuda.set_device(self.args.local_rank)
            self.device = torch.device('cuda', self.args.local_rank)
            dist.init_process_group(backend='nccl', init_method='env://')
            self.args.world_size = dist.get_world_size()
            assert self.args.train_batch_size % self.args.world_size == 0, '--batch-size must be multiple of CUDA device count'
            # Per-process batch size = global batch split evenly across ranks.
            self.args.train_batch_size = self.args.total_batch_size // self.args.world_size

        # Training DataLoader (rank 0 prepares the dataset first).
        with torch_distributed_zero_first(self.args.global_rank):
            self.train_dataloader, _ = self.create_dataloader(self.args.train_batch_size, self.args.global_rank,
                                                              workers=8, train=True)

        # White-box GAN image-representation helpers (surface / texture /
        # structure extraction).
        self.guided_filter = GuidedFilter()
        self.colorshift = ColorShift(device=self.device)
        self.superpixel_fn = partial(SuperPixelDict['sscolor'], seg_num=200)

        # Models: one generator, two discriminators (texture and surface).
        self.generator = UnetGenerator()
        self.disc_gray = SpectNormDiscriminator()
        self.disc_blur = SpectNormDiscriminator()
        # Frozen VGG16 features used for the perceptual (content/structure) losses.
        self.vgg = models.vgg16(pretrained=True).features
        for p in self.vgg.parameters():
            p.requires_grad = False

        # Optionally resume from checkpoints saved by a (DDP-wrapped) run.
        if self.args.resume:
            try:
                print("load the weight from pretrained-weight file")
                self._load_weights(self.generator, "weights/generator/pretrain/best.pth")
                print("finished loading the generate model")
                self._load_weights(self.disc_blur, "weights/discriminator_blur/best.pth")
                # BUGFIX: the gray/blur messages were swapped in the original.
                print("finished loading the discriminator_blur model")
                self._load_weights(self.disc_gray, "weights/discriminator_gray/best.pth")
                print("finished loading the discriminator_gray model")
                print("Finished to load the weight")
            except Exception as e:
                # Best-effort resume: report why it failed instead of
                # swallowing the error silently (original used a bare except).
                print(f"can not load weight ({e}) \n train the model from scratch")

        # Convert BatchNorm layers to SyncBatchNorm so statistics are shared
        # across ranks (a no-op for modules without BatchNorm).
        self.generator = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.generator).to(self.device)
        self.disc_gray = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.disc_gray).to(self.device)
        self.disc_blur = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.disc_blur).to(self.device)
        self.vgg = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.vgg).to(self.device)

        # Losses, optimizers and cosine LR schedulers.
        self.tv_loss = VariationLoss(1).to(self.device)  # 1 = starting pixel offset
        self.texture_loss = LSGanLoss().to(self.device)
        self.surface_loss = LSGanLoss().to(self.device)
        self.content_loss = nn.L1Loss().to(self.device)
        self.structure_loss = nn.L1Loss().to(self.device)

        self.optimizer_g = optim.Adam(self.generator.parameters(), lr=self.args.lr)
        self.optimizer_d = optim.Adam(
            [{'params': self.disc_gray.parameters()}, {'params': self.disc_blur.parameters()}], lr=self.args.lr)
        self.scheduler1 = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer_d, T_max=5, eta_min=1e-5)
        self.scheduler2 = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer_g, T_max=5, eta_min=1e-5)

        self.optimizer_g.zero_grad()
        self.optimizer_d.zero_grad()

        for epoch in range(1, self.args.epochs + 1):
            self.train(epoch)
            if epoch % 1 == 0:
                self.test(epoch)
        # BUGFIX: every process that joined the group must leave it; the
        # original only destroyed on ranks > 0 and never on rank 0.
        if self.args.local_rank != -1:
            dist.destroy_process_group()
        torch.cuda.empty_cache()
        print("finish model training")

    def _load_weights(self, model, path):
        """Load a checkpoint saved from a DDP-wrapped model into ``model``.

        Strips the leading ``'module.'`` (7 chars) that DistributedDataParallel
        prepends to every parameter name before calling ``load_state_dict``.
        """
        state = torch.load(path, map_location=self.device)['model_state_dict']
        stripped = {k[7:]: v for k, v in state.items()}
        model.load_state_dict(stripped, True)

    def train(self, epoch):
        """Run one training epoch.

        For every batch: one generator update driven by the five white-box
        losses (tv / content / structure / surface / texture), then one
        discriminator update on the surface (blur) and texture (gray)
        representations.

        Args:
            epoch: 1-based epoch index, used to reseed the distributed sampler
                and in the progress-bar caption.
        """
        # BUGFIX: nn.Module exposes train()/eval(); the original called
        # .Train(), which raises AttributeError at runtime.
        self.generator.train()
        self.disc_blur.train()
        self.disc_gray.train()
        self.vgg.eval()  # VGG stays a frozen feature extractor

        mloss = torch.zeros(4, device=self.device)
        if self.args.global_rank != -1:
            # Give the DistributedSampler a fresh shuffle every epoch.
            self.train_dataloader.sampler.set_epoch(epoch)

        pbar = enumerate(self.train_dataloader)
        if self.args.global_rank in [-1, 0]:
            pbar = tqdm(pbar)  # progress bar on the main process only

        for i, (cartoon, photo) in pbar:
            cartoon = cartoon.to(self.device, non_blocking=True)
            img = photo.to(self.device, non_blocking=True)
            #################################################################################
            # ------------------------- generator update -------------------------
            self.optimizer_g.zero_grad()

            generator_img = self.generator(img)

            # The guided-filter output of the raw generator result is the
            # actual cartoonized image.
            output_for_generator = self.guided_filter(img, generator_img, r=1)

            # 1. blur for the surface representation
            blur_fake = self.guided_filter(output_for_generator, output_for_generator, r=5, eps=2e-1)
            blur_fake_logit = self.disc_blur(blur_fake)
            g_loss_blur = self.surface_loss._g_loss(blur_fake_logit)
            # 2. gray (random color shift) for the texture representation
            gray_fake, = self.colorshift(output_for_generator)
            gray_fake_logit = self.disc_gray(gray_fake)
            g_loss_gray = self.texture_loss._g_loss(gray_fake_logit)

            # 3. superpixel for the structure representation
            # (segmentation runs on CPU/numpy, hence the detach + round trip)
            input_superpixel = torch.from_numpy(
                simple_superpixel(output_for_generator.detach().permute((0, 2, 3, 1)).cpu().numpy(),
                                  self.superpixel_fn)
            ).to(self.device).permute((0, 3, 1, 2))

            vgg_output = self.vgg(output_for_generator)
            _, c, h, w = vgg_output.shape
            vgg_superpixel = self.vgg(input_superpixel)
            superpixel_loss = self.structure_loss(vgg_superpixel, vgg_output) / (c * h * w)
            # 4. content loss against the original photo's VGG features
            vgg_photo = self.vgg(img)
            photo_loss = self.content_loss(vgg_photo, vgg_output) / (c * h * w)
            # 5. total variation loss for smoothness
            tv_loss = self.tv_loss(output_for_generator)
            #                      tv             content               structure             surface           texture
            total_loss = 1e4 * tv_loss + 2e2 * photo_loss + 2e2 * superpixel_loss + 1e-1 * g_loss_blur + 1e-1 * g_loss_gray
            total_loss.backward()
            the_g_loss = total_loss.item()

            self.optimizer_g.step()
            ############################################################################################################
            # ----------------------- discriminator update -----------------------
            self.optimizer_d.zero_grad()

            img = photo.to(self.device)

            generator_img = self.generator(img)
            # Note: the guided-filter output is the port that yields the
            # cartoonized image.
            output_discriminator = self.guided_filter(img, generator_img, r=1)  # edge-preserving smoothing

            # 1. blur for the surface representation
            blur_fake = self.guided_filter(output_discriminator, output_discriminator, r=5, eps=2e-1)
            blur_real = self.guided_filter(cartoon, cartoon, r=5, eps=2e-1)
            blur_fake_logit = self.disc_blur(blur_fake)
            blur_real_logit = self.disc_blur(blur_real)
            d_loss_blur = self.surface_loss._d_loss(blur_real_logit, blur_fake_logit)
            # 2. gray for the texture representation
            gray_fake, gray_real = self.colorshift(output_discriminator, cartoon)
            gray_fake_logit = self.disc_gray(gray_fake)
            gray_real_logit = self.disc_gray(gray_real)
            d_loss_gray = self.texture_loss._d_loss(gray_real_logit, gray_fake_logit)

            d_loss = d_loss_gray + d_loss_blur
            the_d_loss = d_loss.item()
            d_loss.backward()
            self.optimizer_d.step()

            if self.args.global_rank in [-1, 0]:
                self.get_image(output_discriminator, photo)
                pbar.set_description(
                    f'Train Epoch:{epoch}/{self.args.epochs} g_loss:{the_g_loss} d_loss:{the_d_loss}')
        self.scheduler1.step()
        self.scheduler2.step()

    def test(self, epoch):
        """Checkpoint all three networks (called after every epoch).

        Despite the name, this performs no evaluation; it only saves weights.
        """
        with torch.no_grad():
            for model, path in (
                    (self.generator, './weights/generator/best.pth'),
                    (self.disc_blur, 'weights/discriminator_blur/best.pth'),
                    (self.disc_gray, 'weights/discriminator_gray/best.pth')):
                # BUGFIX: make sure the target directory exists before saving.
                os.makedirs(os.path.dirname(path), exist_ok=True)
                torch.save({'model_state_dict': model.state_dict()}, path)
            print("save models")

    def get_image(self, image, photo):
        """Save a 4-row preview grid (cartoonized | original photo) to results/1.jpg.

        Args:
            image: batch of cartoonized outputs, NCHW, normalized to [-1, 1].
            photo: matching batch of input photos, NCHW, normalized to [-1, 1].
        """
        assert len(image) >= 4, "image batch size must be at least 4 for this function to work"

        output = (denormalize(image.permute((0, 2, 3, 1)).detach().to('cpu').numpy()) * 255).astype('uint8')
        photo = (denormalize(photo.permute((0, 2, 3, 1))).detach().to('cpu').numpy() * 255).astype('uint8')

        outputs = [Image.fromarray(output[row]).convert('RGB') for row in range(4)]
        photos = [Image.fromarray(photo[row]).convert('RGB') for row in range(4)]

        width = outputs[3].width
        height = outputs[3].height

        # Two columns (output | photo), four rows, white background.
        target = Image.new('RGB', (width + width, 4 * height), (255, 255, 255))
        for row, (out_img, pho_img) in enumerate(zip(outputs, photos)):
            target.paste(out_img, (0, row * height, width, (row + 1) * height))
            target.paste(pho_img, (width, row * height, 2 * width, (row + 1) * height))

        target.save("./results/1.jpg")

    def create_dataloader(self, batch_size, rank, workers, image_weights=False, train=True):
        """Build a DataLoader over CartoonDataset.

        Rank 0 constructs the dataset first (``torch_distributed_zero_first``)
        so any one-time preprocessing is not duplicated across ranks; in DDP
        mode each rank reads its own DistributedSampler shard.

        Args:
            batch_size: per-process batch size (clamped to the dataset size).
            rank: global rank, or -1 when not running under DDP.
            workers: upper bound on DataLoader worker processes.
            image_weights / train: kept for interface compatibility; unused.

        Returns:
            (dataloader, dataset)
        """
        with torch_distributed_zero_first(rank):
            dataset = CartoonDataset(self.args.data_path, self.args.photo_path)
        batch_size = min(batch_size, len(dataset))
        # Worker count bounded by CPUs per rank, the batch size, and `workers`.
        nw = min([os.cpu_count() // self.args.world_size, batch_size if batch_size > 1 else 0, workers])
        sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None

        dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=nw, sampler=sampler, pin_memory=True)

        return dataloader, dataset


if __name__ == "__main__":
    # Guard so importing this module does not immediately launch training.
    # torch.distributed.launch runs the script as __main__, so behaviour under
    # the launcher is unchanged.  Bind the instance to a fresh name instead of
    # shadowing the class with its own instance.
    trainer = trainDDP()
