import argparse
import os
import time
from functools import partial

import torch
import torch.distributed as dist
import torch.nn.functional
import torch.optim as optim
from PIL import Image
from torch import nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
from torchvision import models
from torchvision import transforms

from datalist import CartoonDataset
from model import UnetGenerator, SpectNormDiscriminator
from utils import VariationLoss, LSGanLoss, GuidedFilter, ColorShift, simple_superpixel, slic, adaptive_slic, sscolor, \
    denormalize

# Command-line interface: distributed-training settings and hyper-parameters.
parser = argparse.ArgumentParser(description="pytorch white-box cartoongan")
_CLI_SPEC = [
    (('--local_rank',), dict(default=-1, type=int, help='node rank for distributed training')),
    (('--seed',), dict(default=None, type=int, help='seed for initializing training. ')),
    (('--batch_size', '--batch-size'), dict(default=24, type=int)),
    (('--epochs',), dict(default=200, type=int)),
    (('--lr',), dict(default=0.0002, type=float)),
    (('--ip',), dict(default='127.0.0.1', type=str)),
    (('--port',), dict(default='23456', type=str)),
]
for _flags, _kwargs in _CLI_SPEC:
    parser.add_argument(*_flags, **_kwargs)


def get_image(image, photo, save_path="./results/1.jpg", n_rows=4):
    """Save a two-column comparison sheet (generated | photo) to *save_path*.

    The first *n_rows* samples of the batch are stacked vertically; the left
    column shows the generator output, the right column the source photo.

    Args:
        image: generated batch, NCHW tensor normalized to roughly [-1, 1].
        photo: source-photo batch with the same layout as ``image``.
        save_path: output image path (default preserves original behavior).
        n_rows: number of sample pairs in the grid (default 4).

    Raises:
        ValueError: if the batch holds fewer than *n_rows* images.
    """
    # Explicit exception instead of `assert` (asserts vanish under `python -O`).
    if len(image) < n_rows:
        raise ValueError(
            "image batchsize should greater than %d, or this function can not work " % n_rows)

    # NHWC uint8 arrays in [0, 255]; denormalize undoes the 0.5/0.5 normalization.
    # Both batches now go through the identical detach -> cpu -> numpy ->
    # denormalize -> *255 pipeline (the original applied the steps in a
    # different order for `photo`).
    out_np = (denormalize(image.permute((0, 2, 3, 1)).detach().to('cpu').numpy()) * 255).astype('uint8')
    pho_np = (denormalize(photo.permute((0, 2, 3, 1)).detach().to('cpu').numpy()) * 255).astype('uint8')

    pairs = [(Image.fromarray(out_np[i]).convert('RGB'),
              Image.fromarray(pho_np[i]).convert('RGB')) for i in range(n_rows)]

    width, height = pairs[0][0].width, pairs[0][0].height
    target = Image.new('RGB', (2 * width, n_rows * height), (255, 255, 255))
    for row, (gen_img, pho_img) in enumerate(pairs):
        target.paste(gen_img, (0, row * height))
        target.paste(pho_img, (width, row * height))

    # Create the output directory if missing so the save cannot fail on a
    # fresh checkout (the original required ./results to pre-exist).
    os.makedirs(os.path.dirname(save_path) or ".", exist_ok=True)
    target.save(save_path)


def reduce_mean(tensor, nprocs):
    """Sum *tensor* across every process and return the per-process mean.

    Collective call: all ranks in the default process group must enter it.
    The input tensor itself is left untouched (a clone is reduced).
    """
    total = tensor.clone()
    dist.all_reduce(total, op=dist.ReduceOp.SUM)
    total /= nprocs
    return total


# Registry of the available super-pixel segmentation back-ends;
# main_worker selects one by name and partially applies its parameters.
SuperPixelDict = {
    'slic': slic,
    'adaptive_slic': adaptive_slic,
    'sscolor': sscolor}

# Training-time preprocessing: resize to the 256x256 network input and map
# pixel values into [-1, 1] (mean 0.5 / std 0.5 per channel).
data_transform = transforms.Compose([
    transforms.Resize((256, 256)),
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
])
# Inference-time preprocessing — intentionally identical to data_transform.
infer_transform = transforms.Compose([
    transforms.Resize((256, 256)),
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
])


def main():
    """Parse CLI arguments, record the GPU count, and launch the worker."""
    cli_args = parser.parse_args()
    cli_args.nprocs = torch.cuda.device_count()
    main_worker(cli_args.local_rank, cli_args.nprocs, cli_args)


def main_worker(local_rank, nprocs, args):
    """Per-process (per-GPU) training worker for the white-box cartoon GAN.

    Initializes NCCL distributed training, builds the generator, the two
    discriminators (blur/surface and gray/texture branches), a frozen VGG16
    feature extractor and the losses, then runs the alternating GAN loop:
    odd steps update the generator, even steps update both discriminators.
    Rank 0 prints progress, dumps preview images and saves checkpoints after
    every epoch.

    Args:
        local_rank: rank of this process on the node; also its CUDA device id.
        nprocs: world size (number of participating processes/GPUs).
        args: parsed CLI namespace (expects batch_size, epochs, nprocs, ...).
    """
    args.local_rank = local_rank

    # Rendezvous configuration comes from the environment
    # (MASTER_ADDR / MASTER_PORT etc.).
    init_method = "env://"

    # 1. Distributed initialization — must run in every worker process,
    # which is why it lives inside main_worker.
    cudnn.benchmark = True
    dist.init_process_group(backend='nccl', init_method=init_method, world_size=args.nprocs,
                            rank=local_rank)

    generator = UnetGenerator()
    discrimnator_gray = SpectNormDiscriminator()  # texture branch (color-shifted images)
    discrimnator_blur = SpectNormDiscriminator()  # surface branch (guided-filter blurred images)
    # VGG16 features serve as a fixed perceptual extractor — freeze all weights.
    vgg = models.vgg16(pretrained=True).features
    for i in vgg.parameters():
        i.requires_grad = False

    guided_filter = GuidedFilter()
    colorshift = ColorShift(device=local_rank)
    # Super-pixel backend for the structure representation; seg_num is the
    # requested number of segments per image.
    superpixel_fn = partial(SuperPixelDict['sscolor'],
                            seg_num=200)

    print("load the weight from pretrained-weight file")
    # Generator: load reconstruction-pretrained weights.
    pretrained_dict = torch.load("weights/generator/pretrain/best.pth")[
        'model_state_dict']
    new_state_dict = {}
    # Strip the leading 'module.' prefix (7 chars) left by a
    # (Distributed)DataParallel wrapper so keys match the bare model.
    for k, v in pretrained_dict.items():
        new_state_dict[k[7:]] = v
    generator.load_state_dict(new_state_dict, True)
    print("finished loading the generate model")

    del new_state_dict

    print("Finished to load the weight")

    vgg.eval()
    generator.train()
    discrimnator_blur.train()
    discrimnator_gray.train()

    torch.cuda.set_device(local_rank)
    generator.cuda(local_rank)
    discrimnator_gray.cuda(local_rank)
    discrimnator_blur.cuda(local_rank)
    vgg.cuda(local_rank)

    # Convert BatchNorm layers to SyncBatchNorm (statistics shared across
    # ranks) before wrapping the models in DDP.
    generator = torch.nn.SyncBatchNorm.convert_sync_batchnorm(generator).to(local_rank)
    discrimnator_gray = torch.nn.SyncBatchNorm.convert_sync_batchnorm(discrimnator_gray).to(local_rank)
    discrimnator_blur = torch.nn.SyncBatchNorm.convert_sync_batchnorm(discrimnator_blur).to(local_rank)

    generator = torch.nn.parallel.DistributedDataParallel(generator, device_ids=[local_rank],find_unused_parameters=True)
    discrimnator_gray = torch.nn.parallel.DistributedDataParallel(discrimnator_gray, device_ids=[local_rank],find_unused_parameters=True)
    discrimnator_blur = torch.nn.parallel.DistributedDataParallel(discrimnator_blur, device_ids=[local_rank],find_unused_parameters=True)

    tv_loss = VariationLoss(1).to(local_rank)  # the 1 is the pixel starting offset (per original note)
    texture_loss = LSGanLoss().to(local_rank)
    surface_loss = LSGanLoss().to(local_rank)
    content_loss = nn.L1Loss().to(local_rank)
    structure_loss = nn.L1Loss().to(local_rank)

    optimizer_g = optim.Adam(generator.parameters(), lr=0.00001, betas=(0.5, 0.99),weight_decay=0.001)
    # A single optimizer drives both discriminators via two parameter groups.
    optimizer_d = optim.Adam(
        [{'params': discrimnator_gray.parameters()}, {'params': discrimnator_blur.parameters()}], lr=0.001,
        betas=(0.5, 0.99),weight_decay=0.001)

    # Per-process batch size: args.batch_size is the global batch size.
    batch_size = int(args.batch_size / nprocs)

    train_dataset = CartoonDataset(cartoon_data_path="../cartoon/hosoda", photo_data_path="../JPEGImages")
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size, num_workers=0, pin_memory=True,
                                               sampler=train_sampler)

    for epoch in range(args.epochs):
        start = time.time()
        # Re-seed the sampler so each epoch sees a different shuffling.
        train_sampler.set_epoch(epoch)
        for step, (cartoons, photos) in enumerate(train_loader):
            cartoons = cartoons.cuda(local_rank, non_blocking=True)
            imgs = photos.cuda(local_rank, non_blocking=True)
            if step % 2 != 0:
                # Generator training step (odd steps).
                '''
                生成网络训练
                '''
                optimizer_g.zero_grad()

                generator_img = generator(imgs)
                # Guided filtering with the photo as guide re-injects detail
                # into the raw generator output.
                output2 = guided_filter(imgs, generator_img, r=1)

                # 1.blur for surface representation
                blur_fake = guided_filter(output2, output2, r=5, eps=2e-1)
                blur_fake_logit = discrimnator_blur(blur_fake)
                g_loss_blur = surface_loss._g_loss(blur_fake_logit)

                # 2. gray for texture respresentation
                gray_fake, = colorshift(output2)  # colorshift returns a 1-tuple here
                gray_fake_logit = discrimnator_gray(gray_fake)
                g_loss_gray = texture_loss._g_loss(gray_fake_logit)

                # 3.superpixel for structure representation
                # Segmentation runs on CPU numpy in NHWC layout; the result
                # is moved back to this GPU and restored to NCHW.
                input_spuerpixel = torch.from_numpy(
                    simple_superpixel(output2.detach().permute((0, 2, 3, 1)).cpu().numpy(),
                                      superpixel_fn)
                ).to(local_rank).permute((0, 3, 1, 2)).contiguous()

                vgg_output = vgg(output2)
                _, c, h, w = vgg_output.shape
                vgg_superpixel = vgg(input_spuerpixel)
                # L1 feature distances normalized by the feature-map size.
                superpixel_loss = structure_loss(vgg_superpixel, vgg_output) / (c * h * w)
                # 4.content loss
                vgg_photo = vgg(imgs)
                photo_loss = content_loss(vgg_photo, vgg_output) / (c * h * w)
                # 5.total variation loss
                tv = tv_loss(output2)
                #                    tv           content            structure              surface             texture
                total_loss = 10000 * tv + 200 * photo_loss + 2e3 * superpixel_loss + 0.1 * g_loss_blur + 1 * g_loss_gray

                # Synchronize all ranks before averaging the loss for logging.
                torch.distributed.barrier()

                reduced_g_loss = reduce_mean(total_loss, args.nprocs)

                total_loss.backward()

                optimizer_g.step()

                if args.local_rank == 0:
                    # NOTE(review): prints optimizer_d's learning rate in the
                    # generator branch — optimizer_g's was likely intended.
                    print(
                        'Training Epoch: {epoch} [{trained_samples}/{total_samples}]\tg_Loss: {:0.4f}\tLR: {:0.6f}'.format(
                            reduced_g_loss,
                            optimizer_d.param_groups[0]['lr'],
                            epoch=epoch + 1,
                            trained_samples=step * args.batch_size + len(photos),
                            total_samples=len(train_loader.dataset)
                        ))
            ############################################################################################################
            else:
                # Discriminator training step (even steps).
                '''
                鉴别网络训练
                '''
                optimizer_d.zero_grad()

                generator_img = generator(imgs)
                output = guided_filter(imgs, generator_img, r=1)

                # 1.blur for surface representation
                blur_fake = guided_filter(output, output, r=5, eps=2e-1)
                blur_real = guided_filter(cartoons, cartoons, r=5, eps=2e-1)
                blur_fake_logit = discrimnator_blur(blur_fake)
                blur_real_logit = discrimnator_blur(blur_real)
                d_loss_blur = surface_loss._d_loss(blur_real_logit, blur_fake_logit)

                # 2.gray for texture represenation
                gray_fake, gray_real = colorshift(output, cartoons)
                gray_fake_logit = discrimnator_gray(gray_fake)
                gray_real_logit = discrimnator_gray(gray_real)
                d_loss_gray = texture_loss._d_loss(gray_real_logit, gray_fake_logit)

                d_loss = d_loss_gray + d_loss_blur

                torch.distributed.barrier()
                reduced_d_loss = reduce_mean(d_loss, args.nprocs)

                d_loss.backward()

                optimizer_d.step()

                if args.local_rank == 0:
                    print(
                        'Training Epoch: {epoch} [{trained_samples}/{total_samples}]\td_Loss: {:0.4f}\tLR: {:0.6f}'.format(
                            reduced_d_loss,
                            optimizer_d.param_groups[0]['lr'],
                            epoch=epoch + 1,
                            trained_samples=step * args.batch_size + len(photos),
                            total_samples=len(train_loader.dataset)
                        ))
                    # Dump a preview grid of the first generated/photo pairs.
                    get_image(output, photos)

        finish = time.time()
        if args.local_rank == 0:
            print('epoch {} training time consumed: {:.2f}s'.format(epoch, finish - start))
            # NOTE(review): checkpoints are overwritten every epoch, so these
            # 'best.pth' files actually hold the latest weights, not the best.
            torch.save({
                'model_state_dict': generator.state_dict()
            },
                './weights/generator/best.pth')
            torch.save({
                'model_state_dict': discrimnator_blur.state_dict()
            },
                'weights/discriminator_blur/best.pth')
            torch.save({
                'model_state_dict': discrimnator_gray.state_dict()
            },
                'weights/discriminator_gray/best.pth')

            print("save models")


# Script entry point: parse CLI arguments and start the training worker.
if __name__ == "__main__":
    main()
