# -*- coding: utf-8 -*-
"""
author:LTH
data:
"""
from functools import partial

import torch
import torch.nn as nn
import torch.optim as optim
from PIL import Image
from torch.backends import cudnn
from torch.utils.data import DataLoader
from torchvision import models
from torchvision import transforms
from tqdm import tqdm

from config import parser
from datalist import CartoonDataset
from model import UnetGenerator, SpectNormDiscriminator
from utils import VariationLoss, LSGanLoss, GuidedFilter, ColorShift, simple_superpixel, slic, adaptive_slic, sscolor, \
    denormalize

# Map a superpixel-method name to its implementation (all imported from
# utils); the trainer selects 'sscolor' from this table in train.__init__.
# (The stray mid-file `from torchvision import transforms` has been moved
# to the top-of-file import block per PEP 8.)
SuperPixelDict = {
    'slic': slic,
    'adaptive_slic': adaptive_slic,
    'sscolor': sscolor}

# Shared preprocessing: resize to a fixed 256x256, convert to a float
# tensor, then normalize each RGB channel to the [-1, 1] range.
data_transform = transforms.Compose([
    transforms.Resize((256, 256)),
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
])
# Inference uses exactly the same pipeline; alias instead of duplicating
# the Compose definition (the transforms here are stateless).
infer_transform = data_transform

# NOTE(review): original author note, translated from Chinese:
# "the problem here may be that the produced guidance image is faulty".


class train(object):
    """White-box cartoonization GAN trainer.

    Construction does everything: parses CLI args, seeds RNGs, builds the
    dataloader, the U-Net generator, the two spectral-norm discriminators
    (surface/blur and texture/gray) and a frozen VGG11 feature extractor,
    optionally restores checkpoints, then runs the full training loop and
    checkpoints after every epoch.

    Fixes vs. the previous revision:
      * bare ``except:`` narrowed so Ctrl-C/SystemExit are not swallowed;
      * the "finished loading the discriminator_gray/_blur" messages were
        swapped relative to the model actually loaded — now correct;
      * ``get_image`` called ``.detach()`` on the numpy result of
        ``denormalize`` for the photo branch instead of on the tensor;
      * checkpoint-loading boilerplate deduplicated into helpers.
    """

    @staticmethod
    def _strip_module_prefix(state_dict):
        """Drop the 7-char 'module.' prefix that nn.DataParallel prepends
        to every parameter key, so weights load into a bare (non-parallel)
        model. Returns a new dict; the input is not modified."""
        return {key[7:]: value for key, value in state_dict.items()}

    def _load_checkpoint(self, model, path, name):
        """Strictly load a DataParallel checkpoint at `path` into `model`.

        `name` is only used for the progress message. Raises whatever
        torch.load / load_state_dict raise on a missing or mismatched file.
        """
        state = torch.load(path, map_location=self.device)['model_state_dict']
        model.load_state_dict(self._strip_module_prefix(state), True)
        print(f"finished loading the {name} model")

    def __init__(self):
        self.args = parser.parse_args()
        print(f"-----------{self.args.project_name}-------------")

        use_cuda = self.args.use_cuda and torch.cuda.is_available()
        if use_cuda:
            torch.cuda.manual_seed(self.args.seed)
            torch.cuda.manual_seed_all(self.args.seed)
        else:
            torch.manual_seed(self.args.seed)

        self.device = torch.device('cuda' if use_cuda else 'cpu')
        # TODO: on Ubuntu/Linux num_workers can be raised so the dataloader
        # keeps the GPU near 100% utilisation and speeds up training; it is
        # left at 0 because the same setting is not supported on Windows.
        train_kwargs = {'num_workers': 0, 'pin_memory': True} if use_cuda else {}

        # ---- DataLoader --------------------------------------------------
        self.train_dataset = CartoonDataset(self.args.data_path, self.args.photo_path)
        self.train_dataloader = DataLoader(self.train_dataset, batch_size=self.args.train_batch_size, shuffle=True,
                                           **train_kwargs)

        # ---- white-box GAN data-processing helpers -----------------------
        self.guided_filter = GuidedFilter()
        self.colorshift = ColorShift(device=self.device)
        self.superpixel_fn = partial(SuperPixelDict['sscolor'], seg_num=200)

        # ---- models ------------------------------------------------------
        self.generator = UnetGenerator().to(self.device)
        self.disc_gray = SpectNormDiscriminator().to(self.device)
        self.disc_blur = SpectNormDiscriminator().to(self.device)
        # VGG11 is used only as a frozen feature extractor for the
        # perceptual (content/structure) losses.
        self.vgg = models.vgg11(pretrained=True).to(self.device).features
        for param in self.vgg.parameters():
            param.requires_grad = False

        # ---- checkpoint restore ------------------------------------------
        if self.args.resume:
            try:
                print("load the weight from pretrained-weight file")
                self._load_checkpoint(self.generator, "weights/generator/pretrain/best.pth", "generate")
                self._load_checkpoint(self.disc_blur, "weights/discriminator_blur/best.pth", "discriminator_blur")
                self._load_checkpoint(self.disc_gray, "weights/discriminator_gray/best.pth", "discriminator_gray")
                print("Finished to load the weight")
            except Exception as err:  # narrowed from bare except: keep Ctrl-C alive
                print(f"can not load weight ({err}) \n train the model from scratch")
        else:
            # Not resuming still warm-starts the generator from best.pth;
            # this load is deliberately unguarded and fails hard if missing.
            print("load the weight from pretrained-weight file")
            self._load_checkpoint(self.generator, "weights/generator/best.pth", "generate")

        if use_cuda:
            device_ids = range(torch.cuda.device_count())
            self.generator = torch.nn.DataParallel(self.generator, device_ids=device_ids)
            self.disc_blur = torch.nn.DataParallel(self.disc_blur, device_ids=device_ids)
            self.disc_gray = torch.nn.DataParallel(self.disc_gray, device_ids=device_ids)
            self.vgg = torch.nn.DataParallel(self.vgg, device_ids=device_ids)
            cudnn.benchmark = True
            cudnn.enabled = True

        # ---- losses / optimizers / LR schedulers -------------------------
        self.tv_loss = VariationLoss(1).to(self.device)  # the 1 is the pixel start offset
        self.texture_loss = LSGanLoss().to(self.device)
        self.surface_loss = LSGanLoss().to(self.device)
        self.content_loss = nn.L1Loss().to(self.device)
        self.structure_loss = nn.L1Loss().to(self.device)

        self.optimizer_g = optim.Adam(self.generator.parameters(), lr=0.01, betas=(0.5, 0.99), weight_decay=0.01)
        self.optimizer_d = optim.Adam(
            [{'params': self.disc_gray.parameters()}, {'params': self.disc_blur.parameters()}], lr=0.01,
            betas=(0.5, 0.99), weight_decay=0.01)
        # NOTE(review): both schedulers are built but never stepped — the
        # .step() calls at the end of train() are commented out.
        self.scheduler1 = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer_d, T_max=5, eta_min=1e-5)
        self.scheduler2 = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer_g, T_max=5, eta_min=1e-5)

        # Run the whole training loop; Ctrl-C saves a final checkpoint.
        try:
            for epoch in range(1, self.args.epochs + 1):
                self.train(epoch)
                if epoch % 1 == 0:
                    self.test(epoch)
        except KeyboardInterrupt:
            torch.cuda.empty_cache()
            self.test()
        torch.cuda.empty_cache()
        print("finish model training")

    def train(self, epoch):
        """Run one epoch, alternating updates per batch: discriminators on
        even batch indices, generator on odd ones."""
        self.generator.train()
        self.disc_blur.train()
        self.disc_gray.train()
        self.vgg.eval()
        count = 0
        pbar = tqdm(self.train_dataloader, desc=f'Train Epoch{epoch}/{self.args.epochs}')
        for cartoon, photo in pbar:
            cartoon = cartoon.to(self.device)
            if count % 2 != 0:
                # ---------------- generator update ----------------
                self.optimizer_g.zero_grad()
                img = photo.to(self.device)

                generator_img = self.generator(img)
                # NOTE(review): the argument order (generator_img, img)
                # differs from the usual white-box formulation — confirm
                # against GuidedFilter's (x, guide) signature.
                generator_img = self.guided_filter(generator_img, img, r=1)
                output2 = self.guided_filter(generator_img, img, r=5, eps=2e-1)

                # 1. blur for surface representation (output2 is already
                # heavily smoothed, so it doubles as the blurred fake here).
                blur_fake = output2
                blur_fake_logit = self.disc_blur(blur_fake)
                g_loss_blur = self.surface_loss._g_loss(blur_fake_logit)

                # 2. gray for texture representation
                gray_fake, = self.colorshift(output2)
                gray_fake_logit = self.disc_gray(gray_fake)
                g_loss_gray = self.texture_loss._g_loss(gray_fake_logit)

                # 3. superpixel for structure representation (NHWC numpy
                # round-trip through simple_superpixel, back to NCHW tensor)
                input_spuerpixel = torch.from_numpy(
                    simple_superpixel(output2.detach().permute((0, 2, 3, 1)).cpu().numpy(),
                                      self.superpixel_fn)
                ).to(self.device).permute((0, 3, 1, 2))

                vgg_output = self.vgg(output2)
                vgg_superpixel = self.vgg(input_spuerpixel)
                superpixel_loss = self.structure_loss(vgg_superpixel, vgg_output)
                # 4. content loss
                vgg_photo = self.vgg(img)
                photo_loss = self.content_loss(vgg_photo, vgg_output)
                # 5. total variation loss
                tv_loss = self.tv_loss(output2)
                #                    tv               content            structure              surface             texture
                total_loss = 10000 * tv_loss + 2e3 * photo_loss + 2e3 * superpixel_loss + 1 * g_loss_blur + 10 * g_loss_gray
                total_loss.backward()
                the_g_loss = total_loss.item()

                self.optimizer_g.step()
                pbar.set_description(
                    f'Train Epoch:{epoch}/{self.args.epochs} g_loss:{the_g_loss} ')
                pbar.update(1)
            else:
                # ---------------- discriminator update ----------------
                self.optimizer_d.zero_grad()
                img = photo.to(self.device)

                generator_img = self.generator(img).detach()
                # focus: the output from guided_filter is the port that
                # emits the cartoonized img (edge smoothing, r=1)
                output = self.guided_filter(generator_img, img, r=1)

                # 1. blur for surface representation
                blur_fake = self.guided_filter(output, output, r=5, eps=2e-1)
                blur_real = self.guided_filter(cartoon, cartoon, r=5, eps=2e-1)
                blur_fake_logit = self.disc_blur(blur_fake)
                blur_real_logit = self.disc_blur(blur_real)
                d_loss_blur = self.surface_loss._d_loss(blur_real_logit, blur_fake_logit)
                # 2. gray for texture representation
                gray_fake, gray_real = self.colorshift(output, cartoon)
                gray_fake_logit = self.disc_gray(gray_fake)
                gray_real_logit = self.disc_gray(gray_real)
                d_loss_gray = self.texture_loss._d_loss(gray_real_logit, gray_fake_logit)

                d_loss = d_loss_gray + d_loss_blur

                the_d_loss = d_loss.item()

                d_loss.backward()

                self.optimizer_d.step()

                # Dump a preview blending the cartoonized output with the
                # source photo 50/50 alongside the raw photo batch.
                self.get_image(output * 0.5 + img * 0.5, photo)
                pbar.set_description(
                    f'Train Epoch:{epoch}/{self.args.epochs} d_loss:{the_d_loss}')
                pbar.update(1)
            count += 1

        # self.scheduler1.step()
        # self.scheduler2.step()

    @torch.no_grad()
    def test(self, epoch=None):
        """Checkpoint all three networks. `epoch` is accepted for the call
        sites in __init__ but is currently unused (always saves best.pth)."""
        torch.save({
            'model_state_dict': self.generator.state_dict()
        },
            './weights/generator/best.pth')
        torch.save({
            'model_state_dict': self.disc_blur.state_dict()
        },
            'weights/discriminator_blur/best.pth')
        torch.save({
            'model_state_dict': self.disc_gray.state_dict()
        },
            'weights/discriminator_gray/best.pth')

        print("save models")

    def get_image(self, image, photo):
        """Write a 4-row preview grid (generated | source photo) built from
        the first four samples of the batch to ./results/1.jpg.

        Raises ValueError when the batch holds fewer than 4 images
        (previously an assert, which disappears under `python -O`).
        """
        if len(image) < 4:
            raise ValueError("image batchsize should be at least 4, or this function can not work")

        # NHWC uint8 conversion; fixed: the photo branch previously called
        # .detach() on the numpy result of denormalize instead of the tensor.
        image = (denormalize(image.permute((0, 2, 3, 1)).detach().to('cpu').numpy()) * 255).astype('uint8')
        photo = (denormalize(photo.permute((0, 2, 3, 1)).detach().to('cpu').numpy()) * 255).astype('uint8')

        target = None
        width = height = 0
        for row in range(4):
            out_img = Image.fromarray(image[row]).convert('RGB')
            pho_img = Image.fromarray(photo[row]).convert('RGB')
            if target is None:
                width, height = out_img.width, out_img.height
                target = Image.new('RGB', (2 * width, 4 * height), (255, 255, 255))
            target.paste(out_img, (0, row * height, width, (row + 1) * height))
            target.paste(pho_img, (width, row * height, 2 * width, (row + 1) * height))

        target.save("./results/1.jpg")


train = train()
