# -*- coding: utf-8 -*-
"""
author:LTH
data:
"""
from typing import Union

import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from torch import nn
from torch import optim
from torch.utils.data import DataLoader
from torchvision.models import vgg19
from tqdm import tqdm

from config import parser
from datalist import AnimeDataset
from model import AnimeGenerator, AnimeDiscriminator
from utils import LSGanLoss


def denormalize(im: Union[np.ndarray, torch.Tensor], mean=0.5, std=0.5):
    """Invert a (im - mean) / std normalization, mapping back to the original range."""
    return mean + im * std


class ContentLoss(nn.Module):
    """Mean L1 distance between two feature maps.

    The most recent result is also kept in ``self.loss`` so callers can
    inspect it after the forward pass.
    """

    def __init__(self):
        super().__init__()

    def forward(self, input, target):
        loss_value = F.l1_loss(input, target, reduction="mean")
        # Cache the latest scalar for later inspection.
        self.loss = loss_value
        return loss_value


def gram_matrix(input):
    """Return the per-sample Gram matrix of a batch of feature maps.

    Args:
        input: tensor of shape (a, b, c, d) = (batch, channels, height, width).

    Returns:
        Tensor of shape (a, b, b): for each sample, the channel-by-channel
        correlation matrix, normalized by the number of feature elements.

    Bug fixed: the previous version flattened to (a*b, c*d) and computed one
    big Gram matrix, which correlates channels ACROSS different batch samples;
    with batch_size > 1 (this script uses 4) the style statistics then depend
    on which images happen to share a batch.  torch.bmm keeps samples separate.
    """
    a, b, c, d = input.size()

    features = input.view(a, b, c * d)
    # Batched matmul: (a, b, n) x (a, n, b) -> (a, b, b), one Gram per sample.
    G = torch.bmm(features, features.transpose(1, 2))
    return G.div(b * c * d)


class StyleLoss(nn.Module):
    """Mean L1 distance between the Gram matrices of two feature maps."""

    def __init__(self):
        super().__init__()

    @staticmethod
    def forward(input, target):
        # Compare second-order (style) statistics instead of raw activations.
        input_gram = gram_matrix(input)
        target_gram = gram_matrix(target)
        return F.l1_loss(input_gram, target_gram, reduction="mean")


class train(object):
    """End-to-end trainer for the anime-style GAN.

    NOTE: instantiating this class immediately runs the whole training loop
    (see the ``__main__`` guard) — construction is not side-effect free.
    """

    def __init__(self):
        self.args = parser.parse_args()
        print(f"-----------{self.args.project_name}-------------")

        use_cuda = self.args.use_cuda and torch.cuda.is_available()
        print("use_cuda:", use_cuda)

        self.device = torch.device('cuda' if use_cuda else 'cpu')
        # NOTE(review): num_workers is pinned to 0 because multi-worker
        # dataloading is unreliable on Windows; raise it on Linux to keep the
        # GPU busy and speed up training.
        train_kwargs = {'num_workers': 0, 'pin_memory': True} if use_cuda else {}

        # Data pipeline.
        self.train_dataset = AnimeDataset()
        self.train_dataloader = DataLoader(self.train_dataset, batch_size=4, shuffle=True, drop_last=True,
                                           **train_kwargs)

        # Networks: generator/discriminator pair plus a truncated VGG19
        # (first 26 feature layers) used as a fixed perceptual feature extractor.
        self.generator = AnimeGenerator().to(self.device)
        self.discriminator = AnimeDiscriminator().to(self.device)
        self.vgg = vgg19(pretrained=True).to(self.device)
        self.vgg = self.vgg.features[:26]
        self.vgg.eval()
        # The VGG extractor is never optimized; freezing its parameters avoids
        # computing gradients for it on every backward pass.
        for param in self.vgg.parameters():
            param.requires_grad_(False)

        self.criterion = LSGanLoss().to(self.device)
        self.content_loss = ContentLoss().to(self.device)
        self.huber_loss = nn.SmoothL1Loss().to(self.device)
        self.gram_loss = StyleLoss().to(self.device)
        self.l1loss = nn.L1Loss().to(self.device)

        self.generator.train()
        self.discriminator.train()

        # RGB -> YUV projection vectors for the colour-reconstruction loss.
        self.Y = torch.tensor([0.299, 0.587, 0.114]).to(self.device)
        self.U = torch.tensor([-0.14714119, -0.28886916, 0.43601035]).to(self.device)
        self.V = torch.tensor([0.61497538, -0.51496512, -0.10001026]).to(self.device)

        if use_cuda:
            self.generator = torch.nn.DataParallel(self.generator, device_ids=range(torch.cuda.device_count()))
            self.discriminator = torch.nn.DataParallel(self.discriminator, device_ids=range(torch.cuda.device_count()))
            self.vgg = torch.nn.DataParallel(self.vgg, device_ids=range(torch.cuda.device_count()))
        # map_location keeps a GPU-saved checkpoint loadable on CPU-only hosts.
        self.generator.load_state_dict(
            torch.load("weights/generate_best.pth", map_location=self.device)["model_state_dict"])
        self.optimizer_g = optim.Adam(self.generator.parameters(), lr=2e-4, betas=(0.5, 0.999))
        self.optimizer_d = optim.Adam(self.discriminator.parameters(), lr=4e-4, betas=(0.5, 0.999))

        try:
            for epoch in range(1, self.args.epochs + 1):
                self.train(epoch)
        except KeyboardInterrupt:
            # Allow a clean manual stop without losing the process.
            torch.cuda.empty_cache()
            print("stop model training")
        torch.cuda.empty_cache()
        print("finish model training")

    def train(self, epoch):
        """Run one adversarial epoch, alternating D and G updates per batch.

        Even-numbered batches update the discriminator, odd-numbered batches
        the generator.  The generator checkpoint is saved at epoch end.

        Args:
            epoch: 1-based epoch index, used only for logging.
        """
        count = 0
        # Placeholders so the progress bar can report both losses from the start.
        g_loss = torch.tensor(0)
        d_loss = torch.tensor(0)
        pbar = tqdm(self.train_dataloader, desc=f'Train Epoch {epoch}/{self.args.epochs}')
        # Batch layout: photo, cartoon, x (presumably smoothed/gray cartoon),
        # y (presumably colour-degraded cartoon) — confirm against AnimeDataset.
        for photo, cartoon, x, y in pbar:
            photo, cartoon, x, y = photo.to(self.device), cartoon.to(self.device), x.to(self.device), y.to(self.device)
            if count % 2 != 0:
                # ---------------- generator update ----------------
                self.optimizer_g.zero_grad()

                generator_img = self.generator(photo)
                fake_logit = self.discriminator(generator_img)
                # Adversarial loss: push generated images towards "real".
                loss_adv = self.criterion._g_loss(fake_logit=fake_logit)

                photo_content = self.vgg(photo)
                generator_content = self.vgg(generator_img)
                # Content loss: keep VGG features of output close to the photo.
                loss_con = self.content_loss(photo_content, generator_content)

                gray_cartoon_content = self.vgg(x)
                # Style (Gram) loss against the cartoon features.
                loss_gram = self.gram_loss(generator_content, gray_cartoon_content)

                # Colour-reconstruction loss in YUV space: map [-1, 1] images
                # to [0, 1], then project onto the Y/U/V vectors per channel.
                y_generator = (generator_img + 1) / 2 * self.Y[None, :, None, None]
                y_photo = (photo + 1) / 2 * self.Y[None, :, None, None]

                u_generator = (generator_img + 1) / 2 * self.U[None, :, None, None]
                u_photo = (photo + 1) / 2 * self.U[None, :, None, None]

                v_generator = (generator_img + 1) / 2 * self.V[None, :, None, None]
                v_photo = (photo + 1) / 2 * self.V[None, :, None, None]
                # Luma uses plain L1, chroma uses Huber (smooth L1).
                loss_color_recon = self.l1loss(y_generator, y_photo) \
                                   + self.huber_loss(u_generator, u_photo) \
                                   + self.huber_loss(v_generator, v_photo)

                g_loss = 10 * loss_adv + 1.5 * loss_con + 3 * loss_gram + 30 * loss_color_recon

                g_loss.backward()

                self.optimizer_g.step()
            else:
                # ---------------- discriminator update ----------------
                self.optimizer_d.zero_grad()
                real_logit = self.discriminator(cartoon)
                fake_logit_color = self.discriminator(y)
                fake_logit_smooth = self.discriminator(x)
                # Generate fakes WITHOUT tracking the generator graph: the
                # discriminator step must not build (or backpropagate through)
                # the generator's autograd graph — doing so wastes memory and
                # compute on generator gradients that are discarded anyway.
                with torch.no_grad():
                    generator_img = self.generator(photo)
                fake_logit = self.discriminator(generator_img)
                # LSGAN targets: fakes (and degraded cartoons) -> 0, real -> 1.
                loss_adv1 = self.criterion._d_loss2(logit=fake_logit, a=0)
                loss_adv2 = self.criterion._d_loss2(logit=fake_logit_color, a=0)
                loss_adv3 = self.criterion._d_loss2(logit=fake_logit_smooth, a=0)
                loss_adv4 = self.criterion._d_loss2(logit=real_logit, a=1)

                d_loss = 10 * (loss_adv4 + loss_adv1 + 0.1 * loss_adv2 + loss_adv3)

                d_loss.backward()
                self.optimizer_d.step()
            # Dump a comparison sheet of the current fakes next to the photos.
            get_image(generator_img, photo)
            count += 1

            pbar.set_description(f"[Epoch]:{epoch}\td_loss:{d_loss.item()}\tg_loss:{g_loss.item()}")
        torch.save({
            'model_state_dict': self.generator.state_dict()
        },
            './weights/generate_best.pth')

    def pre_train(self, epoch):
        """Generator-only warm-up: minimise VGG content loss against the photo.

        Args:
            epoch: 1-based epoch index, used only for logging.
        """
        pbar = tqdm(self.train_dataloader, desc=f'Train Epoch {epoch}/{self.args.epochs}')
        for photo, smooth_gray_cartoon, cartoon_recon, cartoon in pbar:
            photo, smooth_gray_cartoon, cartoon_recon, cartoon = photo.to(self.device), smooth_gray_cartoon.to(
                self.device), cartoon_recon.to(self.device), cartoon.to(self.device)
            self.optimizer_g.zero_grad()

            generator_img = self.generator(photo)

            vgg_generator = self.vgg(generator_img)
            vgg_img = self.vgg(photo)
            # Reconstruction objective: match VGG features of output and input.
            loss = self.content_loss(vgg_generator, vgg_img)

            loss.backward()

            self.optimizer_g.step()

            get_image(generator_img, photo)

            pbar.set_description(f"loss:{loss.item()}")
        torch.save({
            'model_state_dict': self.generator.state_dict()
        },
            './best.pth')

        print("save models")


def get_image(image, photo, name="result"):
    assert len(image) >= 4, "image batchsize should greater than 4, or this function can not work "

    output = (denormalize(image.permute((0, 2, 3, 1)).detach().to('cpu').numpy()) * 255).astype('uint8')
    photo = (denormalize(photo.permute((0, 2, 3, 1))).detach().to('cpu').numpy() * 255).astype('uint8')

    output0 = Image.fromarray(output[0])
    photo0 = Image.fromarray(photo[0])

    output1 = Image.fromarray(output[1])
    photo1 = Image.fromarray(photo[1])

    output2 = Image.fromarray(output[2])
    photo2 = Image.fromarray(photo[2])

    output3 = Image.fromarray(output[3])
    photo3 = Image.fromarray(photo[3])

    width = output3.width
    height = output3.height

    target = Image.new('RGB', (width + width, 4 * height), (255, 255, 255))

    target.paste(output0, (0, 0, width, height))
    target.paste(photo0, (width, 0, 2 * width, height))

    target.paste(output1, (0, height, width, 2 * height))
    target.paste(photo1, (width, height, 2 * width, 2 * height))

    target.paste(output2, (0, height * 2, width, 3 * height))
    target.paste(photo2, (width, height * 2, width * 2, height * 3))

    target.paste(output3, (0, height * 3, width, 4 * height))
    target.paste(photo3, (width, height * 3, 2 * width, height * 4))

    target.save(name + "1.jpg")


def get_image_YUV(image, photo, name="yuv"):
    assert len(image) >= 4, "image batchsize should greater than 4, or this function can not work "

    output = ((image.permute((0, 2, 3, 1)).detach().to('cpu').numpy()) * 255).astype('uint8')
    photo = ((photo.permute((0, 2, 3, 1))).detach().to('cpu').numpy() * 255).astype('uint8')

    output0 = Image.fromarray(output[0])
    photo0 = Image.fromarray(photo[0])

    output1 = Image.fromarray(output[1])
    photo1 = Image.fromarray(photo[1])

    output2 = Image.fromarray(output[2])
    photo2 = Image.fromarray(photo[2])

    output3 = Image.fromarray(output[3])
    photo3 = Image.fromarray(photo[3])

    width = output3.width
    height = output3.height

    target = Image.new('RGB', (width + width, 4 * height), (255, 255, 255))

    target.paste(output0, (0, 0, width, height))
    target.paste(photo0, (width, 0, 2 * width, height))

    target.paste(output1, (0, height, width, 2 * height))
    target.paste(photo1, (width, height, 2 * width, 2 * height))

    target.paste(output2, (0, height * 2, width, 3 * height))
    target.paste(photo2, (width, height * 2, width * 2, height * 3))

    target.paste(output3, (0, height * 3, width, 4 * height))
    target.paste(photo3, (width, height * 3, 2 * width, height * 4))

    target.save(name + "1.jpg")


if __name__ == "__main__":
    modeL = train()
