# -*- coding: utf-8 -*-
"""
author:LTH
date:
"""
from typing import Union

import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from torch import nn
from torch import optim
from torch.utils.data import DataLoader
from torchvision.models import vgg19
from tqdm import tqdm

from config import parser
from datalist import AnimeDataset, TestAnimeDataset
from model import AnimeGenerator


def denormalize(im: Union[np.ndarray, torch.Tensor], mean=0.5, std=0.5):
    """Undo mean/std normalization, mapping `im` back toward [0, 1] pixel range."""
    rescaled = im * std
    return rescaled + mean


class ContentLoss(nn.Module):
    """Mean L1 (absolute-error) distance between two feature maps.

    The most recent loss value is kept on ``self.loss`` for inspection
    after the forward pass.
    """

    def __init__(self):
        super(ContentLoss, self).__init__()

    def forward(self, input, target):
        # Mean absolute error over all elements; cache it before returning.
        value = F.l1_loss(input, target, reduction="mean")
        self.loss = value
        return value


class pretrain(object):
    """Pre-train the generator to reconstruct photo content.

    Builds the data loaders, the generator, and a frozen VGG-19 feature
    extractor (features up to layer 26), then immediately runs the training
    loop for ``args.epochs`` epochs inside the constructor.  Training stops
    cleanly on ``KeyboardInterrupt``.
    """

    def __init__(self):
        self.args = parser.parse_args()
        print(f"-----------{self.args.project_name}-------------")

        use_cuda = self.args.use_cuda and torch.cuda.is_available()
        print("use_cuda:", use_cuda)

        self.device = torch.device('cuda' if use_cuda else 'cpu')
        train_kwargs = {'num_workers': 0, 'pin_memory': True} if use_cuda else {}

        # Build the train/test DataLoaders.
        self.train_dataset = AnimeDataset()
        self.train_dataloader = DataLoader(self.train_dataset, batch_size=8, shuffle=True, drop_last=True,
                                           **train_kwargs)

        self.test_dataset = TestAnimeDataset()
        self.test_dataloader = DataLoader(self.test_dataset, batch_size=8, shuffle=True, drop_last=True, **train_kwargs)

        self.generator = AnimeGenerator().to(self.device)

        # VGG-19 feature slice is a fixed perceptual-feature extractor.
        # Freeze its weights: it is never optimized (only the generator's
        # parameters go into the optimizer), and freezing avoids accumulating
        # useless gradients for it.  Gradients still flow through its
        # activations back to the generator.
        self.vgg = vgg19(pretrained=True).to(self.device)
        self.vgg = self.vgg.features[:26]
        self.vgg.eval()
        for p in self.vgg.parameters():
            p.requires_grad_(False)
        self.content_loss = ContentLoss()

        self.generator.train()
        if use_cuda:
            self.generator = torch.nn.DataParallel(self.generator, device_ids=range(torch.cuda.device_count()))
            self.vgg = torch.nn.DataParallel(self.vgg, device_ids=range(torch.cuda.device_count()))
        self.optimizer_g = optim.Adam(self.generator.parameters(), lr=0.0001, betas=(0.5, 0.999))

        try:
            for epoch in range(1, self.args.epochs + 1):
                self.train(epoch)

        except KeyboardInterrupt:
            torch.cuda.empty_cache()
            print("stop model training")

    def train(self, epoch):
        """Run one pre-training epoch and checkpoint the generator to ./best.pth."""
        pbar = tqdm(self.train_dataloader, desc=f'Train Epoch {epoch}/{self.args.epochs}')
        for photo, smooth_gray_cartoon, cartoon_recon, cartoon in pbar:
            # Only `photo` is used in the content-reconstruction phase; do not
            # waste transfers moving the unused cartoon tensors to the device.
            photo = photo.to(self.device)
            self.optimizer_g.zero_grad()

            generator_img = self.generator(photo)

            vgg_generator = self.vgg(generator_img)
            # The target features are a constant w.r.t. the generator, so no
            # graph is needed for them.
            with torch.no_grad():
                vgg_img = self.vgg(photo)
            _, c, h, w = generator_img.shape
            # Normalize by the output size so the loss scale is size-independent.
            loss = self.content_loss(vgg_generator, vgg_img) / (c * h * w)

            loss.backward()

            self.optimizer_g.step()

            get_image(generator_img, photo)

            pbar.set_description(f"loss:{loss.item()}")

        # Save the unwrapped weights: DataParallel prefixes every key with
        # 'module.', which would prevent loading the checkpoint into a plain
        # AnimeGenerator later.
        raw_model = self.generator.module if isinstance(self.generator, torch.nn.DataParallel) else self.generator
        torch.save({
            'model_state_dict': raw_model.state_dict()
        },
            './best.pth')

        print("save models")


def get_image(image, photo, name="result"):
    """Save a 4-row comparison grid (generated | source photo) to ``name + "1.jpg"``.

    Parameters
    ----------
    image : torch.Tensor
        Generated batch, assumed shape (B, C, H, W), normalized with
        mean/std 0.5 (see ``denormalize``) — TODO confirm against caller.
    photo : torch.Tensor
        Source-photo batch with the same shape/normalization as ``image``.
    name : str
        Output file name prefix.

    Raises
    ------
    AssertionError
        If the batch contains fewer than 4 images.
    """
    # Message fixed to match the actual `>= 4` check (the original text said
    # "greater than 4").
    assert len(image) >= 4, "image batch size should be at least 4, or this function can not work"

    # Convert both batches to HWC uint8 identically.  (The original applied
    # `denormalize` before `.numpy()` for one tensor and after for the other;
    # results were equal only because denormalize is elementwise-linear.)
    output = (denormalize(image.permute((0, 2, 3, 1)).detach().to('cpu').numpy()) * 255).astype('uint8')
    photo = (denormalize(photo.permute((0, 2, 3, 1)).detach().to('cpu').numpy()) * 255).astype('uint8')

    height, width = output.shape[1], output.shape[2]

    # 4 rows x 2 columns: generated output on the left, source photo on the right.
    target = Image.new('RGB', (2 * width, 4 * height), (255, 255, 255))
    for row in range(4):
        target.paste(Image.fromarray(output[row]), (0, row * height))
        target.paste(Image.fromarray(photo[row]), (width, row * height))

    target.save(name + "1.jpg")


if __name__ == "__main__":
    # Instantiating pretrain runs the entire training loop in its constructor.
    model = pretrain()
