#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
   File Name：     train
   Description :   
   Author :       lth
   date：          2023/1/3
-------------------------------------------------
   Change Activity:
                   2023/1/3 13:20: create this script
-------------------------------------------------
"""
__author__ = 'lth'

import torch
import torchvision
from PIL import Image
from torch import optim
from torch.backends import cudnn
from torch.utils.data import DataLoader
from tqdm import tqdm

from config import GetConfig
from datalist import StyleGanv2Dataset
from model import Generator, Discriminator
from utils import accumulate, GanLoss, mixing_noise, denormalize, weights_init


class Train:
    """StyleGAN2 training driver.

    Builds the generator / discriminator / EMA-generator triple, the data
    pipeline and the Adam optimizers, then alternates generator and
    discriminator updates with StyleGAN2's lazy regularization schedule
    (path-length penalty every 4 G steps, R1 penalty every 16 D steps).
    """

    def __init__(self):
        self.args = GetConfig()
        print(f"-----------{self.args.project_name}-------------")
        use_cuda = self.args.use_cuda and torch.cuda.is_available()
        self.device = torch.device("cuda" if use_cuda else "cpu")

        # Data: pin host memory only when batches are shipped to the GPU.
        kwargs = {"num_workers": 4, "pin_memory": use_cuda}
        self.train_dataloader = DataLoader(
            StyleGanv2Dataset(base_dir="ddsa", mode="train"),
            batch_size=self.args.train_batch_size,
            shuffle=True,
            **kwargs)

        self.generator = Generator(size=256,
                                   style_dim=512,
                                   n_mlp=8,
                                   channel_multiplier=2,
                                   blur_kernel=[1, 3, 3, 1],
                                   lr_mlp=0.01, ).to(self.device)
        self.discriminator = Discriminator(size=256, channel_multiplier=2).to(self.device)
        # EMA copy of the generator: never trained directly, only updated
        # via accumulate() after each step, and used for eval/snapshots.
        self.generator_ema = Generator(size=256,
                                       style_dim=512,
                                       n_mlp=8,
                                       channel_multiplier=2,
                                       blur_kernel=[1, 3, 3, 1],
                                       lr_mlp=0.01, ).to(self.device)
        accumulate(self.generator_ema, self.generator)
        self.generator_ema.eval()

        if use_cuda:
            self.generator = torch.nn.DataParallel(self.generator, device_ids=range(torch.cuda.device_count()))
            self.generator_ema = torch.nn.DataParallel(self.generator_ema, device_ids=range(torch.cuda.device_count()))
            self.discriminator = torch.nn.DataParallel(self.discriminator, device_ids=range(torch.cuda.device_count()))
            cudnn.benchmark = True
            cudnn.enabled = True

        if self.args.resume:
            print("load the pretrained weight")
            # BUG FIX: the two checkpoint paths were swapped — train() saves
            # the EMA weights to ema_generate_best.pth and the raw generator
            # to generate_best.pth, so load them back the same way.
            self.generator_ema.load_state_dict(torch.load("weights/ema_generate_best.pth")["model_state_dict"])
            self.generator.load_state_dict(torch.load("weights/generate_best.pth")["model_state_dict"])
        else:
            print("train from scratch")
            weights_init(self.generator, init_type="normal", init_gain=1)
        self.gan_loss = GanLoss()

        # StyleGAN2 convention: beta1 = 0 for both optimizers.
        self.g_optim = optim.Adam(
            params=self.generator.parameters(),
            lr=self.args.lr,
            betas=(0, 0.99)
        )

        self.d_optim = optim.Adam(
            params=self.discriminator.parameters(),
            lr=self.args.lr,
            betas=(0, 0.99)
        )

        # Running average used by the path-length regularizer.
        self.mean_path_length = 0
        self.d_scheduler = optim.lr_scheduler.StepLR(self.d_optim, 1, gamma=0.9)
        self.g_scheduler = optim.lr_scheduler.StepLR(self.g_optim, 1, gamma=0.9)

    def work(self, start_epoch=58):
        """Run the full training loop.

        Args:
            start_epoch: first epoch index. Defaults to 58 to preserve the
                original hard-coded resume point; pass 0 for a fresh run.
        """
        for epoch in range(start_epoch, self.args.epochs):
            self.test(epoch)
            self.train(epoch)
        torch.cuda.empty_cache()
        print("model finish training")

    def train(self, epoch):
        """Train one epoch, alternating G and D updates batch by batch.

        Even-numbered batches update the generator (with a lazy path-length
        penalty every 4th G step); odd-numbered batches update the
        discriminator (with a lazy R1 penalty every 16th D step). After
        every batch the EMA generator is refreshed, and checkpoints are
        written at the end of the epoch.
        """
        self.generator.train()
        self.discriminator.train()

        pbar = tqdm(self.train_dataloader, desc=f'Train Epoch:{epoch}/{self.args.epochs}')
        count = 0
        d_count = 0
        g_count = 0
        for real_img in pbar:
            real_img = real_img.to(self.device)
            if count % 2 == 0:
                # ---------------- generator step ----------------
                g_count += 1
                self.g_optim.zero_grad()
                latent = mixing_noise(self.args.train_batch_size, 512, 0.5, self.device)
                fake_img, latent = self.generator(latent, return_latents=True)
                fake_pred = self.discriminator(fake_img)
                g_loss = self.gan_loss.g_loss_original(fake_pred)
                g_loss.backward()
                self.g_optim.step()

                # Lazy path-length regularization: applied every 4th G step,
                # scaled by the interval (x4) and the penalty weight (x2).
                if g_count % 4 == 0:
                    self.g_optim.zero_grad()
                    latent = mixing_noise(self.args.train_batch_size, 512, 0.5, self.device)
                    fake_img, latent = self.generator(latent, return_latents=True)
                    path_loss, self.mean_path_length, path_lengths = self.gan_loss.g_path_regularize(fake_img, latent,
                                                                                                     self.mean_path_length)
                    loss = path_loss * 2 * 4
                    loss.backward()
                    self.g_optim.step()

                pbar.set_description(
                    f"[Epoch]:{epoch}\tg_loss:{g_loss.item()}\t lr:{self.g_optim.param_groups[0]['lr']}")
            else:
                # -------------- discriminator step --------------
                d_count += 1
                self.d_optim.zero_grad()
                # BUG FIX: sample as many fakes as there are reals — the last
                # dataloader batch can be smaller than train_batch_size.
                latent = mixing_noise(real_img.size(0), 512, 0.5, self.device)
                fake_img, _ = self.generator(latent, return_latents=False)
                fake_pred = self.discriminator(fake_img)
                real_pred = self.discriminator(real_img)
                d_loss = self.gan_loss.d_loss_original(real_pred, fake_pred)
                d_loss.backward()
                self.d_optim.step()

                # Lazy R1 regularization: applied every 16th D step, scaled
                # by the penalty weight (x10) and the interval (x16).
                if d_count % 16 == 0:
                    self.d_optim.zero_grad()
                    # Gradients w.r.t. the real images are needed for R1.
                    real_img.requires_grad_(True)
                    real_pred = self.discriminator(real_img)
                    penalty_loss = 10 * self.gan_loss.d_r1_loss(real_pred, real_img) * 16
                    penalty_loss.backward()
                    self.d_optim.step()

                # Overwrite a rolling preview of the latest fakes.
                torchvision.utils.save_image(fake_img, "./results/get_from_generate.jpg", nrow=2,
                                             normalize=True, value_range=(-1, 1))
                pbar.set_description(
                    f"[Epoch]:{epoch}\td_loss:{d_loss.item()}\t lr:{self.d_optim.param_groups[0]['lr']}")
            count += 1

            accumulate(self.generator_ema, self.generator)
        torch.save({
            'model_state_dict': self.generator_ema.state_dict()
        },
            'weights/ema_generate_best.pth')
        torch.save({
            'model_state_dict': self.generator.state_dict()
        },
            'weights/generate_best.pth')
        torch.save({
            'model_state_dict': self.discriminator.state_dict()
        },
            'weights/discriminator_best.pth')
        # self.d_scheduler.step()
        # self.g_scheduler.step()

    @staticmethod
    def get_image(image, photo, save_path="./results/get_from_generate.jpg"):
        """Tile the first four generated images beside the first four
        reference photos in a 4-row x 2-column grid and save it.

        Args:
            image: (N, C, H, W) tensor in the model's normalized range, N >= 4.
            photo: (N, C, H, W) tensor to show alongside, same shape contract.
            save_path: destination path for the composite JPEG.
        """
        assert len(image) >= 4, "image batchsize should be at least 4, or this function can not work "

        output = (denormalize(image.permute((0, 2, 3, 1)).detach().to('cpu').numpy()) * 255).astype('uint8')
        photo = (denormalize(photo.permute((0, 2, 3, 1)).detach().to('cpu').numpy()) * 255).astype('uint8')

        pairs = [(Image.fromarray(output[i]).convert('RGB'),
                  Image.fromarray(photo[i]).convert('RGB'))
                 for i in range(4)]

        width = pairs[0][0].width
        height = pairs[0][0].height

        target = Image.new('RGB', (2 * width, 4 * height), (255, 255, 255))
        for row, (out_img, ref_img) in enumerate(pairs):
            target.paste(out_img, (0, row * height, width, (row + 1) * height))
            target.paste(ref_img, (width, row * height, 2 * width, (row + 1) * height))

        target.save(save_path)

    @torch.no_grad()
    def test(self, epoch):
        """Sample a grid of images from the EMA generator and save it,
        tagged with the epoch number."""
        self.generator_ema.eval()
        latent = mixing_noise(self.args.train_batch_size, 512, 0.5, self.device)
        # BUG FIX: sample from the EMA generator (the one put in eval mode
        # above) — the original sampled from the raw training generator.
        fake_img, _ = self.generator_ema(latent, return_latents=False)

        torchvision.utils.save_image(fake_img, "./results/get_from_generate_ema_" + str(epoch) + ".jpg", nrow=2,
                                     normalize=True, value_range=(-1, 1))


if __name__ == "__main__":
    # Script entry point: build the trainer and run the full loop.
    trainer = Train()
    trainer.work()