# -*- coding: utf-8 -*-
#!/usr/bin/env python
"""
-------------------------------------------------
   File Name:     train
   Description:   Train a DDPM diffusion model (U-Net noise predictor) on CelebA-HQ.
   Author:        lth
   Date:          2022/10/14
-------------------------------------------------
   Change Activity:
                   2022/10/14 3:07: create this script
-------------------------------------------------
"""
__author__ = 'lth'

import os

import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from torch import nn
from torch import optim
from torch.backends import cudnn
from torch.utils.data import DataLoader
from tqdm import tqdm

from config import GetConfig
from datalist import CelebAHQ
from model import UNet
from utils import extract, denormalize

# Best (lowest) epoch-average training loss seen so far across the run;
# Train.train() compares against this to decide when to checkpoint weights.
metric_loss = float("inf")


class Train:
    """End-to-end DDPM trainer for CelebA-HQ.

    Builds the data pipeline, the U-Net epsilon-predictor, the optimizer and
    the closed-form diffusion schedules (linear beta schedule, T=1000), then
    runs the train/sample loop. Sampling follows the DDPM posterior
    q(x_{t-1} | x_t, x_0).
    """

    def __init__(self):
        self.args = GetConfig()

        print(f"-----------{self.args.project_name}-------------")
        use_cuda = self.args.use_cuda and torch.cuda.is_available()
        self.device = torch.device("cuda" if use_cuda else "cpu")
        # Rule of thumb: num_workers is about 4x the GPU count; raising it
        # also increases host (CPU) memory consumption.
        kwargs = {"num_workers": 4, "pin_memory": True} if use_cuda else {"num_workers": 0, "pin_memory": False}

        self.train_dataloader = DataLoader(CelebAHQ(), batch_size=self.args.train_batch_size, shuffle=True,
                                           drop_last=True, **kwargs)

        self.model = UNet(T=1000, ch=128, ch_mult=[1, 2, 2, 2], attn=[1], num_res_blocks=2, dropout=0.1)
        # BUG FIX: move the model to the target device BEFORE wrapping it in
        # DataParallel -- DataParallel requires the module's parameters to
        # already live on device_ids[0]; previously the model stayed on CPU.
        self.model = self.model.to(self.device)

        if use_cuda:
            self.model = torch.nn.DataParallel(self.model, device_ids=range(torch.cuda.device_count()))
            cudnn.benchmark = True
            cudnn.enabled = True

        if self.args.resume:
            print("loading the weight from pretrained-weight file")
            # map_location makes a GPU-saved checkpoint loadable on CPU too.
            checkpoint = torch.load(self.args.pretrained_weight,
                                    map_location=self.device)['model_state_dict']
            model_dict = self.model.state_dict()
            # Keep only entries whose name AND tensor shape match the current
            # model, so a partially-compatible checkpoint still loads.
            pretrained_dict = {k: v for k, v in checkpoint.items()
                               if k in model_dict and np.shape(model_dict[k]) == np.shape(v)}
            model_dict.update(pretrained_dict)
            self.model.load_state_dict(model_dict, strict=False)
            print("Restoring the weight from pretrained-weight file \nFinished loading the weight")
        else:
            print("train from scratch")

        self.loss = nn.MSELoss()  # kept for interface compatibility; train() uses F.mse_loss
        self.optimizer = optim.AdamW(self.model.parameters(), lr=self.args.lr, weight_decay=5e-4)
        self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=10, gamma=0.9)
        # ---- forward-process (q) schedules: linear betas over T=1000 -------
        self.betas = torch.linspace(1e-4, 0.02, 1000).double().to(self.device)
        self.alpha = 1 - self.betas
        self.alpha_bar = torch.cumprod(self.alpha, dim=0)

        self.sqrt_alpha_bar = torch.sqrt(self.alpha_bar)
        self.sqrt_one_minus_alpha_bar = torch.sqrt(1 - self.alpha_bar)
        # ---- reverse-process (posterior) schedules -------------------------
        # alpha_bar shifted right by one step, with alpha_bar_{-1} := 1.
        self.alpha_bar_prev = F.pad(self.alpha_bar, [1, 0], value=1)[:1000]
        self.sqrt_recip_alphas_bar = torch.sqrt(1. / self.alpha_bar)
        self.sqrt_recipm1_alphas_bar = torch.sqrt(1. / self.alpha_bar - 1)
        self.posterior_var = self.betas * (1 - self.alpha_bar_prev) / (1 - self.alpha_bar)
        # First entry replaced by the second before the log, because
        # posterior_var[0] is 0 and log(0) would be -inf.
        self.posterior_log_var_clipped = torch.log(
            torch.cat([self.posterior_var[1:2], self.posterior_var[1:]])
        )
        self.posterior_mean_coef1 = torch.sqrt(self.alpha_bar_prev) * self.betas / (1 - self.alpha_bar)
        self.posterior_mean_coef2 = torch.sqrt(self.alpha) * (1 - self.alpha_bar_prev) / (1. - self.alpha_bar)
        # PERF: hoisted out of the sampling loops -- the log-variance used at
        # every reverse step was previously recomputed 1000x per sampling run.
        self.reverse_log_var = torch.log(torch.cat([self.posterior_var[1:2], self.betas[1:]]))

    def work(self):
        """Run the full training schedule: one train + one sample pass per epoch."""
        # BUG FIX: range(1, epochs) skipped the final epoch; include it so the
        # loop matches the "epoch/epochs" progress display.
        for epoch in range(1, self.args.epochs + 1):
            self.train(epoch)
            self.test(epoch)
        torch.cuda.empty_cache()
        print("model finish training")

    def add_noise(self, data):
        """Forward diffusion: noise each image at a uniformly random timestep.

        Returns (x_t, t, noise) where x_t = sqrt(a_bar_t)*x_0 + sqrt(1-a_bar_t)*eps.
        """
        t = torch.randint(1000, size=(data.shape[0],), device=data.device)
        noise = torch.randn_like(data)

        x_t = (extract(self.sqrt_alpha_bar, t, data.shape) * data +
               extract(self.sqrt_one_minus_alpha_bar, t, data.shape) * noise)
        return x_t, t, noise

    def _reverse_step(self, noise, time_step):
        """One reverse-diffusion step x_t -> x_{t-1}.

        Shared by remove_noise/remove_noise2 (previously duplicated in both).
        """
        t = noise.new_ones([noise.shape[0], ], dtype=torch.long) * time_step
        model_log_var = extract(self.reverse_log_var, t, noise.shape)
        # Network predicts the noise component (epsilon-prediction).
        eps_from_model = self.model(noise, t)

        # Reconstruct x_0 estimate from x_t and the predicted noise.
        data = (extract(self.sqrt_recip_alphas_bar, t, noise.shape) * noise -
                extract(self.sqrt_recipm1_alphas_bar, t, noise.shape) * eps_from_model)
        model_mean, _ = self.q_mean_variance(data, noise, t)

        # No stochastic term at the final step (t == 0).
        eps_from_rand = torch.randn_like(noise) if time_step > 0 else 0
        return model_mean + torch.exp(0.5 * model_log_var) * eps_from_rand

    def remove_noise(self, noise):
        """Full reverse diffusion: run all 1000 steps and return the final
        sample clipped to the model's [-1, 1] data range."""
        for time_step in tqdm(reversed(range(1000))):
            noise = self._reverse_step(noise, time_step)

        return torch.clip(noise, -1, 1)

    def remove_noise2(self, noise):
        """Like remove_noise, but also collect an intermediate (clipped)
        snapshot every 10 steps; returns the list of snapshots."""
        res = []

        for time_step in tqdm(reversed(range(1000))):
            noise = self._reverse_step(noise, time_step)

            if time_step % 10 == 0:
                res.append(torch.clip(noise, -1, 1))

        return res

    def train(self, epoch):
        """Run one training epoch; checkpoint to weight/best.pth whenever the
        epoch-average loss improves on the best seen so far."""
        self.model.train()
        average_loss = []
        pbar = tqdm(self.train_dataloader, desc=f"Train Epoch:{epoch}/{self.args.epochs}")

        for data in pbar:
            self.optimizer.zero_grad()
            data = data.to(self.device)
            # Forward-noise the batch, then train the net to predict the noise.
            data_add_noise, t, noise = self.add_noise(data)
            output = self.model(data_add_noise, t)
            loss = F.mse_loss(output, noise, reduction="mean")
            loss.backward()

            # Gradient-norm clipping stabilises training.
            torch.nn.utils.clip_grad_norm_(
                self.model.parameters(), 1
            )
            self.optimizer.step()
            average_loss.append(loss.item())

            pbar.set_description(
                f'Train Epoch: {epoch}/{self.args.epochs} '
                f' train_loss: {np.mean(average_loss)} '
                f' learning_rate: {self.optimizer.state_dict()["param_groups"][0]["lr"]}'
            )

        # BUG FIX: the StepLR scheduler was constructed but never stepped, so
        # the learning rate never decayed. Step it once per epoch.
        self.scheduler.step()

        global metric_loss
        epoch_loss = float(np.mean(average_loss))
        if epoch_loss < metric_loss:
            metric_loss = epoch_loss
            os.makedirs("weight", exist_ok=True)  # robustness: dir may not exist
            torch.save({
                'model_state_dict': self.model.state_dict(),
            },
                'weight/best.pth')
            print("model saved")

    @torch.no_grad()
    def test(self, epoch):
        """Sample a batch from pure noise and save a noise-vs-result image."""
        self.model.eval()

        noise = torch.randn((self.args.train_batch_size, 3, 128, 128), device=self.device)
        temp = noise  # keep the initial noise for side-by-side comparison
        data = self.remove_noise(noise)

        os.makedirs("result", exist_ok=True)  # robustness: dir may not exist
        data = self.get_image(data, temp).convert("RGB")
        data.save("./result/" + str(epoch) + ".jpg")

    @torch.no_grad()
    def inference(self):
        """Sample once and save a grid of intermediate denoising snapshots."""
        self.model.eval()

        noise = torch.randn((self.args.train_batch_size, 3, 128, 128), device=self.device)
        data = self.remove_noise2(noise)
        self.get_image_display(data)

    def q_mean_variance(self, x_0, x_t, t):
        """
        Compute the mean and variance of the diffusion posterior
        q(x_{t-1} | x_t, x_0)
        """
        assert x_0.shape == x_t.shape
        posterior_mean = (
                extract(self.posterior_mean_coef1, t, x_t.shape) * x_0 +
                extract(self.posterior_mean_coef2, t, x_t.shape) * x_t
        )
        posterior_log_var_clipped = extract(
            self.posterior_log_var_clipped, t, x_t.shape)
        return posterior_mean, posterior_log_var_clipped

    @staticmethod
    def get_image(image, noise):
        """Build a side-by-side PIL image: initial noise (left) vs the first
        generated sample of the batch (right). Assumes 128x128 images."""
        print("image:", torch.max(image), torch.min(image))  # debug: value range
        print("noise:", torch.max(noise), torch.min(noise))  # debug: value range
        # NCHW -> NHWC, undo normalisation, scale to uint8 pixel values.
        image = (denormalize(image.permute((0, 2, 3, 1)).to("cpu").numpy()) * 255).astype("uint8")
        image = Image.fromarray(image[0])

        noise_image = (denormalize(noise.permute((0, 2, 3, 1)).to("cpu").numpy()) * 255).astype("uint8")
        noise_image = Image.fromarray(noise_image[0])

        # One-pixel separator column between the two 128px tiles.
        new_image = Image.new("RGB", (2 * 128 + 1, 128))
        new_image.paste(noise_image, (0, 0))
        new_image.paste(image, (128 + 1, 0))

        return new_image

    @staticmethod
    def get_image_display(data):
        """Tile the snapshot list into a square grid (one-pixel gutters) and
        save it as total_result3.jpg. Assumes 128x128 images."""
        new_data = []
        for image in data:
            # NCHW -> NHWC, undo normalisation, take the first batch element.
            image = (denormalize(image.permute((0, 2, 3, 1)).to("cpu").numpy()) * 255).astype("uint8")

            image = Image.fromarray(image[0])
            new_data.append(image)

        # Largest square grid that the snapshots fill completely.
        row = col = int(len(new_data) ** 0.5)
        new_image = Image.new("RGB", (col * (128 + 1), row * (128 + 1)))
        for i in range(row):
            for j in range(col):
                print("=================================")  # debug
                print(i * col + j)
                print(i, j)
                print(np.sum(np.array(new_data[i * col + j])))

                new_image.paste(new_data[i * col + j], (j * (128 + 1), i * (128 + 1)))
        new_image.save("total_result3.jpg")


if __name__ == "__main__":
    # Entry point: build the trainer and run the full train/sample loop.
    trainer = Train()
    trainer.work()
