# -*- coding: utf-8 -*-
# @Time    : 2023/5/10 10:48
# @Author  : Pan
# @Software: PyCharm
# @Project : VisualFramework
# @FileName: Diffusion

import os
import time
import loss
import paddle
import networks
import optimizers
import datasets
import numpy as np
from PIL import Image
from core import base
from tqdm import tqdm
from visualdl import LogWriter


class Diffusion:
    """DDPM-style Gaussian diffusion: closed-form forward noising plus an
    iterative reverse (denoising) sampler over a linear beta schedule."""

    def __init__(self, diffusion_config):
        cfg = diffusion_config
        self.noise_steps = cfg["noise_steps"]
        self.beta_start = cfg["beta_start"]
        self.beta_end = cfg["beta_end"]
        self.img_size = cfg["img_size"]

        self.beta = self.prepare_noise_schedule()
        self.alpha = 1. - self.beta
        # Cumulative product of alphas: the fraction of the original image
        # signal that survives after N noising steps.
        self.alpha_hat = paddle.cumprod(self.alpha, dim=0)

    def prepare_noise_schedule(self):
        """Linear beta schedule from beta_start to beta_end over noise_steps."""
        return paddle.linspace(self.beta_start, self.beta_end, self.noise_steps)

    def noise_images(self, x, t):
        """Forward process q(x_t | x_0): noise x to timestep t in one shot.

        Returns the noised batch and the Gaussian noise that was applied
        (the regression target for the model).
        """
        a_hat = self.alpha_hat[t]
        # Broadcast per-sample scalars over (C, H, W).
        signal_scale = paddle.sqrt(a_hat).unsqueeze([1, 2, 3])
        noise_scale = paddle.sqrt(1 - a_hat).unsqueeze([1, 2, 3])
        eps = paddle.randn(shape=x.shape)
        return signal_scale * x + noise_scale * eps, eps

    def sample_timesteps(self, n):
        """Draw n uniform random timesteps in [1, noise_steps)."""
        return paddle.randint(low=1, high=self.noise_steps, shape=(n,))

    def sample(self, model, x):
        """Reverse process: iteratively denoise x with the model.

        Returns images mapped from [-1, 1] into the [0, 255] range.
        """
        model.eval()
        with paddle.no_grad():
            for step in tqdm(reversed(range(1, self.noise_steps)), position=0):
                t = paddle.to_tensor([step] * x.shape[0]).astype("int64")
                eps_pred = model(x, t)[0]
                a = self.alpha[t].unsqueeze([1, 2, 3])
                a_hat = self.alpha_hat[t].unsqueeze([1, 2, 3])
                b = self.beta[t].unsqueeze([1, 2, 3])
                # No fresh noise is injected on the final (step == 1) update.
                z = paddle.randn(shape=x.shape) if step > 1 else paddle.zeros_like(x)
                x = 1 / paddle.sqrt(a) * (x - ((1 - a) / (paddle.sqrt(1 - a_hat))) * eps_pred) + paddle.sqrt(b) * z
        model.train()
        return (x.clip(-1, 1) + 1) / 2 * 255


class DiffusionEngine:
    """Training/evaluation engine wiring a diffusion model to its dataloaders,
    loss, optimizer, LR schedule, checkpointing and VisualDL logging.

    ``config`` is a dict with the sections read in ``__init__`` below; the
    optional ``amp`` section enables mixed-precision training.
    """

    def __init__(self, config):
        # Wall-clock anchors used by display() for speed / ETA estimates.
        self.start_time = time.time()
        self.next_time = time.time()
        self.now_time = time.time()

        self.base_info_config = config["base_info"]
        self.train_dataset_config = config["train_dataset"]
        self.eval_dataset_config = config["eval_dataset"]
        self.optimizer_config = config["optimizer"]
        self.network_config = config["network"]
        self.loss_config = config["loss"]
        self.diffusion_config = config["diffusion"]
        amp_config = config.get("amp")
        self.only_last = self.base_info_config.get("only_last", True)

        self.writer = LogWriter(logdir=self.base_info_config["log_dir"])

        self.model = networks.make_model(self.network_config)
        self.diffusion = Diffusion(self.diffusion_config)
        self.model, self.optimizer, self.lr = optimizers.make_optim(self.optimizer_config, self.model)
        self.train_dataloader = datasets.make_dataloader(self.train_dataset_config)
        self.eval_dataloader = datasets.make_dataloader(self.eval_dataset_config)
        self.loss = loss.LossCompose(self.loss_config)

        if self.base_info_config["pretrained"] is None:
            self.step = 0
        else:
            self.model, self.optimizer, self.lr, self.step = base.load_model(
                self.model, self.optimizer, self.lr, self.base_info_config["pretrained"])

        # The GradScaler doubles as the "AMP enabled" flag checked by train().
        # A missing or empty "amp" section cleanly disables mixed precision.
        self.amp = paddle.amp.GradScaler(init_loss_scaling=amp_config["scale"]) if amp_config else None

    def train(self):
        """Run the FP32 training loop; delegates to fp16_train() under AMP."""
        if self.amp is not None:
            self.fp16_train()
            return
        while self.step < self.base_info_config["step"]:
            for idx, data in enumerate(self.train_dataloader):
                self.step += 1
                img = data["img"]
                self.optimizer.clear_grad()
                t = self.diffusion.sample_timesteps(img.shape[0])
                imgn, noise = self.diffusion.noise_images(img, t)
                predict = self.model(imgn, t)
                loss_value = self.loss(predict, [noise])
                loss_value.backward()
                self.optimizer.step()
                self.lr.step()
                self._on_step_end()

    def _on_step_end(self):
        """Periodic logging, checkpointing and eval sampling shared by both loops."""
        if self.step % self.base_info_config["dot"] == 0:
            self.display()
        if self.step % self.base_info_config["save_iters"] == 0:
            base.save_model(self.model, self.optimizer, self.step, self.base_info_config["save_path"], only_last=self.only_last)
            self.predict()

    def predict(self):
        """Sample denoised images for the eval set and save them under
        ``save_path/<step>/<original filename>``."""
        out_dir = os.path.join(self.base_info_config["save_path"], str(self.step))
        # Bug fix: the per-step output directory was never created, so the
        # first Image.save() raised FileNotFoundError.
        os.makedirs(out_dir, exist_ok=True)
        for idx, data in enumerate(self.eval_dataloader):
            data["predict"] = self.diffusion.sample(self.model, data["img"])
            for i in range(data["predict"].shape[0]):
                img = data["predict"][i].transpose([1, 2, 0])  # CHW -> HWC
                img = np.array(img).astype("uint8")
                Image.fromarray(img).save(os.path.join(out_dir, data["path"][i]))

    def display(self):
        """Print and log (VisualDL) progress, learning rate and loss values."""
        self.now_time = time.time()
        epoch = int(self.step / len(self.train_dataloader))
        process = self.step / self.base_info_config["step"]
        speed_time = self.now_time - self.start_time
        # ETA: time spent on the last "dot" interval, extrapolated to the full
        # run, minus the time already elapsed.
        remain_time = (self.now_time - self.next_time) / (self.base_info_config["dot"] / self.base_info_config["step"]) - speed_time
        self.next_time = self.now_time
        info_list = [
            {
                "name": "learning_rate",
                "value": self.lr.get_lr()
            }
        ] + self.loss.get_loss_info()
        base_info = "\033[5;31;47m[Train]\033[0m %s epochs:%4d steps:%9d/%9d process:%5.2f%% speed_time:%s remain_time:%s" % (
            time.ctime(), epoch, self.step, self.base_info_config["step"], process*100, base.time_std(speed_time), base.time_std(remain_time)
        )
        for item in info_list:
            self.writer.add_scalar(tag="train/" + item["name"], step=self.step, value=item["value"])
            base_info += " %s:%f" % (item["name"], item["value"])
        print(base_info)

    def fp16_train(self):
        """AMP (O1) training loop: scaled loss, GradScaler-driven step/update."""
        while self.step < self.base_info_config["step"]:
            for idx, data in enumerate(self.train_dataloader):
                self.step += 1
                img = data["img"]
                self.optimizer.clear_grad(set_to_zero=False)
                t = self.diffusion.sample_timesteps(img.shape[0])
                imgn, noise = self.diffusion.noise_images(img, t)
                with paddle.amp.auto_cast(custom_white_list={'elementwise_add'}, level='O1'):
                    predict = self.model(imgn, t)
                    loss_value = self.loss(predict, [noise])
                scaled = self.amp.scale(loss_value)
                scaled.backward()
                self.amp.step(self.optimizer)
                self.amp.update()
                self.lr.step()
                self._on_step_end()
