# src/steganography.py

from typing import Union
import torch
import numpy as np
from tqdm import tqdm

GUIDANCE_SCALE = 1 # TODO: tune manually (classifier-free guidance scale)

class Steganography:
    """Image steganography built on deterministic DDIM inversion.

    Wraps a Stable-Diffusion-style pipeline object (``model``) that exposes
    ``.unet``, ``.vae``, ``.tokenizer``, ``.text_encoder`` and ``.scheduler``.
    The forward loop (``is_forward=True``) deterministically adds noise to a
    latent (DDIM inversion); the backward loop removes it (DDIM sampling).
    """

    def __init__(self, model, device, num_steps=50, guidance_scale=None):
        """Set up the pipeline handle and the inference schedule.

        Args:
            model: diffusion pipeline (unet / vae / tokenizer / text_encoder /
                scheduler) — assumed Stable-Diffusion-like; TODO confirm.
            device: torch device image tensors are moved to.
            num_steps: number of scheduler inference steps. Previously a
                hard-coded "tune manually" constant; the default keeps the
                old value.
            guidance_scale: classifier-free guidance scale. ``None`` falls
                back to the module-level ``GUIDANCE_SCALE`` at call time,
                which is exactly the previous behavior.
        """
        self.model = model
        self.device = device
        self.num_steps = num_steps
        self.guidance_scale = guidance_scale
        self.tokenizer = self.model.tokenizer
        self.model.scheduler.set_timesteps(self.num_steps)
        self.prompt = None
        self.context = None

    def prev_step(self, model_output: Union[torch.FloatTensor, np.ndarray], timestep: int, sample: Union[torch.FloatTensor, np.ndarray]):
        """One deterministic DDIM denoising step: x_t -> x_{t-dt} (eta = 0).

        Args:
            model_output: predicted noise eps_theta(x_t, t).
            timestep: current scheduler timestep t.
            sample: current latent x_t.

        Returns:
            The latent at the previous (less noisy) timestep.
        """
        # Stride between consecutive inference timesteps.
        prev_timestep = timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
        alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
        # Past t = 0 the schedule ends; use the scheduler's final alpha.
        alpha_prod_t_prev = self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod
        beta_prod_t = 1 - alpha_prod_t
        # Predicted x_0: (x_t - sqrt(1 - a_t) * eps) / sqrt(a_t)
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        # "Direction pointing to x_t" term of the DDIM update (eta = 0).
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
        return prev_sample

    def next_step(self, model_output: Union[torch.FloatTensor, np.ndarray], timestep: int, sample: Union[torch.FloatTensor, np.ndarray]):
        """One DDIM inversion step: x_t -> x_{t+dt} (deterministic noising).

        Mirrors :meth:`prev_step` with the roles of the two timesteps
        swapped, so iterating it inverts the sampling trajectory.
        """
        # The given t becomes the *target* step; the source step is one
        # stride earlier, clamped to the last training timestep (999).
        timestep, next_timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999), timestep
        alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod
        alpha_prod_t_next = self.scheduler.alphas_cumprod[next_timestep]
        beta_prod_t = 1 - alpha_prod_t
        next_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        next_sample_direction = (1 - alpha_prod_t_next) ** 0.5 * model_output
        next_sample = alpha_prod_t_next ** 0.5 * next_original_sample + next_sample_direction
        return next_sample

    def get_noise_pred(self, latents, t, is_forward=True, context=None):
        """Classifier-free-guided noise prediction plus one DDIM step.

        Args:
            latents: current latent tensor.
            t: scheduler timestep.
            is_forward: True -> noising step (next_step); False -> denoising.
            context: concatenated [uncond, cond] text embeddings; defaults to
                the context prepared by :meth:`init_prompt`.

        Returns:
            The latent after one step in the requested direction.
        """
        if context is None:
            context = self.context
        # Per-instance override if given, else the module-level constant
        # looked up at call time (preserves the original behavior).
        guidance_scale = self.guidance_scale if self.guidance_scale is not None else GUIDANCE_SCALE
        uncond_embeddings, cond_embeddings = context.chunk(2)
        # Unconditional and text-conditional UNet noise predictions.
        noise_pred_uncond = self.model.unet(latents, t, uncond_embeddings)["sample"]
        noise_prediction_text = self.model.unet(latents, t, cond_embeddings)["sample"]
        # Classifier-free guidance combination.
        noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond)
        if is_forward:
            latents = self.next_step(noise_pred, t, latents)
        else:
            latents = self.prev_step(noise_pred, t, latents)
        return latents

    # Gradients disabled: backprop through the VAE decoder would run out of
    # CUDA memory.
    @torch.no_grad()
    def latent2image(self, latents):
        """Decode a latent into an HxWxC uint8 image (inverse of image2latent)."""
        # Undo the SD latent scaling factor before decoding.
        latents = 1 / 0.18215 * latents.detach()
        image = self.model.vae.decode(latents)['sample']
        # [-1, 1] -> [0, 1]
        image = (image / 2 + 0.5).clamp(0, 1)
        # (1, C, H, W) -> (H, W, C)
        image = image.cpu().permute(0, 2, 3, 1).numpy()[0]
        image = (image * 255).astype(np.uint8)
        return image

    @torch.no_grad()
    def image2latent(self, image):
        """Encode an HxWxC uint8 NumPy image into a scaled VAE latent."""
        # NOTE: the former nested `with torch.no_grad():` was redundant —
        # the decorator already disables gradients — and has been removed.
        # uint8 [0, 255] -> float [-1, 1]
        image = torch.from_numpy(image).float() / 127.5 - 1
        # (H, W, C) -> (C, H, W) -> (1, C, H, W)
        image = image.permute(2, 0, 1).unsqueeze(0).to(self.device)
        # Use the mean of the VAE posterior (deterministic encoding).
        latents = self.model.vae.encode(image)['latent_dist'].mean
        # SD latent scaling factor.
        latents = latents * 0.18215
        return latents

    @torch.no_grad()
    def init_prompt(self, prompt: str):
        """Encode `prompt` and cache the [uncond, cond] guidance context."""
        # Unconditional ("") embedding for classifier-free guidance.
        # `truncation=True` added for consistency with the conditional call.
        uncond_input = self.model.tokenizer(
            [""], padding="max_length", max_length=self.model.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt"
        )
        uncond_embeddings = self.model.text_encoder(uncond_input.input_ids.to(self.model.device))[0]
        # Conditional embedding for the actual prompt.
        text_input = self.model.tokenizer(
            [prompt],
            padding="max_length",
            max_length=self.model.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_embeddings = self.model.text_encoder(text_input.input_ids.to(self.model.device))[0]
        # get_noise_pred splits this back apart with .chunk(2).
        self.context = torch.cat([uncond_embeddings, text_embeddings])
        self.prompt = prompt

    @torch.no_grad()
    def pndm_loop(self, latent, is_forward=True):
        """Run the full noising (forward) or denoising (backward) trajectory.

        Returns:
            List of latents: the input first, then one entry per step.
        """
        all_latent = [latent]
        # Clone so the caller's tensor is never mutated in place.
        latent = latent.clone().detach()
        for i in tqdm(range(self.num_steps)):
            if is_forward:
                # Noising: walk the timestep list from the end (small t -> large t).
                t = self.model.scheduler.timesteps[len(self.model.scheduler.timesteps) - i - 1]
            else:
                # Denoising: walk it from the start (large t -> small t).
                t = self.model.scheduler.timesteps[i]
            latent = self.get_noise_pred(latent, t, is_forward, self.context)
            all_latent.append(latent)

        return all_latent

    @property
    def scheduler(self):
        """The underlying pipeline scheduler."""
        return self.model.scheduler

    def invert(self, prompt, start_latent, is_forward):
        """Prepare `prompt`, run the full DDIM loop, return the final latent."""
        self.init_prompt(prompt)
        latents = self.pndm_loop(start_latent, is_forward=is_forward)
        return latents[-1]