'''
Stable Diffusion inference: text-to-image with manually assembled
pipeline components (tokenizer, CLIP text encoder, UNet, scheduler, VAE).
'''
import time

#
# from diffusers import DDPMScheduler, UNet2DModel
# # load model, scheduler
# model_id = "stabilityai/stable-diffusion-2-1"
# model_id = "google/ddpm-cat-256"
# scheduler = DDPMScheduler.from_pretrained(model_id)
# model = UNet2DModel.from_pretrained(model_id, use_safetensors=True).to("cuda")
#
# scheduler.set_timesteps(100)
#
# import torch
# sample_size = model.config.sample_size
# noise = torch.randn((1, 3, sample_size, sample_size), device="cuda")
#
#
# input = noise
# for t in scheduler.timesteps:
#     with torch.no_grad():
#         noisy_residual = model(input, t).sample
#     previous_noisy_sample = scheduler.step(noisy_residual, t, input).prev_sample
#     input = previous_noisy_sample
#
#
#
# from PIL import Image
# import numpy as np
# image = (input / 2 + 0.5).clamp(0, 1).squeeze()
# image = (image.permute(1, 2, 0) * 255).round().to(torch.uint8).cpu().numpy()
# image = Image.fromarray(image)
# image.save("demo2.png")

from PIL import Image
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, UNet2DConditionModel, PNDMScheduler

# model_id = "stabilityai/stable-diffusion-2-1"
# model_id = "CompVis/stable-diffusion-v1-4"
# Local snapshot of CompVis/stable-diffusion-v1-4 (avoids re-downloading from the Hub).
model_id = r"D:\code\other\stablediffusion\huggingface\hub\models--CompVis--stable-diffusion-v1-4\snapshots\133a221b8aa7292a167afc5127cb63fb5005638b"
torch_device = "cpu"


# VAE: decodes latents back to pixel space at the end of the pipeline.
vae = AutoencoderKL.from_pretrained(model_id, subfolder="vae", use_safetensors=True)
# Tokenizer + text encoder: turn the prompt into CLIP text embeddings.
tokenizer = CLIPTokenizer.from_pretrained(model_id, subfolder="tokenizer") #
text_encoder = CLIPTextModel.from_pretrained(
    model_id, subfolder="text_encoder", use_safetensors=True
)
# UNet: predicts the noise residual at each denoising step.
unet = UNet2DConditionModel.from_pretrained(
    model_id, subfolder="unet", use_safetensors=True
)
# torch_device = "cuda"

# vae.to(torch_device)
# text_encoder.to(torch_device)
# Only the UNet is moved to the target device; VAE and text encoder stay on CPU.
unet.to(torch_device)

# NOTE(review): mid-file import — convention is to keep imports at the top of the file.
from diffusers import UniPCMultistepScheduler
scheduler = UniPCMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")

# ---- Text conditioning -------------------------------------------------
# Encode the prompt (and an empty "unconditional" prompt) with CLIP; the
# pair is what classifier-free guidance needs at every denoising step.
prompt = ["a photograph of an astronaut riding a horse"]
height = 128  # output image height in pixels (SD default is 512)
width = 128  # output image width in pixels (SD default is 512)
num_inference_steps = 25  # number of denoising steps
guidance_scale = 7.5  # scale for classifier-free guidance
# torch.manual_seed(0) returns a torch.Generator for reproducible latent
# noise. (The previous torch.cuda.manual_seed(0) returns None and only
# seeds the GPU RNG, which this CPU pipeline never draws from.)
generator = torch.manual_seed(0)
batch_size = len(prompt)

torch_device_cpu = "cpu"
# Tokenize: pad/truncate to the model's max length (77 tokens for CLIP);
# the token ids are the text encoder's input.
text_input = tokenizer(
    prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt"
)  # -> input_ids and attention_mask, each (batch, 77)
with torch.no_grad():
    # (batch, 77, 768) prompt embeddings
    text_embeddings = text_encoder(text_input.input_ids.to(torch_device_cpu))[0]

# Unconditional (empty-prompt) embeddings for classifier-free guidance.
max_length = text_input.input_ids.shape[-1]
uncond_input = tokenizer([""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt")
with torch.no_grad():  # inference only — no autograd graph needed here either
    uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device_cpu))[0]

# Concatenate [uncond, cond] so one UNet forward pass serves both branches.
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

# ---- Iterative denoising -----------------------------------------------
# Start from pure Gaussian noise in latent space (1/8th of the pixel
# resolution, 4 latent channels) and let the scheduler walk it back to a
# clean sample.
latents = torch.randn(
    (batch_size, unet.config.in_channels, height // 8, width // 8),
    generator=generator,  # seeded generator -> reproducible initial noise
    device=torch_device,
)  # (batch, 4, height//8, width//8)

# Scale by the scheduler's initial noise sigma (required by some schedulers).
latents = latents * scheduler.init_noise_sigma

from tqdm.auto import tqdm

scheduler.set_timesteps(num_inference_steps)
# e.g. for 25 steps: tensor([999, 959, 919, ..., 120, 80, 40])

latents = latents.to(torch_device)
text_embeddings = text_embeddings.to(torch_device)
for t in tqdm(scheduler.timesteps):
    # Duplicate the latents so the unconditional and conditional branches
    # share a single UNet forward pass (classifier-free guidance).
    latent_model_input = torch.cat([latents] * 2)  # (2*batch, 4, h, w)

    # Some schedulers (e.g. UniPC) rescale the model input per timestep.
    latent_model_input = scheduler.scale_model_input(latent_model_input, timestep=t)

    # Predict the noise residual:
    #   inputs  latent_model_input (2*batch, 4, h, w), text_embeddings (2*batch, 77, 768), t
    #   output  noise_pred (2*batch, 4, h, w)
    with torch.no_grad():
        noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

    # Classifier-free guidance: move the prediction away from the
    # unconditional branch toward the text-conditioned one.
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

    # Scheduler step: compute the previous noisy sample x_t -> x_{t-1}.
    latents = scheduler.step(noise_pred, t, latents).prev_sample

# ---- Decode latents to an image with the VAE ---------------------------
# Undo the SD latent scaling factor (0.18215) before decoding.
latents = 1 / 0.18215 * latents
latents = latents.to(torch_device_cpu)
with torch.no_grad():
    image = vae.decode(latents).sample  # (1, 3, H, W), values in [-1, 1]

# Map [-1, 1] -> [0, 1] and drop the batch dimension: (3, H, W).
image = (image / 2 + 0.5).clamp(0, 1).squeeze()
# CHW -> HWC uint8 for PIL. .round() before the cast: a bare uint8 cast
# truncates, biasing every pixel value down by up to one level.
image = (image.permute(1, 2, 0) * 255).round().to(torch.uint8).cpu().numpy()
image = Image.fromarray(image)
image.save("demo2_5.png")
