# %%import
from reflow.utils import _PIPELINES, _SCHEDULERS
from copy import deepcopy
import torch
from tqdm import tqdm, trange
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from torchvision.utils import make_grid
from typing import Callable, List, Optional, Union
from reflow.utils import decode_latents, nothing
from reflow.utils import set_seed
from diffusers.utils import randn_tensor
import math
from diffusers import UNet2DConditionModel
from loguru import logger
from pathlib import Path
import os
# %%config
# Device / checkpoint selection for this sampling run.
device = 'cuda:2'
diffusers_pipeline = 'stable_diffusion'
diffusers_scheduler = 'euler_dummy'
diffusers_pipeline_ckpt = 'checkpoints/SD-1-4'

# Optional UNet state-dict override; None keeps the pretrained weights.
ckpt_path = None
# ckpt_path = "logs/online/sdv1-4_laion_1MPrompts_distill_inferenceN25CFG1.5_lpips_lr1e-6/checkpoints/score_model_s20000.pth"

# ckpt_path = "logs/online/sdv1-4_laion_1MPrompts_guidance7.5/checkpoints/score_model_s40000.pth"
ckpt_path = "logs/online/sdv1-4_laion_1MPrompts_distill_inferenceN25CFG1.5_l2_lr1e-6/checkpoints/score_model_s40000.pth"



# ckpt_path = "logs/pokemon/distill/AutoCaption/score_model_s2000.pth"
# ckpt_path="logs/pokemon/finetune_sd/main/score_model_s1000.pth"
# ckpt_path="logs/pokemon/2reflow/AutoCaption/score_model_s2000.pth"


# ckpt_path = "logs/emoji/distill/AutoCaption/score_model_s2000.pth"
# ckpt_path = "logs/emoji/2reflow/AutoCaption/score_model_s2000.pth"

# ckpt_path = "logs/nouns/distill/AutoCaption/score_model_s2000.pth"
# ckpt_path = "logs/sketch-scene/distill/AutoCaption/score_model_s2000.pth"

# Optional fixed timestep schedule for the 'euler_dummy' scheduler;
# None lets the scheduler use its own schedule. (Spelling 'overide' matches
# the scheduler attribute set below.)
overide_timesteps = None
# overide_timesteps = [1e-3, 0.1]

use_xformers = True

save_dir = Path("tmp")
save_dir.mkdir(parents=True, exist_ok=True)

# %%load the diffusion pipeline; define the reflow model
pipeline_cls = _PIPELINES[diffusers_pipeline]
scheduler_cls = _SCHEDULERS[diffusers_scheduler]

weight_dtype = torch.float16
# Safety checker disabled: offline research sampling script.
pipeline = pipeline_cls.from_pretrained(
    diffusers_pipeline_ckpt,
    torch_dtype=weight_dtype,
    safety_checker=None,
    requires_safety_checker=False,
)
if ckpt_path:
    # Swap in the distilled / finetuned UNet weights.
    pipeline.unet.load_state_dict(torch.load(ckpt_path))
pipeline.scheduler = scheduler_cls.from_config(pipeline.scheduler.config)
if diffusers_scheduler == 'euler_dummy':
    # The dummy Euler scheduler reads this attribute to override its schedule.
    pipeline.scheduler.overide_timesteps = overide_timesteps
# Replace the VAE weights (presumably the ft-MSE finetuned decoder, judging
# by the checkpoint path — confirm against the checkpoint source).
pipeline.vae.load_state_dict(torch.load(
    "checkpoints/sd-vae-ft-mse/diffusion_pytorch_model.bin"))
pipeline = pipeline.to(device)

if use_xformers:
    pipeline.enable_xformers_memory_efficient_attention()
    
# %%sampling config

# diffusion
inference_steps = 25
if overide_timesteps:
    # A fixed timestep schedule determines the step count.
    inference_steps = len(overide_timesteps)
guidance_scale = 1
num_images_per_prompt = 25

prompts = [
    # "A photograph of an astronaut riding a horse.",
    # "A puppy wearing a hat",
    "A brand new helicopter.",
    # "A beautiful castle, matte painting.",
    # "A wooden house sitting in a field."
    # "A boy with a hat wearing a tie"
    # "A dog wearing a Santa Claus hat"
    # "A bowl of food with meat in a sauce, broccoli and cucumbers."
    # "A bird is standing on top of a wooden bench."
    # "A kitchen with an oven, stove, and knives"
    # "a double decker bus going down the street"
    # "hyperdetailed robotic skeleton head, symetry, intricate, detailed,"
    # "superman in real world"
    # "supercat"
    # "totoro"
    # "hello kitty"
    # "the face and ear of a teddy bear"
]

# Seed everything unless random_seed is -1 (sentinel for "do not seed").
random_seed = 2
if random_seed != -1:
    set_seed(random_seed)

# When True and `noise` already holds a tensor, the noise is reused across
# reruns of the cells below so samples stay comparable.
persistent_noise = True
noise = None

# Latent spatial resolution (presumably 1/8 of pixel resolution for SD,
# i.e. 64 -> 512x512 images — confirm against the pipeline's VAE).
sample_res = 64


# %%prepare prompts

# Repeat every prompt num_images_per_prompt times so each one gets a
# full batch of samples.
expanded = []
for prompt_text in prompts:
    expanded.extend([prompt_text] * num_images_per_prompt)
prompts = expanded

# Reuse the previous noise only when persistence is requested and a noise
# tensor actually exists; otherwise draw a fresh batch of latent noise.
reuse_noise = persistent_noise and isinstance(noise, torch.Tensor)
if not reuse_noise:
    noise = torch.randn(len(prompts), 4, sample_res, sample_res,
                        device=device, dtype=weight_dtype)

# %%sample with the diffusion model to obtain data
example = pipeline.inference_latent(
    prompt=prompts,
    latents=noise,
    num_inference_steps=inference_steps,
    # num_images_per_prompt=1,
    guidance_scale=guidance_scale,
)
# noise = example['noise']
latent = example['latent']
condition = {
    'encoder_hidden_states': example['text_embeddings']
}
uncond_condition = {
    'encoder_hidden_states': example['uncond_text_embeddings']
}
if uncond_condition['encoder_hidden_states'] is None:
    # The pipeline returned no unconditional embeddings (presumably because
    # classifier-free guidance was inactive — confirm in inference_latent);
    # encode empty prompts so downstream code always has them.
    uncond_condition['encoder_hidden_states'] = pipeline._encode_prompt(
        prompt=[""] * len(prompts),
        device=pipeline.device,
        num_images_per_prompt=1,
        do_classifier_free_guidance=False,
    )
    
# %%display and save the output images
# Choose a near-square grid: nrow is at least floor(sqrt(N)), and ncol is
# however many columns fit the total count into that many rows.
num_images = len(prompts)
nrow = int(math.sqrt(num_images))
nrow = max(nrow, num_images // nrow)
ncol = num_images // nrow

def to_pil(img_latent):
    """Convert one decoded image tensor to a PIL image.

    Args:
        img_latent: float tensor of shape (C, H, W), values expected in
            [0, 1] (output of ``decode_latents``).

    Returns:
        ``PIL.Image`` built from the tensor in HWC layout.
    """
    # Clamp before the uint8 cast: values even slightly outside [0, 1]
    # would otherwise wrap around (e.g. 1.01 * 255 -> 257 -> 1) and
    # corrupt pixels.
    image = img_latent.clamp(0., 1.).mul(255.).to(dtype=torch.uint8).permute(1, 2, 0)
    # .cpu() so this also works for CUDA tensors; calling .numpy() on a
    # GPU tensor raises an error.
    image = Image.fromarray(image.cpu().numpy())
    return image


# Decode latents to images without tracking gradients (inference only).
with torch.no_grad():
    samples = decode_latents(pipeline.vae, latent)
    sample_grid = make_grid(samples, nrow=nrow, )

samples_pil = [to_pil(sample) for sample in samples]
grid_pil = to_pil(sample_grid)

# squeeze=False keeps `axs` a 2-D array even when nrow or ncol is 1, so
# the axs[i, j] indexing below cannot fail for a single row or column.
fig, axs = plt.subplots(nrow, ncol, figsize=(13, 13), squeeze=False)

for i in range(nrow):
    for j in range(ncol):
        # Title each subplot with its (row, col) grid position; these
        # indices identify which images to save later.
        axs[i, j].set_title(f'({i},{j})', fontsize=20)

        img_ij = samples_pil[i*ncol+j]

        # img_ij.save(str(save_dir / f"({i},{j}).jpg"))

        # Show the image with the axes hidden.
        axs[i, j].imshow(img_ij)
        axs[i, j].axis('off')

# Tighten spacing between subplots.
plt.tight_layout()

# Render the figure.
plt.show()

# grid_pil.save(str(save_dir / f"grid.jpg"))
# %%settings for saving images
target_str="""
12
""" # naming format: each pair of digits is the (row, col) of one image
model_name="pokemon_distill"
root_dir = Path("samples/free_sample")
#%%save the selected images to the target folder
assert len(prompts)==num_images_per_prompt  # i.e. exactly one unique prompt
prompt = prompts[0] # ensure only one prompt is used

sample_save_dir = root_dir / model_name / prompt
sample_save_dir.mkdir(parents=True, exist_ok=True)
# Continue numbering after the files already present in the directory.
start_idx = len(os.listdir(sample_save_dir))+1


def parse_target_str(target_str: str):
    """Parse the image-selection string into (row, col) grid coordinates.

    Every digit in ``target_str`` is taken in order and consecutive digits
    are paired: "12" -> [(1, 2)], "1234" -> [(1, 2), (3, 4)]. Non-digit
    characters (commas, dots, whitespace) are ignored wherever they appear,
    which generalizes the old behaviour of only stripping them from the
    ends of the string.

    Returns:
        List of (row, col) int tuples; empty list for an empty selection.

    Raises:
        ValueError: if the string contains an odd number of digits, i.e.
            the last coordinate pair is incomplete.
    """
    digits = [int(ch) for ch in target_str if ch.isdigit()]
    if len(digits) % 2:
        raise ValueError(
            f"target_str must contain an even number of digits, got {len(digits)}")
    return [(digits[i], digits[i + 1]) for i in range(0, len(digits), 2)]

target=parse_target_str(target_str)
# Save each selected image under a sequential index starting at start_idx.
for idx, (i,j) in enumerate(target, start=start_idx):
    img_ij=samples_pil[i*ncol+j]
    img_ij.save(str(sample_save_dir / f"{idx}.png"))
    print(f"image ({i},{j}) save")

# Clear the selection so rerunning this cell does not save duplicates.
target_str=""
print(f"{start_idx+len(target)-1} samples in total now")
    

# %%
