import torch
from torchsummary import summary
from modules.unet_2d_condition import UNet2DConditionModel
from modules.pose_net import PoseNet
from diffusers import AutoencoderKL, LMSDiscreteScheduler
from transformers import CLIPTextModel, CLIPTokenizer,CLIPTextConfig,CLIPVisionModelWithProjection,CLIPVisionConfig,CLIPImageProcessor
# Global toggle: when True, print_gpu_mem reports CUDA memory usage at checkpoints.
check_memory = False

def print_gpu_mem(msg="", is_print=None):
    """Print current CUDA memory usage (allocated / reserved, in MB).

    Args:
        msg: Label printed alongside the numbers.
        is_print: Whether to print. Defaults to the module-level
            ``check_memory`` flag, looked up at call time. (The previous
            default ``is_print=check_memory`` was evaluated once at
            function definition, so flipping ``check_memory`` later in
            the run had no effect.)
    """
    if is_print is None:
        is_print = check_memory
    # Guard on CUDA availability so the helper is a no-op on CPU-only hosts
    # instead of raising.
    if is_print and torch.cuda.is_available():
        allocated = torch.cuda.memory_allocated() / (1024 ** 2)
        reserved = torch.cuda.memory_reserved() / (1024 ** 2)
        print(f"[GPU Memory] {msg}: Allocated={allocated:.2f} MB, Reserved={reserved:.2f} MB")



# =============================
# Step 1: Load models (structure only — no pretrained weights are loaded,
# so all parameters are randomly initialized; this script is a pipeline
# shape/memory demo, not a real generation run)
# =============================

print("加载UNet...")
# Build the UNet from its config file only ("unet" directory); weights are random.
unet = UNet2DConditionModel.from_config(
    UNet2DConditionModel.load_config("unet")
)
summary(unet)

print("加载PoseNet")
# Project-local pose-conditioning network; stays on CPU for the whole run.
pose_net = PoseNet()

print("加载VAE...")
# VAE built from the SD1.5 config (structure only, random weights).
vae = AutoencoderKL.from_config(
    AutoencoderKL.load_config("../../sd1.5", subfolder="vae")
)

print("加载CLIP tokenizer 和 text encoder...")
# Tokenizer loads real vocab files; the text encoder is config-only (random weights).
tokenizer = CLIPTokenizer.from_pretrained("../../sd1.5", subfolder="tokenizer")
text_encoder = CLIPTextModel(CLIPTextConfig.from_pretrained("../../sd1.5", subfolder="text_encoder"))

# Image encoder is currently disabled; image_embeds is faked with random
# values further below (see Step 2).
#print("加载image encoder...")
#image_encoder = CLIPVisionModelWithProjection(CLIPVisionConfig.from_pretrained("image_encoder"))

# print("加载image processor...")
# default_clip_size = 224
# clip_image_size = (
#     image_encoder.config.image_size
# )
# feature_extractor = CLIPImageProcessor(size=clip_image_size, crop_size=clip_image_size)

print("加载scheduler...")
# Standard SD1.5 noise schedule (scaled-linear betas).
scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")

# =============================
# Step 2: Build (simulated) inputs
# =============================
prompt = ["a photograph of an astronaut riding a horse"]
# Tokenize prompt, padded/truncated to the tokenizer's max length (77 for CLIP).
text_input = tokenizer(
    prompt,
    padding="max_length",
    max_length=tokenizer.model_max_length,
    truncation=True,
    return_tensors="pt"
)
# BatchEncoding.to moves all contained tensors (input_ids, attention_mask) to GPU.
text_input = text_input.to("cuda")
# Encode tokens to text embeddings (bsz, seq_len, dim); encoder is moved to
# GPU only for this forward pass, then back to CPU to free memory.
text_encoder.to("cuda")

with torch.no_grad():
    text_embeddings = text_encoder(text_input.input_ids)[0]
    print(text_embeddings.shape)
text_encoder.to("cpu")

# NOTE(review): text_embeddings is computed but never passed to the UNet below
# (encoder_hidden_states=None in Step 3) — confirm whether that is intentional.

#= Image Embeds
# image_encoder.to("cuda")
# image = torch.randn(1, 3, 224, 224).to('cuda')
# print('image_encoder stype',next(image_encoder.parameters()).dtype)
# print('image dtype',image.dtype)
# with torch.cuda.amp.autocast():
#     with torch.no_grad():
#         image_embeds = image_encoder(image).image_embeds       
# uncond_image_embeds = torch.zeros_like(image_embeds)
# print('image_embeds:',image_embeds.shape)
# Placeholder for the disabled CLIP image encoder above; 1024 presumably
# matches the projection dim the UNet expects — TODO confirm.
image_embeds = torch.randn(1, 1024).to('cuda')

#= Create random latent (bsz, channels, height, width)
latents = torch.randn((1, 4, 64, 64))  # Latent size for 512x512 image
latents = latents.to("cuda")
# Set number of inference steps
scheduler.set_timesteps(20)

# Add noise to latents (for demo purpose only)
latents = latents * scheduler.init_noise_sigma

# =============================
# Step 3: Denoising loop
# =============================
print_gpu_mem("Before Unet in cpu")
unet.to("cuda")
for t in scheduler.timesteps:
    # Expand the latents if we are doing classifier-free guidance
    # (here only sigma scaling is applied; no CFG duplication).
    latent_model_input = scheduler.scale_model_input(latents, t)
    
    # Conditioning inputs for the UNet.
    added_cond_kwargs = {"image_embeds": image_embeds}
    # pose_net runs on CPU with a fresh random "pose image" every step
    # (demo input); only the resulting latents are moved to GPU.
    pose_latents = pose_net(torch.randn(1,3,512,512)).to("cuda")
    print(pose_latents.shape)
    # Predict noise residual. The intrablock residuals are random
    # placeholders shaped like the four SD1.5 down-block resolutions
    # (320x64², 640x32², 1280x16², 1280x8²), regenerated each iteration.
    # NOTE(review): encoder_hidden_states=None although text_embeddings was
    # computed in Step 2 — confirm this is intended for the demo.
    with torch.no_grad():
        noise_pred = unet(latent_model_input,
                           t, 
                           encoder_hidden_states=None,
                           pose_latents = pose_latents,
                           added_cond_kwargs=added_cond_kwargs,
                           down_intrablock_additional_residuals=[i.to("cuda") for i in [torch.randn(1,320,64,64),torch.randn(1,640,32,32),torch.randn(1,1280,16,16),torch.randn(1,1280,8,8)]]).sample
    print_gpu_mem("After Unet in gpu")
    # Compute previous noisy sample x_t -> x_{t-1}
    latents = scheduler.step(noise_pred, t, latents).prev_sample

# Move the UNet back to CPU and release cached GPU memory before decoding.
unet.to("cpu")
torch.cuda.empty_cache()
# =============================
# Step 4: Decode latents to image
# =============================
print("Decoding latents...")
print_gpu_mem("Before VAE decode")
vae.to("cuda")
print_gpu_mem("to gpu VAE decode")
with torch.no_grad():
    # Unscale by the VAE's configured scaling factor rather than the
    # hard-coded magic number; fall back to the SD1.5 default (0.18215)
    # if the config does not define one — behavior is unchanged for SD1.5.
    scaling_factor = getattr(vae.config, "scaling_factor", 0.18215)
    image = vae.decode(latents / scaling_factor).sample
print_gpu_mem("After VAE decode")
vae.to("cpu")
print_gpu_mem("to cpu VAE decode")
# Map from the VAE's [-1, 1] output range to [0, 1] for display/saving.
image = (image / 2 + 0.5).clamp(0, 1)

print("Image shape:", image.shape)  # Should be (1, 3, 512, 512)