import torch
import torch.nn as nn
from torchsummary import summary
from modules.unet_2d_condition import UNet2DConditionModel
from modules.pose_net import PoseNet
from modules.attention_processor import AttnProcessor,IPAttnProcessor
from diffusers import AutoencoderKL, LMSDiscreteScheduler, PNDMScheduler
from transformers import CLIPTextModel, CLIPTokenizer,CLIPTextConfig,CLIPVisionModelWithProjection,CLIPVisionConfig,CLIPImageProcessor

# Build the UNet from its on-disk config. Note: from_config gives randomly
# initialized weights — no pretrained checkpoint is loaded here.
unet = UNet2DConditionModel.from_config(UNet2DConditionModel.load_config("configs/unet"))

# IP-Adapter setup: mapping of processor-name -> processor instance, plus a
# snapshot of the UNet weights used below to seed the adapter's projections.
attn_procs = {}
unet_sd = unet.state_dict()

# Walk every attention processor in the UNet — this covers both self-attention
# ("attn1") and cross-attention ("attn2") layers.
for name in unet.attn_processors.keys():
    # Self-attention processors end with "attn1.processor" and take no
    # cross-attention dim; everything else is cross-attention.
    is_self_attention = name.endswith("attn1.processor")
    cross_attention_dim = None if is_self_attention else unet.config.cross_attention_dim

    # Resolve the channel width of the block this processor belongs to.
    if name.startswith("mid_block"):
        hidden_size = unet.config.block_out_channels[-1]
    elif name.startswith("up_blocks"):
        block_id = int(name[len("up_blocks.")])
        # up blocks mirror the down path, so index into the reversed widths
        hidden_size = unet.config.block_out_channels[::-1][block_id]
    elif name.startswith("down_blocks"):
        block_id = int(name[len("down_blocks.")])
        hidden_size = unet.config.block_out_channels[block_id]

    if cross_attention_dim is None:
        # Self-attention: keep the stock processor untouched.
        attn_procs[name] = AttnProcessor()
        continue

    # Cross-attention: swap in an IP-Adapter processor and initialize its
    # extra image-projection weights (to_k_ip / to_v_ip) from the layer's
    # existing to_k / to_v weights.
    layer_name = name.split(".processor")[0]
    ip_weights = {
        "to_k_ip.weight": unet_sd[layer_name + ".to_k.weight"],
        "to_v_ip.weight": unet_sd[layer_name + ".to_v.weight"],
    }
    processor = IPAttnProcessor(hidden_size=hidden_size, cross_attention_dim=cross_attention_dim)
    processor.load_state_dict(ip_weights)
    attn_procs[name] = processor
unet.set_attn_processor(attn_procs)

# Smoke-test a single forward pass through the adapted UNet.
device = "cuda" if torch.cuda.is_available() else "cpu"  # graceful fallback on CPU-only hosts
latents = torch.randn((1, 4, 64, 64), device=device)  # latent size for a 512x512 image
unet.to(device)

# NOTE(review): encoder_hidden_states=None is likely invalid for the
# cross-attention (IPAttnProcessor) layers, which normally expect
# text/image embeddings of shape (batch, seq, cross_attention_dim) —
# confirm the intended conditioning input before relying on this call.
with torch.no_grad():  # inference only; no autograd graph needed
    ans = unet(latents, 1, encoder_hidden_states=None)

# UNet2DConditionModel returns a UNet2DConditionOutput by default; the tensor
# is in .sample — printing ans.shape on the output object raises AttributeError.
print(ans.sample.shape)

