import os,pdb
os.environ['CUDA_VISIBLE_DEVICES']='4'  # pin the visible GPU before torch/CUDA initializes

# Make the parent directory importable so the `utils.*` packages resolve when
# this file is run as a standalone script from its own directory.
cur_dir = os.path.dirname( os.path.abspath(__file__) )
par_dir = os.path.dirname(cur_dir)

import sys
# print(sys.path)
sys.path.insert(0,par_dir)

import utils.util_for_huggingface
from utils.MODEL_CKP import FLUX  # FLUX: checkpoint path/id for the FLUX pipeline

from diffusers import FluxPipeline
import torch

# Load the full FLUX pipeline in bf16; individual components are pulled out of
# it below so each can be exported to ONNX separately.
pipe = FluxPipeline.from_pretrained(FLUX, torch_dtype=torch.bfloat16)
# The string below is a pasted reference copy of diffusers' FluxPipeline
# __init__ registration, kept for quick lookup of the available components
# and derived attributes. It is inert (a bare string expression).
'''
self.register_modules(
    vae=vae,
    text_encoder=text_encoder,
    text_encoder_2=text_encoder_2,
    tokenizer=tokenizer,
    tokenizer_2=tokenizer_2,
    transformer=transformer,
    scheduler=scheduler,
    image_encoder=image_encoder,
    feature_extractor=feature_extractor,
)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
# Flux latents are turned into 2x2 patches and packed. This means the latent width and height has to be divisible
# by the patch size. So the vae scale factor is multiplied by the patch size to account for this
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2)
self.tokenizer_max_length = (
    self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77
)
self.default_sample_size = 128

'''
# pdb.set_trace()

# Convenience aliases for the pipeline components used by the exporters below.
vae = pipe.vae
text_encoder = pipe.text_encoder
text_encoder_2 = pipe.text_encoder_2
transformer = pipe.transformer

tokenizer = pipe.tokenizer
tokenizer_2 = pipe.tokenizer_2

vae_scale_factor = pipe.vae_scale_factor

from optimum.exporters.onnx import main_export

# All ONNX artifacts are written into this directory.
save_onnx_dir = 'demo_onnx'
os.makedirs( save_onnx_dir , exist_ok=True )

# Use one opset across all exports so the resulting graphs can be merged later.
opset = 20
framework = "pt"
device = "cpu"
dtype = torch.bfloat16

def main():
    """Entry point: run the currently-enabled ONNX export steps."""
    get_onnx()

def onnx_from_vae():
    """Export the FLUX VAE decoder to ONNX.

    Traces only ``vae.decoder`` as a single tensor->tensor module:
    input  latent (B, 16, H/8, W/8)  -> output image (B, 3, H, W).
    The dummy latent is sized for a 1024x1024 image (128x128 latent grid);
    only the batch dimension is exported as dynamic.

    Returns:
        str: path of the exported ``.onnx`` file.
    """
    vae = pipe.vae
    vae.eval()

    # Dummy latent for tracing: 1024x1024 image -> 128x128 latent grid.
    dummy_latent = torch.randn(1, 16, 128, 128, dtype=dtype)

    # Wrap only the decoder so torch.onnx.export sees a plain forward(x)
    # instead of the full AutoencoderKL interface.
    # NOTE(review): this bypasses `vae.post_quant_conv` (if the checkpoint has
    # one) and any latent scaling/shifting normally done in `vae.decode` --
    # confirm the consumer of this ONNX graph applies those steps itself.
    class VAEDecoderWrapper(torch.nn.Module):
        def __init__(self, vae):
            super().__init__()
            self.decoder = vae.decoder

        def forward(self, x):
            return self.decoder(x)

    wrapper = VAEDecoderWrapper(vae)

    onnx_path = os.path.join(save_onnx_dir, "vae_decoder_raw.onnx")
    torch.onnx.export(
        wrapper,
        (dummy_latent,),
        onnx_path,
        input_names=["latent"],
        output_names=["image"],
        dynamic_axes={"latent": {0: "batch"}, "image": {0: "batch"}},
        opset_version=opset,
        do_constant_folding=True,  # fold constants for a smaller/faster graph
        verbose=False,  # set True for export debugging
    )

    print(f"VAE Decoder已导出至: {onnx_path}")
    return onnx_path
    
def onxx_from_txtencoder1():
    # NOTE(review): unfinished stub -- optimum's `main_export` requires at
    # least `model_name_or_path` and `output`, so this zero-argument call
    # raises TypeError as written. (The "onxx" typo in the function name is
    # kept to avoid breaking the caller in get_onnx.)
    main_export()
    
def onnx_from_txtencoder2():
    # NOTE(review): unfinished stub -- optimum's `main_export` requires at
    # least `model_name_or_path` and `output`, so this zero-argument call
    # raises TypeError as written.
    main_export()
    
def onnx_from_transformers():
    """Export the FLUX transformer (denoiser) to ONNX.

    Every ``torch.nn.RMSNorm`` submodule is first swapped for an
    export-friendly re-implementation (so the graph avoids the fused aten
    RMSNorm op), then the model is traced with fixed-size dummy inputs
    matching a 1024x1024 image (4096 packed 2x2 patches) and a 512-token
    text sequence. The resulting graph is written to
    ``{save_onnx_dir}/transformer_raw.onnx``.
    """
    import torch
    import torch.nn as nn

    class ExportableRMSNorm(nn.Module):
        """RMSNorm built from primitive ops so ONNX export does not hit the fused aten op."""

        def __init__(self, normalized_shape, eps=1e-6, elementwise_affine=True):
            super().__init__()
            self.normalized_shape = normalized_shape
            self.eps = eps
            self.elementwise_affine = elementwise_affine
            # Mirror torch.nn.RMSNorm: learnable scale only when affine.
            if self.elementwise_affine:
                self.weight = nn.Parameter(torch.ones(normalized_shape))
            else:
                self.register_parameter('weight', None)

        def forward(self, x):
            # rms = sqrt(mean(x^2) + eps) over the last dim; normalize, then scale.
            rms = torch.sqrt(torch.mean(torch.square(x), dim=-1, keepdim=True) + self.eps)
            x_normalized = x / rms
            if self.elementwise_affine:
                x_normalized = x_normalized * self.weight
            return x_normalized

    def replace_rmsnorm_recursive(module, target_cls=torch.nn.RMSNorm, replacement_cls=ExportableRMSNorm):
        """Recursively replace every `target_cls` submodule with `replacement_cls`, copying its state."""
        for name, child in module.named_children():
            # Depth-first so nested norms are handled before the parent swap.
            replace_rmsnorm_recursive(child, target_cls, replacement_cls)

            if isinstance(child, target_cls):
                replacement = replacement_cls(
                    normalized_shape=child.normalized_shape,
                    eps=child.eps,
                    elementwise_affine=child.elementwise_affine,
                )
                if child.elementwise_affine and child.weight is not None:
                    # Rebind to the trained scale tensor (single copy; the
                    # original code copied the same weight twice). Rebinding
                    # `.data` also preserves the checkpoint dtype (bf16).
                    replacement.weight.data = child.weight.data
                setattr(module, name, replacement)

    def check_rmsnorm_replaced(module, target_cls=torch.nn.RMSNorm):
        """Print any RMSNorm instance that survived the replacement pass (expect no output)."""
        for name, child in module.named_children():
            if isinstance(child, target_cls):
                print(f"未替换的RMSNorm: {name}")
            check_rmsnorm_replaced(child, target_cls)

    transformer = pipe.transformer

    # Dump the parameter list before the swap for a visual sanity check.
    print("Origin Model structure:")
    for name, param in transformer.named_parameters():
        print(f"{name}: {param.shape}")

    replace_rmsnorm_recursive(transformer)
    check_rmsnorm_replaced(transformer)  # no output here means every RMSNorm was replaced

    transformer.eval()

    # Dump the parameter list again after the swap.
    print("Model structure:")
    for name, param in transformer.named_parameters():
        print(f"{name}: {param.shape}")

    # Fixed-size dummy inputs for tracing (1024x1024 image -> 64*64=4096 patches).
    hidden_states   = torch.randn(1, 64*64, 64, dtype=dtype)        # packed image patches
    timestep        = torch.tensor([0], dtype=torch.long)           # raw scheduler step index
    encoder_hidden_states = torch.randn(1, 512, 4096, dtype=dtype)  # T5 text sequence
    pooled_projections    = torch.randn(1, 768, dtype=dtype)        # CLIP pooled embedding
    txt_ids = torch.randn(512, 3, dtype=dtype)   # one 3-d position id per text token
    img_ids = torch.randn(4096, 3, dtype=dtype)  # one 3-d position id per image patch

    # The guidance vector is captured by the wrapper's closure, so it is traced
    # into the graph as a constant rather than exposed as an input.
    guidance_scale = 15
    guidance_vec = torch.full(
                        (1,),
                        guidance_scale,
                        dtype=dtype,
                    )

    class TransformerWrapper(torch.nn.Module):
        """Flatten the transformer's keyword-argument interface to positional tensors."""

        def __init__(self, tr):
            super().__init__()
            self.transformer = tr

        def forward(self, hs, t, enc_hs, pooled, txt, img):
            return self.transformer(
                hidden_states=hs,
                timestep=t / 1000,  # integer step divided by 1000 before entering the model
                guidance=guidance_vec,
                encoder_hidden_states=enc_hs,
                pooled_projections=pooled,
                txt_ids=txt,
                img_ids=img,
                return_dict=False,
            )[0]

    wrapper = TransformerWrapper(transformer).cpu()

    tmp_onnx = os.path.join(save_onnx_dir, "transformer_raw.onnx")
    torch.onnx.export(
        wrapper,
        (hidden_states, timestep, encoder_hidden_states, pooled_projections, txt_ids, img_ids),
        tmp_onnx,
        input_names=["hidden_states", "timestep",
                     "encoder_hidden_states", "pooled_projections",
                     "txt_ids", "img_ids"],
        output_names=["noise_pred"],
        dynamic_axes={
            "hidden_states": {0: "batch"},
            "encoder_hidden_states": {0: "batch"},
            "pooled_projections": {0: "batch"},
            "txt_ids": {0: "batch"},
            "img_ids": {0: "batch"},
            "noise_pred": {0: "batch"},
        },
        opset_version=opset,
        do_constant_folding=True,  # fold constants for a smaller/faster graph
        verbose=False,  # set True for export debugging
        # Keep the whole graph in one .onnx file instead of external weight shards.
        # NOTE(review): the `external_data` kwarg is only accepted by
        # torch >= 2.5 -- confirm the installed torch version supports it.
        external_data=False,
        keep_initializers_as_inputs=False,
    )

    print(f"ONNX model saved to {tmp_onnx}")
    print(f"File size: {os.path.getsize(tmp_onnx) / (1024*1024):.2f} MB")
    
def get_onnx():
    """Dispatch the export steps; all but the transformer are disabled for now."""
    # onnx_from_vae()
    # onxx_from_txtencoder1()
    # onnx_from_txtencoder2()
    onnx_from_transformers()
    
    
def preprocess():
    """Placeholder for input preprocessing in the ONNX pipeline (not implemented)."""
    
def postprocess():
    """Placeholder for output postprocessing in the ONNX pipeline (not implemented)."""
    
def pipe_for_onnx():
    """Placeholder for an inference pipeline built on the exported ONNX graphs (not implemented)."""


# Script entry point. Note the pipeline load and component aliases above run
# at import time, not inside main().
if __name__=='__main__':
    main()