import os,sys
osa = os.path.abspath
osd = os.path.dirname
cur_dir = osd(osa(__file__))
par_dir = osd(cur_dir)
sys.path.insert(0,par_dir)

os.environ['CUDA_VISIBLE_DEVICES'] = '3'


from diffusers import FluxControlPipeline
import torch
from utils.MODEL_CKP import FLUX_DEPTH

hidden_states = torch.randn(1, 4096, 128, dtype=torch.bfloat16).cuda()
encoder_hidden_states = torch.randn(1, 1241, 4096, dtype=torch.bfloat16).cuda()
pooled_projections = torch.randn(1, 768, dtype=torch.bfloat16).cuda()
timestep = torch.tensor([0.5], dtype=torch.bfloat16).cuda()
img_ids = torch.randint(0, 100, (4096,3)).cuda()
txt_ids = torch.randint(0, 100, (1241,3)).cuda()
guidance = torch.tensor([3.5], dtype=torch.bfloat16).cuda()

# 修改导出前的输入预处理
def get_trace_inputs():
    return (
        torch.randn(1, 4096, 128, dtype=torch.bfloat16).cuda(),      # hidden_states
        torch.randn(1, 1241, 4096, dtype=torch.bfloat16).cuda(),     # encoder_hidden_states
        torch.randn(1, 768, dtype=torch.bfloat16).cuda(),            # pooled_projections
        torch.tensor([0.5], dtype=torch.bfloat16).cuda(),            # timestep
        torch.randint(0, 100, (4096,3)).cuda(),# img_ids
        torch.randint(0, 100, (1241,3)).cuda(),# txt_ids
        torch.tensor([3.5], dtype=torch.bfloat16).cuda(),            # guidance
    )

example_inputs = get_trace_inputs()


def export():
    # 启用内存优化
    torch.backends.cuda.enable_flash_sdp(True)
    torch.backends.cuda.enable_math_sdp(True)
    
    # 使用更小的batch size和序列长度
    # small_input = example_inputs[:, :1024, :]  # 截取前1024个token
    # 加载原始 pipeline（无需GPU）
    pipe = FluxControlPipeline.from_pretrained(FLUX_DEPTH, torch_dtype=torch.bfloat16).to("cuda")

    # 提取 transformer 模型
    transformer = pipe.transformer.eval()

    ## 导出之前 验证 example input  的 可用性
    '''
    Transformer forward pass successful. Output type: <class 'tuple'>
    Output is tuple/list, first element shape: torch.Size([1, 4096, 64])
    '''

    # 确保模型处于eval模式
    transformer.eval()

    # Try example input validation
    # 在导出前禁用梯度检查点
    # transformer.gradient_checkpointing = False

    # with torch.no_grad():
    #     try:
    #         output = transformer(*example_inputs)
    #         print("Transformer forward pass successful. Output type:", type(output))
    #         if isinstance(output, (tuple, list)):
    #             print("Output is tuple/list, first element shape:", output[0].shape)
    #         else:
    #             print("Output shape:", output.shape)
    #     except Exception as e:
    #         print("Transformer forward pass failed:", e)

    # return 

    export_flux_transformer(transformer, output_dir='demo_export/chunks_exported_transformers')

def export_flux_transformer(transformer, output_dir="flux_chunks"):
    import os
    os.makedirs(output_dir, exist_ok=True)
    
    pos_input = torch.concat([
        torch.randint(0, 100, (4096,3)).cuda(),# img_ids
        torch.randint(0, 100, (1241,3)).cuda(),# txt_ids
    ],dim=0)
    traced = torch.jit.trace(transformer.pos_embed, pos_input)
    pos_embed_path = os.path.join(output_dir, "pos_embed.pt")
    torch.jit.save(traced, pos_embed_path)
    # 立即加载并测试
    loaded_pos_embed = torch.jit.load(pos_embed_path)
    try:
        _ = loaded_pos_embed(pos_input)
        print("pos_embed.pt loaded and ran successfully")
    except Exception as e:
        print("pos_embed.pt failed:", e)

    # 对其他组件采用相同方式
    time_text_input = (timestep, guidance, pooled_projections)
    traced = torch.jit.trace(transformer.time_text_embed, time_text_input)
    time_text_embed_path = os.path.join(output_dir, "time_text_embed.pt")
    torch.jit.save(traced, time_text_embed_path)
    loaded_time_text_embed = torch.jit.load(time_text_embed_path)
    try:
        _ = loaded_time_text_embed(*time_text_input)
        print("time_text_embed.pt loaded and ran successfully")
    except Exception as e:
        print("time_text_embed.pt failed:", e)

    context_input = encoder_hidden_states
    traced = torch.jit.trace(transformer.context_embedder, context_input)
    context_embedder_path = os.path.join(output_dir, "context_embedder.pt")
    torch.jit.save(traced, context_embedder_path)
    loaded_context_embedder = torch.jit.load(context_embedder_path)
    try:
        _ = loaded_context_embedder(context_input)
        print("context_embedder.pt loaded and ran successfully")
    except Exception as e:
        print("context_embedder.pt failed:", e)

    x_embedder_input = hidden_states
    traced = torch.jit.trace(transformer.x_embedder, x_embedder_input)
    x_embedder_path = os.path.join(output_dir, "x_embedder.pt")
    torch.jit.save(traced, x_embedder_path)
    loaded_x_embedder = torch.jit.load(x_embedder_path)
    try:
        _ = loaded_x_embedder(x_embedder_input)
        print("x_embedder.pt loaded and ran successfully")
    except Exception as e:
        print("x_embedder.pt failed:", e)

    # 构造 每一个block的输入
    # 2. 导出主Transformer块（分块处理）
    block_input = (
        torch.randn(1, 4096, 3072, dtype=torch.bfloat16).cuda(),      # hidden_states
        torch.randn(1, 1241, 3072, dtype=torch.bfloat16).cuda(),     # encoder_hidden_states
        torch.randn(1, 3072, dtype=torch.bfloat16).cuda(),     # temb
        (   torch.randn(5337, 128, dtype=torch.bfloat16).cuda(), 
            torch.randn(5337, 128, dtype=torch.bfloat16).cuda() ),            # rotate img
    )
    for i, block in enumerate(transformer.transformer_blocks):
        traced_block = torch.jit.trace(block, block_input)
        block_path = os.path.join(output_dir, f"transformer_block_{i}.pt")
        torch.jit.save(traced_block, block_path)
        # 立即加载并测试
        loaded_block = torch.jit.load(block_path)
        try:
            _ = loaded_block(*block_input)
            print(f"transformer_block_{i}.pt loaded and ran successfully")
        except Exception as e:
            print(f"transformer_block_{i}.pt failed:", e)

    # 3. 导出单Transformer块（分块处理）
    for i, block in enumerate(transformer.single_transformer_blocks):
        traced_block = torch.jit.trace(block, block_input)
        block_path = os.path.join(output_dir, f"single_block_{i}.pt")
        torch.jit.save(traced_block, block_path)
        # 立即加载并测试
        loaded_block = torch.jit.load(block_path)
        try:
            _ = loaded_block(*block_input)
            print(f"single_block_{i}.pt loaded and ran successfully")
        except Exception as e:
            print(f"single_block_{i}.pt failed:", e)

    # 4. 导出输出部分
    norm_input= (
        torch.randn(1, 4096, 3072, dtype=torch.bfloat16).cuda(),      # hidden_states
        torch.randn(1, 3072, dtype=torch.bfloat16).cuda(),     # temb
    )
    traced = torch.jit.trace(transformer.norm_out, norm_input)
    norm_path = os.path.join(output_dir, "norm.pt")
    torch.jit.save(traced, norm_path)
    loaded_norm = torch.jit.load(norm_path)
    try:
        _ = loaded_norm(*norm_input)
        print("norm.pt loaded and ran successfully")
    except Exception as e:
        print("norm.pt failed:", e)

    proj_input= torch.randn(1, 4096, 3072, dtype=torch.bfloat16).cuda()
    traced = torch.jit.trace(transformer.proj_out, proj_input)
    proj_path = os.path.join(output_dir, "proj.pt")
    torch.jit.save(traced, proj_path)
    loaded_proj = torch.jit.load(proj_path)
    try:
        _ = loaded_proj(proj_input)
        print("proj.pt loaded and ran successfully")
    except Exception as e:
        print("proj.pt failed:", e)


def test_exported():
    # 在python代码中先加载 这个traced 试试
    model = torch.jit.load("demo_export/traced.pt")
    model = model.to('cuda')
    output = model(*example_inputs)  # inputs也应在GPU上
    print(output.shape)

def test_exported_chunks():
    # 读取 demo_export/chunks_exported_transformers 下面的内容
    # 搭建 transformer 并测试 输入--输出
    ''
import torch.nn as nn
class ReconstructedFluxTransformer():
    def __init__(self, parts_dir="demo_export/chunks_exported_transformers"):

        self.parts_dir = parts_dir
        self.load_components()
    
    def load_components(self):
        # 加载所有组件
        self.pos_embed = torch.jit.load(os.path.join(self.parts_dir, "pos_embed.pt"))
        self.time_text_embed = torch.jit.load(os.path.join(self.parts_dir, "time_text_embed.pt"))
        self.context_embedder = torch.jit.load(os.path.join(self.parts_dir, "context_embedder.pt"))
        self.x_embedder = torch.jit.load(os.path.join(self.parts_dir, "x_embedder.pt"))
        
        # 加载transformer blocks
        self.transformer_blocks = []
        i = 0
        while os.path.exists(os.path.join(self.parts_dir, f"transformer_block_{i}.pt")):
            block = torch.jit.load(os.path.join(self.parts_dir, f"transformer_block_{i}.pt"))
            self.transformer_blocks.append(block)
            i += 1
            
        # 加载single transformer blocks
        self.single_transformer_blocks = []
        i = 0
        while os.path.exists(os.path.join(self.parts_dir, f"single_block_{i}.pt")):
            block = torch.jit.load(os.path.join(self.parts_dir, f"single_block_{i}.pt"))
            self.single_transformer_blocks.append(block)
            i += 1
            
        # 加载输出部分
        self.norm_out = torch.jit.load(os.path.join(self.parts_dir, "norm.pt"))
        self.proj_out = torch.jit.load(os.path.join(self.parts_dir, "proj.pt"))
    
    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor = None,
        pooled_projections: torch.Tensor = None,
        timestep: torch.LongTensor = None,
        img_ids: torch.Tensor = None,
        txt_ids: torch.Tensor = None,
        guidance: torch.Tensor = None,
    ) -> torch.Tensor:
        
        # 前向传播各组件
        hidden_states = self.x_embedder(hidden_states)
        
        timestep = timestep.to(hidden_states.dtype) * 1000
        if guidance is not None:
            guidance = guidance.to(hidden_states.dtype) * 1000
        
        temb = (
            self.time_text_embed(timestep, guidance, pooled_projections)
        )
        
        encoder_hidden_states = self.context_embedder(encoder_hidden_states)
        
        ids = torch.cat((txt_ids, img_ids), dim=0)
        image_rotary_emb = self.pos_embed(ids)
        
        
        # transformer blocks
        for index_block, block in enumerate(self.transformer_blocks):
            encoder_hidden_states, hidden_states = block(
                hidden_states=hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                temb=temb,
                image_rotary_emb=image_rotary_emb,
            )
            
        
        # single transformer blocks
        for index_block, block in enumerate(self.single_transformer_blocks):
            encoder_hidden_states, hidden_states = block(
                hidden_states=hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                temb=temb,
                image_rotary_emb=image_rotary_emb,
            )
            
        # 输出部分
        hidden_states = self.norm_out(hidden_states, temb)
        output = self.proj_out(hidden_states)
        
        return output[0] if isinstance(output, tuple) else output

# 测试代码
def test_reconstructed_transformer():
    # 创建测试输入 (需要根据你的实际输入形状调整)
    '''
    torch.randn(1, 4096, 128, dtype=torch.bfloat16).cuda(),      # hidden_states
    torch.randn(1, 1241, 4096, dtype=torch.bfloat16).cuda(),     # encoder_hidden_states
    torch.randn(1, 768, dtype=torch.bfloat16).cuda(),            # pooled_projections
    torch.tensor([0.5], dtype=torch.bfloat16).cuda(),            # timestep
    torch.randint(0, 100, (4096,3)).cuda(),# img_ids
    torch.randint(0, 100, (1241,3)).cuda(),# txt_ids
    torch.tensor([3.5], dtype=torch.bfloat16).cuda(),            # guidance
    '''
    hidden_states = torch.randn(1, 4096, 128, dtype=torch.bfloat16).cuda()
    encoder_hidden_states = torch.randn(1, 1241, 4096, dtype=torch.bfloat16).cuda()
    pooled_projections = torch.randn(1, 768, dtype=torch.bfloat16).cuda()
    timestep = torch.tensor([0.5], dtype=torch.long).cuda()
    img_ids = torch.randint(0, 100, (4096, 3), dtype=torch.long).cuda()
    txt_ids = torch.randint(0, 100, (1241, 3), dtype=torch.long).cuda()
    guidance = torch.tensor([3.5], dtype=torch.bfloat16).cuda()

    # 重建transformer
    transformer = ReconstructedFluxTransformer("demo_export/chunks_exported_transformers")
    # transformer = transformer.eval()
    
    # 运行前向传播
    import time
    times = []
    for i in range(10):
        torch.cuda.synchronize()
        start = time.time()
        with torch.no_grad():
            output = transformer.forward(
                hidden_states=hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                pooled_projections=pooled_projections,
                timestep=timestep,
                img_ids=img_ids,
                txt_ids=txt_ids,
                guidance=guidance,
            )
        torch.cuda.synchronize()
        end = time.time()
        times.append(end - start)
        print(f"Run {i+1}: {end - start:.4f} seconds")
    print("Average time:", sum(times)/len(times), "seconds")
    # with torch.no_grad():
    #     output = transformer.forward(
    #         hidden_states=hidden_states,
    #         encoder_hidden_states=encoder_hidden_states,
    #         pooled_projections=pooled_projections,
    #         timestep=timestep,
    #         img_ids=img_ids,
    #         txt_ids=txt_ids,
    #         guidance=guidance,
    #     )
    
    print("Output shape:", output.shape) # Output shape: torch.Size([1, 4096, 64])
    return output


if __name__=='__main__':
    # export()
    # test_exported_chunks()
    test_reconstructed_transformer()
