#!/usr/bin/env python3
import torch
import torch.onnx
import onnx
import onnxruntime
import numpy as np
from mmdet.apis import init_detector, inference_detector
from mmengine.config import Config
from mmengine.registry import MODELS
from mmengine.structures import InstanceData
import os
import subprocess
import sys
import netron
from onnx.shape_inference import infer_shapes

def create_custom_forward(model):
    """Wrap *model* in a module whose forward() returns plain tensors.

    The stock mmdet forward returns ``DetDataSample`` objects, which the
    ONNX tracer cannot serialize.  This wrapper runs the backbone / neck /
    query head manually and returns ``(bboxes_with_scores, labels)``
    tensors with fixed shapes, which traces cleanly.

    Args:
        model: an initialized mmdet detector (e.g. from ``init_detector``).

    Returns:
        torch.nn.Module: a tracing-friendly wrapper around *model*.
    """
    class CustomModel(torch.nn.Module):
        def __init__(self, original_model):
            super().__init__()
            self.model = original_model

        def forward(self, x):
            # Run the detector's own preprocessing (normalize / pad)
            # without constructing DetDataSample objects.
            data = self.model.data_preprocessor({'inputs': x}, training=False)

            feat = self.model.backbone(data['inputs'])

            # BUG FIX: the original read ``self.model.neck`` unconditionally
            # *before* its hasattr() guard, so a neck-less model would have
            # raised AttributeError anyway.  getattr with a default makes
            # the optional-neck handling actually work.
            neck = getattr(self.model, 'neck', None)
            if neck is not None:
                feat = neck(feat)

            # Static image metadata: the model is exported with a fixed
            # input shape, so these values are hard-coded to match the
            # dummy input used at export time.
            batch_img_metas = [{
                'batch_input_shape': (750, 1333),
                'img_shape': (750, 1333),
                'pad_shape': (750, 1333),
                'img_path': '/disk2/xd/project/mmdetection/demo/demo.jpg',
                'ori_shape': (1080, 1920),
                'scale_factor': (0.6942708333, 0.6944444),
                'img_id': 0,
            }]

            # predict_query_head avoids data-dependent branches that would
            # otherwise bake If-nodes into the exported ONNX graph.
            results_list = self.model.predict_query_head(
                feat, batch_img_metas, rescale=True)

            # Force deterministic output shapes (300 queries per image).
            bboxes = results_list[0].bboxes.reshape([1, 300, 4])
            labels = results_list[0].labels.reshape([1, 300])
            scores = results_list[0].scores.reshape([1, 300, 1])

            # Pack score next to each box: final shape (1, 300, 5).
            bboxes_with_scores = torch.cat((bboxes, scores), dim=2)

            return bboxes_with_scores, labels

    return CustomModel(model)




def main():
    """Load a CO-DETR checkpoint, export it to ONNX, and open it in netron.

    Paths are hard-coded for a specific workstation layout; edit the three
    path constants below for your own environment.
    """
    # I/O paths (adjust for your environment).
    config_file = '/disk2/xd/project/mmdetection/projects/CO-DETR/configs/codino/co_dino_5scale_r50_8xb2_1x_coco.py'
    checkpoint_file = '/disk2/xd/project/mmdetection/work_dirs/co_dino_5scale_r50_8xb2_1x_coco/epoch_12.pth'
    output_file = '/disk2/xd/project/mmdetection/checkpoints/co_detr_custom.onnx'

    # Make sure the target directory exists before torch.onnx.export writes.
    os.makedirs(os.path.dirname(output_file), exist_ok=True)

    # Build the detector on CPU — the ONNX export does not need a GPU.
    cfg = Config.fromfile(config_file)
    model = init_detector(cfg, checkpoint_file, device='cpu')

    # Wrap the detector so forward() returns plain tensors for the tracer.
    custom_model = create_custom_forward(model)
    custom_model.eval()

    # Deterministic settings so repeated exports trace identical graphs.
    torch.manual_seed(42)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    # Fixed-size dummy input; must match the image metas hard-coded in the
    # wrapper's forward().
    dummy_input = torch.randn(1, 3, 750, 1333)

    print("开始导出ONNX...")
    torch.onnx.export(
        custom_model,
        dummy_input,
        output_file,
        export_params=True,
        opset_version=17,  # opset >= 16 is required for grid_sampler support
        do_constant_folding=True,
        input_names=['input'],
        output_names=['bboxes', 'labels'],
        # Fewer graph inputs; helps keep the exported graph simple.
        keep_initializers_as_inputs=False,
        verbose=True,
    )

    print(f"ONNX模型已导出到: {output_file}")

    # Smoke-test: creating an InferenceSession validates the exported graph.
    onnxruntime.InferenceSession(output_file)
    print("ONNX模型验证成功!")

    # Run shape inference and re-save so netron displays tensor shapes.
    # (onnx and infer_shapes are already imported at module level; the
    # original re-imported them locally, which was redundant.)
    inferred_model = infer_shapes(onnx.load(output_file))
    onnx.save(inferred_model, output_file)
    netron.start(output_file)
        
        

# Run the export only when executed as a script (not when imported).
if __name__ == "__main__":
    main() 