import os
import sys
import warnings
import logging
import torch
import onnx
import onnxruntime as ort
import numpy as np


# Now mmdeploy can be imported (NOTE(review): the original comment said it is
# "now safe" to import, but no prior setup is visible here — possibly leftover
# from removed environment-variable configuration; confirm against history)
from mmdeploy.apis import torch2onnx
from mmdeploy.backend.sdk.export_info import export2SDK

def _render_config_pbtxt(model_name, input_dims, output_shapes, max_batch_size):
    """Render the text of Triton's ``config.pbtxt`` for this model.

    Args:
        model_name (str): Triton model name.
        input_dims (str): comma-separated input dims, e.g. ``"3, 512, 512"``.
        output_shapes (dict): ``{"output_name": (C, H, W)}``.
        max_batch_size (int): Triton ``max_batch_size``.

    Returns:
        str: the full config.pbtxt content.
    """
    parts = [f'''name: "{model_name}"
platform: "onnxruntime_onnx"
max_batch_size: {max_batch_size}

version_policy: {{ all {{ }} }}

input [
  {{
    name: "input"
    data_type: TYPE_FP32
    dims: [ {input_dims} ]
  }}
]
''']

    # Output section: one entry per declared output tensor.
    parts.append("output [\n")
    for output_name, output_shape in output_shapes.items():
        parts.append(f'''  {{
    name: "{output_name}"
    data_type: TYPE_FP32
    dims: [ {", ".join(map(str, output_shape))} ]
  }}
''')
    parts.append("]\n\n")

    # Instance group / optimization section (static text; the hash-comments
    # below are comments *inside the generated pbtxt*, kept verbatim).
    parts.append('''instance_group [
  {
    count: 1
    kind: KIND_GPU
  }
]

optimization {
  cuda {
    graphs: true
  }
}

# TensorRT优化配置（可选）
# optimization {
#   execution_accelerators {
#     gpu_execution_accelerator : [ {
#       name : "tensorrt"
#       parameters { key: "precision_mode" value: "FP16" }
#       parameters { key: "max_workspace_size_bytes" value: "1073741824" }
#     }]
#   }
# }

dynamic_batching {
  preferred_batch_size: [1, 2, 4]
  max_queue_delay_microseconds: 100
}

''')

    # BUG FIX: this segment used to be a plain (non-f) string, so the warmup
    # dims were emitted as the literal source text
    # '{", ".join(map(str, input_shape))}' instead of the actual shape.
    parts.append(f'''model_warmup [
  {{
    name: "vessel_segmentation_sample"
    batch_size: 1
    inputs {{
      key: "input"
      value: {{
        data_type: TYPE_FP32
        dims : [{input_dims}]
        zero_data : true
      }}
    }}
  }}
]

# 模型文件位置
# 请确保以下文件存在于模型目录中：
# - 1/model.onnx (ONNX模型文件)
# - config.pbtxt (本配置文件)
''')
    return "".join(parts)


def _render_deployment_guide(model_name, input_dims, output_shapes, onnx_filename):
    """Render the markdown deployment guide that accompanies the config.

    Args:
        model_name (str): Triton model name.
        input_dims (str): comma-separated input dims, e.g. ``"3, 512, 512"``.
        output_shapes (dict): ``{"output_name": (C, H, W)}``.
        onnx_filename (str): name of the exported ONNX file.

    Returns:
        str: the full markdown content.
    """
    parts = [f'''# Triton模型部署说明

## 目录结构
请按以下结构组织模型文件：

```
{model_name}/
├── config.pbtxt          # 模型配置文件（已生成）
└── 1/
    └── model.onnx        # ONNX模型文件（请将 {onnx_filename} 重命名并复制到此处）
```

## 部署步骤

1. 创建模型目录：
   ```bash
   mkdir -p /path/to/triton/models/{model_name}/1
   ```

2. 复制文件：
   ```bash
   cp config.pbtxt /path/to/triton/models/{model_name}/
   cp {onnx_filename} /path/to/triton/models/{model_name}/1/model.onnx
   ```

3. 启动Triton服务器：
   ```bash
   docker run --rm -p 8000:8000 -p 8001:8001 -p 8002:8002 \\
     -v /path/to/triton/models:/models \\
     nvcr.io/nvidia/tritonserver:23.10-py3 \\
     tritonserver --model-repository=/models
   ```

## 推理接口

### HTTP API (端口8000)
```python
import requests
import numpy as np

# 准备输入数据
input_data = np.random.randn(1, {input_dims}).astype(np.float32)

# 发送推理请求
response = requests.post(
    "http://localhost:8000/v2/models/{model_name}/infer",
    json={{
        "inputs": [{{
            "name": "input",
            "shape": [1, {input_dims}],
            "datatype": "FP32",
            "data": input_data.flatten().tolist()
        }}]
    }}
)

result = response.json()
```

### gRPC API (端口8001)
```python
import tritonclient.grpc as grpcclient
import numpy as np

client = grpcclient.InferenceServerClient(url="localhost:8001")

# 准备输入
input_data = np.random.randn(1, {input_dims}).astype(np.float32)
inputs = [grpcclient.InferInput("input", input_data.shape, "FP32")]
inputs[0].set_data_from_numpy(input_data)

# 设置输出
outputs = [''']

    # One requested-output per declared output tensor.
    for output_name in output_shapes:
        parts.append(f'grpcclient.InferRequestedOutput("{output_name}"), ')

    parts.append(f''']

# 执行推理
result = client.infer("{model_name}", inputs, outputs=outputs)

# 获取结果''')

    # One retrieval line per output, annotated with its per-sample shape.
    for output_name, output_shape in output_shapes.items():
        parts.append(f'''
{output_name}_data = result.as_numpy("{output_name}")  # shape: [batch, {", ".join(map(str, output_shape))}]''')

    parts.append('''
```

## 模型输出说明

- **output_lcx**: LCX（左回旋支）分割logits，需要argmax处理获取分割掩码
- **output_lad**: LAD（左前降支）分割logits，需要argmax处理获取分割掩码

### 后处理示例：
```python
import numpy as np

# 获取分割掩码
lcx_mask = np.argmax(output_lcx, axis=1)  # [batch, H, W]
lad_mask = np.argmax(output_lad, axis=1)  # [batch, H, W]

# 获取概率分布
from scipy.special import softmax
lcx_prob = softmax(output_lcx, axis=1)  # [batch, num_classes, H, W]
lad_prob = softmax(output_lad, axis=1)  # [batch, num_classes, H, W]
```
''')
    return "".join(parts)


def generate_triton_config(model_name, input_shape, output_shapes, max_batch_size=8, output_dir=".", onnx_filename="dual_output_model.onnx"):
    """Generate a Triton Inference Server config file and a deployment guide.

    Writes ``config.pbtxt`` and ``triton_deployment_guide.md`` into
    *output_dir*.

    Args:
        model_name (str): model name.
        input_shape (tuple): input shape (C, H, W).
        output_shapes (dict): output shape mapping ``{"output_name": (C, H, W)}``.
        max_batch_size (int): maximum batch size.
        output_dir (str): output directory (must already exist).
        onnx_filename (str): ONNX file name referenced by the guide.

    Returns:
        tuple[str, str]: paths of the written config file and guide.
    """
    # Pre-render the dims once; both documents reference the same string.
    input_dims = ", ".join(map(str, input_shape))

    config_path = os.path.join(output_dir, "config.pbtxt")
    with open(config_path, 'w', encoding='utf-8') as f:
        f.write(_render_config_pbtxt(model_name, input_dims, output_shapes, max_batch_size))

    readme_path = os.path.join(output_dir, "triton_deployment_guide.md")
    with open(readme_path, 'w', encoding='utf-8') as f:
        f.write(_render_deployment_guide(model_name, input_dims, output_shapes, onnx_filename))

    return config_path, readme_path

def setup_logging():
    """Quiet noisy third-party output during export.

    Raises the mmengine logger to ERROR and registers ignore-filters for a
    handful of known warnings emitted while tracing/exporting.
    """
    # Silence mmengine's informational logging.
    logging.getLogger('mmengine').setLevel(logging.ERROR)

    # Known-noisy warnings to suppress, registered in the original order.
    suppressed = (
        dict(category=UserWarning, module='.*anchor_generator.*'),
        dict(category=DeprecationWarning, message='.*anchor_generator.*'),
        dict(category=DeprecationWarning, message='.*get_onnx_config.*'),
        dict(category=torch.jit.TracerWarning),
    )
    for filter_kwargs in suppressed:
        warnings.filterwarnings('ignore', **filter_kwargs)

def verify_dual_output_model(onnx_path):
    """Check that the ONNX graph at *onnx_path* has exactly two outputs.

    Returns True for a dual-output graph, False otherwise (including on any
    load failure, which is reported to stdout).
    """
    try:
        model = onnx.load(onnx_path)
        output_names = [node.name for node in model.graph.output]
        print(f"模型输出节点: {output_names}")

        is_dual = len(output_names) == 2
        if is_dual:
            print("✓ 检测到双输出模型")
        else:
            print(f"✗ 期望2个输出，实际发现{len(output_names)}个输出")
        return is_dual

    except Exception as e:
        # Any load/parse problem is treated as a failed verification.
        print(f"✗ 模型验证失败: {e}")
        return False

def test_onnx_inference(onnx_path, input_shape=(1, 3, 512, 512)):
    """测试ONNX模型推理"""
    try:
        # 创建ONNXRuntime会话
        session = ort.InferenceSession(onnx_path, providers=['CPUExecutionProvider'])
        
        # 获取输入输出信息
        input_name = session.get_inputs()[0].name
        output_names = [output.name for output in session.get_outputs()]
        
        print(f"输入节点: {input_name}")
        print(f"输出节点: {output_names}")
        
        # 创建随机输入数据
        input_data = np.random.randn(*input_shape).astype(np.float32)
        
        # 执行推理
        outputs = session.run(output_names, {input_name: input_data})
        
        print(f"✓ 推理测试成功!")
        for i, (name, output) in enumerate(zip(output_names, outputs)):
            print(f"  输出{i+1} ({name}): shape={output.shape}, dtype={output.dtype}")
            
        return True
        
    except Exception as e:
        print(f"✗ 推理测试失败: {e}")
        return False

def export_onnx_model():
    """Run the dual-output ONNX export pipeline end to end.

    Exports the model via mmdeploy, then (best-effort) verifies the graph,
    smoke-tests inference, and generates Triton deployment assets.

    Returns:
        bool: True once the ONNX file was produced (even if the follow-up
        verification steps report problems), False on a hard failure.
    """
    # Export configuration.
    img = 'demo.png'
    work_dir = 'work_dirs/20250924_170325/onnx'
    save_file = 'dual_output_model.onnx'  # name reflects the dual-output export
    deploy_cfg = 'mmdeploy/configs/mmseg/segmentation_onnxruntime_dynamic_dual_output_logits.py'  # dual-output deploy config
    model_cfg = 'work_dirs/20250924_170325/vis_data/config.py'
    model_checkpoint = 'work_dirs/20250924_170325/epoch_300.pth'
    device = 'cuda'

    # The demo image is optional (warning only); the three config/weight
    # files are mandatory — bail out on the first one that is missing.
    if not os.path.exists(img):
        print(f"警告: 测试图片 {img} 不存在")
    required_files = (
        (model_cfg, f"错误: 模型配置文件 {model_cfg} 不存在"),
        (model_checkpoint, f"错误: 模型权重文件 {model_checkpoint} 不存在"),
        (deploy_cfg, f"错误: 部署配置文件 {deploy_cfg} 不存在"),
    )
    for path, missing_msg in required_files:
        if not os.path.exists(path):
            print(missing_msg)
            return False

    os.makedirs(work_dir, exist_ok=True)

    output_path = os.path.join(work_dir, save_file)
    print("开始导出双输出ONNX模型...")
    print(f"模型配置: {model_cfg}")
    print(f"模型权重: {model_checkpoint}")
    print(f"部署配置: {deploy_cfg}")
    print(f"输出路径: {output_path}")

    try:
        # Step 1: convert the checkpoint to ONNX via mmdeploy.
        torch2onnx(img, work_dir, save_file, deploy_cfg, model_cfg,
                   model_checkpoint, device)

        if not os.path.exists(output_path):
            print("✗ ONNX模型导出失败 - 输出文件不存在")
            return False

        file_size = os.path.getsize(output_path) / (1024 * 1024)  # MB
        print(f"✓ ONNX模型导出成功!")
        print(f"  文件路径: {output_path}")
        print(f"  文件大小: {file_size:.2f} MB")

        # Step 2: structural check — the export itself already succeeded, so
        # from here on failures are reported but the function still returns True.
        print("\n验证模型结构...")
        if not verify_dual_output_model(output_path):
            print("✗ 双输出模型验证失败")
            return True
        print("✓ 双输出模型验证通过")

        # Step 3: CPU inference smoke test.
        print("\n测试推理功能...")
        if not test_onnx_inference(output_path):
            print("✗ 推理功能测试失败")
            return True
        print("✓ 推理功能测试通过")

        # Step 4: emit the Triton config and deployment guide.
        print("\n生成Triton部署配置...")
        try:
            triton_config, readme_path = generate_triton_config(
                model_name="vessel_segmentation_dual",
                input_shape=(3, 512, 512),
                output_shapes={
                    "output_lcx": (2, 512, 512),  # LCX branch, 2 classes
                    "output_lad": (2, 512, 512)   # LAD branch, 2 classes
                },
                max_batch_size=8,
                output_dir=work_dir,
                onnx_filename=save_file
            )
            print(f"✓ Triton配置文件已生成: {triton_config}")
            print(f"✓ 部署指南已生成: {readme_path}")
        except Exception as e:
            print(f"✗ Triton配置生成失败: {e}")

        return True

    except Exception as e:
        print(f"✗ ONNX模型导出失败: {e}")
        import traceback
        traceback.print_exc()
        return False

if __name__ == "__main__":
    # Quiet third-party logging and register warning filters before export.
    setup_logging()

    # Run the end-to-end export pipeline; True means the ONNX file was produced.
    success = export_onnx_model()

    if success:
        print("\n🎉 导出完成! 双输出模型使用说明:")
        print("=" * 60)
        print("📋 模型信息:")
        print("  • 模型类型: 双分支血管分割 (LCX + LAD)")
        print("  • 输出1: output_lcx（左回旋支）- shape: [batch, 2, 512, 512]")
        print("  • 输出2: output_lad（左前降支）- shape: [batch, 2, 512, 512]")
        print("  • 输出格式: logits（需要argmax处理）")
        print()
        print("🔧 验证ONNX Runtime:")
        print("   python -c \"import onnxruntime; print('ONNX Runtime版本:', onnxruntime.__version__)\"")
        print()
        print("🚀 Triton部署:")
        print("  • 配置文件: work_dirs/20250924_170325/onnx/config.pbtxt")
        print("  • 部署指南: work_dirs/20250924_170325/onnx/triton_deployment_guide.md")
        print("  • 模型名称: vessel_segmentation_dual")
        print()
        print("📊 后处理示例:")
        print("   lcx_mask = np.argmax(output_lcx, axis=1)  # 获取LCX分割掩码")
        print("   lad_mask = np.argmax(output_lad, axis=1)  # 获取LAD分割掩码")
        print("=" * 60)
    else:
        print("\n导出失败，请检查上述错误信息")
