#!/usr/bin/env python3
"""
构建Co-DETR混合精度TensorRT引擎
精确控制敏感层保持FP32精度
"""

import os
import sys
import json
import tensorrt as trt
import numpy as np
import argparse

class SensitiveLayerBuilder:
    """Build TensorRT engines while pinning precision-sensitive layers to FP32.

    Reads a JSON config containing a ``sensitive_layers`` list (each entry a
    dict with at least ``name``, ``type`` and ``reason``) and can produce
    either a mixed-precision engine (FP16 with selective FP32 layers) or a
    pure FP32 reference engine from an ONNX model.
    """

    # Builder workspace limit (16 GB), matching the original hard-coded value.
    WORKSPACE_BYTES = 16 * 1024 * 1024 * 1024

    def __init__(self, config_file):
        """Load the sensitive-layer config and create the TensorRT logger.

        Args:
            config_file: Path to the JSON configuration file.
        """
        self.config = self.load_config(config_file)
        # Index sensitive layers by name for O(1) lookup while walking the network.
        self.sensitive_layers = {layer['name']: layer for layer in self.config['sensitive_layers']}
        self.logger = trt.Logger(trt.Logger.WARNING)

    def load_config(self, config_file):
        """Parse and return the JSON configuration file as a dict."""
        with open(config_file, 'r', encoding='utf-8') as f:
            return json.load(f)

    # ------------------------------------------------------------------ #
    # Internal helpers (shared by both build paths)                       #
    # ------------------------------------------------------------------ #

    def _create_config(self, builder):
        """Create a builder config with the workspace limit applied.

        Uses ``set_memory_pool_limit`` when available (TensorRT >= 8.4);
        ``max_workspace_size`` was removed in newer releases, so it is only
        used as a legacy fallback.
        """
        config = builder.create_builder_config()
        if hasattr(config, 'set_memory_pool_limit'):
            config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, self.WORKSPACE_BYTES)
        else:
            config.max_workspace_size = self.WORKSPACE_BYTES
        return config

    def _parse_onnx(self, builder, onnx_path):
        """Parse the ONNX model into a new explicit-batch network.

        Returns:
            The parsed ``INetworkDefinition`` on success, or ``None`` after
            printing the parser errors on failure.
        """
        network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
        parser = trt.OnnxParser(network, self.logger)

        with open(onnx_path, 'rb') as f:
            if not parser.parse(f.read()):
                print("❌ ONNX解析失败")
                for error in range(parser.num_errors):
                    print(parser.get_error(error))
                return None

        print(f"✅ ONNX模型解析成功")
        print(f"网络层数: {network.num_layers}")
        return network

    def _build_serialized(self, builder, network, config):
        """Build the engine and return its serialized bytes (None on failure).

        Prefers ``build_serialized_network`` (TensorRT >= 8.0); falls back to
        the legacy ``build_engine`` + ``serialize`` path on older releases
        where the newer API does not exist.
        """
        if hasattr(builder, 'build_serialized_network'):
            serialized = builder.build_serialized_network(network, config)
            return None if serialized is None else bytes(serialized)
        engine = builder.build_engine(network, config)
        return None if engine is None else engine.serialize()

    # ------------------------------------------------------------------ #
    # Public build / analysis API                                         #
    # ------------------------------------------------------------------ #

    def build_mixed_precision_engine(self, onnx_path, output_path):
        """Build a mixed-precision engine (FP16 with sensitive layers in FP32).

        Args:
            onnx_path: Input ONNX model path.
            output_path: Destination path for the serialized engine.

        Returns:
            True on success, False on parse/build failure.
        """
        print(f"=== 构建混合精度TensorRT引擎 ===")
        print(f"ONNX模型: {onnx_path}")
        print(f"输出引擎: {output_path}")
        print(f"敏感层数量: {len(self.sensitive_layers)}")

        builder = trt.Builder(self.logger)
        config = self._create_config(builder)

        # Enable FP16 where the hardware supports it.
        if builder.platform_has_fast_fp16:
            config.set_flag(trt.BuilderFlag.FP16)
            print("✅ FP16已启用")
        else:
            print("⚠️ 平台不支持FP16")

        # BUG FIX: without a precision-constraint flag, TensorRT treats
        # per-layer precision settings as hints and may silently run the
        # sensitive layers in FP16 anyway. OBEY_PRECISION_CONSTRAINTS
        # (TensorRT >= 8.0) or the older STRICT_TYPES forces the builder to
        # honor layer.precision / set_output_type.
        if hasattr(trt.BuilderFlag, 'OBEY_PRECISION_CONSTRAINTS'):
            config.set_flag(trt.BuilderFlag.OBEY_PRECISION_CONSTRAINTS)
        elif hasattr(trt.BuilderFlag, 'STRICT_TYPES'):
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)

        config.set_flag(trt.BuilderFlag.DIRECT_IO)

        network = self._parse_onnx(builder, onnx_path)
        if network is None:
            return False

        # Pin the configured sensitive layers to FP32 before building.
        self.set_sensitive_layers_to_fp32(network)

        print("开始构建引擎...")
        serialized = self._build_serialized(builder, network, config)
        if serialized is None:
            print("❌ 引擎构建失败")
            return False

        with open(output_path, 'wb') as f:
            f.write(serialized)

        print(f"✅ 混合精度引擎已保存: {output_path}")
        return True

    def set_sensitive_layers_to_fp32(self, network):
        """Force every configured sensitive layer in *network* to FP32.

        Sets both the layer compute precision and its first output type so
        the constraint survives builder type propagation.
        """
        print("设置敏感层为FP32精度...")

        fp32_layer_count = 0
        for i in range(network.num_layers):
            layer = network.get_layer(i)

            if layer.name in self.sensitive_layers:
                layer.precision = trt.DataType.FLOAT
                layer.set_output_type(0, trt.DataType.FLOAT)

                sensitive_info = self.sensitive_layers[layer.name]
                print(f"  FP32层: {layer.name} ({sensitive_info['type']})")
                print(f"    原因: {sensitive_info['reason']}")

                fp32_layer_count += 1

        print(f"✅ 设置了 {fp32_layer_count} 个敏感层为FP32精度")

    def build_fp32_engine(self, onnx_path, output_path):
        """Build a pure FP32 engine as an accuracy baseline.

        Args:
            onnx_path: Input ONNX model path.
            output_path: Destination path for the serialized engine.

        Returns:
            True on success, False on parse/build failure.
        """
        print(f"\n=== 构建FP32 TensorRT引擎 ===")
        print(f"ONNX模型: {onnx_path}")
        print(f"输出引擎: {output_path}")

        builder = trt.Builder(self.logger)
        config = self._create_config(builder)

        # Deliberately no FP16 flag: everything stays FP32.
        print("使用FP32精度")

        config.set_flag(trt.BuilderFlag.DIRECT_IO)

        network = self._parse_onnx(builder, onnx_path)
        if network is None:
            return False

        print("开始构建FP32引擎...")
        serialized = self._build_serialized(builder, network, config)
        if serialized is None:
            print("❌ FP32引擎构建失败")
            return False

        with open(output_path, 'wb') as f:
            f.write(serialized)

        print(f"✅ FP32引擎已保存: {output_path}")
        return True

    def analyze_engine_info(self, engine_path):
        """Deserialize an engine file and print a layer/precision summary.

        NOTE(review): a deserialized ICudaEngine does not expose per-layer
        objects through the Python API (there is no ``get_layer``), and
        ``num_layers`` / ``max_batch_size`` are deprecated or removed in
        newer TensorRT releases. The original code crashed here with
        AttributeError; this version degrades gracefully and reports only
        what the installed TensorRT actually exposes.

        Returns:
            A summary dict with total layer count, FP32/FP16 layer names and
            a layer-type histogram (empty when the API exposes no layers).
        """
        print(f"\n=== 分析引擎: {engine_path} ===")

        with open(engine_path, 'rb') as f:
            engine_data = f.read()

        runtime = trt.Runtime(self.logger)
        engine = runtime.deserialize_cuda_engine(engine_data)

        # Legacy attributes: present on old TensorRT only.
        num_layers = getattr(engine, 'num_layers', 0)
        print(f"引擎层数: {num_layers}")
        if hasattr(engine, 'max_batch_size'):
            print(f"最大批量大小: {engine.max_batch_size}")

        layer_types = {}
        fp32_layers = []
        fp16_layers = []

        # BUG FIX: ICudaEngine never had get_layer(); guard so analysis does
        # not raise AttributeError on every TensorRT version.
        if hasattr(engine, 'get_layer'):
            for i in range(num_layers):
                layer = engine.get_layer(i)
                layer_type = str(layer.type)
                layer_types[layer_type] = layer_types.get(layer_type, 0) + 1

                if hasattr(layer, 'precision'):
                    if layer.precision == trt.DataType.FLOAT:
                        fp32_layers.append(layer.name)
                    elif layer.precision == trt.DataType.HALF:
                        fp16_layers.append(layer.name)

        print(f"层类型分布:")
        for layer_type, count in layer_types.items():
            print(f"  {layer_type}: {count}")

        print(f"FP32层数量: {len(fp32_layers)}")
        print(f"FP16层数量: {len(fp16_layers)}")

        if fp32_layers:
            print(f"FP32层示例:")
            for layer_name in fp32_layers[:5]:
                print(f"  {layer_name}")

        return {
            'total_layers': num_layers,
            'fp32_layers': fp32_layers,
            'fp16_layers': fp16_layers,
            'layer_types': layer_types
        }

def main():
    """CLI entry point: build mixed-precision / FP32 engines, or only analyze
    previously built engine files when --analyze-only is given."""
    arg_parser = argparse.ArgumentParser(description='构建Co-DETR混合精度TensorRT引擎')
    arg_parser.add_argument('--onnx', '-i', default='co_detr_sim.onnx',
                            help='输入ONNX模型文件路径')
    arg_parser.add_argument('--config', '-c', default='co_detr_layernorm_split_fixed_tensorrt_config.json',
                            help='TensorRT配置文件路径')
    arg_parser.add_argument('--output-dir', '-o', default='./',
                            help='输出目录')
    arg_parser.add_argument('--analyze-only', action='store_true',
                            help='仅分析现有引擎，不构建新引擎')

    args = arg_parser.parse_args()

    # Guard clauses: bail out early when required inputs are missing.
    if not os.path.exists(args.onnx):
        print(f"❌ ONNX文件不存在: {args.onnx}")
        return

    if not os.path.exists(args.config):
        print(f"❌ 配置文件不存在: {args.config}")
        return

    os.makedirs(args.output_dir, exist_ok=True)

    mixed_engine_path = os.path.join(args.output_dir, "co_detr_mixed_precision.plan")
    fp32_engine_path = os.path.join(args.output_dir, "co_detr_fp32.plan")

    if args.analyze_only:
        # Inspect whichever engine files already exist; skip missing ones.
        for engine_path in (mixed_engine_path, fp32_engine_path):
            if os.path.exists(engine_path):
                SensitiveLayerBuilder(args.config).analyze_engine_info(engine_path)
    else:
        engine_builder = SensitiveLayerBuilder(args.config)

        # Build the mixed-precision engine, then analyze it on success.
        if engine_builder.build_mixed_precision_engine(args.onnx, mixed_engine_path):
            engine_builder.analyze_engine_info(mixed_engine_path)

        # Build the pure FP32 baseline, then analyze it on success.
        if engine_builder.build_fp32_engine(args.onnx, fp32_engine_path):
            engine_builder.analyze_engine_info(fp32_engine_path)

    print("\n🎉 引擎构建和分析完成!")

# Script entry point: run the CLI only when executed directly, not on import.
if __name__ == "__main__":
    main()