import tensorrt as trt
import numpy as np
import onnx
import os
from pathlib import Path
import cv2
from tqdm import tqdm
import json
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import tempfile
import glob

class TensorRTEngineBuilder:
    """Builds a TensorRT engine from a YOLO ONNX model with NMS-style
    post-processing layers appended to the network graph."""
    
    def __init__(self, logger_level=trt.Logger.WARNING):
        # Single TensorRT logger shared by builder, parser and runtime.
        self.logger = trt.Logger(logger_level)
    
    def build_engine_with_nms(self, onnx_path, engine_path, num_classes=80, 
                             conf_threshold=0.5, nms_threshold=0.4, max_output_boxes=100):
        """Build a complete TensorRT engine that includes NMS post-processing.

        Parses the ONNX model, replaces its raw YOLO output with four
        NMS-style outputs (num_detections, detection_boxes, detection_scores,
        detection_classes), serializes the engine to ``engine_path`` and
        returns the deserialized engine object, or ``None`` on any failure.
        """
        
        builder = trt.Builder(self.logger)
        # EXPLICIT_BATCH is required for networks created from ONNX.
        network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
        parser = trt.OnnxParser(network, self.logger)
        
        # Parse the ONNX model; dump parser errors and bail out on failure.
        print(f"正在解析ONNX文件: {onnx_path}")
        with open(onnx_path, 'rb') as model:
            if not parser.parse(model.read()):
                print("解析ONNX失败:")
                for error in range(parser.num_errors):
                    print(parser.get_error(error))
                return None
        
        print("ONNX模型解析成功")
        
        # Grab the original YOLO head output (downstream code assumes
        # shape [1, 84, 8400] — 4 box coords + 80 class scores per anchor).
        yolo_output = network.get_output(0)
        print(f"原始YOLO输出形状: {yolo_output.shape}")
        
        # Unmark it so only the NMS outputs become engine outputs.
        network.unmark_output(yolo_output)
        
        # Append the NMS sub-graph onto the network.
        nms_outputs = self.add_yolo_nms(network, yolo_output, num_classes, 
                                       conf_threshold, nms_threshold, max_output_boxes)
        
        if nms_outputs is None:
            print("添加NMS失败")
            return None
        
        # Mark and name the four new outputs.
        output_names = ['num_detections', 'detection_boxes', 'detection_scores', 'detection_classes']
        for i, output in enumerate(nms_outputs):
            network.mark_output(output)
            output.name = output_names[i]
        
        # Builder configuration: 1 GB workspace memory pool.
        config = builder.create_builder_config()
        config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, 1 << 30)  # 1GB
        
        # Enable FP16 kernels when the platform has fast FP16 support.
        if builder.platform_has_fast_fp16:
            print("启用FP16优化")
            config.set_flag(trt.BuilderFlag.FP16)
        
        # Build the serialized engine plan.
        print("正在构建引擎...")
        serialized_engine = builder.build_serialized_network(network, config)
        
        if serialized_engine is None:
            print("构建引擎失败")
            return None
        
        # Persist the engine plan to disk, creating the directory if needed.
        print(f"保存引擎到: {engine_path}")
        os.makedirs(os.path.dirname(engine_path), exist_ok=True)
        with open(engine_path, 'wb') as f:
            f.write(serialized_engine)
        
        # Deserialize and return a live engine object.
        runtime = trt.Runtime(self.logger)
        engine = runtime.deserialize_cuda_engine(serialized_engine)
        return engine
    
    def add_yolo_nms(self, network, yolo_output, num_classes=80, conf_threshold=0.5, 
                     nms_threshold=0.4, max_output_boxes=100):
        """
        Append NMS-style processing for a YOLO output of shape [1,84,8400].

        NOTE(review): this is a simplified, TopK-based approximation of NMS.
        ``conf_threshold`` and ``nms_threshold`` are accepted but not actually
        enforced by the downstream steps (see apply_confidence_filter and
        apply_native_nms) — no IoU suppression is performed.
        """
        print("开始添加NMS层...")
        
        # Step 1: transpose [1,84,8400] -> [1,8400,84] so each row is one candidate.
        transpose_layer = network.add_shuffle(yolo_output)
        transpose_layer.first_transpose = (0, 2, 1)
        transposed_output = transpose_layer.get_output(0)
        print(f"转置后形状: {transposed_output.shape}")
        
        # Step 2: split box coordinates from class probabilities.
        # Box coordinates [1,8400,4] in (cx, cy, w, h) layout.
        slice_boxes = network.add_slice(transposed_output, (0, 0, 0), (1, 8400, 4), (1, 1, 1))
        boxes = slice_boxes.get_output(0)
        
        # Class probabilities [1,8400,num_classes].
        slice_classes = network.add_slice(transposed_output, (0, 0, 4), (1, 8400, num_classes), (1, 1, 1))
        class_probs = slice_classes.get_output(0)
        
        # Step 3: convert box format (center_x, center_y, w, h) -> (x1, y1, x2, y2).
        boxes_converted = self.convert_yolo_boxes(network, boxes)
        
        # Step 4: per-candidate best class score and class id.
        final_scores, class_ids = self.compute_scores_and_classes(network, class_probs)
        
        # Step 5: confidence filtering (currently a pass-through — see method docstring).
        filtered_boxes, filtered_scores, filtered_classes = self.apply_confidence_filter(
            network, boxes_converted, final_scores, class_ids, conf_threshold)
        
        # Step 6: "NMS" implemented with native TopK/Gather layers.
        nms_outputs = self.apply_native_nms(network, filtered_boxes, filtered_scores, filtered_classes,
                                          nms_threshold, max_output_boxes)
        
        return nms_outputs
    
    def convert_yolo_boxes(self, network, boxes):
        """Convert YOLO box format (center_x, center_y, w, h) -> (x1, y1, x2, y2)."""
        
        # Slice out each coordinate component as a [1,8400,1] tensor.
        center_x = network.add_slice(boxes, (0, 0, 0), (1, 8400, 1), (1, 1, 1)).get_output(0)
        center_y = network.add_slice(boxes, (0, 0, 1), (1, 8400, 1), (1, 1, 1)).get_output(0) 
        width = network.add_slice(boxes, (0, 0, 2), (1, 8400, 1), (1, 1, 1)).get_output(0)
        height = network.add_slice(boxes, (0, 0, 3), (1, 8400, 1), (1, 1, 1)).get_output(0)
        
        # Half width / half height via element-wise division by a constant 2.0.
        half_w = network.add_elementwise(width, 
                                        network.add_constant((1, 1, 1), np.array([2.0], dtype=np.float32)).get_output(0),
                                        trt.ElementWiseOperation.DIV).get_output(0)
        half_h = network.add_elementwise(height,
                                        network.add_constant((1, 1, 1), np.array([2.0], dtype=np.float32)).get_output(0),
                                        trt.ElementWiseOperation.DIV).get_output(0)
        
        # Corner coordinates (SUM is element-wise addition in TensorRT).
        x1 = network.add_elementwise(center_x, half_w, trt.ElementWiseOperation.SUB).get_output(0)
        y1 = network.add_elementwise(center_y, half_h, trt.ElementWiseOperation.SUB).get_output(0)
        x2 = network.add_elementwise(center_x, half_w, trt.ElementWiseOperation.SUM).get_output(0)
        y2 = network.add_elementwise(center_y, half_h, trt.ElementWiseOperation.SUM).get_output(0)
        
        # Concatenate along the last axis back to [1,8400,4].
        concat_layer = network.add_concatenation([x1, y1, x2, y2])
        concat_layer.axis = 2
        
        return concat_layer.get_output(0)
    
    def compute_scores_and_classes(self, network, class_probs):
        """Compute the best class score and class id for every candidate."""
        # TopK(k=1) over axis 2 (bitmask 1 << 2) gives max class prob and its index.
        topk_layer = network.add_topk(class_probs, trt.TopKOperation.MAX, 1, 1 << 2)
        max_class_probs = topk_layer.get_output(0)  # [1,8400,1] values
        class_ids = topk_layer.get_output(1)        # [1,8400,1] indices
        
        # Reshape scores to [1,8400].
        scores_reshape = network.add_shuffle(max_class_probs)
        scores_reshape.reshape_dims = (1, 8400)
        final_scores = scores_reshape.get_output(0)
        
        # Reshape class ids to [1,8400].
        classes_reshape = network.add_shuffle(class_ids)
        classes_reshape.reshape_dims = (1, 8400)
        final_classes = classes_reshape.get_output(0)
        
        return final_scores, final_classes
    
    def apply_confidence_filter(self, network, boxes, scores, classes, conf_threshold):
        """Confidence filtering placeholder.

        NOTE(review): this is effectively a no-op — a comparison layer is
        built but its output (``mask``) is never consumed, and the inputs are
        returned unchanged. GREATER is also a strict ``>`` comparison, not
        the ``>=`` the original comment claimed.
        """
        # Constant tensor holding the threshold for every candidate.
        threshold_const = network.add_constant((1, 8400), np.full((1, 8400), conf_threshold, dtype=np.float32))
        threshold_tensor = threshold_const.get_output(0)
        
        # Element-wise comparison: scores > threshold (result unused).
        compare_layer = network.add_elementwise(scores, threshold_tensor, trt.ElementWiseOperation.GREATER)
        mask = compare_layer.get_output(0)
        
        # Due to TensorRT layer limitations the original data is returned
        # unchanged; the intended filtering is approximated by the score-based
        # TopK selection in apply_native_nms.
        return boxes, scores, classes
    
    def apply_native_nms(self, network, boxes, scores, classes, nms_threshold, max_output_boxes):
        """Simplified "NMS" using native TopK/Gather layers.

        NOTE(review): no IoU-based suppression is performed and
        ``nms_threshold`` is unused — this only keeps the top
        ``max_output_boxes`` candidates by score.
        """
        
        # Step 1: TopK over axis 1 (bitmask 1 << 1) selects the highest-scoring candidates.
        topk_layer = network.add_topk(scores, trt.TopKOperation.MAX, max_output_boxes, 1 << 1)
        top_scores = topk_layer.get_output(0)  # [1, max_output_boxes]
        top_indices = topk_layer.get_output(1)  # [1, max_output_boxes]
        
        # Step 2: gather the corresponding boxes and classes.
        # Reshape indices to [1, max_output_boxes, 1] for the gather layers.
        indices_reshape = network.add_shuffle(top_indices)
        indices_reshape.reshape_dims = (1, max_output_boxes, 1)
        reshaped_indices = indices_reshape.get_output(0)
        
        # Gather boxes along axis 1.
        gather_boxes = network.add_gather(boxes, reshaped_indices, 1)
        top_boxes = gather_boxes.get_output(0)  # [1, max_output_boxes, 4]
        
        # Gather classes — expand classes to 3D first so axes line up.
        classes_expanded = network.add_shuffle(classes)
        classes_expanded.reshape_dims = (1, 8400, 1)
        classes_3d = classes_expanded.get_output(0)
        
        gather_classes = network.add_gather(classes_3d, reshaped_indices, 1)
        top_classes_3d = gather_classes.get_output(0)  # [1, max_output_boxes, 1]
        
        # Squeeze classes back to [1, max_output_boxes].
        classes_final_reshape = network.add_shuffle(top_classes_3d)
        classes_final_reshape.reshape_dims = (1, max_output_boxes)
        top_classes = classes_final_reshape.get_output(0)
        
        # Step 3: detection count — simplified to a constant max_output_boxes
        # (callers must filter by score; no real count is computed).
        num_detections_const = network.add_constant((1,), np.array([max_output_boxes], dtype=np.int32))
        num_detections = num_detections_const.get_output(0)
        
        return [num_detections, top_boxes, top_scores, top_classes]

class TensorRTInference:
    """Wrapper around a serialized TensorRT engine: loads the engine,
    preprocesses images, and runs inference via PyCUDA."""
    
    def __init__(self, engine_path):
        self.engine_path = engine_path
        self.runtime = trt.Runtime(trt.Logger(trt.Logger.WARNING))
        self.engine = self.load_engine()
        self.context = self.engine.create_execution_context()
        
        # Enumerate I/O tensors via the named-tensor API (TensorRT >= 8.5).
        self.input_names = []
        self.output_names = []
        self.input_shapes = {}
        self.output_shapes = {}
        
        for i in range(self.engine.num_io_tensors):
            name = self.engine.get_tensor_name(i)
            shape = self.engine.get_tensor_shape(name)
            
            if self.engine.get_tensor_mode(name) == trt.TensorIOMode.INPUT:
                self.input_names.append(name)
                self.input_shapes[name] = shape
            else:
                self.output_names.append(name)
                self.output_shapes[name] = shape
        
        print(f"引擎输入: {self.input_names}")
        print(f"引擎输出: {self.output_names}")
    
    def load_engine(self):
        """Deserialize the TensorRT engine from self.engine_path."""
        with open(self.engine_path, 'rb') as f:
            engine_data = f.read()
        return self.runtime.deserialize_cuda_engine(engine_data)
    
    def preprocess_image(self, image_path, input_size=(640, 640)):
        """Load an image and convert it to a normalized NCHW float32 batch.

        Returns (batch, original_hw, input_size), or (None, None, None) when
        the image cannot be read.
        """
        image = cv2.imread(image_path)
        if image is None:
            return None, None, None
        
        original_shape = image.shape[:2]  # (H, W)
        
        # Plain resize to the network input size. NOTE(review): despite the
        # original comment, this does NOT preserve aspect ratio (no
        # letterboxing); the caller rescales boxes per-axis to compensate.
        image_resized = cv2.resize(image, input_size)
        
        # BGR -> RGB, then scale pixel values to [0, 1].
        image_rgb = cv2.cvtColor(image_resized, cv2.COLOR_BGR2RGB)
        image_normalized = image_rgb.astype(np.float32) / 255.0
        
        # HWC -> CHW, then add the batch dimension -> (1, 3, H, W).
        image_tensor = np.transpose(image_normalized, (2, 0, 1))
        image_batch = np.expand_dims(image_tensor, axis=0)
        
        return image_batch, original_shape, input_size
    
    def inference(self, image_batch):
        """Run one synchronous inference pass.

        Returns a dict mapping each output tensor name to a numpy array
        reshaped to the engine's declared output shape.
        """
        # Imported lazily so the module can be loaded without a GPU;
        # pycuda.autoinit creates the CUDA context on first use.
        import pycuda.driver as cuda
        import pycuda.autoinit
        
        # Allocate page-locked host buffers and device buffers for all I/O.
        # NOTE(review): buffers are re-allocated on every call; reusing them
        # across calls would be cheaper for batch evaluation.
        inputs = {}
        outputs = {}
        bindings = []
        stream = cuda.Stream()
        
        # Input buffers (bindings order: all inputs first, then outputs —
        # this must match the self.input_names + self.output_names order
        # used when setting tensor addresses below).
        for name in self.input_names:
            size = trt.volume(self.input_shapes[name])
            dtype = trt.nptype(self.engine.get_tensor_dtype(name))
            host_mem = cuda.pagelocked_empty(size, dtype)
            device_mem = cuda.mem_alloc(host_mem.nbytes)
            bindings.append(int(device_mem))
            inputs[name] = {'host': host_mem, 'device': device_mem}
        
        # Output buffers.
        for name in self.output_names:
            size = trt.volume(self.output_shapes[name])
            dtype = trt.nptype(self.engine.get_tensor_dtype(name))
            host_mem = cuda.pagelocked_empty(size, dtype)
            device_mem = cuda.mem_alloc(host_mem.nbytes)
            bindings.append(int(device_mem))
            outputs[name] = {'host': host_mem, 'device': device_mem}
        
        # Copy the (single) input batch into its host buffer.
        input_name = self.input_names[0]
        np.copyto(inputs[input_name]['host'], image_batch.ravel())
        
        # Host -> device transfer on the stream.
        for name in self.input_names:
            cuda.memcpy_htod_async(inputs[name]['device'], inputs[name]['host'], stream)
        
        # Bind each tensor name to its device address, then enqueue inference.
        for i, name in enumerate(self.input_names + self.output_names):
            self.context.set_tensor_address(name, bindings[i])
        
        self.context.execute_async_v3(stream_handle=stream.handle)
        
        # Device -> host transfer of all outputs.
        results = {}
        for name in self.output_names:
            cuda.memcpy_dtoh_async(outputs[name]['host'], outputs[name]['device'], stream)
        
        # Block until all async work on the stream has finished.
        stream.synchronize()
        
        # Reshape flat host buffers to the declared output shapes.
        for name in self.output_names:
            results[name] = outputs[name]['host'].reshape(self.output_shapes[name])
        
        return results

class ModelValidator:
    """Runs a detection engine over a directory of images and reports
    precision/recall/F1 plus (optionally) COCO mAP."""
    
    def __init__(self, inference_engine, dataset_path, annotations_path):
        """
        Args:
            inference_engine: object exposing ``preprocess_image(path)`` and
                ``inference(batch)`` (e.g. TensorRTInference).
            dataset_path: directory containing the ``.jpg`` images to evaluate.
            annotations_path: path to a COCO-format annotation JSON, or None.
                When present, COCO mAP is computed in addition to the basic
                metrics.
        """
        self.inference_engine = inference_engine
        self.dataset_path = dataset_path
        self.annotations_path = annotations_path
        
        # Load COCO annotations when available. validate() reads
        # self.use_coco and compute_coco_map() reads self.coco, so both
        # attributes must always be set here (they were previously left
        # undefined, crashing validate() with AttributeError).
        if annotations_path and os.path.exists(annotations_path):
            self.coco = COCO(annotations_path)
            self.use_coco = True
        else:
            self.coco = None
            self.use_coco = False
            print("未找到COCO标注文件，只计算基础指标")
    
    def validate(self, conf_threshold=0.1, max_images=None):
        """Run inference over the dataset and compute evaluation metrics.

        Args:
            conf_threshold: minimum detection score kept for evaluation.
            max_images: optional cap on the number of images processed.

        Returns:
            dict with 'precision', 'recall', 'f1' and the raw 'predictions'
            list (COCO result format).
        """
        # Collect .jpg images as Path objects: the loop below relies on
        # img_path.stem, which plain glob.glob() strings do not provide.
        image_paths = sorted(Path(self.dataset_path).glob('*.jpg'))
        
        # Honor the max_images cap (previously accepted but ignored).
        if max_images:
            image_paths = image_paths[:max_images]
        
        print(f"找到 {len(image_paths)} 张图像")
        
        all_predictions = []
        tp, fp, fn = 0, 0, 0
        
        # Per-image inference loop.
        for img_path in tqdm(image_paths, desc="推理中"):
            try:
                # Preprocess; skip unreadable images.
                image_batch, original_shape, input_size = self.inference_engine.preprocess_image(str(img_path))
                if image_batch is None:
                    continue
                
                # Inference.
                outputs = self.inference_engine.inference(image_batch)
                
                # Unpack NMS-style outputs (batch index 0).
                num_detections = int(outputs['num_detections'][0])
                boxes = outputs['detection_boxes'][0]
                scores = outputs['detection_scores'][0]
                classes = outputs['detection_classes'][0]
                
                # One-time debug dump on the first image with output.
                if len(all_predictions) == 0:
                    print(f"检测数量: {num_detections}")
                    if num_detections > 0:
                        print(f"置信度范围: {scores.min():.4f} - {scores.max():.4f}")
                        print(f"坐标范围: x1={boxes[:, 0].min():.2f}-{boxes[:, 0].max():.2f}")
                        print(f"类别范围: {classes.min()} - {classes.max()}")
                
                # Drop low-confidence detections.
                valid_indices = scores >= conf_threshold
                if not np.any(valid_indices):
                    continue
                
                valid_boxes = boxes[valid_indices]
                valid_scores = scores[valid_indices]
                valid_classes = classes[valid_indices]
                
                # Rescale box coordinates from network input size back to the
                # original image size (per-axis, since resize was non-uniform).
                scale_x = original_shape[1] / input_size[0]
                scale_y = original_shape[0] / input_size[1]
                
                valid_boxes[:, [0, 2]] *= scale_x
                valid_boxes[:, [1, 3]] *= scale_y
                
                # Collect predictions in COCO result format
                # (bbox is [x, y, width, height]).
                img_id = int(img_path.stem) if img_path.stem.isdigit() else len(all_predictions)
                
                for box, score, cls in zip(valid_boxes, valid_scores, valid_classes):
                    all_predictions.append({
                        'image_id': img_id,
                        'category_id': int(cls),
                        'bbox': [float(box[0]), float(box[1]), float(box[2]-box[0]), float(box[3]-box[1])],
                        'score': float(score)
                    })
                
                # Crude TP/FP/FN heuristic: assumes ~5 objects per image.
                # NOTE(review): not a real match against ground truth —
                # treat these numbers as rough sanity indicators only.
                valid_detections = len(valid_boxes)
                if valid_detections > 0:
                    tp += min(valid_detections, 5)
                    if valid_detections > 5:
                        fp += valid_detections - 5
                else:
                    fn += 5
                    
            except Exception as e:
                # Best-effort loop: report the failing image and keep going.
                print(f"处理图像 {img_path} 时出错: {e}")
                continue
        
        # Basic metrics (guarding against division by zero).
        precision = tp / (tp + fp) if (tp + fp) > 0 else 0
        recall = tp / (tp + fn) if (tp + fn) > 0 else 0
        f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0
        
        print(f"\n评估结果:")
        print(f"Precision: {precision:.4f}")
        print(f"Recall: {recall:.4f}")
        print(f"F1 Score: {f1:.4f}")
        print(f"TP: {tp}, FP: {fp}, FN: {fn}")
        print(f"总预测数: {len(all_predictions)}")
        
        # COCO mAP when annotations were loaded and we have predictions.
        if self.use_coco and len(all_predictions) > 0:
            try:
                coco_map = self.compute_coco_map(all_predictions)
                print(f"COCO mAP: {coco_map:.4f}")
            except Exception as e:
                print(f"COCO mAP计算失败: {e}")
        
        return {
            'precision': precision,
            'recall': recall,
            'f1': f1,
            'predictions': all_predictions
        }
    
    def compute_coco_map(self, predictions):
        """Compute COCO mAP@[0.5:0.95] for a list of COCO-format predictions.

        Requires self.coco to be a loaded COCO object (use_coco is True).
        """
        # pycocotools loads results from a file, so dump to a temp JSON.
        with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as f:
            json.dump(predictions, f)
            results_file = f.name
        
        try:
            # Load the predictions against the ground-truth annotations.
            coco_dt = self.coco.loadRes(results_file)
            
            # Standard bbox evaluation pipeline.
            coco_eval = COCOeval(self.coco, coco_dt, 'bbox')
            coco_eval.evaluate()
            coco_eval.accumulate()
            coco_eval.summarize()
            
            # stats[0] is mAP averaged over IoU 0.5:0.95.
            return coco_eval.stats[0]
            
        finally:
            # Always remove the temp file, even if evaluation raised.
            if os.path.exists(results_file):
                os.unlink(results_file)

def main():
    """Build (if needed) and validate a TensorRT YOLO engine with NMS.

    Consolidates the configuration into a single section: the original code
    assigned ``dataset_path`` twice with conflicting values (the second,
    project-relative assignment silently won) and defined several unused
    path constants (ONNX_PATH, ENGINE_PATH, LABEL_DIR).
    """
    
    # ---- Configuration -------------------------------------------------
    PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
    
    onnx_path = "/root/autodl-tmp/Codes/ppq_quantize/onnx/yolov11n.onnx"
    engine_path = "/root/autodl-tmp/Codes/ppq_quantize/engine/yolov11n_with_nms.engine"
    annotations_path = "/root/autodl-tmp/datasets/coco128/annotations/instances_train2017.json"
    # NOTE(review): this project-relative path was the effective value in the
    # original code (it overwrote an earlier absolute path) — confirm it is
    # the intended dataset location.
    dataset_path = os.path.join(PROJECT_ROOT, 'datasets/coco128/images/train2017')
    
    # Build the engine only when it does not already exist on disk.
    if not os.path.exists(engine_path):
        print("开始构建包含NMS的TensorRT引擎...")
        builder = TensorRTEngineBuilder()
        engine = builder.build_engine_with_nms(
            onnx_path=onnx_path,
            engine_path=engine_path,
            num_classes=80,
            conf_threshold=0.5,
            nms_threshold=0.4,
            max_output_boxes=100
        )
        
        if engine is None:
            print("引擎构建失败")
            return
        
        print("引擎构建完成")
    
    # Load the engine for validation.
    print(f"开始验证引擎: {engine_path}")
    inference_engine = TensorRTInference(engine_path)
    
    # Engines produced by TensorRTEngineBuilder expose a 'num_detections'
    # output; its presence indicates the NMS sub-graph is in the engine.
    has_nms = 'num_detections' in inference_engine.output_names
    print(f"引擎包含NMS: {has_nms}")
    
    # Run the validation pass.
    validator = ModelValidator(inference_engine, dataset_path, annotations_path)
    results = validator.validate(conf_threshold=0.1, max_images=128)
    
    print("验证完成")

if __name__ == "__main__":
    main()