#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
TensorRT摄像头推理脚本 - 直接使用TensorRT Python API
针对Jetson Nano优化，使用YOLOv11 TensorRT引擎
"""

import cv2
import numpy as np
import tensorrt as trt
import time
import argparse
import os
from pathlib import Path

# YOLO class names — the 80 COCO categories; list index == model class id.
COCO_CLASSES = [
    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
    'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
    'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
    'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
    'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
    'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
    'hair drier', 'toothbrush'
]

class TensorRTInference:
    """TensorRT inference engine wrapper for YOLO-style detection.

    Loads a serialized TensorRT engine from disk, discovers its I/O tensors
    (supporting both the legacy binding API and the TensorRT 8+ named-tensor
    API), and runs detection on BGR frames as produced by OpenCV.

    NOTE(review): inference binds host-side numpy buffer addresses
    (``.ctypes.data``) where TensorRT normally expects CUDA *device*
    pointers. On real hardware this execution path is likely to fail, which
    is why ``predict`` falls back to ``fallback_inference`` (demo boxes).
    Confirm against the target TensorRT/Jetson setup.
    """
    
    def __init__(self, engine_path, logger=None):
        """Store configuration, build the color table, and load the engine.

        Args:
            engine_path: path to the serialized ``.engine`` file.
            logger: optional ``trt.Logger``; defaults to WARNING level.

        Raises:
            FileNotFoundError / RuntimeError: propagated from ``load_engine``.
        """
        self.engine_path = engine_path
        self.logger = logger or trt.Logger(trt.Logger.WARNING)
        self.runtime = None           # trt.Runtime, set in load_engine()
        self.engine = None            # deserialized ICudaEngine
        self.context = None           # IExecutionContext
        self.stream = None            # unused; async path uses stream handle 0
        self.input_shape = None       # e.g. (1, 3, 640, 640)
        self.output_shape = None
        self.bindings = []            # unused; kept for API parity
        self.input_mem = None         # host-side numpy input buffer
        self.output_mem = None        # host-side numpy output buffer
        self.input_name = None
        self.output_name = None
        
        # Color map: fixed seed so each class keeps the same color across runs.
        np.random.seed(42)
        self.colors = np.random.randint(0, 255, size=(len(COCO_CLASSES), 3), dtype="uint8")
        
        self.load_engine()
        
    def load_engine(self):
        """Deserialize the TensorRT engine and create an execution context.

        Raises:
            FileNotFoundError: if the engine file does not exist.
            RuntimeError: if deserialization or context creation fails.
        """
        print(f"🔄 加载TensorRT引擎: {self.engine_path}")
        
        if not os.path.exists(self.engine_path):
            raise FileNotFoundError(f"引擎文件不存在: {self.engine_path}")
        
        # Create the runtime.
        self.runtime = trt.Runtime(self.logger)
        
        # Read the serialized engine bytes.
        with open(self.engine_path, 'rb') as f:
            engine_data = f.read()
        
        # Deserialize the engine.
        self.engine = self.runtime.deserialize_cuda_engine(engine_data)
        if self.engine is None:
            raise RuntimeError("引擎反序列化失败")
        
        # Create the execution context.
        self.context = self.engine.create_execution_context()
        if self.context is None:
            raise RuntimeError("执行上下文创建失败")
        
        # Discover input/output tensor names, shapes and dtypes.
        self.setup_bindings()
        
        print(f"✅ TensorRT引擎加载成功")
        print(f"📊 输入形状: {self.input_shape}")
        print(f"📊 输出形状: {self.output_shape}")
        
    def setup_bindings(self):
        """Query I/O tensor metadata, then allocate host buffers.

        Handles both API generations; on any error, falls back to hard-coded
        default shapes. NOTE(review): if the engine has multiple inputs or
        outputs, only the last of each is kept.
        """
        # Adapt to the newer TensorRT API when available.
        try:
            # New-style named-tensor API (TensorRT 8+).
            if hasattr(self.engine, 'num_io_tensors'):
                num_bindings = self.engine.num_io_tensors
                for i in range(num_bindings):
                    tensor_name = self.engine.get_tensor_name(i)
                    shape = self.engine.get_tensor_shape(tensor_name)
                    dtype = trt.nptype(self.engine.get_tensor_dtype(tensor_name))
                    
                    if self.engine.get_tensor_mode(tensor_name) == trt.TensorIOMode.INPUT:
                        self.input_shape = shape
                        self.input_name = tensor_name
                        print(f"📥 输入张量: {tensor_name}, 形状: {shape}, 类型: {dtype}")
                    else:
                        self.output_shape = shape
                        self.output_name = tensor_name
                        print(f"📤 输出张量: {tensor_name}, 形状: {shape}, 类型: {dtype}")
            else:
                # Legacy binding-index API (TensorRT 7 and earlier).
                for i in range(self.engine.num_bindings):
                    binding_name = self.engine.get_binding_name(i)
                    shape = self.engine.get_binding_shape(i)
                    dtype = trt.nptype(self.engine.get_binding_dtype(i))
                    
                    if self.engine.binding_is_input(i):
                        self.input_shape = shape
                        self.input_name = binding_name
                        print(f"📥 输入绑定: {binding_name}, 形状: {shape}, 类型: {dtype}")
                    else:
                        self.output_shape = shape
                        self.output_name = binding_name
                        print(f"📤 输出绑定: {binding_name}, 形状: {shape}, 类型: {dtype}")
        except Exception as e:
            print(f"⚠️ 绑定设置错误: {e}")
            # Fall back to hard-coded defaults.
            self.input_shape = (1, 3, 640, 640)
            self.output_shape = (1, 25200, 85)  # assumed YOLOv5-style output layout
            print(f"🔧 使用默认形状 - 输入: {self.input_shape}, 输出: {self.output_shape}")
        
        # Allocate host buffers matching the discovered (or default) shapes.
        self.allocate_memory()
    
    def allocate_memory(self):
        """Allocate host-side numpy buffers for the input and output tensors.

        NOTE(review): these are CPU buffers only; no CUDA device memory is
        allocated anywhere in this class.
        """
        self.input_mem = np.zeros(self.input_shape, dtype=np.float32)
        self.output_mem = np.zeros(self.output_shape, dtype=np.float32)
        print(f"💾 内存分配完成")
    
    def preprocess_image(self, image):
        """Letterbox a BGR frame to the engine's input size.

        Args:
            image: BGR uint8 array of shape (H, W, 3) as read by OpenCV.

        Returns:
            Tuple of:
              - input tensor, shape (1, 3, in_h, in_w), float32 in [0, 1],
                RGB channel order, CHW layout;
              - the uniform resize scale applied;
              - the (dx, dy) padding offset of the resized image inside
                the letterboxed canvas.
        """
        # Engine input spatial size (assumes NCHW input shape).
        input_h, input_w = self.input_shape[2], self.input_shape[3]
        
        # Aspect-ratio-preserving resize factor.
        img_h, img_w = image.shape[:2]
        scale = min(input_w / img_w, input_h / img_h)
        new_w, new_h = int(img_w * scale), int(img_h * scale)
        
        # Resize the frame.
        resized = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
        
        # Paste onto a gray (114) canvas, centered — standard YOLO letterbox.
        input_image = np.full((input_h, input_w, 3), 114, dtype=np.uint8)  # gray padding
        dx, dy = (input_w - new_w) // 2, (input_h - new_h) // 2
        input_image[dy:dy+new_h, dx:dx+new_w] = resized
        
        # Convert to RGB and normalize to [0, 1].
        input_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB)
        input_image = input_image.astype(np.float32) / 255.0
        
        # HWC -> CHW, then add the batch dimension.
        input_image = np.transpose(input_image, (2, 0, 1))
        input_image = np.expand_dims(input_image, axis=0)
        
        return input_image, scale, (dx, dy)
    
    def postprocess_detections(self, outputs, original_shape, scale, offset, conf_threshold=0.25, nms_threshold=0.45):
        """Filter, rescale, and NMS the raw network output into detections.

        NOTE(review): this assumes each output row is
        [x1, y1, x2, y2, objectness, per-class scores...] in letterboxed
        input-pixel coordinates (YOLOv5-style). YOLOv11 engines commonly emit
        (1, 84, 8400) with center-format boxes and *no* objectness column —
        verify against the actual engine's output layout.

        Args:
            outputs: raw output array, (1, N, C) or (N, C).
            original_shape: shape of the original frame (H, W, ...).
            scale: letterbox resize scale from ``preprocess_image``.
            offset: (dx, dy) letterbox padding offset.
            conf_threshold: minimum confidence to keep a detection.
            nms_threshold: IoU threshold for non-maximum suppression.

        Returns:
            List of dicts with keys 'bbox' ([x1, y1, x2, y2] ints),
            'confidence', 'class_id', 'class_name'.
        """
        # Drop the batch dimension if present.
        if len(outputs.shape) == 3:
            outputs = outputs[0]  # remove batch dim
        
        # Keep rows whose objectness exceeds the confidence threshold.
        valid_detections = outputs[outputs[:, 4] > conf_threshold]
        
        if len(valid_detections) == 0:
            return []
        
        # Split out boxes and objectness scores.
        boxes = valid_detections[:, :4]
        confidences = valid_detections[:, 4]
        
        # If per-class scores are present, fold them into the confidence.
        if valid_detections.shape[1] > 5:
            class_scores = valid_detections[:, 5:]
            class_ids = np.argmax(class_scores, axis=1)
            max_class_scores = np.max(class_scores, axis=1)
            confidences = confidences * max_class_scores
        else:
            class_ids = np.zeros(len(boxes), dtype=int)
        
        # Map letterboxed-input coordinates back to the original frame.
        dx, dy = offset
        boxes[:, 0] = (boxes[:, 0] - dx) / scale  # x1
        boxes[:, 1] = (boxes[:, 1] - dy) / scale  # y1
        boxes[:, 2] = (boxes[:, 2] - dx) / scale  # x2
        boxes[:, 3] = (boxes[:, 3] - dy) / scale  # y2
        
        # Clamp coordinates to the frame bounds.
        img_h, img_w = original_shape[:2]
        boxes[:, [0, 2]] = np.clip(boxes[:, [0, 2]], 0, img_w)
        boxes[:, [1, 3]] = np.clip(boxes[:, [1, 3]], 0, img_h)
        
        # cv2.dnn.NMSBoxes expects [x, y, w, h] boxes.
        boxes_xywh = boxes.copy()
        boxes_xywh[:, 2] = boxes[:, 2] - boxes[:, 0]  # width
        boxes_xywh[:, 3] = boxes[:, 3] - boxes[:, 1]  # height
        
        # Non-maximum suppression.
        indices = cv2.dnn.NMSBoxes(
            boxes_xywh.tolist(), 
            confidences.tolist(), 
            conf_threshold, 
            nms_threshold
        )
        
        results = []
        if len(indices) > 0:
            for i in indices.flatten():
                x1, y1, x2, y2 = boxes[i].astype(int)
                confidence = confidences[i]
                class_id = class_ids[i]
                # Guard against class ids outside the COCO table.
                class_name = COCO_CLASSES[class_id] if class_id < len(COCO_CLASSES) else f"Class{class_id}"
                
                results.append({
                    'bbox': [x1, y1, x2, y2],
                    'confidence': float(confidence),
                    'class_id': int(class_id),
                    'class_name': class_name
                })
        
        return results
    
    def predict(self, image):
        """Run one inference pass on a BGR frame and return detections.

        On any execution error, falls back to ``fallback_inference``
        (hard-coded demo boxes) instead of raising.
        """
        # Preprocess into the engine's input layout.
        input_tensor, scale, offset = self.preprocess_image(image)
        
        # Copy into the pre-allocated host input buffer.
        np.copyto(self.input_mem, input_tensor)
        
        # Execute — adapt to whichever API this TensorRT version exposes.
        # NOTE(review): host numpy addresses are passed where TensorRT expects
        # CUDA device pointers; this likely fails on real hardware — confirm.
        try:
            if hasattr(self.context, 'set_tensor_address'):
                # New-style API (TensorRT 8+).
                self.context.set_tensor_address(self.input_name, self.input_mem.ctypes.data)
                self.context.set_tensor_address(self.output_name, self.output_mem.ctypes.data)
                success = self.context.execute_async_v3(0)  # stream handle 0
            elif hasattr(self.context, 'execute_v2'):
                # Mid-generation TensorRT.
                bindings = [self.input_mem.ctypes.data, self.output_mem.ctypes.data]
                success = self.context.execute_v2(bindings)
            else:
                # Legacy TensorRT.
                bindings = [self.input_mem.ctypes.data, self.output_mem.ctypes.data]
                success = self.context.execute(batch_size=1, bindings=bindings)
            
            # NOTE: the async path should synchronize on the stream before the
            # output is read; execution is assumed synchronous here.
            if success is False:
                raise RuntimeError("TensorRT推理执行失败")
                
        except Exception as e:
            print(f"❌ 推理执行错误: {e}")
            # Fall back to the demo-detection path.
            return self.fallback_inference(input_tensor, image, scale, offset)
        
        # Postprocess the raw output into detection dicts.
        results = self.postprocess_detections(self.output_mem, image.shape, scale, offset)
        return results
    
    def fallback_inference(self, input_tensor, image, scale, offset):
        """Fallback path — produce hard-coded demo detections.

        Used when real TensorRT execution fails, so the display pipeline can
        still be exercised. ``input_tensor``, ``scale`` and ``offset`` are
        accepted for signature parity but unused.
        """
        print("🔄 使用备用推理方法 (演示模式)...")
        
        # Place demo boxes relative to the frame center.
        h, w = image.shape[:2]
        center_x, center_y = w // 2, h // 2
        
        # Build a couple of example detections.
        demo_detections = []
        
        # Demo box 1: centered on the frame.
        demo_detections.append({
            'bbox': [center_x - 50, center_y - 50, center_x + 50, center_y + 50],
            'confidence': 0.85,
            'class_id': 0,  # person
            'class_name': 'person (demo)'
        })
        
        # Demo box 2: top-left corner.
        demo_detections.append({
            'bbox': [50, 50, 150, 150],
            'confidence': 0.65,
            'class_id': 2,  # car
            'class_name': 'car (demo)'
        })
        
        print(f"🎭 演示模式: 生成了 {len(demo_detections)} 个演示检测框")
        return demo_detections
    
    def draw_detections(self, image, detections):
        """Draw bounding boxes and labels onto the frame (in place).

        Args:
            image: BGR frame to annotate (mutated).
            detections: list of detection dicts from ``postprocess_detections``.

        Returns:
            The same (annotated) frame.
        """
        for det in detections:
            x1, y1, x2, y2 = det['bbox']
            confidence = det['confidence']
            class_name = det['class_name']
            class_id = det['class_id']
            
            # Per-class color from the fixed-seed table.
            color = [int(c) for c in self.colors[class_id % len(self.colors)]]
            
            # Bounding box.
            cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)
            
            # Label text.
            label = f"{class_name}: {confidence:.2f}"
            
            # Measure the text so the background fits it.
            (text_width, text_height), baseline = cv2.getTextSize(
                label, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 1
            )
            
            # Filled background rectangle behind the label.
            cv2.rectangle(
                image, 
                (x1, y1 - text_height - 10), 
                (x1 + text_width, y1), 
                color, 
                -1
            )
            
            # Label text in white.
            cv2.putText(
                image, 
                label, 
                (x1, y1 - 5), 
                cv2.FONT_HERSHEY_SIMPLEX, 
                0.6, 
                (255, 255, 255), 
                1
            )
        
        return image
    
    def cleanup(self):
        """Release TensorRT objects by deleting the attributes.

        NOTE(review): ``del self.context`` removes the attribute itself, so a
        second cleanup() call would raise AttributeError at ``if self.context``
        — confirm callers invoke this at most once.
        """
        if self.context:
            del self.context
        if self.engine:
            del self.engine
        if self.runtime:
            del self.runtime
        print("🧹 TensorRT资源已清理")

def main():
    """CLI entry point: parse args, load the engine, run the camera UI loop.

    Keyboard controls in the display window: 'q' quit, 's' screenshot,
    'f' toggle the FPS overlay, 'i' print performance statistics.
    """
    parser = argparse.ArgumentParser(description='TensorRT摄像头推理系统')
    parser.add_argument('--engine', type=str, 
                       default='weights/yolo11n.engine',
                       help='TensorRT引擎文件路径')
    parser.add_argument('--camera', type=int, default=0, help='摄像头ID')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='置信度阈值')
    parser.add_argument('--nms-thres', type=float, default=0.45, help='NMS阈值')
    parser.add_argument('--fps-limit', type=int, default=30, help='FPS限制')
    
    args = parser.parse_args()
    
    # NOTE(review): args.conf_thres / args.nms_thres are printed but never
    # passed to inference.predict(), which uses postprocess defaults — confirm.
    print("🚀 TensorRT摄像头推理系统")
    print("=" * 50)
    print(f"📦 TensorRT版本: {trt.__version__}")
    print(f"📂 引擎文件: {args.engine}")
    print(f"📷 摄像头ID: {args.camera}")
    print(f"🎯 置信度阈值: {args.conf_thres}")
    
    # Bail out early if the engine file is missing.
    if not os.path.exists(args.engine):
        print(f"❌ TensorRT引擎文件不存在: {args.engine}")
        return
    
    try:
        # Initialize the TensorRT inference engine.
        print("\n⏳ 初始化TensorRT推理引擎...")
        inference = TensorRTInference(args.engine)
        print("✅ TensorRT引擎初始化成功")
        
    except Exception as e:
        print(f"❌ TensorRT引擎初始化失败: {e}")
        print("\n💡 可能的解决方案:")
        print("1. 检查TensorRT引擎是否与当前TensorRT版本兼容")
        print("2. 重新生成TensorRT引擎")
        print("3. 检查CUDA环境配置")
        return
    
    # Open the camera.
    print(f"\n📷 打开摄像头 {args.camera}...")
    cap = cv2.VideoCapture(args.camera)
    
    if not cap.isOpened():
        print(f"❌ 无法打开摄像头 {args.camera}")
        return
    
    # Camera configuration (requests; the driver may ignore them).
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
    cap.set(cv2.CAP_PROP_FPS, args.fps_limit)
    cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)  # keep latency low: buffer one frame
    
    # Read back the values the driver actually applied.
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    
    print(f"📸 摄像头配置: {width}x{height} @ {fps}FPS")
    print("\n🎮 控制说明:")
    print("  - 按 'q' 键退出")
    print("  - 按 's' 键截图")
    print("  - 按 'f' 键切换FPS显示")
    print("  - 按 'i' 键显示性能信息")
    print("🎉 开始TensorRT推理!\n")
    
    # Performance counters.
    frame_count = 0
    start_time = time.time()
    show_fps = True
    inference_times = []  # per-frame inference latency in ms (grows unbounded)
    
    try:
        while True:
            frame_start = time.time()  # NOTE(review): captured but never used
            
            # Grab one frame.
            ret, frame = cap.read()
            if not ret:
                print("❌ 无法读取摄像头帧")
                break
            
            frame_count += 1
            
            # TensorRT inference, timed in milliseconds.
            inference_start = time.time()
            detections = inference.predict(frame)
            inference_time = (time.time() - inference_start) * 1000
            inference_times.append(inference_time)
            
            # Draw detection boxes/labels onto the frame.
            frame = inference.draw_detections(frame, detections)
            
            # Detection-count overlay.
            info_text = f"TensorRT检测: {len(detections)}个目标"
            cv2.putText(frame, info_text, (10, 30), 
                       cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
            
            # TensorRT version badge.
            trt_text = f"TensorRT {trt.__version__}"
            cv2.putText(frame, trt_text, (10, 60), 
                       cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 1)
            
            # FPS and latency overlay.
            # NOTE(review): drawn only on every 30th frame, so it flickers;
            # likely intended to recompute every 30 frames but draw every
            # frame — confirm intent.
            if show_fps and frame_count % 30 == 0:
                current_time = time.time()
                elapsed = current_time - start_time
                fps_current = frame_count / elapsed if elapsed > 0 else 0
                avg_inference = np.mean(inference_times[-30:]) if inference_times else 0
                
                fps_text = f"FPS: {fps_current:.1f} | 推理: {avg_inference:.1f}ms"
                cv2.putText(frame, fps_text, (10, height - 20), 
                           cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2)
            
            # Per-detection detail lines.
            y_offset = 90
            for i, det in enumerate(detections[:3]):  # show at most 3 detections
                det_text = f"{det['class_name']}: {det['confidence']:.2f}"
                cv2.putText(frame, det_text, (10, y_offset + i * 20), 
                           cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
            
            # Present the frame.
            cv2.imshow('TensorRT摄像头推理', frame)
            
            # Keyboard handling (1 ms poll).
            key = cv2.waitKey(1) & 0xFF
            if key == ord('q'):
                print("👋 用户退出")
                break
            elif key == ord('s'):
                # Save the annotated frame with a timestamped filename.
                screenshot_name = f"tensorrt_inference_{int(time.time())}.jpg"
                cv2.imwrite(screenshot_name, frame)
                print(f"📸 截图保存: {screenshot_name}")
            elif key == ord('f'):
                show_fps = not show_fps
                print(f"🔄 FPS显示: {'开启' if show_fps else '关闭'}")
            elif key == ord('i'):
                # Print detailed performance statistics to the console.
                total_time = time.time() - start_time
                avg_fps = frame_count / total_time if total_time > 0 else 0
                avg_inference = np.mean(inference_times) if inference_times else 0
                
                print(f"\n📊 TensorRT性能统计:")
                print(f"运行时间: {total_time:.1f}s")
                print(f"处理帧数: {frame_count}")
                print(f"平均FPS: {avg_fps:.2f}")
                print(f"平均推理时间: {avg_inference:.2f}ms")
                print(f"当前检测数: {len(detections)}")
                print("=" * 30 + "\n")
    
    except KeyboardInterrupt:
        print("\n⏹️  收到中断信号")
    
    except Exception as e:
        print(f"\n❌ 运行错误: {e}")
        import traceback
        traceback.print_exc()
    
    finally:
        # Final statistics, then release everything.
        total_time = time.time() - start_time
        avg_fps = frame_count / total_time if total_time > 0 else 0
        avg_inference = np.mean(inference_times) if inference_times else 0
        
        print(f"\n📊 最终TensorRT统计:")
        print(f"总运行时间: {total_time:.1f}s")
        print(f"处理帧数: {frame_count}")
        print(f"平均FPS: {avg_fps:.2f}")
        print(f"平均推理时间: {avg_inference:.2f}ms")
        
        # Release camera, window, and TensorRT resources.
        cap.release()
        cv2.destroyAllWindows()
        inference.cleanup()
        print("🎉 TensorRT推理系统已退出")

# Run the CLI entry point only when executed as a script.
if __name__ == "__main__":
    main()