#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
简化的TensorRT推理脚本 - Jetson Nano优化版本
支持YOLOv11 TensorRT引擎 + 摄像头实时推理
"""

import cv2
import numpy as np
import time
import argparse
from pathlib import Path

try:
    import torch
    TORCH_AVAILABLE = True
    print("✅ PyTorch已导入")
except ImportError:
    print("❌ PyTorch未安装，尝试使用OpenCV DNN后端")
    TORCH_AVAILABLE = False

def preprocess_image(image, input_size=(640, 640)):
    """Letterbox an image for YOLO inference.

    Resizes the image while preserving aspect ratio, pads it with gray
    (value 128) to ``input_size``, normalizes to [0, 1], and converts it
    to a float32 NCHW tensor.

    Args:
        image: HxWx3 uint8 BGR image (as produced by cv2).
        input_size: (width, height) of the network input.

    Returns:
        Tuple of (1x3xHxW float32 array, resize scale, (dx, dy) pad offset).
    """
    target_w, target_h = input_size
    src_h, src_w = image.shape[:2]

    # One scale factor so the whole image fits inside the target canvas.
    scale = min(target_w / src_w, target_h / src_h)
    scaled_w = int(src_w * scale)
    scaled_h = int(src_h * scale)

    scaled = cv2.resize(image, (scaled_w, scaled_h), interpolation=cv2.INTER_LINEAR)

    # Center the resized image on a gray canvas (letterbox padding).
    canvas = np.full((target_h, target_w, 3), 128, dtype=np.uint8)
    pad_x = (target_w - scaled_w) // 2
    pad_y = (target_h - scaled_h) // 2
    canvas[pad_y:pad_y + scaled_h, pad_x:pad_x + scaled_w, :] = scaled

    # uint8 HWC -> float32 NCHW in [0, 1], with a leading batch dimension.
    tensor = canvas.astype(np.float32) / 255.0
    tensor = tensor.transpose(2, 0, 1)[np.newaxis, ...]

    return tensor, scale, (pad_x, pad_y)

def postprocess_detections(outputs, input_shape, original_shape, scale, offset, conf_threshold=0.25, nms_threshold=0.45):
    """Convert raw YOLO output into a list of detections on the original image.

    Args:
        outputs: Model output; outputs[0] is (N, 5+C) or (1, N, 5+C) with rows
            [x1, y1, x2, y2, obj_conf, class scores...] in network-input space.
        input_shape: Network input size (unused; kept for API compatibility).
        original_shape: Original image shape (h, w, ...) used for clipping.
        scale: Resize scale returned by preprocess_image.
        offset: (dx, dy) letterbox padding returned by preprocess_image.
        conf_threshold: Minimum confidence to keep a detection.
        nms_threshold: IoU threshold for non-maximum suppression.

    Returns:
        List of dicts with 'bbox' [x1, y1, x2, y2], 'confidence', 'class_id'.
    """
    # Drop the batch dimension if present.
    if len(outputs[0].shape) == 3:
        outputs = outputs[0][0]
    else:
        outputs = outputs[0]

    # Discard low-confidence rows early to keep NMS cheap.
    scores = outputs[:, 4]
    outputs = outputs[scores > conf_threshold]

    if len(outputs) == 0:
        return []

    boxes = outputs[:, :4]
    confidences = outputs[:, 4]

    # Fold per-class scores into the objectness confidence, if present.
    if outputs.shape[1] > 5:
        class_scores = outputs[:, 5:]
        class_ids = np.argmax(class_scores, axis=1)
        confidences = confidences * np.max(class_scores, axis=1)
    else:
        class_ids = np.zeros(len(boxes), dtype=int)

    # Undo the letterbox transform to map boxes back to the original image.
    dx, dy = offset
    boxes[:, [0, 2]] = (boxes[:, [0, 2]] - dx) / scale
    boxes[:, [1, 3]] = (boxes[:, [1, 3]] - dy) / scale

    # Clip to the original image bounds.
    boxes[:, [0, 2]] = np.clip(boxes[:, [0, 2]], 0, original_shape[1])
    boxes[:, [1, 3]] = np.clip(boxes[:, [1, 3]], 0, original_shape[0])

    # BUGFIX: cv2.dnn.NMSBoxes expects boxes as [x, y, width, height], not
    # corner coordinates — passing x1y1x2y2 made the IoU overlap wrong.
    nms_boxes = boxes.copy()
    nms_boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
    nms_boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
    indices = cv2.dnn.NMSBoxes(
        nms_boxes.tolist(),
        confidences.tolist(),
        conf_threshold,
        nms_threshold
    )

    results = []
    if len(indices) > 0:
        # Older OpenCV versions return a tuple/column vector; normalize it.
        for i in np.asarray(indices).flatten():
            x1, y1, x2, y2 = boxes[i].astype(int)
            results.append({
                'bbox': [x1, y1, x2, y2],
                'confidence': float(confidences[i]),
                'class_id': int(class_ids[i])
            })

    return results

class TensorRTInference:
    """Wrapper around a TensorRT engine with PyTorch / OpenCV DNN backends."""

    def __init__(self, engine_path, use_pytorch=True):
        """Store configuration and immediately attempt to load the model.

        Args:
            engine_path: Path to the TensorRT .engine file.
            use_pytorch: Prefer the PyTorch backend when torch is importable.
        """
        self.engine_path = engine_path
        self.use_pytorch = use_pytorch and TORCH_AVAILABLE
        self.model = None
        self.input_size = (640, 640)
        self.device = 'cuda' if self.use_pytorch else 'cpu'
        self.load_model()

    def load_model(self):
        """Load the inference backend, logging progress as it goes.

        NOTE(review): both backend paths are placeholders — neither actually
        loads the engine, so this currently always re-raises after logging.
        """
        try:
            if self.use_pytorch:
                # PyTorch path: pick the device, then fall back because a real
                # engine load needs YOLOv5's DetectMultiBackend.
                print(f"🔄 使用PyTorch加载TensorRT引擎: {self.engine_path}")
                self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
                print("⚠️  需要完整的YOLOv5环境来加载TensorRT引擎")
                self.use_pytorch = False

            if not self.use_pytorch:
                # OpenCV DNN cannot consume a raw TensorRT engine; it needs ONNX.
                print(f"🔄 使用OpenCV DNN后端加载模型")
                print("⚠️  OpenCV DNN需要ONNX格式模型，不支持直接加载TensorRT引擎")
                raise Exception("需要ONNX模型或完整PyTorch环境")

        except Exception as e:
            print(f"❌ 模型加载失败: {e}")
            raise

    def predict(self, image):
        """Run one inference pass on a BGR image and return detections.

        Returns an empty list when the forward pass fails.
        """
        input_tensor, scale, offset = preprocess_image(image, self.input_size)

        try:
            if self.use_pytorch:
                # PyTorch path: move to device, forward without grad, detach.
                tensor = torch.from_numpy(input_tensor).to(self.device)
                with torch.no_grad():
                    outputs = self.model(tensor)
                    if isinstance(outputs, tuple):
                        outputs = outputs[0]
                    outputs = outputs.cpu().numpy()
            else:
                # OpenCV DNN path (expects an ONNX-backed network).
                outputs = self.model.forward(input_tensor)

        except Exception as e:
            print(f"❌ 推理失败: {e}")
            return []

        return postprocess_detections(
            outputs,
            self.input_size,
            image.shape,
            scale,
            offset
        )

def draw_detections(image, detections, class_names=None):
    """Draw bounding boxes and confidence labels onto an image in place.

    Args:
        image: BGR image to annotate (modified in place).
        detections: List of dicts with 'bbox', 'confidence', 'class_id'.
        class_names: Optional list mapping class ids to display names.

    Returns:
        The annotated image (the same array that was passed in).
    """
    box_color = (0, 255, 0)  # green boxes and label backgrounds

    for detection in detections:
        x1, y1, x2, y2 = detection['bbox']
        class_id = detection['class_id']

        cv2.rectangle(image, (x1, y1), (x2, y2), box_color, 2)

        # Fall back to a generic "ClassN" name when no mapping is available.
        if class_names and class_id < len(class_names):
            class_name = class_names[class_id]
        else:
            class_name = f"Class{class_id}"
        label = f"{class_name}: {detection['confidence']:.2f}"

        # Size a filled background rectangle to fit the label text.
        (text_w, text_h), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
        cv2.rectangle(image, (x1, y1 - text_h - 10), (x1 + text_w, y1), box_color, -1)

        # Black label text on the filled background.
        cv2.putText(image, label, (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)

    return image

def main():
    """CLI entry point: parse arguments, load the engine, run live webcam
    inference with an OpenCV display loop until the user quits."""
    parser = argparse.ArgumentParser(description='简化TensorRT推理演示')
    parser.add_argument('--engine', type=str, 
                       default='/home/wuyin/ros_ws/src/yolov5-ukf-imm/weights/yolo11n.engine',
                       help='TensorRT引擎文件路径')
    parser.add_argument('--camera', type=int, default=0, help='摄像头ID')
    # NOTE(review): --conf-thres and --nms-thres are parsed but never forwarded
    # to inference.predict(), which uses the postprocess defaults — confirm intended.
    parser.add_argument('--conf-thres', type=float, default=0.25, help='置信度阈值')
    parser.add_argument('--nms-thres', type=float, default=0.45, help='NMS阈值')
    parser.add_argument('--show-fps', action='store_true', help='显示FPS')
    
    args = parser.parse_args()
    
    print("🚀 启动简化TensorRT推理系统")
    print(f"📂 引擎文件: {args.engine}")
    print(f"📷 摄像头ID: {args.camera}")
    print(f"🎯 置信度阈值: {args.conf_thres}")
    
    # Bail out early if the engine file does not exist.
    if not Path(args.engine).exists():
        print(f"❌ 引擎文件不存在: {args.engine}")
        return
    
    try:
        # Initialize the inference engine; TensorRTInference raises on failure.
        print("⏳ 初始化推理引擎...")
        inference = TensorRTInference(args.engine)
        print("✅ 推理引擎初始化成功")
        
    except Exception as e:
        print(f"❌ 推理引擎初始化失败: {e}")
        print("\n🔧 解决方案:")
        print("1. 确保已安装PyTorch和TensorRT")
        print("2. 或者将TensorRT引擎转换为ONNX格式")
        print("3. 检查CUDA环境配置")
        return
    
    # Open the camera device.
    print(f"📷 打开摄像头 {args.camera}...")
    cap = cv2.VideoCapture(args.camera)
    
    if not cap.isOpened():
        print(f"❌ 无法打开摄像头 {args.camera}")
        return
    
    # Configure capture: 640x480 @ 30 FPS, buffer of 1 frame for low latency.
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
    cap.set(cv2.CAP_PROP_FPS, 30)
    cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
    
    print("✅ 摄像头初始化成功")
    print("\n🎮 控制说明:")
    print("  - 按 'q' 键退出")
    print("  - 按 's' 键截图")
    print("  - 按 'f' 键切换FPS显示")
    print("🎉 开始实时推理!\n")
    
    # Performance statistics state for the main loop.
    frame_count = 0                 # total frames read since start
    fps_start_time = time.time()    # start of the run (never reset below)
    avg_inference_time = 0          # running mean inference time in ms
    show_fps = args.show_fps
    
    while True:
        start_time = time.time()
        
        # Grab the next camera frame; stop the loop on read failure.
        ret, frame = cap.read()
        if not ret:
            print("❌ 无法读取摄像头帧")
            break
        
        frame_count += 1
        
        try:
            # Time a single inference call.
            inference_start = time.time()
            detections = inference.predict(frame)
            inference_time = time.time() - inference_start
            
            # Incremental running mean of inference time (in milliseconds).
            # NOTE(review): frame_count also counts frames whose inference
            # raised below, which slightly skews this mean — confirm intended.
            avg_inference_time = (avg_inference_time * (frame_count - 1) + inference_time * 1000) / frame_count
            
            # Overlay boxes and labels onto the frame.
            frame = draw_detections(frame, detections)
            
            # Detection-count overlay.
            # NOTE(review): cv2.putText cannot render CJK glyphs, so this
            # Chinese text will likely display as "???" — confirm acceptable.
            info_text = f"检测到 {len(detections)} 个目标"
            cv2.putText(frame, info_text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
            
            # Per-frame inference-time overlay.
            time_text = f"推理: {inference_time*1000:.1f}ms"
            cv2.putText(frame, time_text, (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
            
        except Exception as e:
            print(f"⚠️ 推理错误: {e}")
            # Keep going with the next frame on inference errors.
        
        # FPS overlay.
        # NOTE(review): fps_start_time is never reset, so this is the
        # cumulative average FPS since startup (not instantaneous), and the
        # condition stays true on every frame after the first second.
        current_time = time.time()
        if show_fps and current_time - fps_start_time >= 1.0:
            fps = frame_count / (current_time - fps_start_time)
            fps_text = f"FPS: {fps:.1f} | 平均推理: {avg_inference_time:.1f}ms"
            cv2.putText(frame, fps_text, (10, frame.shape[0] - 20), 
                       cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2)
        
        # Show the annotated frame.
        cv2.imshow('简化TensorRT推理', frame)
        
        # Keyboard handling: q = quit, s = screenshot, f = toggle FPS overlay.
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            print("👋 用户退出")
            break
        elif key == ord('s'):
            screenshot_name = f"tensorrt_inference_{int(time.time())}.jpg"
            cv2.imwrite(screenshot_name, frame)
            print(f"📸 截图保存: {screenshot_name}")
        elif key == ord('f'):
            show_fps = not show_fps
            print(f"🔄 FPS显示: {'开启' if show_fps else '关闭'}")
    
    # Final run statistics.
    total_time = time.time() - fps_start_time
    avg_fps = frame_count / total_time if total_time > 0 else 0
    
    print(f"\n📊 性能统计:")
    print(f"处理帧数: {frame_count}")
    print(f"平均FPS: {avg_fps:.2f}")
    print(f"平均推理时间: {avg_inference_time:.2f}ms")
    
    # Release the camera and close all OpenCV windows.
    cap.release()
    cv2.destroyAllWindows()
    print("🎉 程序结束")

# Script entry point: run the demo only when executed directly, not on import.
if __name__ == "__main__":
    main()