#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Jetson Nano 摄像头推理脚本
使用OpenCV DNN后端，无需复杂的PyTorch环境
支持多种模型格式：ONNX, TensorRT, OpenVINO等
"""

import cv2
import numpy as np
import time
import argparse
import os
import sys
from pathlib import Path

# Names of the 80 COCO object classes; index in this list is the class id
# used by postprocess_detections() and draw_detections().
COCO_CLASSES = [
    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
    'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
    'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
    'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
    'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
    'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
    'hair drier', 'toothbrush'
]

class JetsonCameraInference:
    """Camera object-detection wrapper around OpenCV's DNN module.

    Loads a detection model (.onnx recommended; TensorFlow .pb or any format
    cv2.dnn.readNet accepts also works), preprocesses BGR frames into blobs,
    runs a forward pass, and post-processes YOLO-style output rows into a
    list of detection dicts. TensorRT .engine files are explicitly rejected
    because OpenCV DNN cannot read them.
    """

    def __init__(self, model_path, input_size=(640, 640), conf_threshold=0.25, nms_threshold=0.45):
        """Store configuration and load the model immediately.

        Args:
            model_path: Path to the model file; loader is chosen by extension.
            input_size: (width, height) the frame is resized to for inference.
            conf_threshold: Minimum confidence for a detection to be kept.
            nms_threshold: IoU threshold for non-maximum suppression.

        Raises:
            FileNotFoundError: If model_path does not exist (via load_model).
        """
        self.model_path = model_path
        self.input_size = input_size
        self.conf_threshold = conf_threshold
        self.nms_threshold = nms_threshold
        # Set by load_model(); cv2.dnn_Net once a model is loaded.
        self.net = None
        self.classes = COCO_CLASSES

        # Per-class BGR colors; fixed seed keeps colors stable across runs.
        np.random.seed(42)
        self.colors = np.random.randint(0, 255, size=(len(self.classes), 3), dtype="uint8")

        self.load_model()

    def load_model(self):
        """Load the model, choosing an OpenCV DNN reader by file extension.

        Side effects: prints progress to stdout and calls sys.exit(1) on
        unrecoverable load errors (including .engine files, which OpenCV DNN
        cannot read). Selects the CUDA backend when OpenCV reports CUDA
        devices, otherwise falls back to the CPU backend.

        Raises:
            FileNotFoundError: If self.model_path does not exist.
        """
        print(f"🔄 加载模型: {self.model_path}")

        if not os.path.exists(self.model_path):
            raise FileNotFoundError(f"模型文件不存在: {self.model_path}")

        # Pick the loader from the file extension.
        model_ext = Path(self.model_path).suffix.lower()

        try:
            if model_ext == '.onnx':
                # ONNX model — the recommended path for this script.
                print("📦 使用ONNX后端")
                self.net = cv2.dnn.readNetFromONNX(self.model_path)

            elif model_ext == '.engine':
                # TensorRT engine — unsupported here; print guidance and exit.
                print("⚡ 检测到TensorRT引擎文件")
                print("❌ OpenCV DNN不直接支持TensorRT引擎")
                print("💡 建议:")
                print("   1. 将模型转换为ONNX格式")
                print("   2. 或使用完整的PyTorch + TensorRT环境")
                print("   3. 或使用TensorRT Python API")

                # Deliberate raise: there is no OpenCV loader for .engine files;
                # the inner try only exists to funnel into the exit path below.
                try:
                    # .engine cannot be loaded directly; alternatives printed above.
                    raise NotImplementedError("需要ONNX格式模型或完整TensorRT环境")
                except Exception as e:
                    print(f"❌ TensorRT引擎加载失败: {e}")
                    sys.exit(1)

            elif model_ext in ['.pb', '.pbtxt']:
                # TensorFlow frozen-graph model.
                print("🤖 使用TensorFlow后端")
                self.net = cv2.dnn.readNetFromTensorflow(self.model_path)

            else:
                # Unknown extension: let OpenCV's generic reader try.
                print("🔧 尝试通用DNN加载")
                self.net = cv2.dnn.readNet(self.model_path)

            # Choose the inference backend/target.
            # NOTE(review): assumes cv2 exposes the cuda module; on builds
            # without CUDA support cv2.cuda may be absent — confirm on device.
            if cv2.cuda.getCudaEnabledDeviceCount() > 0:
                print("🚀 使用CUDA后端")
                self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
                self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
            else:
                print("💻 使用CPU后端")
                self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
                self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)

        except Exception as e:
            print(f"❌ 模型加载失败: {e}")
            print("💡 请确保:")
            print("   - 模型文件格式正确 (ONNX推荐)")
            print("   - OpenCV版本支持所需后端")
            print("   - CUDA环境配置正确 (如需GPU加速)")
            sys.exit(1)

    def preprocess_frame(self, frame):
        """Convert a BGR frame to a DNN input blob.

        Scales pixel values to [0, 1], resizes to self.input_size, swaps
        BGR->RGB, and does not crop.

        Args:
            frame: BGR image as a numpy array (H, W, 3).

        Returns:
            4-D blob (1, 3, H, W) ready for net.setInput().
        """
        blob = cv2.dnn.blobFromImage(
            frame,
            1/255.0,  # scale pixel values to [0, 1]
            self.input_size,
            (0, 0, 0),
            swapRB=True,
            crop=False
        )
        return blob

    def postprocess_detections(self, outputs, frame_shape):
        """Turn raw network output rows into NMS-filtered detection dicts.

        Args:
            outputs: Network output(s); each row is expected to be YOLO-style
                [x_center, y_center, width, height, confidence, class_scores...].
            frame_shape: Original frame shape (H, W, ...) used to scale boxes.

        Returns:
            List of dicts with keys 'bbox' ([x1, y1, x2, y2]), 'confidence',
            'class_id', 'class_name'.
        """
        boxes = []
        confidences = []
        class_ids = []

        frame_height, frame_width = frame_shape[:2]

        # Walk every output tensor and every candidate row inside it.
        for output in outputs:
            if len(output.shape) == 3:
                # Collapse (1, N, D) -> (N, D) so rows iterate uniformly.
                output = output.reshape(-1, output.shape[-1])

            for detection in output:
                if len(detection) < 5:
                    continue

                # YOLO row: [x_center, y_center, width, height, confidence, class_scores...]
                if len(detection) > 5:
                    scores = detection[5:]
                    class_id = np.argmax(scores)
                    # Combined score = objectness * best class probability.
                    confidence = detection[4] * scores[class_id]
                else:
                    # No per-class scores: treat column 4 as the confidence.
                    confidence = detection[4]
                    class_id = 0

                if confidence > self.conf_threshold:
                    # Scale to frame pixels.
                    # NOTE(review): this assumes coordinates are normalized to
                    # [0, 1]; many YOLO ONNX exports emit pixel coordinates
                    # relative to the network input size — confirm for the
                    # model actually deployed.
                    center_x = int(detection[0] * frame_width)
                    center_y = int(detection[1] * frame_height)
                    width = int(detection[2] * frame_width)
                    height = int(detection[3] * frame_height)

                    # Convert center format to top-left corner for NMSBoxes.
                    x = int(center_x - width / 2)
                    y = int(center_y - height / 2)

                    boxes.append([x, y, width, height])
                    confidences.append(float(confidence))
                    class_ids.append(class_id)

        # Non-maximum suppression across all classes together.
        indices = cv2.dnn.NMSBoxes(boxes, confidences, self.conf_threshold, self.nms_threshold)

        results = []
        if len(indices) > 0:
            for i in indices.flatten():
                x, y, w, h = boxes[i]
                confidence = confidences[i]
                class_id = class_ids[i]

                results.append({
                    'bbox': [x, y, x + w, y + h],
                    'confidence': confidence,
                    'class_id': class_id,
                    # Fall back to a synthetic name for ids beyond the COCO list.
                    'class_name': self.classes[class_id] if class_id < len(self.classes) else f"Class{class_id}"
                })

        return results

    def predict(self, frame):
        """Run the full pipeline on one frame: preprocess, forward, postprocess.

        Args:
            frame: BGR image as a numpy array.

        Returns:
            List of detection dicts (see postprocess_detections).
        """
        # Preprocess into a blob and bind it as the network input.
        blob = self.preprocess_frame(frame)
        self.net.setInput(blob)

        # Forward pass (single default output layer).
        outputs = self.net.forward()

        # Decode + NMS against the original frame size.
        results = self.postprocess_detections(outputs, frame.shape)

        return results

    def draw_detections(self, frame, detections):
        """Draw bounding boxes and labels onto the frame in place.

        Args:
            frame: BGR image to annotate (modified in place).
            detections: Detection dicts as produced by predict().

        Returns:
            The same frame, annotated.
        """
        for det in detections:
            x1, y1, x2, y2 = det['bbox']
            confidence = det['confidence']
            class_name = det['class_name']
            class_id = det['class_id']

            # Stable per-class color; modulo guards out-of-range class ids.
            color = [int(c) for c in self.colors[class_id % len(self.colors)]]

            # Bounding box.
            cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)

            # Label text: "name: confidence".
            label = f"{class_name}: {confidence:.2f}"

            # Measure the label so the background exactly fits it.
            (text_width, text_height), baseline = cv2.getTextSize(
                label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1
            )

            # Filled background rectangle behind the label.
            cv2.rectangle(
                frame,
                (x1, y1 - text_height - 10),
                (x1 + text_width, y1),
                color,
                -1
            )

            # Label text in white on the colored background.
            cv2.putText(
                frame,
                label,
                (x1, y1 - 5),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5,
                (255, 255, 255),
                1
            )

        return frame

def create_onnx_conversion_script(output_path='/home/wuyin/ros_ws/src/yolov5-ukf-imm/convert_to_onnx.py'):
    """Write a helper script explaining how to obtain an ONNX model.

    The generated script documents that a TensorRT engine cannot be converted
    back to ONNX and that the original PyTorch/ONNX model must be re-exported.

    Args:
        output_path: Destination file for the generated script. Defaults to
            the original hard-coded project location so existing callers are
            unaffected; pass a path to write elsewhere.
    """
    script_content = '''#!/usr/bin/env python3
# TensorRT转ONNX脚本

import tensorrt as trt
import numpy as np

def convert_tensorrt_to_onnx(engine_path, onnx_path):
    """将TensorRT引擎转换为ONNX (这通常是不直接支持的)"""
    print("❌ 直接从TensorRT引擎转换为ONNX不被支持")
    print("💡 建议使用原始PyTorch/ONNX模型重新导出")
    
    # 如果你有原始的PyTorch模型，可以这样转换:
    # import torch
    # model = torch.load('yolo11n.pt')
    # torch.onnx.export(model, dummy_input, 'yolo11n.onnx')

if __name__ == "__main__":
    print("请使用原始模型重新导出为ONNX格式")
'''

    # Explicit UTF-8: the content contains non-ASCII text, and the default
    # locale encoding on the target system may not be able to encode it.
    Path(output_path).write_text(script_content, encoding='utf-8')

    print("📝 已创建ONNX转换脚本: convert_to_onnx.py")

def main():
    """CLI entry point: parse arguments, load the model, run the camera loop.

    Keyboard controls inside the display window:
      q - quit, s - save a screenshot, f - toggle the FPS overlay,
      i - print cumulative performance statistics to the console.

    Fixes over the previous revision:
      * The FPS overlay used to be drawn only on every 30th frame (it
        flashed); the text is now persisted and drawn on every frame while
        being recomputed every 30 frames.
      * On-frame overlay strings are ASCII because cv2.putText's Hershey
        fonts cannot render CJK glyphs (they appeared as '?').
    """
    parser = argparse.ArgumentParser(description='Jetson Nano摄像头推理')
    parser.add_argument('--model', type=str,
                       default='/home/wuyin/ros_ws/src/yolov5-ukf-imm/weights/yolo11n.engine',
                       help='模型文件路径 (.onnx, .engine, .pb等)')
    parser.add_argument('--camera', type=int, default=0, help='摄像头ID')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='置信度阈值')
    parser.add_argument('--nms-thres', type=float, default=0.45, help='NMS阈值')
    parser.add_argument('--input-size', type=int, default=640, help='输入尺寸')
    parser.add_argument('--fps-limit', type=int, default=30, help='FPS限制')

    args = parser.parse_args()

    print("🚀 Jetson Nano摄像头推理系统")
    print(f"📂 模型文件: {args.model}")
    print(f"📷 摄像头ID: {args.camera}")
    print(f"🎯 置信度阈值: {args.conf_thres}")
    print(f"🖼️  输入尺寸: {args.input_size}x{args.input_size}")

    # Bail out early if the model file is missing.
    if not os.path.exists(args.model):
        print(f"❌ 模型文件不存在: {args.model}")
        return

    # TensorRT engines cannot be loaded by OpenCV DNN: explain the options
    # and drop a helper conversion script instead of continuing.
    if args.model.endswith('.engine'):
        print("⚠️  检测到TensorRT引擎文件")
        print("❌ OpenCV DNN不直接支持TensorRT引擎")
        print("\n💡 解决方案:")
        print("1. 使用ONNX格式模型 (推荐)")
        print("2. 运行环境配置脚本: bash setup_tensorrt_env.sh")
        print("3. 或使用高级推理脚本: python3 camera_detect_advanced.py")

        create_onnx_conversion_script()
        return

    try:
        # Build the inference engine; the model is loaded in __init__.
        print("⏳ 初始化推理引擎...")
        inference = JetsonCameraInference(
            args.model,
            input_size=(args.input_size, args.input_size),
            conf_threshold=args.conf_thres,
            nms_threshold=args.nms_thres
        )
        print("✅ 推理引擎初始化成功")

    except Exception as e:
        print(f"❌ 推理引擎初始化失败: {e}")
        return

    # Open the camera device.
    print(f"📷 打开摄像头 {args.camera}...")
    cap = cv2.VideoCapture(args.camera)

    if not cap.isOpened():
        print(f"❌ 无法打开摄像头 {args.camera}")
        return

    # Camera configuration; BUFFERSIZE=1 keeps latency low by dropping
    # stale frames instead of queueing them.
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
    cap.set(cv2.CAP_PROP_FPS, args.fps_limit)
    cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)

    # Read back what the driver actually accepted.
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)

    print(f"📸 摄像头配置: {width}x{height} @ {fps}FPS")
    print("\n🎮 控制说明:")
    print("  - 按 'q' 键退出")
    print("  - 按 's' 键截图")
    print("  - 按 'f' 键切换FPS显示")
    print("  - 按 'i' 键显示信息")
    print("🎉 开始实时推理!\n")

    # Performance counters.
    frame_count = 0
    start_time = time.time()
    show_fps = True
    inference_times = []
    # Persisted overlay text so it is drawn every frame, not just on the
    # frames where it is recomputed.
    fps_text = ""

    try:
        while True:
            # Grab the next frame; a failed read ends the loop.
            ret, frame = cap.read()
            if not ret:
                print("❌ 无法读取摄像头帧")
                break

            frame_count += 1

            # Inference, timed in wall-clock milliseconds.
            inference_start = time.time()
            detections = inference.predict(frame)
            inference_time = (time.time() - inference_start) * 1000
            inference_times.append(inference_time)

            # Annotate the frame with boxes and labels.
            frame = inference.draw_detections(frame, detections)

            # Detection-count overlay (ASCII: Hershey fonts cannot draw CJK).
            info_text = f"Detections: {len(detections)}"
            cv2.putText(frame, info_text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

            # FPS overlay: recompute every 30 frames, draw every frame.
            if show_fps:
                if frame_count % 30 == 0:
                    elapsed = time.time() - start_time
                    running_fps = frame_count / elapsed if elapsed > 0 else 0
                    avg_inference = np.mean(inference_times[-30:]) if inference_times else 0
                    fps_text = f"FPS: {running_fps:.1f} | Inference: {avg_inference:.1f}ms"
                if fps_text:
                    cv2.putText(frame, fps_text, (10, height - 20),
                               cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2)

            # Show the annotated frame.
            cv2.imshow('Jetson Camera Inference', frame)

            # Keyboard handling (1 ms poll).
            key = cv2.waitKey(1) & 0xFF
            if key == ord('q'):
                print("👋 用户退出")
                break
            elif key == ord('s'):
                screenshot_name = f"jetson_inference_{int(time.time())}.jpg"
                cv2.imwrite(screenshot_name, frame)
                print(f"📸 截图保存: {screenshot_name}")
            elif key == ord('f'):
                show_fps = not show_fps
                print(f"🔄 FPS显示: {'开启' if show_fps else '关闭'}")
            elif key == ord('i'):
                # Print cumulative performance statistics to the console.
                total_time = time.time() - start_time
                avg_fps = frame_count / total_time if total_time > 0 else 0
                avg_inference = np.mean(inference_times) if inference_times else 0

                print(f"\n📊 性能统计:")
                print(f"运行时间: {total_time:.1f}s")
                print(f"处理帧数: {frame_count}")
                print(f"平均FPS: {avg_fps:.2f}")
                print(f"平均推理时间: {avg_inference:.2f}ms")
                print(f"检测到目标: {len(detections)}")
                print("==================\n")

    except KeyboardInterrupt:
        print("\n⏹️  收到中断信号")

    except Exception as e:
        print(f"\n❌ 运行错误: {e}")

    finally:
        # Final statistics and resource cleanup — always runs.
        total_time = time.time() - start_time
        avg_fps = frame_count / total_time if total_time > 0 else 0
        avg_inference = np.mean(inference_times) if inference_times else 0

        print(f"\n📊 最终统计:")
        print(f"总运行时间: {total_time:.1f}s")
        print(f"处理帧数: {frame_count}")
        print(f"平均FPS: {avg_fps:.2f}")
        print(f"平均推理时间: {avg_inference:.2f}ms")

        cap.release()
        cv2.destroyAllWindows()
        print("🎉 程序结束")

# Standard script entry-point guard: run the camera loop only when executed
# directly, not when imported as a module.
if __name__ == "__main__":
    main()