# -*- coding: utf-8 -*-
import os
import cv2
import numpy as np
from rknn.api import RKNN

# ==============================================================================
# --- User configuration ---
# ==============================================================================
# Mode selection:
# 'perf'            -> performance & memory evaluation (doc items c, d); enables detailed per-layer profiling
# 'accuracy'        -> accuracy analysis (compares RKNN vs. ONNX model outputs, doc item b)
# 'inference'       -> single-image inference (detect on one image and visualize the result, doc item a)
# 'batch_inference' -> batch inference (run over several dataset images and save the results)
MODE = 'perf'

# --- Path configuration ---
# Path to the RKNN model file
RKNN_MODEL = './yolov8s/best8s.rknn'
# Path to the ONNX model file (only needed in 'accuracy' mode)
ONNX_MODEL = './yolov8s/best8s_split.onnx' # NOTE: this is the split-head ONNX
# Quantization dataset path (needed for both accuracy analysis and model conversion)
QUANTIZATION_DATASET_PATH = './dataset.txt'
# Test image path (only needed in 'inference' mode)
TEST_IMAGE_PATH = './test_image.jpg'
# Output directory for batch-inference results
BATCH_OUTPUT_DIR = './batch_results'
# Number of images to process in batch-inference mode
BATCH_INFERENCE_COUNT = 20

# --- Hardware & model configuration ---
TARGET_PLATFORM = 'rk3576'
DEVICE_ID = None  # keep None for a USB connection; set to the device IP for a network connection
IMG_SIZE = 640
CLASSES = ("hand-raising", "read", "write")
NUM_CLASSES = len(CLASSES)

# --- Inference post-processing parameters ---
CONF_THRESHOLD = 0.3
NMS_THRESHOLD = 0.45
# ==============================================================================
# --- End of configuration ---
# ==============================================================================


# --- Helper functions ---

def letterbox(im, new_shape=(640, 640), color=(114, 114, 114)):
    """Resize an image preserving aspect ratio and pad it to new_shape.

    Args:
        im: input image array (H, W, C).
        new_shape: target (height, width), or a single int for a square.
        color: BGR/RGB fill value used for the padded border.

    Returns:
        Tuple of (padded image, (scale, scale), (pad_w, pad_h)) where
        pad_w / pad_h are the per-side padding amounts in pixels.
    """
    src_h, src_w = im.shape[:2]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)
    dst_h, dst_w = new_shape

    # Uniform scale factor that fits the source inside the destination.
    scale = min(dst_h / src_h, dst_w / src_w)
    scaled_w = int(round(src_w * scale))
    scaled_h = int(round(src_h * scale))

    # Remaining space, split evenly between the two sides of each axis.
    pad_w = (dst_w - scaled_w) / 2
    pad_h = (dst_h - scaled_h) / 2

    if (src_w, src_h) != (scaled_w, scaled_h):
        im = cv2.resize(im, (scaled_w, scaled_h), interpolation=cv2.INTER_LINEAR)

    # The +/-0.1 rounding trick distributes an odd pixel of padding.
    top = int(round(pad_h - 0.1))
    bottom = int(round(pad_h + 0.1))
    left = int(round(pad_w - 0.1))
    right = int(round(pad_w + 0.1))
    im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
    return im, (scale, scale), (pad_w, pad_h)

def sigmoid(x):
    """Numerically stable sigmoid activation.

    The naive form 1 / (1 + exp(-x)) overflows (RuntimeWarning, inf) for
    large negative x. This version only ever exponentiates non-positive
    values, so it is safe over the whole float range.

    Args:
        x: scalar or array-like of floats.

    Returns:
        np.ndarray of the same shape with values in (0, 1).
    """
    x_arr = np.asarray(x, dtype=np.float64)
    z = np.exp(-np.abs(x_arr))  # always <= 1, never overflows
    # For x >= 0: 1 / (1 + e^-x); for x < 0 the equivalent e^x / (1 + e^x).
    return np.where(x_arr >= 0, 1.0 / (1.0 + z), z / (1.0 + z))


def postprocess_yolov8_fixed(outputs, img_raw, ratio, dwdh):
    """Decode, filter, and draw detections from the split-head YOLOv8 outputs.

    Dynamically identifies which of the two output tensors holds the boxes
    and which holds the class scores, converts xywh -> xyxy, applies a
    confidence threshold and class-aware NMS, maps boxes back onto the
    original image, and draws them in place on img_raw.

    Args:
        outputs: list of exactly two NPU output tensors. The box branch is
            recognized by shape[1] == 4; layout is assumed to be
            (1, channels, num_predictions) — TODO confirm against the model.
        img_raw: original BGR image; drawn on in place.
        ratio: (r, r) uniform scale factors returned by letterbox().
        dwdh: (dw, dh) per-side letterbox padding in pixels.

    Returns:
        img_raw with the final detections drawn (unchanged when nothing
        passes the thresholds).
    """
    if len(outputs) != 2:
        print(f"!!! 错误: 后处理函数期望2个输出张量, 但收到了 {len(outputs)} 个。请确保您使用的是由新的转换脚本生成的模型。")
        return img_raw

    # (dw, dh) -> (dw, dh, dw, dh) so it can be subtracted from xyxy boxes.
    dwdh = np.array(dwdh * 2, dtype=np.float32)

    # --- Dynamically identify box vs. class tensors (box branch has 4 channels) ---
    box_tensor, cls_tensor = (outputs[0], outputs[1]) if outputs[0].shape[1] == 4 else (outputs[1], outputs[0])
    
    print(f"--- [调试日志] 已识别 Box tensor shape: {box_tensor.shape}, max value: {np.max(box_tensor)}")
    print(f"--- [调试日志] 已识别 Class tensor shape: {cls_tensor.shape}, max value: {np.max(cls_tensor)}")
    
    # Transpose (1, channels, N) -> (N, channels) and drop the batch dim.
    boxes_xywh = box_tensor.transpose((0, 2, 1))[0]
    scores = cls_tensor.transpose((0, 2, 1))[0]

    # NOTE: the split model already emits probabilities, so the previously
    # redundant sigmoid on `scores` was deliberately removed.
    
    # Best class id and its probability for every prediction.
    class_ids = np.argmax(scores, axis=1)
    conf_scores = np.max(scores, axis=1)

    print(f"--- [调试日志] 解析后的最大置信度分数为: {np.max(conf_scores):.4f}")

    # Coordinate conversion: (center_x, center_y, width, height) -> (x1, y1, x2, y2)
    cx, cy, w, h = boxes_xywh[:, 0], boxes_xywh[:, 1], boxes_xywh[:, 2], boxes_xywh[:, 3]
    x1 = cx - w / 2
    y1 = cy - h / 2
    x2 = cx + w / 2
    y2 = cy + h / 2
    boxes_xyxy = np.stack((x1, y1, x2, y2), axis=1)

    # First filter: drop every box whose score is below the confidence threshold.
    mask = conf_scores >= CONF_THRESHOLD
    if not np.any(mask):
        print("--> 根据置信度阈值未检测到目标。")
        return img_raw
        
    boxes_xyxy = boxes_xyxy[mask]
    conf_scores = conf_scores[mask]
    class_ids = class_ids[mask]

    print(f"--- [后处理日志] 应用置信度阈值后，剩下 {len(boxes_xyxy)} 个候选框。")

    # Second filter: standard class-aware NMS. NMSBoxesBatched expects boxes
    # as (x, y, w, h), hence the conversion back from xyxy.
    boxes_for_nms = np.stack([boxes_xyxy[:, 0], boxes_xyxy[:, 1], boxes_xyxy[:, 2] - boxes_xyxy[:, 0], boxes_xyxy[:, 3] - boxes_xyxy[:, 1]], axis=1)
    
    print(f"--> 正在对 {len(boxes_for_nms)} 个框执行 NMS...")
    indices = cv2.dnn.NMSBoxesBatched(boxes_for_nms.tolist(), conf_scores.tolist(), class_ids.tolist(), CONF_THRESHOLD, NMS_THRESHOLD)

    if len(indices) == 0:
        print("--> NMS后未检测到目标。")
        return img_raw
        
    # Keep only the detections that survived NMS.
    final_boxes = boxes_xyxy[indices]
    final_scores = conf_scores[indices]
    final_class_ids = class_ids[indices]

    # Undo the letterbox transform: remove padding, then rescale to the
    # original image resolution.
    final_boxes -= dwdh
    final_boxes /= ratio[0]

    # Draw the final results.
    print(f"--> NMS后最终检测到 {len(final_boxes)} 个目标。")
    for i in range(len(final_boxes)):
        box = final_boxes[i]
        x1, y1, x2, y2 = [int(coord) for coord in box]
        class_id = final_class_ids[i]
        score = final_scores[i]

        # Clamp coordinates into the image and skip degenerate boxes.
        x1, y1 = max(0, x1), max(0, y1)
        x2, y2 = min(img_raw.shape[1], x2), min(img_raw.shape[0], y2)
        
        if x1 >= x2 or y1 >= y2:
            print(f"  -> 过滤掉无效框: ({x1}, {y1}, {x2}, {y2})")
            continue

        # Log each detection in detail.
        print(f"  -> {CLASSES[class_id]} @ ({x1}, {y1}, {x2}, {y2}) score={score:.2f}")

        # Draw the box and its label.
        color = (0, 255, 0)
        cv2.rectangle(img_raw, (x1, y1), (x2, y2), color, 2)
        label = f"{CLASSES[class_id]}: {score:.2f}"
        
        # Keep the label text from being drawn past the top edge of the image.
        label_y = y1 - 10 if y1 - 10 > 10 else y1 + 20
        cv2.putText(img_raw, label, (x1, label_y), cv2.FONT_HERSHEY_SIMPLEX, 0.8, color, 2)

    return img_raw


def run_accuracy_analysis():
    """Run the quantization accuracy analysis (RKNN vs. ONNX per-layer diff).

    Builds a quantized model from ONNX_MODEL using QUANTIZATION_DATASET_PATH,
    then calls accuracy_analysis(), which writes its report to the
    'snapshots' directory.

    Fixes over the previous version:
      * returns on failure instead of calling exit(), so the function is
        safe to call from other code;
      * releases the RKNN handle on every path via try/finally;
      * skips blank lines when reading the dataset file;
      * checks the accuracy_analysis() return code.
    """
    print("\n--> 开始精度评估 (b)...")
    # Fail fast when the required input files are missing.
    if not os.path.exists(ONNX_MODEL) or not os.path.exists(QUANTIZATION_DATASET_PATH):
        print("错误: 'accuracy' 模式需要 ONNX_MODEL 和 QUANTIZATION_DATASET_PATH 文件。")
        return

    rknn = RKNN(verbose=True)
    try:
        # 1. Configure preprocessing: normalize pixel values to [0, 1].
        print("--- 1. 配置 RKNN Toolkit...")
        rknn.config(
            mean_values=[[0, 0, 0]],
            std_values=[[255, 255, 255]],
            target_platform=TARGET_PLATFORM
        )

        # 2. Load the ONNX model.
        print("--- 2. 加载 ONNX 模型...")
        ret = rknn.load_onnx(model=ONNX_MODEL)
        if ret != 0:
            print("!!! 加载 ONNX 模型失败!")
            return

        # 3. Build a quantized model (required for accuracy analysis).
        print("--- 3. 构建模型...")
        ret = rknn.build(do_quantization=True, dataset=QUANTIZATION_DATASET_PATH)
        if ret != 0:
            print("!!! 构建模型失败!")
            return

        # 4. Read the dataset file as a list of image paths, skipping blank lines.
        with open(QUANTIZATION_DATASET_PATH, 'r') as f:
            image_paths = [line.strip() for line in f if line.strip()]

        # 5. Run the per-layer accuracy comparison on the target device
        #    (accuracy_analysis handles the device connection itself).
        print(f"--- 4. 正在使用 {len(image_paths)} 张图片进行精度对比...")
        ret = rknn.accuracy_analysis(inputs=image_paths, target=TARGET_PLATFORM, device_id=DEVICE_ID)
        if ret != 0:
            print("!!! 精度分析失败!")
            return

        print("精度分析报告已生成在 'snapshots' 目录。")
    finally:
        # Always release toolkit resources, even on an early return.
        rknn.release()


# --- Main entry point ---
if __name__ == '__main__':

    # Accuracy-analysis mode has a fully separate flow (builds from ONNX).
    if MODE == 'accuracy':
        run_accuracy_analysis()
        exit()

    # --- Shared flow for the 'perf', 'inference', and 'batch_inference' modes ---
    rknn = RKNN(verbose=False)

    print(f"正在加载 RKNN 模型: {RKNN_MODEL}...")
    if not os.path.exists(RKNN_MODEL):
        print("错误: RKNN 模型文件不存在!"); exit()
    ret = rknn.load_rknn(RKNN_MODEL)
    if ret != 0:
        print("加载 RKNN 模型失败!"); rknn.release(); exit()
    print("加载成功。")

    # 'perf' mode enables detailed per-layer profiling in the runtime.
    is_perf_debug = (MODE == 'perf')
    if is_perf_debug:
        print(f"\n--> 正在连接设备 (ID: {DEVICE_ID or '默认USB设备'})... (性能评估模式，已启用详细调试)")
    else:
        print(f"\n--> 正在连接设备 (ID: {DEVICE_ID or '默认USB设备'})...")

    ret = rknn.init_runtime(target=TARGET_PLATFORM,
                            device_id=DEVICE_ID,
                            perf_debug=is_perf_debug,
                            core_mask=RKNN.NPU_CORE_ALL)
    if ret != 0:
        print("初始化运行时环境失败! 请检查USB连接和'rknn_server'服务。"); rknn.release(); exit()
    print("设备连接成功。")

    if MODE == 'perf':
        # c. Performance evaluation (on-device timing).
        print("\n--> 开始性能评估 (c)...")
        rknn.eval_perf(is_print=True)
        # d. Memory evaluation.
        print("\n--> 开始内存评估 (d)...")
        rknn.eval_memory(is_print=True)

    elif MODE == 'inference':
        # a. On-device debugging (single-image inference).
        print("\n--> 开始单图推理验证 (a)...")
        if not os.path.exists(TEST_IMAGE_PATH):
            print(f"错误: 测试图片不存在于 '{TEST_IMAGE_PATH}'"); rknn.release(); exit()

        img_raw = cv2.imread(TEST_IMAGE_PATH)
        if img_raw is None:
            print("错误: 无法读取图片。"); rknn.release(); exit()
        
        # Draw on a copy so the original pixel data stays untouched.
        img_to_draw = img_raw.copy()

        img_rgb = cv2.cvtColor(img_raw, cv2.COLOR_BGR2RGB)
        img_resized, ratio, dwdh = letterbox(img_rgb, new_shape=(IMG_SIZE, IMG_SIZE))

        print("--> 正在推理...")
        # Explicitly pass nhwc input format, matching what letterbox produces.
        outputs = rknn.inference(inputs=[img_resized], data_format='nhwc')

        # Verbose logging: dump the raw output tensors' stats.
        print("--- [调试日志] NPU 输出信息 ---")
        for i, out in enumerate(outputs):
            print(f"  - Output {i}: shape={out.shape}, dtype={out.dtype}, min={np.min(out):.4f}, max={np.max(out):.4f}")

        print("\n--> 正在后处理...")
        # Run the corrected post-processing (split box/class outputs).
        result_img = postprocess_yolov8_fixed(outputs, img_to_draw, ratio, dwdh)

        output_path = "result.jpg"
        cv2.imwrite(output_path, result_img)
        print(f"\n推理完成！结果已保存到 '{output_path}'")

    elif MODE == 'batch_inference':
      # Run the same pipeline over the first BATCH_INFERENCE_COUNT dataset images.
      print(f"\n--> 开始批量推理 ({BATCH_INFERENCE_COUNT} 张图片)...")
      if not os.path.exists(QUANTIZATION_DATASET_PATH):
          print(f"错误: 数据集文件不存在于 '{QUANTIZATION_DATASET_PATH}'"); rknn.release(); exit()

      if not os.path.exists(BATCH_OUTPUT_DIR):
          os.makedirs(BATCH_OUTPUT_DIR)
          print(f"创建输出目录: {BATCH_OUTPUT_DIR}")

      with open(QUANTIZATION_DATASET_PATH, 'r') as f:
          image_paths = [line.strip() for line in f.readlines()]

      for i, img_path in enumerate(image_paths[:BATCH_INFERENCE_COUNT]):
          print(f"\n--- 正在处理第 {i+1}/{BATCH_INFERENCE_COUNT} 张图片: {img_path} ---")
          img_raw = cv2.imread(img_path)
          if img_raw is None:
              print(f"警告: 无法读取图片 {img_path}, 跳过。")
              continue
          
          # Preprocess -> inference -> postprocess, same as single-image mode.
          img_to_draw = img_raw.copy()
          img_rgb = cv2.cvtColor(img_raw, cv2.COLOR_BGR2RGB)
          img_resized, ratio, dwdh = letterbox(img_rgb, new_shape=(IMG_SIZE, IMG_SIZE))

          outputs = rknn.inference(inputs=[img_resized], data_format='nhwc')

          result_img = postprocess_yolov8_fixed(outputs, img_to_draw, ratio, dwdh)

          output_filename = f"result_{os.path.basename(img_path)}"
          output_path = os.path.join(BATCH_OUTPUT_DIR, output_filename)
          cv2.imwrite(output_path, result_img)
          print(f"结果已保存到 '{output_path}'")
      print("\n批量推理完成！")

    else:
        print(f"错误: 无效的 MODE '{MODE}'。请选择 'perf', 'accuracy', 'inference'或 'batch_inference'。")

    rknn.release()


