#-*-coding:utf-8-*-
# date:2021-03-16
# Author: Eric.Lee
# function: yolo v5 video inference

import warnings
warnings.filterwarnings("ignore")
import argparse
from utils.datasets import *
from utils.utils import *
import torch
import numpy as np
import os
import time
from tqdm import tqdm
from tqdm import trange

from utils.wiky_datasets_ver3 import *


def F1_Score(precision, recall, beta=1.0):
    """Return the F-beta score of *precision* and *recall*.

    beta > 1 weighs recall higher, beta < 1 weighs precision higher
    (beta = 1 gives the usual F1). A small epsilon in the denominator
    avoids division by zero when both inputs are 0.
    """
    beta_sq = beta ** 2
    numerator = (1 + beta_sq) * precision * recall
    denominator = beta_sq * precision + recall + 1e-8
    return numerator / denominator

def detect(data_path=None, distance=None, save_path=None):
    """Run two independent YOLOv5 models (rgb + event) over a test dataset,
    fuse their single best detections per frame, and write per-frame results
    plus precision/recall/F1 and confusion-matrix statistics to a txt report.

    Args:
        data_path: root directory of the test dataset.
        distance: distance subset tag (e.g. "3m") forwarded to the dataset.
        save_path: output directory for the report txt and optional images.

    Relies on the module-level `opt` namespace (argparse result) and the
    star-imported project utilities (torch_utils, non_max_suppression,
    scale_coords, plot_one_box, cv2, BigHandDetectionDataset_Dual_2Label).
    """
    # Parse configuration options
    source, weights, half, imgsz = \
        opt.source, opt.weights, opt.half, opt.img_size
    # Select inference device
    device = torch_utils.select_device(opt.device)

    # Model loading: this script requires BOTH --weights_rgb and --weights_eve
    model_rgb = None
    model_eve = None
    half = half and device.type != 'cpu'  # half precision only supported on CUDA

    if getattr(opt, 'weights_rgb', None) and getattr(opt, 'weights_eve', None):
        # Load the two independent models: rgb and event
        print(f"Loading rgb model: {opt.weights_rgb}")
        model_rgb = torch.load(opt.weights_rgb, map_location=device)['model']
        model_rgb.to(device).eval()
        if half:
            model_rgb.half()

        print(f"Loading event model: {opt.weights_eve}")
        model_eve = torch.load(opt.weights_eve, map_location=device)['model']
        model_eve.to(device).eval()
        if half:
            model_eve.half()

        # FIX: the fallback was `model_rgb.modules.names` — `modules` is a bound
        # method, so that attribute access always raised. The DataParallel
        # convention is `.module.names`.
        names = model_rgb.names if hasattr(model_rgb, 'names') else model_rgb.module.names
    else:
        # FIX: original message was garbled ("Please two dual independent models")
        raise ValueError("Please provide two dual independent models (rgb & event)!!!")

    data, info = opt.data, opt.info

    # Run inference
    img = torch.zeros((1, 3, imgsz[0], imgsz[1]), device=device)  # init img

    # Warm up both models once. (The single-model warm-up branch of the
    # original was unreachable: the dual-weights check above always raises
    # when only one model is configured.)
    _ = model_rgb(img.half() if half else img.float()) if device.type != 'cpu' else None
    _ = model_eve(img.half() if half else img.float()) if device.type != 'cpu' else None
    print(" You choose dual independent models (rgb & event).\n")

    if info == "test":
        dataset = BigHandDetectionDataset_Dual_2Label(path=data_path,
                                                      is_train=False,
                                                      event_subdir=opt.event_subdir,
                                                      event_needed=opt.eve_bin_needed,
                                                      train_step=1,
                                                      distance=distance)
    else:
        raise ValueError("Please select test !")

    if not os.path.exists(save_path):
        os.makedirs(save_path, 0o777, True)

    # Open the txt report for writing detection boxes and statistics
    result_txt_path = os.path.join(save_path, f"detect_results_{distance}.txt")
    result_txt = open(result_txt_path, "w")

    # Overall statistics
    tp_num = 0
    fp_num = 0
    fn_num = 0
    center_errors = []

    # Widths of the accepted prediction boxes
    box_widths = []

    from collections import defaultdict
    # Per-class statistics
    per_class_tp = defaultdict(int)
    per_class_fp = defaultdict(int)
    per_class_fn = defaultdict(int)
    per_class_center_errors = defaultdict(list)

    # Confusion matrix: (num_classes + 1) x (num_classes + 1);
    # the last row/column is "no_pred" (unmatched prediction / unmatched GT)
    num_classes = len(names)
    bg_idx = num_classes
    confusion_mat = np.zeros((num_classes + 1, num_classes + 1), dtype=int)

    # Dual-model fusion statistics (valid = both GT sides present)
    valid_batch_count = 0
    valid_center_errors = []
    valid_ious = []

    count = 0
    valid_count = 0
    no_2_target_count = 0
    iou_unvalid_count = 0

    total_time = 0

    average_rgb_confidences = 0
    average_eve_confidences = 0

    for batch in tqdm(dataset):

        count += 1

        img, eve, labels, eve_labels, name = batch

        date = name.split("/")[-3]
        name = name.split("/")[-1].split(".")[0]

        if info == "test":

            if opt.event_subdir == "event_frame":
                # Remap raw event-frame values: 255 -> 200 (on events),
                # 0 -> 100 (off events), everything else -> 0 (background)
                eve = eve.float().to(device)
                eve = torch.where(eve == 255, torch.tensor(200.0).to(device), eve)
                eve = torch.where(eve == 0, torch.tensor(100.0).to(device), eve)
                eve = torch.where((eve != 200) & (eve != 100), torch.tensor(0.0).to(device), eve)
                eve = eve.cpu()

            elif opt.event_subdir[-10:] != "_count_raw":  # e.g. "event_img_60fps": use as-is
                pass

            elif opt.event_subdir[-10:] == "_count_raw":  # e.g. "event_img_60fps_count_raw"
                # Threshold the event-count image around the 128 neutral level
                C = opt.event_threshold
                eve = eve.float().to(device)
                eve = torch.where(eve >= 128 + C, torch.tensor(200.0).to(device), eve)
                eve = torch.where(eve <= 128 - C, torch.tensor(100.0).to(device), eve)
                eve = torch.where((eve != 200) & (eve != 100), torch.tensor(0.0).to(device), eve)
                eve = eve.cpu()

            else:
                raise Exception("event_subdir error!")

        else:
            # FIX: original message had a typo ("usetest")
            raise Exception("you should use test mode!")

        # Keep an original image for visualising the test results
        if data in ["image", "fusion", "decision_fusion"]:
            img0 = img.numpy()
        else:
            img0 = eve[-3:, :, :].numpy()
        img0 = img0.transpose(1, 2, 0)  # CHW to HWC
        img0 = img0[:, :, ::-1]  # RGB to BGR

        # labels_out = torch.zeros((nL, 6)) (0,class,x,y,w,h)
        # Restore normalized labels to absolute pixel coordinates
        def label_restore(labels):
            if len(labels):
                w = img.shape[-1]
                h = img.shape[-2]
                labels[:, 2] = labels[:, 2] * w
                labels[:, 3] = labels[:, 3] * h
                labels[:, 4] = labels[:, 4] * w
                labels[:, 5] = labels[:, 5] * h

                # convert (cx,cy,w,h) to (x1,y1,x2,y2)
                labels_out = torch.zeros((len(labels), 6))
                labels_out[:, 0] = labels[:, 0]
                labels_out[:, 1] = labels[:, 1]
                labels_out[:, 2] = labels[:, 2] - labels[:, 4] / 2
                labels_out[:, 3] = labels[:, 3] - labels[:, 5] / 2
                labels_out[:, 4] = labels[:, 2] + labels[:, 4] / 2
                labels_out[:, 5] = labels[:, 3] + labels[:, 5] / 2
            else:
                labels_out = labels
            return labels_out

        img_labels_out = label_restore(labels)
        eve_labels_out = label_restore(eve_labels)

        # Both modalities must carry GT targets for a valid sample; otherwise skip
        if len(img_labels_out) == 0 or len(eve_labels_out) == 0:
            continue

        valid_count += 1

        gt_boxes_1 = img_labels_out[:, 2:6].cpu().numpy()
        gt_boxes_2 = eve_labels_out[:, 2:6].cpu().numpy()

        # Average the corresponding coordinates of the two GT sets
        gt_boxes = (gt_boxes_1 + gt_boxes_2) / 2

        gt_classes = img_labels_out[:, 1].cpu().numpy()

        pred_boxes = []
        pred_classes = []

        img = img.to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)

        eve = eve.to(device)
        eve = eve.half() if half else eve.float()  # uint8 to fp16/32
        eve /= 255.0  # 0 - 255 to 0.0 - 1.0
        if eve.ndimension() == 3:
            eve = eve.unsqueeze(0)

        # Model inference: run model_rgb on img and model_eve on eve,
        # then fuse the outputs experimentally
        t1 = torch_utils.time_synchronized()
        pred_rgb_raw = model_rgb(img)[0]
        pred_eve_raw = model_eve(eve)[0]
        t2 = torch_utils.time_synchronized()
        total_time += (t2 - t1)

        # NMS separately per model
        pred_rgb = non_max_suppression(pred_rgb_raw, opt.conf_thres, opt.iou_thres,
                                       fast=True, classes=opt.classes, agnostic=opt.agnostic_nms)
        pred_eve = non_max_suppression(pred_eve_raw, opt.conf_thres, opt.iou_thres,
                                       fast=True, classes=opt.classes, agnostic=opt.agnostic_nms)

        # helper iou
        def compute_iou(boxA, boxB):
            # boxes as [x1,y1,x2,y2]
            xa1, ya1, xa2, ya2 = boxA
            xb1, yb1, xb2, yb2 = boxB
            xi1 = max(xa1, xb1)
            yi1 = max(ya1, yb1)
            xi2 = min(xa2, xb2)
            yi2 = min(ya2, yb2)
            inter_w = max(0.0, xi2 - xi1)
            inter_h = max(0.0, yi2 - yi1)
            inter = inter_w * inter_h
            areaA = max(0.0, xa2 - xa1) * max(0.0, ya2 - ya1)
            areaB = max(0.0, xb2 - xb1) * max(0.0, yb2 - yb1)
            union = areaA + areaB - inter + 1e-8
            return inter / union

        # get first (and only) element per batch
        det_rgb = pred_rgb[0] if pred_rgb is not None else None
        det_eve = pred_eve[0] if pred_eve is not None else None

        im0 = img0.copy()

        # Scale detections back to im0 and keep only the highest-confidence one
        def extract_top(det_tensor, src_shape):
            if det_tensor is None or len(det_tensor) == 0:
                return None
            det = det_tensor.clone()
            det[:, :4] = scale_coords(src_shape, det[:, :4], im0.shape).round()
            det_np = det.cpu().numpy()
            # choose the detection with max confidence
            best_idx = int(np.argmax(det_np[:, 4]))
            x1, y1, x2, y2, conf, cls = det_np[best_idx]
            return ([float(x1), float(y1), float(x2), float(y2)], float(conf), int(cls))

        out_rgb = extract_top(det_rgb, img.shape[2:])
        out_eve = extract_top(det_eve, eve.shape[2:])

        # Format one extract_top() result for the txt report.
        def format_out(out):
            # FIX: check for None BEFORE indexing — the original indexed
            # out[0] first, so its trailing `if out is not None` never fired
            # and a None input would raise TypeError.
            if out is None:
                return None
            x1, y1, x2, y2 = int(out[0][0]), int(out[0][1]), int(out[0][2]), int(out[0][3])
            p = round(out[1], 3)  # confidence rounded to 3 decimals
            return (x1, y1, x2, y2, p, out[2])

        # Shared bookkeeping for an accepted (fused or single-side) prediction:
        # width/IoU/center-error stats, visualisation, optional image dump.
        def record_prediction(fused_box, fused_class, fused_score, fused_gt, tag):
            box_widths.append(abs(int(fused_box[0]) - int(fused_box[2])))  # absolute width
            pred_boxes.append(fused_box)

            fused_iou = compute_iou(fused_box, fused_gt)
            gx = (fused_gt[0] + fused_gt[2]) / 2.0
            gy = (fused_gt[1] + fused_gt[3]) / 2.0
            px = (fused_box[0] + fused_box[2]) / 2.0
            py = (fused_box[1] + fused_box[3]) / 2.0
            center_err = float(np.sqrt((gx - px) ** 2 + (gy - py) ** 2))

            valid_center_errors.append(center_err)
            valid_ious.append(fused_iou)
            plot_one_box(fused_box, im0, label=f'{tag} cls:{fused_class} {fused_score:.2f}', color=(0, 255, 0), line_thickness=3)
            plot_one_box(fused_gt, im0, label="gt:" + str(int(gt_classes[0])), color=(0, 0, 255), line_thickness=3, label_position='bottom_right')

            # Optionally save every 10th visualisation
            if opt.save_img and count % 10 == 0:
                no_watermark_dir = os.path.join(save_path, f"no_watermark_{distance}")
                if not os.path.exists(no_watermark_dir):
                    os.makedirs(no_watermark_dir, 0o777, True)
                frame_name = f"{date}-{name}_id_{count:04d}.jpg"
                cv2.imwrite(os.path.join(no_watermark_dir, frame_name), im0)

            pred_classes.append(fused_class)

        # Only proceed to fusion when both networks produced an output
        if out_rgb is None or out_eve is None:
            # optionally skip samples without two detections
            if opt.no_2_target_skip:
                no_2_target_count += 1
                continue

        if out_rgb is not None and out_eve is not None:

            box_rgb, score_rgb, cls_rgb = out_rgb
            box_eve, score_eve, cls_eve = out_eve

            average_eve_confidences += score_eve
            average_rgb_confidences += score_rgb
            # Same target iff the two predicted boxes overlap above fusion_iou
            iou_pred = compute_iou(box_rgb, box_eve)

            result_txt.write(f"{date}-{name}: rgb={format_out(out_rgb)}, eve={format_out(out_eve)},IOU= {iou_pred:0.3f}")

            if iou_pred >= opt.fusion_iou:
                # Same target: fuse. Class comes from the higher-confidence model
                if score_rgb >= score_eve:
                    fused_class = cls_rgb
                else:
                    fused_class = cls_eve

                # Box is the confidence-weighted average of the two boxes
                sum_scores = score_rgb + score_eve + 1e-8
                fused_box = [(box_rgb[i] * score_rgb + box_eve[i] * score_eve) / sum_scores for i in range(4)]
                fused_score = max(score_rgb, score_eve)

                # GT fusion (equal weights -> the averaged gt_boxes)
                fused_gt = gt_boxes[0].tolist()

                valid_batch_count += 1
                record_prediction(fused_box, fused_class, fused_score, fused_gt, 'fused')
                result_txt.write(f" OUT: {fused_class}\n")
            else:
                iou_unvalid_count += 1
                result_txt.write("\n")

        elif out_rgb is not None and out_eve is None:

            box_rgb, score_rgb, cls_rgb = out_rgb
            average_rgb_confidences += score_rgb

            result_txt.write(f"{date}-{name}: rgb={format_out(out_rgb)},")

            fused_class = cls_rgb
            fused_box = [box_rgb[i] for i in range(4)]
            fused_score = score_rgb
            fused_gt = gt_boxes_1[0].tolist()  # rgb-side GT

            valid_batch_count += 1
            record_prediction(fused_box, fused_class, fused_score, fused_gt, 'img')
            result_txt.write(f" OUT: {fused_class}\n")

        elif out_rgb is None and out_eve is not None:

            box_eve, score_eve, cls_eve = out_eve
            # FIX: the original added score_eve to average_rgb_confidences here,
            # corrupting both averages in the eve-only branch.
            average_eve_confidences += score_eve

            result_txt.write(f"{date}-{name}: eve={format_out(out_eve)},")

            fused_class = cls_eve
            fused_box = [box_eve[i] for i in range(4)]
            fused_score = score_eve
            fused_gt = gt_boxes_2[0].tolist()  # event-side GT

            valid_batch_count += 1
            record_prediction(fused_box, fused_class, fused_score, fused_gt, 'eve')
            result_txt.write(f" OUT: {fused_class}\n")

        else:
            iou_unvalid_count += 1

        pred_boxes = np.array(pred_boxes)
        pred_classes = np.array(pred_classes)

        # Matching for the confusion matrix: 'iou' (greedy IoU) is the only
        # implemented mode ('direct' raises NotImplementedError below)
        matched_gt = set()
        matched_pred = set()

        def iou_matrix(boxes1, boxes2):
            m = len(boxes1)
            n = len(boxes2)
            if m == 0 or n == 0:
                return np.zeros((m, n))
            ious = np.zeros((m, n))
            for i in range(m):
                for j in range(n):
                    ious[i, j] = compute_iou(boxes1[i], boxes2[j])
            return ious

        if opt.cm_match_mode == 'iou':
            if len(gt_boxes) and len(pred_boxes):
                ious = iou_matrix(gt_boxes, pred_boxes)
                # Greedy matching: repeatedly take the globally best pair
                # above the IoU threshold, then invalidate its row/column
                while True:
                    if ious.size == 0:
                        break
                    idx = np.unravel_index(np.argmax(ious), ious.shape)
                    i, j = int(idx[0]), int(idx[1])
                    max_iou = ious[i, j]
                    if max_iou < opt.iou_thres:
                        break
                    matched_gt.add(i)
                    matched_pred.add(j)
                    gt_c = int(gt_classes[i])
                    pred_c = int(pred_classes[j])
                    confusion_mat[gt_c, pred_c] += 1
                    tp_num += 1
                    per_class_tp[gt_c] += 1
                    # center error between matched GT and prediction
                    gx = (gt_boxes[i][0] + gt_boxes[i][2]) / 2
                    gy = (gt_boxes[i][1] + gt_boxes[i][3]) / 2
                    px = (pred_boxes[j][0] + pred_boxes[j][2]) / 2
                    py = (pred_boxes[j][1] + pred_boxes[j][3]) / 2
                    error = np.sqrt((gx - px) ** 2 + (gy - py) ** 2)
                    center_errors.append(error)
                    per_class_center_errors[gt_c].append(error)
                    ious[i, :] = -1
                    ious[:, j] = -1
        else:
            raise NotImplementedError

        # Unmatched GT -> FN
        for i in range(len(gt_boxes)):
            if i not in matched_gt:
                gt_c = int(gt_classes[i])
                confusion_mat[gt_c, bg_idx] += 1
                fn_num += 1
                per_class_fn[gt_c] += 1

        # Unmatched predictions -> FP
        for j in range(len(pred_boxes)):
            if j not in matched_pred:
                pred_c = int(pred_classes[j])
                confusion_mat[bg_idx, pred_c] += 1
                fp_num += 1
                per_class_fp[pred_c] += 1

    # Dual-model experiment statistics.
    # FIX: guard the divisions — the original raised ZeroDivisionError when
    # every valid sample lacked a detection from both models.
    pair_denom = valid_count - no_2_target_count
    if pair_denom > 0:
        average_eve_confidences /= pair_denom
        average_rgb_confidences /= pair_denom

    s = f"\n\naverage_rgb_confidences: {average_rgb_confidences:.4f}, average_eve_confidences: {average_eve_confidences:.4f}."
    print(s)
    result_txt.write(s + "\n")

    true_ratio = valid_batch_count / pair_denom if pair_denom > 0 else 0.0
    s = f"\n\nTotal ; Valid =  no2X +  iouX +  True (ps: fusion_iou= {opt.fusion_iou})" + \
        f"\n{count:6d};{valid_count:6d}={no_2_target_count:6d}+{iou_unvalid_count:6d}+{valid_batch_count:6d} " + \
        f"\n\nTrue/(Valid - no2X) = {true_ratio:.4f}\n"

    print(s)
    result_txt.write(s + "\n")

    if valid_batch_count:
        mean_fused_iou = float(np.mean(valid_ious)) if len(valid_ious) else 0
        mean_fused_center = float(np.mean(valid_center_errors)) if len(valid_center_errors) else 0
        s = f"Valid fusion: mean IoU: {mean_fused_iou:.4f}, mean center error: {mean_fused_center:.2f}"
        print(s)
        result_txt.write(s + "\n")

    s = f"Total len :{count}"
    print(s)
    result_txt.write("\n" + s + "\n")

    # FIX: guard against an empty dataset (count == 0)
    avg_time = total_time / count if count else 0.0
    s = f"Average time :{avg_time:.4f}s"
    print(s)
    result_txt.write("\n" + s + "\n")

    result_txt.write("\n\n*******************\n*******************\n")

    # ----- Print and save the confusion matrix (counts + percentages
    # normalized per GT row and per prediction column), aligned with a
    # configurable cell width -----
    cm_labels = [names[i] for i in range(num_classes)] + ['no_pred']

    # Percentages per GT row (rows summing to 0 stay 0).
    # FIX: np.divide with `where=` but no `out=` leaves the skipped entries
    # uninitialized; supply a zero-filled output buffer.
    row_sums = confusion_mat.sum(axis=1, keepdims=True).astype(float)
    percent = np.divide(confusion_mat, row_sums,
                        out=np.zeros_like(confusion_mat, dtype=float),
                        where=(row_sums != 0)) * 100

    # Percentages per prediction column (same fix applied)
    col_sums = confusion_mat.sum(axis=0, keepdims=True).astype(float)
    percent_col = np.divide(confusion_mat, col_sums,
                            out=np.zeros_like(confusion_mat, dtype=float),
                            where=(col_sums != 0)).T * 100

    cell_w = getattr(opt, 'cm_cell_width', 10)

    def fmt_label(s):
        return f"{s:>{cell_w}}"

    header = fmt_label('') + ''.join(fmt_label(lbl) for lbl in cm_labels)
    print('\nConfusion matrix (counts):')
    print(header)
    result_txt.write('\nConfusion matrix (counts):\n')
    result_txt.write(header + '\n')
    for i, lbl in enumerate(cm_labels):
        counts_row = ''.join(f"{int(confusion_mat[i, j]):>{cell_w}}" for j in range(confusion_mat.shape[1]))
        line = f"{fmt_label(lbl)}{counts_row}"
        print(line)
        result_txt.write(line + '\n')

    print('\nConfusion matrix (% per GT row →):')
    print(header)
    result_txt.write('\nConfusion matrix (% per GT row):\n')
    result_txt.write(header + '\n')
    for i, lbl in enumerate(cm_labels):
        perc_row = ''.join(f"{percent[i, j]:>{cell_w-1}.1f}%" for j in range(confusion_mat.shape[1]))
        line = f"{fmt_label(lbl)}{perc_row}"
        print(line)
        result_txt.write(line + '\n')

    print('\nConfusion matrix (% per Pre column ↓):')
    print(header)
    result_txt.write('\nConfusion matrix (% per Pre column):\n')
    result_txt.write(header + '\n')
    for i, lbl in enumerate(cm_labels):
        # percent_col is transposed, so [j, i] reads the column-normalized
        # value back in the original [gt, pred] orientation
        perc_row = ''.join(f"{percent_col[j, i]:>{cell_w-1}.1f}%" for j in range(confusion_mat.shape[1]))
        line = f"{fmt_label(lbl)}{perc_row}"
        print(line)
        result_txt.write(line + '\n')

    # Per-class PRE/REC/F1 derived from the confusion matrix
    tp_total, fp_total, fn_total, f1_s_total = 0, 0, 0, 0
    for cls_id in range(len(confusion_mat) - 1):

        if cls_id == 0:
            result_txt.write("\n\n*******************\n*******************\n")

        tp_ = int(confusion_mat[cls_id, cls_id])
        tp_total += tp_

        # FN: GT of this class with no matching prediction (no_pred column)
        fn_ = int(confusion_mat[cls_id, -1])
        fn_total += fn_
        # FP: everything predicted as this class, minus the true positives
        fp_ = int(confusion_mat[:, cls_id].sum()) - tp_
        fp_total += fp_

        precision_c = tp_ / (tp_ + fp_ + 1e-8)
        recall_c = tp_ / (tp_ + fn_ + 1e-8)
        errors = np.array(per_class_center_errors[cls_id])
        mean_e = errors.mean() if len(errors) else 0
        std_e = errors.std() if len(errors) else 0

        f1_s_ = F1_Score(precision_c, recall_c)
        f1_s_total += f1_s_

        s = f"Class {cls_id}: PRE {precision_c:.4f}, REC {recall_c:.4f}, F1S {f1_s_:.4f} Center error mean {mean_e:.2f}, std {std_e:.2f},(TP:{tp_}, FP:{fp_}, FN:{fn_})"
        print(s)
        result_txt.write(s + "\n")

    result_txt.write("\n\n*******************\n*******************\n")

    # Overall (micro-averaged) metrics
    precision = tp_total / (tp_total + fp_total + 1e-8)
    recall = tp_total / (tp_total + fn_total + 1e-8)

    micro_f1_score = F1_Score(precision, recall)
    center_errors = np.array(center_errors)
    mean_error = center_errors.mean() if len(center_errors) else 0
    std_error = center_errors.std() if len(center_errors) else 0

    s = f"PRE: {precision:.4f}, REC: {recall:.4f} F1_Score_Micro: {micro_f1_score:.4f} (F1_Score_Macro: {f1_s_total/(len(confusion_mat)-1):.4f})"
    print(s)
    result_txt.write(s + "\n")

    s = f"Center error mean: {mean_error:.2f}, std: {std_error:.2f}"
    print(s)
    result_txt.write(s + "\n")

    # Prediction-box width statistics.
    # FIX: guard the empty case — the original crashed on .max()/.min()
    # when no prediction was ever accepted.
    box_width = np.array(box_widths)
    if len(box_width):
        mean_box_width, max_box_width, min_box_width = box_width.mean(), box_width.max(), box_width.min()
    else:
        mean_box_width = max_box_width = min_box_width = 0.0

    s = f"About the pre_boxs: MAX {max_box_width:.0f} MIN {min_box_width:.0f} MEAN {mean_box_width:.2f}"
    print(s)
    result_txt.write(s + "\n")

    result_txt.write("\n\n*******************\n*******************\n")

    # Dump the full option namespace into the report for reproducibility
    for k, v in opt.__dict__.items():
        if k == 'opt':
            print(v)
            result_txt.write('%s\n' % v)
        else:
            print('%s: %s' % (k, v))
            result_txt.write('%s: %s\n' % (k, v))

    result_txt.close()
   

if __name__ == '__main__':

    # ---------------- default configuration ----------------
    weights_rgb = "hand_v5s_1114_2020_img/last.pt"

    weights_eve = "hand_v5s_1114_2022_eve_120fps/last.pt"

    # "event_frame": gray background 0/128/255, needs remapping in detect()
    # "*_count_raw": gray count image, thresholded with --event_threshold
    event_subdir = "event_img_120fps"  # e.g. "event_img_120fps_count_raw"

    fusion_iou = 0.33

    no_2_target_skip = False

    event_needed = -1  # -1 means the last event bin is used

    save_img = False

    event_threshold = 1

    data_path = "/home_ssd/lhc/hand_detect_v3_test2"
    distances = ["3m", "2m", "1p5m"]  # or ["all"]

    def str2bool(v):
        """Parse textual booleans from the CLI.

        FIX: the original used `type=bool`, under which every non-empty
        string — including "False" — parses as True.
        """
        if isinstance(v, bool):
            return v
        return str(v).strip().lower() in ('1', 'true', 't', 'yes', 'y')

    parser = argparse.ArgumentParser()

    parser.add_argument('--no_2_target_skip', type=str2bool, default=no_2_target_skip, help=' True if you need to skip the no-2 target result')
    parser.add_argument('--data_path', type=str, default=data_path, help='the data path you need to process')
    # FIX: `type=list` split a CLI argument into single characters; accept
    # space-separated values instead (e.g. --distances 3m 2m 1p5m)
    parser.add_argument('--distances', nargs='+', type=str, default=distances, help='the distances you need to test')
    parser.add_argument('--save_img', type=str2bool, default=save_img, help=' True if you need to save img')
    parser.add_argument('--save_path_plus_time', type=str2bool, default=True, help=' True if you need to add time and type to save path')

    parser.add_argument('--data', type=str, default="decision_fusion", help='  event fusion image')
    parser.add_argument('--info', type=str, default="test", help=' use train or test')
    parser.add_argument('--event_subdir', type=str, default=event_subdir, help=' event source')
    parser.add_argument('--event_threshold', type=int, default=event_threshold, help=' event threshold will be used on the event image raw')
    parser.add_argument('--eve_bin_needed', default=event_needed, help="-1 means the last bin event will be used")

    parser.add_argument('--fusion_iou', type=float, default=fusion_iou, help='IOU threshold for decision fusion')

    parser.add_argument('--weights', type=str, default=None, help='model.pt path')  # model_exp_hand_x/hand_x.pt
    # two separate weight files: independent rgb and event models
    parser.add_argument('--weights_rgb', type=str, default=weights_rgb, help='rgb model weight path (optional)')
    parser.add_argument('--weights_eve', type=str, default=weights_eve, help='event model weight path (optional)')
    parser.add_argument('--source', type=str, default=" ", help='source')  # file/folder, 0 for webcam
    # FIX: detect() indexes img_size[0]/[1], so the CLI must take two ints
    parser.add_argument('--img-size', type=int, nargs=2, default=[1280, 1280], help='inference size (pixels)')

    parser.add_argument('--conf-thres', type=float, default=0.35, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')

    parser.add_argument('--fourcc', type=str, default='mp4v', help='output video codec (verify ffmpeg support)')
    parser.add_argument('--half', type=str2bool, default=False, help='half precision FP16 inference')
    parser.add_argument('--device', default='0', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')

    parser.add_argument('--classes', nargs='+', type=int, help='filter by class')
    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    parser.add_argument('--augment', type=str2bool, default=False, help='augmented inference')
    # FIX: the old help text claimed the default was 'direct'; it is 'iou'
    parser.add_argument('--cm-match-mode', type=str, default='iou', choices=['iou', 'direct'],
                        help="matching mode for confusion matrix: 'iou' (greedy IoU, default) or 'direct' (per-class direct matching)")
    parser.add_argument('--cm-cell-width', type=int, default=10,
                        help='cell width for confusion matrix pretty print (default 10)')
    opt = parser.parse_args()
    print(opt)  # print the parsed configuration

    # (The original contained a dead `if opt.weights_rgb is None and
    # opt.weights_eve is None:` block that only re-assigned None to None;
    # detect() itself enforces that both dual weights are provided.)

    data, info = opt.data, opt.info
    if opt.save_path_plus_time:
        # note: single quotes inside the f-string expression
        save_path = f"./{info}_{data}_{time.strftime('%m%d_%H%M')}"
    else:
        save_path = f"./{info}_{data}"

    with torch.no_grad():
        for dist in opt.distances:
            detect(data_path=opt.data_path, distance=dist, save_path=save_path)