#-*-coding:utf-8-*-
# date:2021-03-16
# Author: Eric.Lee
# function: yolo v5 video inference

import warnings
warnings.filterwarnings("ignore")
import argparse
from utils.datasets import *
from utils.utils import *
import cv2
import torch
import time
import numpy as np
import os
from tqdm import tqdm
from tqdm import trange

from utils.wiky_datasets_ver3 import *


def F1_Score(precision,recall, beta = 1.0):
    """Return the F-beta score for the given precision and recall.

    beta == 1 gives the classic F1 (harmonic mean); beta > 1 weights recall
    more heavily, beta < 1 weights precision. The small epsilon in the
    denominator guards against division by zero when both inputs are 0.
    """
    beta_sq = beta ** 2
    numerator = (1 + beta_sq) * precision * recall
    denominator = beta_sq * precision + recall + 1e-8
    return numerator / denominator

def detect(data_path = None, distance = None, save_path = None):
    """Run YOLOv5 inference over a test dataset and record, per frame:
    detection boxes (written to a txt file and optionally saved as images)
    and three inter-frame displacement estimates for matched boxes — box
    centre shift, lower-left-corner shift, and LK optical flow over the
    union rectangle — plus cosine / relative-difference similarity
    statistics between those three estimates.

    Reads runtime configuration from the module-global ``opt`` namespace
    (built by argparse in ``__main__``).

    Args:
        data_path: root directory of the dataset to evaluate.
        distance:  distance subset tag (e.g. "3m"); forwarded to the dataset
                   and embedded in output file names.
        save_path: directory where result txt and visualisation images go.
    """
    # Parse configuration parameters
    source, weights, half, imgsz = \
        opt.source, opt.weights, opt.half, opt.img_size
    # NOTE(review): webcam is computed but never used in this function.
    webcam = source == '0' or source.startswith('rtsp') or source.startswith('http') or source.endswith('.txt')
    # Select the inference device (CPU / CUDA)
    device = torch_utils.select_device(opt.device)

    # Load the model from the checkpoint (expects a dict with a 'model' key)
    model = torch.load(weights, map_location=device)['model']
    # Switch the model to inference mode
    model.to(device).eval()
    # Half-precision (float16) inference setting
    half = half and device.type != 'cpu'  # half precision only supported on CUDA
    if half:
        model.half()

    
    data,info = opt.data,opt.info

    # Get names and colors
    names = model.names if hasattr(model, 'names') else model.modules.names
    # colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]
    colors = [[0,255,0]]

    # Run inference
    
    img = torch.zeros((1, 3, imgsz[0], imgsz[1]), device=device)  # init img

    # Warm-up pass: the fusion model takes two inputs (image + event), the
    # single-path model takes one.
    if data == "fusion":
        _ = model(img.half() if half else img.float(),img.half() if half else img.float()) if device.type != 'cpu' else None  # run once
        print(" You choose dual path fusion network.\n")
    else:
        _ = model(img.half() if half else img.float()) if device.type != 'cpu' else None
        print(" You choose single path network.\n")

    if info == "test":
  
        dataset  = BigHandDetectionDataset_Dual_2Label(path = data_path,
                                                is_train= False,
                                        event_subdir = opt.event_subdir,
                                        event_needed = opt.eve_bin_needed,
                                        train_step = 1,
                                        distance = distance)
    else:

        raise ValueError("Please select test !")

    

    if not os.path.exists(save_path):
        os.makedirs(save_path, 0o777,True)

    
    # Open a txt file to record per-frame detection boxes and statistics
    result_txt_path = os.path.join(save_path, f"detect_results_{distance}.txt")
    result_txt = open(result_txt_path, "w")

    # NOTE(review): start/end look like a 1280->720 centre-crop range but are
    # never used below — confirm before removing.
    start = (1280-720)//2
    end = start + 720

    # Statistics accumulators
    # NOTE(review): tp_num/fp_num/fn_num and center_errors are initialised but
    # never updated or reported in this function — apparent leftovers from an
    # earlier metrics version.
    all_pred_num = 0
    all_gt_num = 0
    tp_num = 0
    fp_num = 0
    fn_num = 0
    center_errors = []

    # Optical-flow timing statistics
    flow_time_total = 0.0
    flow_count = 0
    # Similarity statistics for the three displacement estimates
    # (centre shift vs lower-left shift vs optical flow)
    sim_center_ll_cos = []
    sim_center_flow_cos = []
    sim_ll_flow_cos = []

    sim_center_ll_rel = []
    sim_center_flow_rel = []
    sim_ll_flow_rel = []

    # Widths of the predicted detection boxes
    # NOTE(review): collected but never summarised/written here.
    box_widths = []


    from collections import defaultdict
    # Per-class statistics
    # NOTE(review): these per-class dicts are never filled in this function.
    per_class_tp = defaultdict(int)
    per_class_fp = defaultdict(int)
    per_class_fn = defaultdict(int)
    per_class_center_errors = defaultdict(list)

    # Confusion matrix: (num_classes + 1) x (num_classes + 1);
    # the last row/column is "no prediction / no GT" (background / none).
    # NOTE(review): confusion_mat is never updated below.
    num_classes = len(names)
    bg_idx = num_classes
    confusion_mat = np.zeros((num_classes + 1, num_classes + 1), dtype=int)

    count = 0
    # count = int(3) if info == "test" else int(25)
    total_time = 0

    valid_count = 0

    for batch in tqdm(dataset):

        img, eve, img_labels, eve_lavels, name = batch

        # Fusion and image models use image-side labels; the event-only model
        # uses event-side labels.
        if data in["fusion","image"]:
            labels = img_labels
        else:
            labels = eve_lavels
             

        if len(labels) == 0: # no label pass
            continue

        count+=1

        # Skip frames outside --test-range (start end) to save time.
        try:
            test_start, test_end = int(opt.test_range[0]), int(opt.test_range[1])
        except Exception:
            test_start, test_end = 0, 10**9
        frame_idx = count
        if not (test_start <= frame_idx <= test_end):
            # Skip this iteration to avoid the expensive inference /
            # visualisation below.
            continue

        date =  name.split("/")[-3]
        name = name.split("/")[-1].split(".")[0]

        # Normalise the event tensor into three levels: 200 = positive
        # polarity, 100 = negative polarity, 0 = background.
        if info == "test":

            if opt.event_subdir == "event_frame":
            
                eve = eve.float().to(device)
                
                eve = torch.where(eve == 255, torch.tensor(200.0).to(device), eve)
                eve = torch.where(eve == 0, torch.tensor(100.0).to(device), eve)
                eve = torch.where((eve != 200) & (eve != 100), torch.tensor(0.0).to(device), eve)  # eve != 0 and eve != 255  wrong
                eve = eve.cpu()

            elif opt.event_subdir[-10:] != "_count_raw": # "event_img_60fps":
                pass
            
            elif opt.event_subdir[-10:] == "_count_raw": # == "event_img_60fps_count_raw":

                # Raw count images are centred at 128; threshold by +-C.
                C = opt.event_threshold
                eve = eve.float().to(device) 
                eve = torch.where(eve >= 128 + C, torch.tensor(200.0).to(device), eve)
                eve = torch.where(eve <= 128 - C, torch.tensor(100.0).to(device), eve)
                eve = torch.where((eve != 200) & (eve != 100), torch.tensor(0.0).to(device), eve) 
                eve = eve.cpu()

            else:
                raise Exception("event_subdir error!")

        else:
            raise Exception("you should usetest mode!")

                
        
        # Keep a copy of the raw image for visualising test results
        if data =="image" or data =="fusion":
            img0 = img.numpy()
        else:
            img0 = eve[-3:,:,:].numpy()
        img0 = img0.transpose(1, 2, 0)  # CHW to HWC
        img0 = img0[:, :, ::-1]  # RGB to BGR
       

        # labels_out = torch.zeros((nL, 6)) (0,class,x,y,w,h)
        # Restore normalised labels to absolute pixel coordinates

        if len(labels):
            w = img.shape[-1]
            h = img.shape[-2]
            labels[:, 2] = labels[:, 2] * w
            labels[:, 3] = labels[:, 3] * h
            labels[:, 4] = labels[:, 4] * w
            labels[:, 5] = labels[:, 5] * h
            
            # Convert (cx,cy,w,h) to corner format (x1,y1,x2,y2)
            labels_out = torch.zeros((len(labels), 6))
            labels_out[:, 0] = labels[:, 0]
            labels_out[:, 1] = labels[:, 1]
            labels_out[:, 2] = labels[:, 2] - labels[:, 4]/2    
            labels_out[:, 3] = labels[:, 3] - labels[:, 5]/2
            labels_out[:, 4] = labels[:, 2] + labels[:, 4]/2
            labels_out[:, 5] = labels[:, 3] + labels[:, 5]/2
        else:
            labels_out = labels

        gt_boxes = labels_out[:, 2:6].cpu().numpy() if len(labels_out) else np.zeros((0, 4))
        gt_classes = labels_out[:, 1].cpu().numpy() if len(labels_out) else np.zeros((0,))
        all_gt_num += len(gt_boxes)

        pred_boxes = []
        pred_scores = []
        pred_classes = []


        img =  img.to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)

        eve = eve.to(device)
        eve = eve.half() if half else eve.float()  # uint8 to fp16/32
        eve /= 255.0  # 0 - 255 to 0.0 - 1.0
        if eve.ndimension() == 3:
            eve = eve.unsqueeze(0)

        # Model inference (timed)
        t1 = torch_utils.time_synchronized()

        if data =="fusion":
            pred = model(img,eve)[0]
        elif data =="image":
            pred = model(img)[0]
        else:
            pred = model(eve)[0]
        # pred = model(img,augment=opt.augment)[0]
        t2 = torch_utils.time_synchronized()

        total_time += (t2 - t1)

      
        # Non-maximum suppression
        pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres,
                                    fast=True, classes=opt.classes, agnostic=opt.agnostic_nms)
        # print("opt.classes",opt.classes,opt.agnostic_nms,opt.augment)

        for i, det in enumerate(pred): 
            

            p, s, im0 = source, '', img0.copy()
         
            s += '%gx%g ' % img.shape[2:]  # print string
            # gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
            
            if det is not None and len(det):

                

                # Rescale boxes from inference resolution back to the original
                # image resolution (effectively clipping/rounding coords).
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += '%g %ss, ' % (n, names[int(c)])  # add to string

                det  = det.cpu().numpy()
              
                output_dict_ = []
                class_confs = []
                area, event_area, ratio = 0,0,0
                for *xyxy, conf, cls in det:
                    x1, y1, x2, y2 = xyxy
                    output_dict_.append((float(x1), float(y1), float(x2), float(y2)))
                    class_confs.append((names[int(cls)],float(conf)))
                    label = '%s %.2f' % (names[int(cls)], conf)
                    plot_one_box(xyxy, im0, label=label, color=colors[0], line_thickness=3)# int(cls)

                    # Draw the first GT box (red) for comparison; falls back to
                    # the prediction itself when there is no GT box.
                    plot_one_box(gt_boxes[0] if len(gt_boxes)>0 else xyxy,
                                 im0,
                                 label= "gt:"+str(int(gt_classes[0])) if len(gt_boxes)>0 else label,
                                 color=(0,0,255) if len(gt_boxes)>0 else colors[0],
                                 line_thickness=3,
                                 label_position = "bottom_right")

                    pred_boxes.append([x1, y1, x2, y2])
                    pred_scores.append(conf)
                    pred_classes.append(int(cls))

                    box_widths.append(abs(x2-x1))  # absolute value

                    if data == "event":
                        area, event_area, ratio = count_in_box(xyxy, im0)
        
                result_txt.write(f"{date}-{name}: {output_dict_,class_confs}")

                if data == "event":
                    result_txt.write(f" ({area:d},{event_area:d}, {ratio:.3f})\n")
                else:
                    result_txt.write(f"\n")

                valid_count +=1
            # print('%sDone. (%.3fs)' % (s, t2 - t1))

            # Save the annotated frame without a watermark (every 10th frame)
            if opt.save_img and count%10==0:
                no_watermark_dir = os.path.join(save_path, f"no_watermark_{distance}")
                if not os.path.exists(no_watermark_dir):
                    os.makedirs(no_watermark_dir, 0o777,True)
                frame_name = f"{date}-{name}_id_{count:04d}.jpg"
                cv2.imwrite(os.path.join(no_watermark_dir, frame_name), im0)

        # Displacement / optical-flow section (summary of the block below):
        # - displacement is measured from the predicted box centre and from
        #   the lower-left corner as reference points
        # - overall optical flow is computed with pyramidal LK plus a weighted
        #   average inside the union rectangle of the two frames' boxes
        # - supports frame-range testing and dropped frames (keeps the most
        #   recent frame that had detections)
        # - per-frame results are written to txt and visualisations saved
        '''
        计算位移与光流处：
        - 以预测框中心点(center)和左下角(lower-left)为参考点计算位移
        - 使用 LK(Pyr) + 加权最小二乘(这里用加权平均) 在两个帧预测框并集矩形区域内计算总体光流
        - 支持帧范围测试、丢帧处理（使用最近一张有检测的帧）
        - 将每帧结果写入 txt 并保存可视化图像
        '''

        pred_boxes = np.array(pred_boxes)
        pred_classes = np.array(pred_classes)
        all_pred_num += len(pred_boxes)

        # Lazily initialise the previous-frame cache on the first iteration
        if 'prev_pred_boxes' not in locals():
            prev_pred_boxes = None
            prev_pred_classes = None
            prev_im0 = None
            prev_count_idx = None

        # Parse the test range
        # NOTE(review): duplicates the test-range parsing done earlier in this
        # loop body; frames outside the range were already skipped above.
        test_range = getattr(opt, 'test_range', None)
        if test_range is None:
            test_start, test_end = 0, 10**9
        else:
            test_start, test_end = test_range

        # Use count as the frame index (only labelled frames are counted)
        frame_idx = count

        def iou(boxA, boxB):
            # boxes: [x1,y1,x2,y2]
            xA = max(boxA[0], boxB[0])
            yA = max(boxA[1], boxB[1])
            xB = min(boxA[2], boxB[2])
            yB = min(boxA[3], boxB[3])
            interW = max(0, xB - xA)
            interH = max(0, yB - yA)
            inter = interW * interH
            areaA = max(0, (boxA[2]-boxA[0])) * max(0, (boxA[3]-boxA[1]))
            areaB = max(0, (boxB[2]-boxB[0])) * max(0, (boxB[3]-boxB[1]))
            union = areaA + areaB - inter + 1e-8
            return inter / union

        def compute_lk_overall_flow(prev_img, cur_img, boxA, boxB, max_corners=200):
            # Compute the overall optical flow (pixel displacement vector)
            # inside the union rectangle of the two boxes.
            # prev_img, cur_img: BGR uint8
            x1 = int(min(boxA[0], boxB[0]))
            y1 = int(min(boxA[1], boxB[1]))
            x2 = int(max(boxA[2], boxB[2]))
            y2 = int(max(boxA[3], boxB[3]))
            h = y2 - y1
            w = x2 - x1
            if w <= 2 or h <= 2:
                return (0.0, 0.0), (x1,y1,x2,y2)

            prev_gray = cv2.cvtColor(prev_img, cv2.COLOR_BGR2GRAY)
            cur_gray = cv2.cvtColor(cur_img, cv2.COLOR_BGR2GRAY)

            roi_prev = prev_gray[y1:y2, x1:x2]
            roi_cur = cur_gray[y1:y2, x1:x2]

            # Feature-point selection (within the ROI)
            corners = cv2.goodFeaturesToTrack(roi_prev, maxCorners=max_corners, qualityLevel=0.01, minDistance=3)
            if corners is None:
                return (0.0, 0.0), (x1,y1,x2,y2)
            pts_prev = corners.reshape(-1,2)
            # Shift ROI-local corner coords back to full-image coords
            pts_prev_full = pts_prev + np.array([x1,y1])

            # Pyramidal Lucas-Kanade tracking
            pts_prev_float = pts_prev_full.astype(np.float32)
            pts_next, st, err = cv2.calcOpticalFlowPyrLK(prev_gray, cur_gray, pts_prev_float, None)
            if pts_next is None:
                return (0.0, 0.0), (x1,y1,x2,y2)

            st = st.reshape(-1)
            valid_prev = pts_prev_float[st==1]
            valid_next = pts_next[st==1]
            if len(valid_prev) == 0:
                return (0.0, 0.0), (x1,y1,x2,y2)

            disps = (valid_next - valid_prev)
            # Weights: 1/(err+eps) when the tracking error is available
            if err is not None:
                w = 1.0 / (err.reshape(-1)[st==1] + 1e-6)
            else:
                w = np.ones((disps.shape[0],), dtype=float)

            # Reject outliers via median absolute deviation (3*MAD rule)
            med = np.median(disps, axis=0)
            mad = np.median(np.abs(disps - med), axis=0) + 1e-6
            inliers = (np.abs(disps - med) <= 3*mad).all(axis=1)
            if inliers.sum() == 0:
                inliers = np.ones((disps.shape[0],), dtype=bool)

            disps_in = disps[inliers]
            w_in = w[inliers]

            # Weighted mean as the overall displacement (dx, dy)
            dx = (disps_in[:,0] * w_in).sum() / (w_in.sum() + 1e-8)
            dy = (disps_in[:,1] * w_in).sum() / (w_in.sum() + 1e-8)

            return (float(dx), float(dy)), (x1,y1,x2,y2)

        # When the frame is outside the test range we still update the prev
        # caches (to keep dropped-frame handling working) but emit no output.
        in_test_range = (frame_idx >= test_start and frame_idx <= test_end)

        # Dropped-frame handling: when there are no predictions, keep the
        # existing prev_* cache untouched and skip displacement computation.
        if len(pred_boxes) == 0:
            # No predictions: skip displacement computation, keep prev_* as-is
            if prev_pred_boxes is None:
                # No history either — nothing to do
                pass
            # continue loop
        else:
            # With a previous frame's predictions available, match boxes and
            # compute displacements.
            if prev_pred_boxes is not None:
                # Build the IoU matrix and match greedily (best IoU first)
                ious = np.zeros((len(prev_pred_boxes), len(pred_boxes)))
                for i in range(len(prev_pred_boxes)):
                    for j in range(len(pred_boxes)):
                        ious[i,j] = iou(prev_pred_boxes[i], pred_boxes[j])

                matched_prev = set()
                matched_cur = set()
                matches = []  # tuples (i,j)
                while True:
                    if ious.size == 0:
                        break
                    idx = np.unravel_index(np.argmax(ious), ious.shape)
                    i, j = int(idx[0]), int(idx[1])
                    if ious[i,j] < 0.1:
                        break
                    matches.append((i,j))
                    matched_prev.add(i)
                    matched_cur.add(j)
                    # Invalidate the matched row/column
                    ious[i,:] = -1
                    ious[:,j] = -1

                # Create the visualisation directory
                vis_dir = os.path.join(save_path, f"flow_vis_{distance}")
                if not os.path.exists(vis_dir):
                    os.makedirs(vis_dir, exist_ok=True)

                # For each matched pair compute the three displacement estimates
                for (i,j) in matches:
                    prev_box = prev_pred_boxes[i]
                    cur_box = pred_boxes[j]

                    # Reference point: box centre
                    prev_cx = (prev_box[0] + prev_box[2]) / 2.0
                    prev_cy = (prev_box[1] + prev_box[3]) / 2.0
                    cur_cx = (cur_box[0] + cur_box[2]) / 2.0
                    cur_cy = (cur_box[1] + cur_box[3]) / 2.0
                    center_dx = cur_cx - prev_cx
                    center_dy = cur_cy - prev_cy

                    # Reference point: lower-left corner (x1, y2) in image
                    # coordinates (y grows downward)
                    prev_llx = prev_box[0]
                    prev_lly = prev_box[3]
                    cur_llx = cur_box[0]
                    cur_lly = cur_box[3]
                    ll_dx = cur_llx - prev_llx
                    ll_dy = cur_lly - prev_lly

                    # Optical-flow estimate (timed)
                    t_flow1 = time.time()
                    (flow_dx, flow_dy), union_rect = compute_lk_overall_flow(prev_im0, im0, prev_box, cur_box)
                    t_flow2 = time.time()
                    flow_time_total += (t_flow2 - t_flow1)
                    flow_count += 1

                    # Displacement vectors and their similarities
                    # (cosine similarity and relative difference)
                    v_center = np.array([center_dx, center_dy], dtype=float)
                    v_ll = np.array([ll_dx, ll_dy], dtype=float)
                    v_flow = np.array([flow_dx, flow_dy], dtype=float)

                    def safe_cosine(a, b):
                        # Cosine similarity, defined as 1.0 when both vectors
                        # are ~zero and 0.0 when exactly one is ~zero.
                        norma = np.linalg.norm(a)
                        normb = np.linalg.norm(b)
                        if norma < 1e-8 and normb < 1e-8:
                            return 1.0
                        if norma < 1e-8 or normb < 1e-8:
                            return 0.0
                        cos = np.dot(a, b) / (norma * normb)
                        return float(np.clip(cos, -1.0, 1.0))

                    def rel_diff(a, b):
                        # Relative difference: ||a-b|| / (||a||+||b||)
                        na = np.linalg.norm(a)
                        nb = np.linalg.norm(b)
                        denom = (na + nb) + 1e-8
                        return float(np.linalg.norm(a - b) / denom)

                    c_ll_cos = safe_cosine(v_center, v_ll)
                    c_flow_cos = safe_cosine(v_center, v_flow)
                    ll_flow_cos = safe_cosine(v_ll, v_flow)

                    c_ll_rel = 1.0 - rel_diff(v_center, v_ll)  # larger = more similar
                    c_flow_rel = 1.0 - rel_diff(v_center, v_flow)
                    ll_flow_rel_v = 1.0 - rel_diff(v_ll, v_flow)

                    sim_center_ll_cos.append(c_ll_cos)
                    sim_center_flow_cos.append(c_flow_cos)
                    sim_ll_flow_cos.append(ll_flow_cos)

                    sim_center_ll_rel.append(c_ll_rel)
                    sim_center_flow_rel.append(c_flow_rel)
                    sim_ll_flow_rel.append(ll_flow_rel_v)

                    # Write to txt (only when inside the test range)
                    if in_test_range:
                        result_txt.write(f"Frame {frame_idx}: match prev_id={i} cur_id={j} center_dx={center_dx:.2f},{center_dy:.2f} ll_dx={ll_dx:.2f},{ll_dy:.2f} flow_dx={flow_dx:.2f},{flow_dy:.2f} union={union_rect}\n")

                        # Visualise on im0: prev centre -> cur centre,
                        # prev lower-left -> cur lower-left, and the overall
                        # flow arrow over the union rectangle.
                        vis = im0.copy()
                        # Centre-point arrow (red)
                        cv2.arrowedLine(vis, (int(prev_cx), int(prev_cy)), (int(cur_cx), int(cur_cy)), (0,0,255), 2, tipLength=0.3)
                        # Lower-left-corner arrow (green)
                        cv2.arrowedLine(vis, (int(prev_llx), int(prev_lly)), (int(cur_llx), int(cur_lly)), (0,255,0), 2, tipLength=0.3)
                        # Flow arrow (sky blue) drawn at the union-rect centre
                        ux1,uy1,ux2,uy2 = union_rect
                        ucx = int((ux1+ux2)/2)
                        ucy = int((uy1+uy2)/2)
                        sky_blue = (235,206,135)  # sky blue in BGR
                        cv2.rectangle(vis, (ux1,uy1), (ux2,uy2), sky_blue, 1)
                        cv2.arrowedLine(vis, (ucx,ucy), (int(ucx+flow_dx), int(ucy+flow_dy)), sky_blue, 2, tipLength=0.3)

                        # NOTE(review): this redraws the same flow arrow in
                        # pure blue over the sky-blue one above — looks like a
                        # leftover debug line; confirm which colour is wanted.
                        cv2.arrowedLine(vis, (ucx,ucy), (int(ucx+flow_dx), int(ucy+flow_dy)), (255,0,0), 2, tipLength=0.3)

                        # Save the visualisation
                        vis_name = f"{date}-{name}_idx_{frame_idx:04d}_match_{i}_{j}.jpg"
                        cv2.imwrite(os.path.join(vis_dir, vis_name), vis)

            # Update prev caches (only when the current frame has predictions)
            prev_pred_boxes = pred_boxes.copy() if len(pred_boxes) else None
            prev_pred_classes = pred_classes.copy() if len(pred_classes) else None
            prev_im0 = im0.copy()
            prev_count_idx = frame_idx


    # Dump the full configuration to stdout and the result txt
    for k, v in opt.__dict__.items():
        if k == 'opt':
            print(v)
            result_txt.write('%s\n' % v)
        else:
            print('%s: %s' % (k, v))
            result_txt.write('%s: %s\n' % (k, v))

    # Print and record the average optical-flow computation time
    avg_flow_time = flow_time_total / flow_count if flow_count > 0 else 0.0
    s = f"Average LK flow compute time: {avg_flow_time:.6f}s over {flow_count} calls"
    print(s)
    result_txt.write(s + "\n")

    # Similarity statistics for the three methods, written to txt
    def stats(arr):
        # Return (mean, std, n); (0, 0, 0) for an empty list.
        if len(arr) == 0:
            return (0.0, 0.0, 0)
        a = np.array(arr)
        return (float(a.mean()), float(a.std()), int(len(a)))

    c_ll_mean, c_ll_std, c_ll_n = stats(sim_center_ll_cos)
    c_flow_mean, c_flow_std, c_flow_n = stats(sim_center_flow_cos)
    ll_flow_mean, ll_flow_std, ll_flow_n = stats(sim_ll_flow_cos)

    s_cos = f"Cosine similarity (center vs ll): mean={c_ll_mean:.4f}, std={c_ll_std:.4f}, n={c_ll_n}"
    print(s_cos)
    result_txt.write(s_cos + "\n")
    s_cos = f"Cosine similarity (center vs flow): mean={c_flow_mean:.4f}, std={c_flow_std:.4f}, n={c_flow_n}"
    print(s_cos)
    result_txt.write(s_cos + "\n")
    s_cos = f"Cosine similarity (ll vs flow): mean={ll_flow_mean:.4f}, std={ll_flow_std:.4f}, n={ll_flow_n}"
    print(s_cos)
    result_txt.write(s_cos + "\n")

    # Statistics after converting relative difference into a similarity
    rc_ll_mean, rc_ll_std, rc_ll_n = stats(sim_center_ll_rel)
    rc_flow_mean, rc_flow_std, rc_flow_n = stats(sim_center_flow_rel)
    rll_flow_mean, rll_flow_std, rll_flow_n = stats(sim_ll_flow_rel)

    s_rel = f"Rel-similarity (center vs ll): mean={rc_ll_mean:.4f}, std={rc_ll_std:.4f}, n={rc_ll_n}"
    print(s_rel)
    result_txt.write(s_rel + "\n")
    s_rel = f"Rel-similarity (center vs flow): mean={rc_flow_mean:.4f}, std={rc_flow_std:.4f}, n={rc_flow_n}"
    print(s_rel)
    result_txt.write(s_rel + "\n")
    s_rel = f"Rel-similarity (ll vs flow): mean={rll_flow_mean:.4f}, std={rll_flow_std:.4f}, n={rll_flow_n}"
    print(s_rel)
    result_txt.write(s_rel + "\n")

    result_txt.close()
   

if __name__ == '__main__':

    def _str2bool(v):
        """Parse a command-line boolean.

        argparse's ``type=bool`` treats any non-empty string (including
        "False") as True, so boolean options must be parsed explicitly.
        """
        if isinstance(v, bool):
            return v
        if str(v).lower() in ("yes", "true", "t", "1"):
            return True
        if str(v).lower() in ("no", "false", "f", "0"):
            return False
        raise argparse.ArgumentTypeError(f"expected a boolean, got {v!r}")

    model_weight = "./hand_v5s_1114_2022_eve_120fps/last.pt"  # "hand_v5s_1103_2032_img/last.pt"

    data_type = 'event'  # event / fusion / image

    event_subdir = "event_img_120fps"  # "event_img_120fps_count_raw"; "event_frame" is gray background 0/128/255 and needs conversion

    # "event_img_60fps_count_raw": gray background with counts; must be
    # thresholded into an event image before use.

    event_needed = -1 if data_type != 'image' else 0
    # When data_type is "event", event_needed is usually -1 (take the last
    # event bin) and labels come from the event side.
    # When data_type is "image", event_needed is 0.
    # When data_type is "fusion", event_needed follows the training setup,
    # but labels are taken from the image side by default.

    save_img = False

    event_threshold = 1

    data_path = "/home_ssd/lhc/hand_detect_v3_test2"
    distances = ["3m", "2m", "1p5m"]  # ["all"]

    parser = argparse.ArgumentParser()

    parser.add_argument('--data_path', type=str, default=data_path, help='the data path you need to process')
    # FIX: was type=list, which splits a CLI string into single characters;
    # nargs='+' keeps the default list intact and accepts "--distances 3m 2m".
    parser.add_argument('--distances', nargs='+', type=str, default=distances, help='the distances you need to test')
    # FIX: was type=bool — bool("False") is True; use an explicit parser.
    parser.add_argument('--save_img', type=_str2bool, default=save_img, help=' True if you need to save img')
    parser.add_argument('--save_path_plus_time', type=_str2bool, default=True, help=' True if you need to add time and type to save path')

    parser.add_argument('--data', type=str, default=data_type, help='  event fusion image')
    parser.add_argument('--info', type=str, default="test", help=' use train or test')
    parser.add_argument('--event_subdir', type=str, default=event_subdir, help=' event source')
    parser.add_argument('--event_threshold', type=int, default=event_threshold, help=' event threshold will be used on the event image raw')
    # FIX: added type=int so a CLI-supplied value matches the int default.
    parser.add_argument('--eve_bin_needed', type=int, default=event_needed, help="-1 means the last bin event will be used")

    parser.add_argument('--weights', type=str, default=model_weight, help='model.pt path')  # model_exp_hand_x/hand_x.pt
    parser.add_argument('--source', type=str, default=" ", help='source')  # file/folder, 0 for webcam
    # FIX: added nargs=2 — without it a CLI value would be a scalar int while
    # detect() indexes imgsz[0]/imgsz[1]; the default list is unchanged.
    parser.add_argument('--img-size', nargs=2, type=int, default=[1280, 1280], help='inference size (pixels)')

    parser.add_argument('--conf-thres', type=float, default=0.35, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')

    # parser.add_argument('--fourcc', type=str, default='mp4v', help='output video codec (verify ffmpeg support)')
    # FIX: --half/--augment had no type, so any CLI value was a truthy string.
    parser.add_argument('--half', type=_str2bool, default=False, help='half precision FP16 inference')
    parser.add_argument('--device', default='0', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')

    parser.add_argument('--classes', nargs='+', type=int, help='filter by class')
    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    parser.add_argument('--augment', type=_str2bool, default=False, help='augmented inference')
    parser.add_argument('--cm-match-mode', type=str, default='iou', choices=['iou', 'direct'],
                        help="matching mode for confusion matrix: 'iou' (greedy IoU) or 'direct' (per-class direct matching)")
    parser.add_argument('--test-range', nargs=2, type=int, default=[0, 2000], help='frame idx range to test (start end)')
    opt = parser.parse_args()
    print(opt)  # print the parsed configuration

    data, info = opt.data, opt.info
    # save_path = f"./test_image_{time.strftime('%Y%m%d_%H%M')}"  # use single quotes inside the f-string
    if opt.save_path_plus_time:
        save_path = f"./{info}_{data}_{time.strftime('%m%d_%H%M')}_OF"
    else:
        save_path = f"./{info}_{data}_OF"

    with torch.no_grad():
        for dist in opt.distances:
            detect(data_path=opt.data_path, distance=dist, save_path=save_path)