#-*-coding:utf-8-*-
# date:2021-03-16
# Author: Eric.Lee
# function: yolo v5 video inference

import warnings
warnings.filterwarnings("ignore")
import argparse
from utils.datasets import *
from utils.utils import *
import time
import numpy as np
import os
from tqdm import tqdm
from tqdm import trange

from utils.wiky_datasets_ver3 import *


def F1_Score(precision,recall, beta = 1.0):
    """Return the F-beta score for the given precision and recall.

    beta weights recall vs. precision (beta=1.0 gives the classic F1).
    A small epsilon in the denominator avoids division by zero when both
    precision and recall are 0.
    """
    beta_sq = beta ** 2
    numerator = (1 + beta_sq) * precision * recall
    denominator = beta_sq * precision + recall + 1e-8
    return numerator / denominator

def detect(data_path = None, distance = None, save_path = None):
    """Run test-set inference with a YOLOv5-style model and report detection metrics.

    Configuration comes from the module-level ``opt`` namespace (argparse).

    Args:
        data_path: root directory of the evaluation dataset.
        distance: distance tag (e.g. "all", "3m") used to select samples and name outputs.
        save_path: output directory for the result txt and optional visualizations.

    Side effects: writes ``detect_results_<distance>.txt`` (and optionally images)
    under ``save_path`` and prints summary metrics to stdout.
    """
    # Parse config parameters
    source, weights, half, imgsz = \
        opt.source, opt.weights, opt.half, opt.img_size
    webcam = source == '0' or source.startswith('rtsp') or source.startswith('http') or source.endswith('.txt')
    # Select the inference device (GPU / CPU)
    device = torch_utils.select_device(opt.device)

    # Load the model from the checkpoint
    model = torch.load(weights, map_location=device)['model']
    # Put the model in inference mode
    model.to(device).eval()
    # Half-precision (float16) inference setting
    half = half and device.type != 'cpu'  # half precision only supported on CUDA
    if half:
        model.half()


    data,info = opt.data,opt.info

    # Get names and colors
    names = model.names if hasattr(model, 'names') else model.modules.names
    # colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]
    colors = [[0,255,0]]

    # Run inference

    img = torch.zeros((1, 3, imgsz[0], imgsz[1]), device=device)  # init img

    if data == "fusion":
        # Dual-path model takes (image, event) — warm up with two dummy inputs
        _ = model(img.half() if half else img.float(),img.half() if half else img.float()) if device.type != 'cpu' else None  # run once
        print(" You choose dual path fusion network.\n")
    else:
        _ = model(img.half() if half else img.float()) if device.type != 'cpu' else None
        print(" You choose single path network.\n")

    if info == "test":

        dataset  = BigHandDetectionDataset_Dual_2Label(path = data_path,
                                                is_train= False,
                                        event_subdir = opt.event_subdir,
                                        event_needed = opt.eve_bin_needed,
                                        train_step = 1,
                                        distance = distance)
    else:

        raise ValueError("Please select test !")



    if not os.path.exists(save_path):
        os.makedirs(save_path, 0o777,True)


    # Open a txt file to record the detection boxes
    result_txt_path = os.path.join(save_path, f"detect_results_{distance}.txt")
    result_txt = open(result_txt_path, "w")

    # Center-crop bounds for a 1280-wide frame (not referenced below)
    start = (1280-720)//2
    end = start + 720

    # Aggregate statistics
    all_pred_num = 0
    all_gt_num = 0
    tp_num = 0
    fp_num = 0
    fn_num = 0
    center_errors = []

    # Widths of predicted boxes
    box_widths = []


    from collections import defaultdict
    # Per-class statistics
    per_class_tp = defaultdict(int)
    per_class_fp = defaultdict(int)
    per_class_fn = defaultdict(int)
    per_class_center_errors = defaultdict(list)

    # Confusion matrix: (num_classes + 1) x (num_classes + 1);
    # the last row / column stands for "no prediction / no GT" (background / none)
    num_classes = len(names)
    bg_idx = num_classes
    confusion_mat = np.zeros((num_classes + 1, num_classes + 1), dtype=int)

    count = 0
    # count = int(3) if info == "test" else int(25)
    total_time = 0

    valid_count = 0

    for batch in tqdm(dataset):

        # NOTE: "eve_lavels" (sic) is the event-side label tensor
        img, eve, img_labels, eve_lavels, name = batch

        if data in["fusion","image"]:
            labels = img_labels
        else:
            labels = eve_lavels


        if len(labels) == 0: # no label pass
            continue

        count+=1

        date =  name.split("/")[-3]
        name = name.split("/")[-1].split(".")[0]

        if info == "test":

            if opt.event_subdir == "event_frame":

                eve = eve.float().to(device)

                # Remap raw frame values: 255 -> 200, 0 -> 100, everything else -> 0
                eve = torch.where(eve == 255, torch.tensor(200.0).to(device), eve)
                eve = torch.where(eve == 0, torch.tensor(100.0).to(device), eve)
                eve = torch.where((eve != 200) & (eve != 100), torch.tensor(0.0).to(device), eve)  # eve != 0 and eve != 255  wrong
                eve = eve.cpu()

            elif opt.event_subdir[-10:] != "_count_raw": # "event_img_60fps":
                pass

            elif opt.event_subdir[-10:] == "_count_raw": # == "event_img_60fps_count_raw":

                # Event-count frames: threshold around the 128 midpoint with margin C
                C = opt.event_threshold
                eve = eve.float().to(device)
                eve = torch.where(eve >= 128 + C, torch.tensor(200.0).to(device), eve)
                eve = torch.where(eve <= 128 - C, torch.tensor(100.0).to(device), eve)
                eve = torch.where((eve != 200) & (eve != 100), torch.tensor(0.0).to(device), eve)
                eve = eve.cpu()

            else:
                raise Exception("event_subdir error!")

        else:
            raise Exception("you should usetest mode!")



        # Keep a raw frame copy for visualizing the test results
        if data =="image" or data =="fusion":
            img0 = img.numpy()
        else:
            img0 = eve[-3:,:,:].numpy()
        img0 = img0.transpose(1, 2, 0)  # CHW to HWC
        img0 = img0[:, :, ::-1]  # RGB to BGR


        # labels_out = torch.zeros((nL, 6)) (0,class,x,y,w,h)
        # Convert the labels back to absolute pixel coordinates

        if len(labels):
            w = img.shape[-1]
            h = img.shape[-2]
            labels[:, 2] = labels[:, 2] * w
            labels[:, 3] = labels[:, 3] * h
            labels[:, 4] = labels[:, 4] * w
            labels[:, 5] = labels[:, 5] * h

            # Convert (cx, cy, w, h) to corner format (x1, y1, x2, y2)
            labels_out = torch.zeros((len(labels), 6))
            labels_out[:, 0] = labels[:, 0]
            labels_out[:, 1] = labels[:, 1]
            labels_out[:, 2] = labels[:, 2] - labels[:, 4]/2
            labels_out[:, 3] = labels[:, 3] - labels[:, 5]/2
            labels_out[:, 4] = labels[:, 2] + labels[:, 4]/2
            labels_out[:, 5] = labels[:, 3] + labels[:, 5]/2
        else:
            labels_out = labels

        gt_boxes = labels_out[:, 2:6].cpu().numpy() if len(labels_out) else np.zeros((0, 4))
        gt_classes = labels_out[:, 1].cpu().numpy() if len(labels_out) else np.zeros((0,))
        all_gt_num += len(gt_boxes)

        pred_boxes = []
        pred_scores = []
        pred_classes = []


        img =  img.to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)

        eve = eve.to(device)
        eve = eve.half() if half else eve.float()  # uint8 to fp16/32
        eve /= 255.0  # 0 - 255 to 0.0 - 1.0
        if eve.ndimension() == 3:
            eve = eve.unsqueeze(0)

        # Model inference (timed)
        t1 = torch_utils.time_synchronized()

        if data =="fusion":
            pred = model(img,eve)[0]
        elif data =="image":
            pred = model(img)[0]
        else:
            pred = model(eve)[0]
        # pred = model(img,augment=opt.augment)[0]
        t2 = torch_utils.time_synchronized()

        total_time += (t2 - t1)


        # NMS
        pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres,
                                    fast=True, classes=opt.classes, agnostic=opt.agnostic_nms)
        # print("opt.classes",opt.classes,opt.agnostic_nms,opt.augment)

        for i, det in enumerate(pred):


            p, s, im0 = source, '', img0.copy()

            s += '%gx%g ' % img.shape[2:]  # print string
            # gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh

            if det is not None and len(det):



                # Rescale boxes from img_size to im0 size: Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() # effectively clipping to the image
                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += '%g %ss, ' % (n, names[int(c)])  # add to string

                det  = det.cpu().numpy()

                output_dict_ = []
                class_confs = []
                area, event_area, ratio = 0,0,0
                for *xyxy, conf, cls in det:
                    x1, y1, x2, y2 = xyxy
                    output_dict_.append((float(x1), float(y1), float(x2), float(y2)))
                    class_confs.append((names[int(cls)],float(conf)))
                    label = '%s %.2f' % (names[int(cls)], conf)
                    plot_one_box(xyxy, im0, label=label, color=colors[0], line_thickness=3)# int(cls)

                    # Also draw the first GT box (red) for visual comparison;
                    # if there is no GT, redraw the prediction instead
                    plot_one_box(gt_boxes[0] if len(gt_boxes)>0 else xyxy,
                                 im0,
                                 label= "gt:"+str(int(gt_classes[0])) if len(gt_boxes)>0 else label,
                                 color=(0,0,255) if len(gt_boxes)>0 else colors[0],
                                 line_thickness=3,
                                 label_position = "bottom_right")

                    pred_boxes.append([x1, y1, x2, y2])
                    pred_scores.append(conf)
                    pred_classes.append(int(cls))

                    box_widths.append(abs(x2-x1)) # absolute value

                    if data == "event":
                        area, event_area, ratio = count_in_box(xyxy, im0)

                result_txt.write(f"{date}-{name}: {output_dict_,class_confs}")

                if data == "event":
                    result_txt.write(f" ({area:d},{event_area:d}, {ratio:.3f})\n")
                else:
                    result_txt.write(f"\n")

                valid_count +=1
            # print('%sDone. (%.3fs)' % (s, t2 - t1))

            # Save an image without the metric watermark, once every 10 samples
            if opt.save_img and count%10==0:
                no_watermark_dir = os.path.join(save_path, f"no_watermark_{distance}")
                if not os.path.exists(no_watermark_dir):
                    os.makedirs(no_watermark_dir, 0o777,True)
                frame_name = f"{date}-{name}_id_{count:04d}.jpg"
                cv2.imwrite(os.path.join(no_watermark_dir, frame_name), im0)

        pred_boxes = np.array(pred_boxes)
        pred_classes = np.array(pred_classes)
        all_pred_num += len(pred_boxes)

        # Two matching modes: 'iou' (greedy IoU matching) or 'direct' (one-to-one matching by class)
        matched_gt = set()
        matched_pred = set()

        def iou_matrix(boxes1, boxes2):
            """Pairwise IoU between two arrays of boxes in (x1, y1, x2, y2) format."""
            if len(boxes1) == 0 or len(boxes2) == 0:
                return np.zeros((len(boxes1), len(boxes2)))
            boxes1 = np.array(boxes1)
            boxes2 = np.array(boxes2)
            area1 = (boxes1[:, 2] - boxes1[:, 0]).clip(0) * (boxes1[:, 3] - boxes1[:, 1]).clip(0)
            area2 = (boxes2[:, 2] - boxes2[:, 0]).clip(0) * (boxes2[:, 3] - boxes2[:, 1]).clip(0)
            iou = np.zeros((len(boxes1), len(boxes2)))
            for ii in range(len(boxes1)):
                xx1 = np.maximum(boxes1[ii, 0], boxes2[:, 0])
                yy1 = np.maximum(boxes1[ii, 1], boxes2[:, 1])
                xx2 = np.minimum(boxes1[ii, 2], boxes2[:, 2])
                yy2 = np.minimum(boxes1[ii, 3], boxes2[:, 3])
                w = (xx2 - xx1).clip(0)
                h = (yy2 - yy1).clip(0)
                inter = w * h
                union = area1[ii] + area2 - inter + 1e-8
                iou[ii, :] = inter / union
            return iou

        if opt.cm_match_mode == 'iou':
            if len(gt_boxes) and len(pred_boxes):
                ious = iou_matrix(gt_boxes, pred_boxes)
                # Greedy matching: repeatedly pick the pair with the highest IoU; stop below the threshold
                while True:
                    if ious.size == 0:
                        break
                    idx = np.unravel_index(np.argmax(ious), ious.shape)
                    i, j = int(idx[0]), int(idx[1])
                    max_iou = ious[i, j]
                    if max_iou < opt.iou_thres:
                        break
                    # Record the match
                    matched_gt.add(i)
                    matched_pred.add(j)
                    gt_c = int(gt_classes[i])
                    pred_c = int(pred_classes[j])
                    confusion_mat[gt_c, pred_c] += 1

                    # if gt_c == pred_c:
                    #     fp_num += 1
                    # NOTE(review): tp_num counts every IoU match, even when gt_c != pred_c —
                    # confirm this is the intended definition of TP
                    tp_num += 1
                    per_class_tp[gt_c] += 1
                    # Center error (Euclidean distance between box centers)
                    gx = (gt_boxes[i][0] + gt_boxes[i][2]) / 2
                    gy = (gt_boxes[i][1] + gt_boxes[i][3]) / 2
                    px = (pred_boxes[j][0] + pred_boxes[j][2]) / 2
                    py = (pred_boxes[j][1] + pred_boxes[j][3]) / 2
                    error = np.sqrt((gx - px) ** 2 + (gy - py) ** 2)
                    center_errors.append(error)
                    per_class_center_errors[gt_c].append(error)
                    # Invalidate the matched row and column
                    ious[i, :] = -1
                    ious[:, j] = -1

        else:

            raise NotImplementedError

                # Extra predictions become FP (counted below)

        # Unmatched GT boxes count as FN (recorded in the last column of confusion_mat)
        for i in range(len(gt_boxes)):
            if i not in matched_gt:
                gt_c = int(gt_classes[i])
                confusion_mat[gt_c, bg_idx] += 1
                fn_num += 1
                per_class_fn[gt_c] += 1

        # Unmatched predictions count as FP (recorded in the last row of confusion_mat)
        for j in range(len(pred_boxes)):
            if j not in matched_pred:
                pred_c = int(pred_classes[j])
                confusion_mat[bg_idx, pred_c] += 1
                fp_num += 1
                per_class_fp[pred_c] += 1



    s = f"Total len :{valid_count}/{count} = {valid_count/count:.4f}"
    print(s)
    result_txt.write("\n" +s +"\n" )

    s = f"Average time :{total_time/count:.4f}s"
    print(s)
    result_txt.write("\n" +s +"\n" )


    # ----- Print and save the confusion matrix (counts + GT-row-normalized percent), aligned with a configurable cell width -----
    # NOTE: this rebinds `labels` (previously the per-sample label tensor)
    labels = [names[i] for i in range(num_classes)] + ['no_pred']

    # Percent normalized per GT row (a row summing to 0 stays 0)
    row_sums = confusion_mat.sum(axis=1, keepdims=True).astype(float)
    # NOTE(review): np.divide with `where=` and no `out=` leaves the masked entries
    # uninitialized — confirm all-zero rows/columns print as expected
    percent = np.divide(confusion_mat, row_sums, where=(row_sums != 0)) * 100

    # Percent normalized per prediction column
    col_sums = confusion_mat.sum(axis=0, keepdims=True).astype(float)
    percent_col = np.divide(confusion_mat, col_sums, where=(col_sums != 0)).T * 100

    cell_w = getattr(opt, 'cm_cell_width', 10)
    def fmt_label(s):
        # Right-align a label in a cell of width cell_w
        return f"{s:>{cell_w}}"

    header = fmt_label('') + ''.join(fmt_label(lbl) for lbl in labels)
    print('\nConfusion matrix (counts):')
    print(header)
    result_txt.write('\nConfusion matrix (counts):\n')
    result_txt.write(header + '\n')
    for i, lbl in enumerate(labels):
        counts_row = ''.join(f"{int(confusion_mat[i, j]):>{cell_w}}" for j in range(confusion_mat.shape[1]))
        line = f"{fmt_label(lbl)}{counts_row}"
        print(line)
        result_txt.write(line + '\n')

    print('\nConfusion matrix (% per GT row →):')
    print(header)
    result_txt.write('\nConfusion matrix (% per GT row):\n')
    result_txt.write(header + '\n')
    for i, lbl in enumerate(labels):
        perc_row = ''.join(f"{percent[i, j]:>{cell_w-1}.1f}%" for j in range(confusion_mat.shape[1]))
        line = f"{fmt_label(lbl)}{perc_row}"
        print(line)
        result_txt.write(line + '\n')

    print('\nConfusion matrix (% per Pre column ↓):')
    print(header)
    result_txt.write('\nConfusion matrix (% per Pre column):\n')
    result_txt.write(header + '\n')
    for i, lbl in enumerate(labels):
        # percent_col was transposed above, hence the [j, i] indexing
        perc_row = ''.join(f"{percent_col[j,i]:>{cell_w-1}.1f}%" for j in range(confusion_mat.shape[1]))
        line = f"{fmt_label(lbl)}{perc_row}"
        print(line)
        result_txt.write(line + '\n')

    # Per-class precision / recall / F1 derived from the confusion matrix
    tp_total, fp_total,fn_total,f1_s_total = 0,0,0,0
    for cls_id in range(len(confusion_mat)-1):

        if cls_id == 0:
            result_txt.write("\n\n*******************\n*******************\n" )

        tp_ = int(confusion_mat[cls_id, cls_id])
        tp_total += tp_

        fn_ = int(confusion_mat[cls_id, -1])
        fn_total += fn_
        # FP is the sum of column confusion_mat[:, cls_id] minus the diagonal tp_
        fp_ = int(confusion_mat[:, cls_id].sum()) - tp_
        fp_total += fp_

        precision_c = tp_ / (tp_ + fp_ + 1e-8)
        recall_c = tp_ / (tp_ + fn_ + 1e-8)
        errors = np.array(per_class_center_errors[cls_id])
        mean_e = errors.mean() if len(errors) else 0
        std_e = errors.std() if len(errors) else 0

        f1_s_ = F1_Score(precision_c,recall_c)
        f1_s_total += f1_s_

        s = f"Class {cls_id}: PRE {precision_c:.4f}, REC {recall_c:.4f}, F1S {f1_s_:.4f} Center error mean {mean_e:.2f}, std {std_e:.2f},(TP:{tp_}, FP:{fp_}, FN:{fn_})"
        print(s)
        result_txt.write(s +"\n" )


    result_txt.write("\n\n*******************\n*******************\n" )


    # Overall (micro) metrics
    precision = tp_total / (tp_total + fp_total + 1e-8)
    recall = tp_total / (tp_total + fn_total + 1e-8)

    micro_f1_score= F1_Score(precision,recall)
    center_errors = np.array(center_errors)
    mean_error = center_errors.mean() if len(center_errors) else 0
    std_error = center_errors.std() if len(center_errors) else 0


    s = f"PRE: {precision:.4f}, REC: {recall:.4f} F1_Score_Micro: {micro_f1_score:.4f} (F1_Score_Macro: {f1_s_total/(len(confusion_mat)-1):.4f})"
    print(s)
    result_txt.write(s +"\n" )

    s = f"Center error mean: {mean_error:.2f}, std: {std_error:.2f}"
    print(s)
    result_txt.write(s +"\n" )

    # box_width max min mean
    # NOTE(review): box_width.max()/.min() raise on an empty array — confirm every
    # run produces at least one detection
    box_width = np.array(box_widths)
    mean_box_width, max_box_width, min_box_with= box_width.mean() , box_width.max() , box_width.min()

    s = f"About the pre_boxs: MAX {max_box_width:.0f} MIN {min_box_with:.0f} MEAN {mean_box_width:.2f}"
    print(s)
    result_txt.write(s +"\n" )



    result_txt.write("\n\n*******************\n*******************\n" )

    # # Per-class metrics output
    # print("Per-class metrics:")

    if False:
        # Disabled branch: additionally export the confusion matrix to a standalone
        # txt file for downstream analysis or scripted reading
        cm_path = os.path.join(save_path, "confusion_matrix.txt")
        with open(cm_path, 'w', encoding='utf-8') as f:
            f.write("# Confusion matrix counts\n")
            f.write("\t" + "\t".join(labels) + "\n")
            for i, lbl in enumerate(labels):
                counts_row = "\t".join(str(int(confusion_mat[i, j])) for j in range(confusion_mat.shape[1]))
                f.write(f"{lbl}\t{counts_row}\n")
            f.write("\n# Confusion matrix percent (per GT row)\n")
            f.write("\t" + "\t".join(labels) + "\n")
            for i, lbl in enumerate(labels):
                perc_row = "\t".join(f"{percent[i, j]:.1f}%" for j in range(confusion_mat.shape[1]))
                f.write(f"{lbl}\t{perc_row}\n")

        print(f"Confusion matrix exported to: {cm_path}")
        result_txt.write(f"Confusion matrix exported to: {cm_path}\n")

    # Dump the full configuration into the results file for reproducibility
    for k, v in opt.__dict__.items():
        if k == 'opt':
            print(v)
            result_txt.write('%s\n' % v)
        else:
            print('%s: %s' % (k, v))
            result_txt.write('%s: %s\n' % (k, v))

    result_txt.close()
   

if __name__ == '__main__':

    model_weight = "hand_v5s_1115_1343_fusion/last.pt"  # "hand_v5s_1103_2032_img/last.pt"

    data_type = 'fusion'  # event fusion image

    event_subdir = "event_img_120fps"  # "event_img_120fps_count_raw" / "event_frame" (gray background 0 128 255, needs remapping)

    # "event_img_60fps_count_raw": gray background but counts; must be converted to an event image with a threshold

    event_needed = -1 if data_type != 'image' else 0
    '''
    When data_type is "event", event_needed is usually -1, i.e. take the last event bin and use the event-side label;
    when data_type is "image", event_needed is 0.

    When data_type is "fusion", event_needed depends on how the model was trained, but the image-side label is used by default.
    '''

    save_img = False

    event_threshold = 1

    data_path = "/home_ssd/lhc/hand_detect_v3_test2"
    distances = ["all"]  # ["3m","2m","1p5m"]

    def _str2bool(v):
        # argparse's type=bool is a trap: bool("False") is True. Parse common
        # truthy strings explicitly instead.
        if isinstance(v, bool):
            return v
        return str(v).strip().lower() in ("1", "true", "yes", "y", "t")

    parser = argparse.ArgumentParser()

    parser.add_argument('--data_path', type=str, default=data_path, help='the data path you need to process')
    # BUGFIX: type=list split a CLI string into characters ("3m" -> ['3','m']); use nargs='+' instead.
    parser.add_argument('--distances', nargs='+', type=str, default=distances, help='the distances you need to test')
    # BUGFIX: type=bool treated any non-empty string as True; parse explicitly.
    parser.add_argument('--save_img', type=_str2bool, default=save_img, help=' True if you need to save img')
    parser.add_argument('--save_path_plus_time', type=_str2bool, default=True, help=' True if you need to add time and type to save path')

    parser.add_argument('--data', type=str, default=data_type, help='  event fusion image')
    parser.add_argument('--info', type=str, default="test", help=' use train or test')
    parser.add_argument('--event_subdir', type=str, default=event_subdir, help=' event source')
    parser.add_argument('--event_threshold', type=int, default=event_threshold, help=' event threshold will be used on the event image raw')
    parser.add_argument('--eve_bin_needed', default=event_needed, help="-1 means the last bin event will be used")

    parser.add_argument('--weights', type=str, default=model_weight, help='model.pt path')  # model_exp_hand_x/hand_x.pt
    parser.add_argument('--source', type=str, default=" ", help='source')  # file/folder, 0 for webcam
    # BUGFIX: detect() indexes imgsz[0]/imgsz[1], so a scalar int from the CLI would crash; require two values.
    parser.add_argument('--img-size', type=int, nargs=2, default=[1280, 1280], help='inference size (pixels), height width')

    parser.add_argument('--conf-thres', type=float, default=0.35, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')

    # parser.add_argument('--fourcc', type=str, default='mp4v', help='output video codec (verify ffmpeg support)')
    parser.add_argument('--half', type=_str2bool, default=False, help='half precision FP16 inference')
    parser.add_argument('--device', default='0', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')

    parser.add_argument('--classes', nargs='+', type=int, help='filter by class')
    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    parser.add_argument('--augment', type=_str2bool, default=False, help='augmented inference')
    parser.add_argument('--cm-match-mode', type=str, default='iou', choices=['iou', 'direct'],
                        help="matching mode for confusion matrix: 'iou' (greedy IoU) or 'direct' (per-class direct matching)")
    opt = parser.parse_args()
    print(opt)  # print the parsed configuration

    data, info = opt.data, opt.info
    # f-string: inner strftime format must use single quotes inside the double-quoted expression
    if opt.save_path_plus_time:
        save_path = f"./{info}_{data}_{time.strftime('%m%d_%H%M')}"
    else:
        save_path = f"./{info}_{data}"

    with torch.no_grad():
        for distance in opt.distances:
            detect(data_path=opt.data_path, distance=distance, save_path=save_path)