'''
YOLOv8 pose inference.
    Can generate labelme pose annotations.
    Can batch-test images matched by img_glob.
'''
import glob
import json
import math
import os
import shutil
import time

import cv2
import numpy as np
import traceback

from ultralytics import YOLO  # v8.1
from line_profiler import LineProfiler  # 逐行计算运行时间

from myutils import to_pose_img
from user_cls_predict import get_yolov8_cls_predict

ctime = LineProfiler()

# from log import get_logger

def get_yolov8_pose_predict(test_config=None):
    '''
    Build a single-image YOLOv8 pose-inference closure from *test_config*.

    Args:
        test_config: dict with keys
            pose_model_path: path to a YOLO pose weight file, or an already
                constructed YOLO instance
            cls_model_path: path of the classifier used by the v2/v3
                action strategies
            save_dir: output directory; also receives classifier crops
                when is_save_crop is True
            action_celue: action strategy, one of 'v1'/'v2'/'v3'/'v4'
            pose_model_type: 'user' (6 arm keypoints only) or 'official'
                (full COCO keypoint layout)
            is_save_crop: optional, save classifier crop images (default True)

    Returns:
        yolov8_pose_predict(img, **kwargs) closure — see its docstring.
    '''
    if test_config is None:  # avoid the mutable-default-argument pitfall
        test_config = {}
    model_path = test_config['pose_model_path']
    cls_model_path = test_config['cls_model_path']
    save_dir = test_config['save_dir']
    action_celue = test_config['action_celue']  # action-strategy version
    pose_model_type = test_config['pose_model_type']
    is_save_crop = test_config.get('is_save_crop', True)

    # Accept either a ready-made YOLO instance or a weight-file path.
    if isinstance(model_path, YOLO):
        model = model_path
    else:
        model = YOLO(model_path)
    # Warm-up inference so the first real frame is not slowed by lazy init.
    model.predict(np.zeros((640, 640, 3), np.uint8), imgsz=640, device='0', retina_masks=True)

    cls_predict = get_yolov8_cls_predict(model_path=cls_model_path)

    # Map fine-grained classifier labels to parent classes:
    # 'IP' = IsPlace (placing a bag), 'NP' = NotPlace.
    sub_parent_clsname_dict = {
        'ZhCMFangBao': 'IP',
        'ZhCMFuKuang': 'IP',
        'ZhCMNaKuang': 'NP',
        'ZhCMZhanLi': 'NP',
        'BMFangBao': 'NP',
        'BMZhanLi': 'NP',
        'AnJianYuan': 'NP',
    }

    def calculate_angle(A, B, C):
        """Return the angle ABC (at vertex B) in degrees; 0 for degenerate input."""
        # Vector BA
        vector_BA = (A[0] - B[0], A[1] - B[1])
        # Vector BC
        vector_BC = (C[0] - B[0], C[1] - B[1])

        # Dot product of the two vectors
        dot_product = vector_BA[0] * vector_BC[0] + vector_BA[1] * vector_BC[1]

        # Vector magnitudes
        magnitude_BA = math.sqrt(vector_BA[0] ** 2 + vector_BA[1] ** 2)
        magnitude_BC = math.sqrt(vector_BC[0] ** 2 + vector_BC[1] ** 2)

        if magnitude_BA == 0 or magnitude_BC == 0:
            return 0
        cos_angle = dot_product / (magnitude_BA * magnitude_BC)
        # Clamp to [-1, 1]: floating-point rounding can push the ratio just
        # outside acos's domain and raise ValueError.
        cos_angle = max(-1.0, min(1.0, cos_angle))
        angle_rad = math.acos(cos_angle)

        # Convert to degrees
        return math.degrees(angle_rad)

    def is_place_bag(kps_m3):  # bag-placing check based on joint geometry (v1)
        """Return (is_place, info_str) from arm-joint geometry.

        info_str codes: NE=not enough points, NFR=not facing front,
        NB=arms not bent, IP=is placing (with the two elbow angles).
        """
        # COCO indices 5..10: l/r shoulder, l/r elbow, l/r wrist.
        kps = kps_m3[5:11, :2]

        infor_str = ''
        # Undetected keypoints come back as (0, 0); all six are required.
        for kp in kps:
            if kp[0] == 0 and kp[1] == 0:
                infor_str += 'NE '  # NOT Enough
                return False, infor_str

        left_x = sum(kps[::2, 0]) / 3  # mean x of left-arm joints
        right_x = sum(kps[1::2, 0]) / 3
        is_front = left_x > right_x  # is the person facing the camera?
        if not is_front:
            infor_str += 'NFR '  # NOT FRONT
            return False, infor_str

        la, lb, lc = kps[::2, :]
        ra, rb, rc = kps[1::2, :]
        left_angle = int(calculate_angle(la, lb, lc))  # left elbow angle
        right_angle = int(calculate_angle(ra, rb, rc))

        if left_angle > 120 and right_angle > 120:
            infor_str += f'NB {left_angle} {right_angle}'  # NOT BEND
            return False, infor_str
        infor_str += f'IP {left_angle} {right_angle}'  # IS PLACE
        return True, infor_str

    def is_place_bag_v2(kps_m3, xyxy, img):  # bag-placing check via classifier (v2/v3)
        """Render an arm-pose crop, classify it, return '<clsname> <conf>'."""
        # Build the crop descriptor for to_pose_img.
        if pose_model_type == 'user':
            kps = kps_m3  # user model outputs only the 6 arm keypoints
        else:  # official COCO model: slice out the arm keypoints
            kps = kps_m3[5:11, :2]
        kp_names = ['left_shoulder', 'right_shoulder', 'left_elbow', 'right_elbow', 'left_wrist', 'right_wrist']
        crop_infor = {'xyxy': xyxy}  # {'xyxy': xyxy, kp_name: (x, y), ...}
        for kp_ind, kp_name in enumerate(kp_names):
            crop_infor[kp_name] = (kps[kp_ind, 0], kps[kp_ind, 1])
        # v2 draws the skeleton on a blank canvas; v3 draws it on the crop.
        crop_img = to_pose_img(img, crop_infor, is_zero_img=action_celue == 'v2')
        if is_save_crop:
            crop_save_dir = os.path.join(save_dir, 'crop')
            if not os.path.exists(crop_save_dir):
                os.makedirs(crop_save_dir)
            cv2.imwrite(os.path.join(crop_save_dir, f'crop_img_{xyxy[0]}_{xyxy[1]}.jpg'), crop_img)

        # The classifier returns the class name directly.
        show_img, (clsname, cls_conf) = cls_predict(crop_img)

        infor_str = f'{clsname} {cls_conf:.2f}'
        return infor_str

    def yolov8_pose_predict(img, **kwargs):
        '''
        Run pose inference on a single image, optionally restricted to a ROI.

        Example:
            show_img, out = yolov8_pose_predict(img0, realtime_conf_thres=0.5,
                                                iou_thres=0.45, roi=xyxy, pad=20)
        Args:
            img: BGR image (H*W*3)
            pose_img_size: inference imgsz (default 640)
            realtime_conf_thres: confidence threshold (default 0.35)
            iou_thres: NMS IoU threshold (default 0.8)
            roi: [x1, y1, x2, y2] sub-region to run on, or None for full image
            pad: padding in px added around the ROI
            show_img: externally supplied drawing canvas; None copies the input
            thickness_rate: line thickness as a fraction of the image diagonal
            isShowMask / isMask2Xy: accepted for caller compatibility,
                currently unused
            isDraw: whether to draw boxes/keypoints (show_img is None otherwise)
        Returns:
            show_img: annotated image, or None when isDraw is False
            out: list of [xyxy, cls_ind, cls_name, conf, kps_m3, act_name]
                 [list(4), int, str, float, numpy(m*3), str]
        '''
        pose_img_size = kwargs.get('pose_img_size', 640)
        realtime_conf_thres = kwargs.get('realtime_conf_thres', 0.35)
        iou_thres = kwargs.get('iou_thres', 0.8)
        roi = kwargs.get('roi', None)
        pad = kwargs.get('pad', 0)
        isShowMask = kwargs.get('isShowMask', False)  # unused, kept for compat
        show_img = kwargs.get('show_img', None)
        thickness_rate = kwargs.get('thickness_rate', 0.001)
        isMask2Xy = kwargs.get('isMask2Xy', False)  # unused, kept for compat
        isDraw = kwargs.get('isDraw', True)

        H, W = img.shape[:2]
        if roi is not None:
            # Expand the ROI by `pad` pixels, clipped to the image bounds.
            x1, y1, x2, y2 = roi
            roi = [int(max(x1 - pad, 0)), int(max(y1 - pad, 0)), int(min(x2 + pad, W)), int(min(y2 + pad, H))]

        img2 = img[roi[1]:roi[3], roi[0]:roi[2], ...] if roi is not None else img
        t0 = time.time()
        results = model.predict(img2, imgsz=pose_img_size, conf=realtime_conf_thres, iou=iou_thres, device='0',
                                retina_masks=True)  # retina_masks: high-resolution masks
        print(f'3333333333333333333 {time.time() - t0}')  # debug timing
        result = results[0]  # single image
        boxes = result.boxes  # Boxes object for bounding box outputs
        keypoints = result.keypoints  # Keypoints object for pose outputs
        boxes = boxes.cpu().numpy()
        keypoints = keypoints.cpu().numpy()  # (num_boxes, num_keypoints, 3)

        out = []
        for i in range(boxes.shape[0]):
            xyxy = boxes.xyxy[i, :].tolist()
            area = (xyxy[2] - xyxy[0]) * (xyxy[3] - xyxy[1])
            if area < 150 * 150:  # skip detections too small to classify reliably
                continue
            conf = boxes.conf[i]
            cls_ind = int(boxes.cls[i])
            cls_name = result.names[cls_ind]
            kps_m3 = keypoints.data[i, ...]  # (m, 3)
            if roi is not None:  # map coordinates back to the full image
                xyxy[0] += roi[0]
                xyxy[1] += roi[1]
                xyxy[2] += roi[0]
                xyxy[3] += roi[1]
                kps_m3 += (roi[0], roi[1], 0)

            if action_celue in ['v2', 'v3']:
                infor_str = is_place_bag_v2(kps_m3, xyxy, img)
            elif action_celue in ['v4']:  # use the yolo-pose class output directly
                infor_str = f'{cls_name} {conf:.2f}'
            else:  # v1: geometric heuristic
                is_place, infor_str = is_place_bag(kps_m3)
            act_name = infor_str
            out.append([xyxy, cls_ind, cls_name, conf, kps_m3, act_name])
        print(f'444444444444444444 {time.time() - t0}')  # debug timing

        # draw
        if not isDraw:
            show_img = None
        else:
            if show_img is None:
                show_img = img.copy()
            img_diag = np.sqrt(show_img.shape[0] ** 2 + show_img.shape[1] ** 2)
            print(img_diag)
            for ind, (xyxy, cls_ind, cls_name, conf, kps_m3, act_name) in enumerate(out):
                px1, py1, px2, py2 = int(xyxy[0]), int(xyxy[1]), int(xyxy[2]), int(xyxy[3])
                cv2.putText(show_img, act_name, (px1, py1),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, max(1, int(img_diag * thickness_rate * 0.2)), (0, 255, 0),
                            max(1, int(img_diag * 0.0005)))
                if action_celue in ['v4']:
                    rect_color = (0, 255, 0) if act_name.split(' ')[0] == 'person_NoPlace' else (0, 0, 255)
                else:  # ['v1', 'v2', 'v3']
                    # red box for a bag-placing action, green otherwise
                    rect_color = (0, 0, 255) if sub_parent_clsname_dict[act_name.split(' ')[0]] == 'IP' else (0, 255, 0)
                cv2.rectangle(show_img, (px1, py1), (px2, py2), rect_color, max(1, int(img_diag * thickness_rate)))

                # Arm keypoints and the limb segments connecting them.
                if pose_model_type == 'user':
                    kp_ids = (0, 1, 2, 3, 4, 5)
                    lines = [(0, 2), (2, 4), (1, 3), (3, 5)]
                    line_colors = [(0, 0, 0), (255, 0, 0), (0, 255, 0), (0, 0, 255), ]
                else:  # official COCO indices
                    kp_ids = (5, 6, 7, 8, 9, 10)
                    lines = [(5, 7), (7, 9), (6, 8), (8, 10)]
                    line_colors = [(0, 0, 0), (255, 0, 0), (0, 255, 0), (0, 0, 255), ]
                for kp_id in kp_ids:
                    kpx, kpy = kps_m3[kp_id, :2]
                    color = (int(255 * (kp_id / kps_m3.shape[0])), int(255 * (kp_id / kps_m3.shape[0])),
                             int(255 * (kp_id / kps_m3.shape[0])))

                    cv2.circle(show_img, (int(kpx), int(kpy)), 8, color, thickness=-1, lineType=cv2.LINE_AA)
                # Draw limb lines; `line_ind` avoids shadowing the outer `ind`.
                for line_ind, line in enumerate(lines):
                    a, b = line  # keypoint indices
                    kp_start, kp_end = kps_m3[a, :2], kps_m3[b, :2]
                    kp_start, kp_end = list(map(int, kp_start)), list(map(int, kp_end))
                    if (kp_start[0] == 0 and kp_start[1] == 0) or (kp_end[0] == 0 and kp_end[1] == 0):
                        continue  # skip limbs with an undetected endpoint
                    cv2.line(show_img, kp_start, kp_end, color=line_colors[line_ind], thickness=8)

            if roi is not None:
                cv2.rectangle(show_img, (roi[0], roi[1]), (roi[2], roi[3]), (255, 0, 0),
                              max(1, int(img_diag * thickness_rate)))
        print(f'555555555555555 {time.time() - t0}')  # debug timing
        return show_img, out

    return yolov8_pose_predict


def ttest_get_detect_onepic():
    """Smoke-test single-image inference with hard-coded paths.

    Fixed to match the current get_yolov8_pose_predict interface: it takes a
    config dict (not a bare model path), and the returned closure accepts
    keyword arguments only.
    """
    img = cv2.imread(r"D:\data\231207huoni\trainV8Seg_cable\add_imgs\20240104\Image_20240104150413321.jpg")
    model_path = r"D:\data\231207huoni\trainV8Seg_cable\models\640_cable\weights\best.pt"
    test_config = {
        'pose_model_path': model_path,
        'cls_model_path': model_path,  # TODO: point at a real classifier model if using v2/v3
        'save_dir': r'D:\data\231207huoni\test_data',
        'action_celue': 'v4',  # v4 does not use the classifier
        'pose_model_type': 'official',
        'is_save_crop': False,
    }
    detect_onepic = get_yolov8_pose_predict(test_config)
    # The closure is (img, **kwargs): the confidence must be passed by keyword.
    img_show, out = detect_onepic(img, realtime_conf_thres=0.25, roi=[100, 100, 2500, 1500],
                                  isShowMask=True, isMask2Xy=True)
    ctime.print_stats()
    cv2.imwrite(r'D:\data\231207huoni\test_data\1.jpg', img_show)
    # print(out)


def ttest_dir(test_config):
    """Run pose inference over every image matching test_config['img_glob'].

    Optionally saves annotated images, writes labelme-pose JSON files, and
    (via compare_annotation) compares predictions against ground truth.
    """
    pose_img_size = test_config.get('pose_img_size', 640)
    cls_model_type = test_config['cls_model_type']
    img_glob = test_config['img_glob']
    save_dir = test_config['save_dir']
    action_celue = test_config['action_celue']
    pose_model_type = test_config['pose_model_type']
    is_generate_labelmepose = test_config.get('is_generate_labelmepose', False)  # emit labelme-pose annotations
    is_save_img = test_config.get('is_save_img', False)

    is_compare_annotation = test_config.get('is_compare_annotation', False)

    print(img_glob)
    ls = glob.glob(img_glob)
    pose_predict = get_yolov8_pose_predict(test_config)

    # Only auto-clean a directory literally named 'temp' so a mis-set
    # save_dir can never wipe real results.
    if os.path.exists(save_dir) and os.path.basename(save_dir) == 'temp':
        shutil.rmtree(save_dir)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    os.startfile(save_dir)  # Windows-only: open the output folder in Explorer

    for i in ls:
        img = cv2.imread(i)
        if img is None:  # cv2.imread fails silently on unreadable/corrupt files
            print(f'error {i}')
            continue
        img_show, out = pose_predict(img, pose_img_size=pose_img_size, realtime_conf_thres=0.5, iou_thres=0.2,
                                     isShowMask=False)
        filename = os.path.basename(i)  # e.g. 00100.jpg
        ind_str = os.path.splitext(filename)[0]  # e.g. 00100
        if is_save_img:
            show_img_save_dir = os.path.join(save_dir, 'show_img')
            if not os.path.exists(show_img_save_dir):
                os.mkdir(show_img_save_dir)
            cv2.imwrite(f'{show_img_save_dir}/{ind_str}+pose_{pose_model_type}+cls_{cls_model_type}+{action_celue}.jpg', img_show)

        if is_generate_labelmepose:
            annotated_save_dir = os.path.join(save_dir, 'annotated')
            if not os.path.exists(annotated_save_dir):
                os.mkdir(annotated_save_dir)
            labelmepose_content = yoloPoseInfer2labelmePose(out, img_width=img.shape[1], img_height=img.shape[0])
            if is_save_img:
                cv2.imwrite(f'{annotated_save_dir}/{ind_str}.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), 85])  # original frame

            json_path = f'{annotated_save_dir}/{ind_str}.json'
            labelmepose_content['imagePath'] = f'{ind_str}.jpg'  # assumes JSON and image sit side by side
            with open(json_path, 'w') as json_file:
                json.dump(labelmepose_content, json_file, indent=4)  # annotation file

    if is_compare_annotation:
        from compare_annotation import compare_annotation
        compare_annotation(test_config)

    print(f'saved to {save_dir}')
#
def yoloPoseInfer2labelmePose(out, **kwargs):
    """Convert yolov8_pose_predict output into a labelme-pose (v5.5.0) dict.

    Args:
        out: list of [xyxy, cls_ind, clsname, conf, kps_m3, act_name] where
            kps_m3 is indexable as kps_m3[kp_ind][0/1] and uses COCO keypoint
            order (indices 5..10 = shoulders/elbows/wrists).
        img_width: image width written into the JSON (default 5120).
        img_height: image height written into the JSON (default 1440).

    Returns:
        labelme-format dict: per detection, one 'rectangle' shape labelled
        with the first token of act_name plus six arm-keypoint 'point'
        shapes; all shapes of one detection share the same group_id.
        'imagePath' is left empty for the caller to fill in.
    """
    img_width = kwargs.get('img_width', 5120)
    img_height = kwargs.get('img_height', 1440)
    labelmepose_content = {
        "version": "5.5.0",
        "flags": {},
        "imagePath": "",
        "imageData": None,
        "imageHeight": img_height,
        "imageWidth": img_width,
        "shapes": []
    }
    # COCO arm-keypoint indices and their labelme labels (hoisted out of the loop).
    kp_inds = [5, 6, 7, 8, 9, 10]
    kp_names = ['left_shoulder', 'right_shoulder', 'left_elbow', 'right_elbow', 'left_wrist', 'right_wrist']
    for group_id, ([x1, y1, x2, y2], cls_ind, clsname, conf, kps_m3, act_name) in enumerate(out):
        # Bounding box: strip the trailing confidence/angle info from act_name
        # so the label is just the class/action token.
        act_name = act_name.split(' ')[0]
        rect_shape_dict = {
            'label': act_name,
            'points': [[int(x1), int(y1)], [int(x2), int(y2)]],
            'group_id': group_id,
            'shape_type': 'rectangle',
            'flags': {},
            'description': '',
            'mask': None,
        }
        labelmepose_content['shapes'].append(rect_shape_dict)

        # One point shape per arm keypoint, tied to the box via group_id.
        for kp_ind, kp_name in zip(kp_inds, kp_names):
            kp_shape_dict = {
                'label': kp_name,
                'points': [[int(kps_m3[kp_ind][0]), int(kps_m3[kp_ind][1])]],
                'group_id': group_id,
                'shape_type': 'point',
                'flags': {},
                'description': '',
                'mask': None,
            }
            labelmepose_content['shapes'].append(kp_shape_dict)
    return labelmepose_content


if __name__ == '__main__':
    # ttest_get_detect_onepic()
    root_path = r'D:\DATA\20250519RENBAO'

    # Action strategy ('action_celue') versions:
    #   v1: elbow-angle heuristic
    #   v2: skeleton drawn on a blank image + classifier
    #   v3: skeleton drawn on the image crop + classifier
    #   v4: yolo-pose class output only
    try:
        cls_model_type = 'yolov8nCls_320_closePeople_20250610_103144_cls_format_data__labelmepose400_cls4_augMean3_padSquare'
        # cls_model_type = 'frontCamImg0_100_cls4_augMean3_padSquare_ttest_frontCam_testdata200'
        # cls_model_type = 'frontCamImg0_100_cls4_augMean3_padSquareX_ttest_frontCam_testdata200'
        # cls_model_type = 'bigCamImg100_200_cls7_augmean3_pad'
        test_config = {
            'pose_model_path': rf"D:\CODE\ZXC\project_rbao\yolo11m-pose.pt",
            'pose_model_type': 'official',
            'pose_img_size': 640,
            'cls_model_path': r"D:\DATA\20250519RENBAO\trainV8Pose_closePeople\models\yolov8nCls_320_closePeople_20250610_103144_cls_format_data__labelmepose400_cls4_augMean3_padSquare\weights\best.pt",
            'cls_model_type': cls_model_type,
            # Timestamped run folder so successive runs never collide.
            'save_dir': rf"{root_path}/temp/{time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))}_{cls_model_type}",
            'img_glob': r'D:\DATA\20250519RENBAO\trainV8Pose_closePeople\caitu\64\frontCam_testdata200_t23721-2025-06-04_09-57-50.mp4_frames\*.jpg',
            # 'img_glob': r'D:\DATA\20250519RENBAO\trainV8Pose_closePeople\caitu\64\midCam_testdata250_t23725-2025-06-04_09-57-50.mp4_frames\*.jpg',
            # 'img_glob': r'D:\DATA\20250519RENBAO\trainV8Pose_closePeople\caitu\513\front_cam_t23721-2025-05-13_16-48-43.mp4_frames\*.jpg',
            'action_celue': 'v3',
            'is_generate_labelmepose': True,
            'is_save_img': True,

            'is_compare_annotation': True,
            # 'true_annotation_dir': r'D:\DATA\20250519RENBAO\caitu\pose_test_dataset250-labelme',
            # 'pre_annotation_dir': r'D:\DATA\20250519RENBAO\temp\annotated',
        }

        ttest_dir(test_config)
    except Exception:
        # Top-level boundary: log the full traceback. Not a bare `except:`,
        # which would also swallow KeyboardInterrupt/SystemExit.
        print(traceback.format_exc())
