'''
YOLOv8 instance-segmentation inference.
'''
import glob
import json
import os
import time

import cv2
import numpy as np

from ultralytics import YOLO  # v8.1




def get_yolov8_seg_predict(model_path='yolov8n-seg.pt'):
    """Build a YOLOv8 instance-segmentation predictor closure.

    Args:
        model_path: path to a ``.pt`` weights file, or an already-constructed
            ``YOLO`` model instance (reused as-is).

    Returns:
        A callable ``yolov8_seg_predict(img, ...)`` that runs inference on a
        single image and returns the annotated image plus the detections.
    """
    if isinstance(model_path, YOLO):
        model = model_path
    else:
        model = YOLO(model_path)

    def yolov8_seg_predict(img, imgsz=640, realtime_conf_thres=0.5, iou_thres=0.45, roi=None, pad=0, isShowMask=False):
        '''
        Run segmentation inference on one image, optionally restricted to an ROI.
        Args:
            img: BGR image, H*W*3 (e.g. 2048*3072*3 full frame)
            imgsz: inference image size passed to the model
            realtime_conf_thres: confidence threshold
            iou_thres: NMS IoU threshold
            roi: [x1, y1, x2, y2] crop region, or None for the whole image;
                 detections are mapped back to full-image coordinates
            pad: padding (pixels) added around the ROI before cropping
            isShowMask: whether to blend the instance masks into the output image
        Returns:
            show_img: copy of img with boxes (and optionally masks) drawn
            out: [[xyxy], cls_ind, clsname, conf, mask] per detection,
                 types [list(1*4), int, str, float, numpy(1*h*w)]
        '''
        H, W = img.shape[:2]
        if roi is not None:
            x1, y1, x2, y2 = roi
            # Clamp the padded ROI to the image bounds.
            roi = [int(max(x1 - pad, 0)), int(max(y1 - pad, 0)), int(min(x2 + pad, W)), int(min(y2 + pad, H))]

        img2 = img[roi[1]:roi[3], roi[0]:roi[2], ...] if roi is not None else img
        t0 = time.time()
        results = model.predict(img2, imgsz=imgsz, conf=realtime_conf_thres, iou=iou_thres,
                                device='0', retina_masks=True)  # retina_masks: high-resolution masks
        print(f'3333333333333333333 {time.time() - t0}')  # debug timing
        result = results[0]  # single image
        boxes = result.boxes  # Boxes object for bounding-box outputs
        masks = result.masks  # Masks object for segmentation outputs, n*448*640; None when no detections
        boxes = boxes.cpu().numpy()
        if boxes.shape[0] == 0:
            # No detections: masks is None in this case, so bail out before touching it.
            return img.copy(), []
        masks = masks.cpu().numpy()
        out = []
        for i in range(boxes.shape[0]):
            xyxy = boxes.xyxy[i, :].tolist()
            conf = boxes.conf[i]
            cls_ind = int(boxes.cls[i])
            cls_name = result.names[cls_ind]
            mask = masks[i].data  # numpy 1*h*w, values 0.0 or 1.0
            if roi is not None:  # map back to full-image coordinates
                xyxy[0] += roi[0]
                xyxy[1] += roi[1]
                xyxy[2] += roi[0]
                xyxy[3] += roi[1]
                mask = np.pad(mask, ((0, 0), (roi[1], H - roi[3]), (roi[0], W - roi[2])), 'constant',
                              constant_values=(0, 0))  # pad the 1*h*w mask out to full-image size
            out.append([xyxy, cls_ind, cls_name, conf, mask])
        print(f'444444444444444444 {time.time() - t0}')  # debug timing
        # draw
        show_img = img.copy()
        colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (0, 255, 255), (255, 255, 0), (255, 0, 255)]
        img_diag = np.sqrt(show_img.shape[0] ** 2 + show_img.shape[1] ** 2)  # scales line/font sizes
        for ind, (xyxy, cls_ind, cls_name, conf, mask) in enumerate(out):
            if isShowMask:
                # 1*h*w -> h*w*3 so the mask can be blended with the color image (slow)
                mask = mask.reshape(mask.shape[1], mask.shape[2], 1).repeat(3, axis=2)
                # Per-instance color; use colors[cls_ind % len(colors)] for per-class coloring instead.
                show_img = (show_img + mask * colors[ind % len(colors)] * 0.4).clip(0, 255).astype(np.uint8)

            px1, py1, px2, py2 = int(xyxy[0]), int(xyxy[1]), int(xyxy[2]), int(xyxy[3])
            # BUGFIX: the label now uses this detection's own `conf`; previously it
            # indexed `boxes.conf[i]` with the stale loop variable `i` left over from
            # the extraction loop above, so every label showed the LAST detection's
            # confidence.
            cv2.putText(show_img, f'{cls_ind}_{cls_name}_{str(np.round(conf, 2))}', (px1, py1),
                        cv2.FONT_HERSHEY_COMPLEX_SMALL, max(2, int(img_diag * 0.0002)), (0, 255, 0),
                        max(2, int(img_diag * 0.0002)))
            cv2.rectangle(show_img, (px1, py1), (px2, py2), (0, 255, 0), max(2, int(img_diag * 0.001)))
        if roi is not None:
            cv2.rectangle(show_img, (roi[0], roi[1]), (roi[2], roi[3]), (255, 0, 0), max(2, int(img_diag * 0.001)))
        print(f'555555555555555 {time.time() - t0}')  # debug timing
        return show_img, out

    return yolov8_seg_predict




def ttest_get_detect_onepic():
    """Smoke-test: run segmentation on a single image and save the result.

    Uses hard-coded local paths; intended for manual developer testing only.
    """
    img = cv2.imread(r"D:\data\231207huoni\trainV8Seg_cable\add_imgs\20240104\Image_20240104150413321.jpg")
    model_path = r"D:\data\231207huoni\trainV8Seg_cable\models\640_cable\weights\best.pt"
    detect_onepic = get_yolov8_seg_predict(model_path)
    # BUGFIX: 0.25 was previously passed positionally, which landed in the
    # second parameter `imgsz` instead of the confidence threshold.
    img_show, out = detect_onepic(img, realtime_conf_thres=0.25)
    cv2.imwrite(r'D:\data\231207huoni\test_data\1.jpg', img_show)


def ttest_dir(test_config,):
    """Run segmentation inference over a directory of images.

    Args:
        test_config: dict with the following keys (all read via ``.get``):
            model_path: weights file path (default '').
            img_glob: glob pattern selecting the input images (default '').
            save_dir: output root directory, created if missing (default '').
            imgsz: inference image size (default 640).
            is_save_img: save annotated result images under save_dir/show_img
                (default False).
            is_generate_labelmepose: write labelme-style JSON annotations
                (and, if is_save_img, the source image) under
                save_dir/annotated (default False).
    """
    save_dir = test_config.get('save_dir', '')
    is_save_img = test_config.get('is_save_img', False)
    is_generate_labelmepose = test_config.get('is_generate_labelmepose', False)
    model_path = test_config.get('model_path', '')
    img_glob = test_config.get('img_glob', '')
    imgsz = test_config.get('imgsz', 640)

    # exist_ok avoids the check-then-create race of the original code
    os.makedirs(save_dir, exist_ok=True)

    ls = glob.glob(img_glob)
    yolov8_seg_predict = get_yolov8_seg_predict(model_path)
    for ind, i in enumerate(ls):
        print(f"{ind}/{len(ls)} {i}")
        img = cv2.imread(i)
        img_show, out = yolov8_seg_predict(img, imgsz=imgsz, realtime_conf_thres=0.25, isShowMask=True)

        filename = os.path.basename(i)  # e.g. 00100.jpg
        ind_str = os.path.splitext(filename)[0]  # e.g. 00100
        # Save the annotated result image.
        if is_save_img:
            show_img_save_dir = os.path.join(save_dir, 'show_img')
            os.makedirs(show_img_save_dir, exist_ok=True)
            cv2.imwrite(f'{show_img_save_dir}/{ind_str}_res.jpg',
                        img_show)
        # Save detections as a labelme annotation.
        if is_generate_labelmepose:
            annotated_save_dir = os.path.join(save_dir, 'annotated')
            os.makedirs(annotated_save_dir, exist_ok=True)
            labelmepose_content = yoloSegInfer2labelmeBox(out, img_width=img.shape[1], img_height=img.shape[0])
            if is_save_img:
                cv2.imwrite(f'{annotated_save_dir}/{ind_str}.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), 85])  # source image

            json_path = f'{annotated_save_dir}/{ind_str}.json'
            labelmepose_content['imagePath'] = f'{ind_str}.jpg'  # todo
            with open(json_path, 'w') as json_file:
                json.dump(labelmepose_content, json_file, indent=4)  # annotation


def yoloSegInfer2labelmeBox(out, **kwargs):
    """Convert segmentation inference output into a labelme annotation dict.

    Only the bounding boxes are exported (as rectangle shapes); the per-instance
    masks and confidences in ``out`` are not written to the annotation.

    Args:
        out: detections as produced by the predictor:
            [[xyxy], cls_ind, clsname, conf, mask] with types
            [list(1*4), int, str, float, numpy(1*h*w)].
        **kwargs:
            img_width: image width for the annotation header (default 5120).
            img_height: image height for the annotation header (default 1440).

    Returns:
        A labelme v5.5.0 content dict with one rectangle shape per detection,
        group_id set to the detection's index.
    """
    width = kwargs.get('img_width', 5120)
    height = kwargs.get('img_height', 1440)

    shapes = []
    for group_id, ([x1, y1, x2, y2], cls_ind, clsname, conf, mask) in enumerate(out):
        # One rectangle shape per detection box.
        shapes.append({
            'group_id': group_id,
            'shape_type': 'rectangle',
            'label': clsname,
            'points': [[int(x1), int(y1)], [int(x2), int(y2)]],
            'flags': {},
            'description': '',
            'mask': None,
        })

    return {
        "version": "5.5.0",
        "flags": {},
        "imagePath": "",
        "imageData": None,
        "imageHeight": height,
        "imageWidth": width,
        "shapes": shapes,
    }

if __name__ == '__main__':
    # Manual test-run configuration; adjust paths/flags per experiment.
    run_name = 'YoloV8mSeg_train640_DBclip_ep200_infer1280'
    timestamp = time.strftime('%Y%m%d%H%M%S')

    test_config = {
        # 200-epoch model trained at 640
        'model_path': r"D:\DATA\20250611HKBZ\trainV8Seg_allv2\models\yolov8mSeg_640_allv22\weights\best.pt",
        'save_dir': rf"D:\DATA\20250611HKBZ\temp\{timestamp}_{run_name}",
        'img_glob': r'D:\DATA\20250611HKBZ\caitu\before_20250619\testdata170\*.jpg',
        'imgsz': 1280,
        'is_generate_labelmepose': True,
        'is_save_img': True,
        'is_compare_annotation': True,
    }

    ttest_dir(test_config)