from tqdm import tqdm
from enum import Enum
from pathlib import Path
import torch
import re
import os
import cv2
import sys
import shutil
import glob
import random
import math
import time
import toml
import numpy as np
import hashlib
from multiprocessing import Process

# Make the Hikvision MVS SDK importable; the camera bindings are optional,
# so an import failure simply disables HKCamera support.
try:
    sys.path.append('/opt/MVS/Samples/64/Python/MvImport')
    from HKCamera import HKCamera, Camera_state
except Exception:
    # SDK not installed (or failed at import time) -> run without camera support.
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still propagate.
    pass

# Short display labels indexed by class / size / color id (see Predictor.draw_result).
class_name = ['S', '1', '2', '3', '4', '5', 'O', 'B']
tsize_name = ['S', 'B', 'CS', 'CB']
color_name = ['B', 'R', 'N', 'P']
window_code = -1
# Shared UI state: presumably (x, y, half_width, half_height) of the user-drawn
# rectangle, or None when disabled — inferred from Recoder.show(); confirm.
manual_rectangle = None
vision_enhancement = False

PLAYER_DIR = 'Player'  # default output directory for players
EXTENSIONS = 'tools_extension'  # directory holding the optional user configure.toml
LABELMASTER = '/home/ubuntu/LabelMaster/build/LabelMaster'  # overridable via configure.toml
YOLOV5_PATH = '/home/ubuntu/yolov5-RM_h_test'  # overridable via configure.toml
VIDEO_FORMATS = {'.mp4', '.avi', '.MP4'}
IMAGE_FORMATS = {'.jpg', '.png', '.bmp', '.jpeg', '.JPG', '.PNG', '.BMP'}
ARROW_MODE_ABBREVIATION = ['C', 'C2', 'R']  # indexed by Arrow_mode.value
CAL_V_MODE_ABBREVIATION = ['D', 'M', 'N']  # indexed by Cal_V_mode.value
PLAYING_MODE_ABBREVIATION = ['S', 'P', 'L', 'F']  # indexed by Playing_mode.value
# Per-class 0/1 table over the 8 classes; presumably a class-group mapping — confirm.
CLASS23_TRANS = torch.tensor([0, 0, 0, 1, 1, 1, 0, 1])
UNIFORM_IMAGE_SIZE = [100, 100]
FONT_FACE = cv2.FONT_HERSHEY_COMPLEX  # font used for all overlay text
FOURCC = cv2.VideoWriter_fourcc(*'XVID')  # codec for recorded .avi files
# Defaults for Data_enhancement_configure, indexed by Data_enhancement_sign.
DATA_ENHANCEMENT_DEFAULT_ARGS = [0, 1, 1, None, 1, 0, 0, 0]
# NOTE(review): 'LOACL' is a typo for 'LOCAL', but the name mirrors the
# configure.toml key (see load_local_configure) — renaming would break configs.
LOACL_FASTPATH = []
DEFAULT_WINDOW_NAME = 'hh'
DEFAULT_TVA = 100  # default target-brightness anchor value
DEFAULT_FPS = 30
DEFAULT_EXPOSURE_TIME = 3000
DEFAULT_GAIN = 10
DEFAULT_VIDEO = 'BeiLiZhu.avi'
DEFAULT_DATASET = 'D419C'
EPS = 1e-9  # guard against division by zero

dirname = os.path.dirname(cv2.__file__)  # locate the OpenCV install directory
plugin_path = os.path.join(dirname, 'qt', 'plugins', 'platforms')  # Qt platform plugins bundled with opencv-python
os.environ['QT_QPA_PLATFORM_PLUGIN_PATH'] = plugin_path  # point Qt at OpenCV's plugins so HighGUI windows work

def load_local_configure():
    """Load user overrides from ``tools_extension/configure.toml``, if present.

    Overrides the module-level LABELMASTER, LOACL_FASTPATH and YOLOV5_PATH
    defaults. Keys missing from the file keep their built-in defaults
    (previously a missing key raised KeyError).
    """
    config_path = Path(EXTENSIONS) / 'configure.toml'
    if config_path.exists():
        global LABELMASTER
        global LOACL_FASTPATH
        global YOLOV5_PATH
        user_configure = toml.load(str(config_path))
        LABELMASTER = user_configure.get('LABELMASTER', LABELMASTER)
        LOACL_FASTPATH = user_configure.get('LOACL_FASTPATH', LOACL_FASTPATH)
        YOLOV5_PATH = user_configure.get('YOLOV5_PATH', YOLOV5_PATH)

load_local_configure()

class Label_Format(Enum):
    """Supported dataset label formats."""
    Auto = 0
    Standard = 1
    Old_Standard = 2
    LabelMaster = 3
    Yolov5 = 4
    SJ_old = 5
    SJ = 6
    SJ_wind = 7
    Wind = 8
    Wind_v5 = 9
    Wind_4P = 10
    LabelMaster_old = 11
    LabelMaster_wind = 12
    Yolov5_wind = 13
    Engineer = 14
    Anti_wind = 15

class Net_Format(Enum):
    """Neural-network framework types."""
    Null = -1
    Auto = 0
    Yolov8 = 1
    Yolov5 = 2

class Camera_type(Enum):
    """Camera backend types."""
    Null = -1
    camera = 0
    HKCamera = 1

class Font(Enum):
    """Terminal text styles; the values appear to follow ANSI SGR codes
    (1=bold, 5=blink, 31-37=foreground colors) — confirm against the log printer."""
    Null = -1
    Highlight = 1
    Half_Light = 2
    Italic = 3
    Underline = 4
    Flicker = 5
    FRed = 31
    FGreen = 32
    FYellow = 33
    FBlue = 34
    FPurple = 35
    FWhite = 37

class Log_sign(Enum):
    """Log message severities."""
    Null = -1
    Info = 1
    Warning = 2
    Error = 3
    Require = 4
    Debug = 5

class Player_sign(Enum):
    """Where the played data comes from."""
    Null = -1
    Datasets = 1
    Video = 2
    Camera = 3

class Arrow_mode(Enum):
    """Keypad (arrow-key) control modes."""
    Null = -1
    Player_Control = 0
    Player_Control2 = 1
    Manual_Rectangle = 2

class Cal_V_mode(Enum):
    """Methods for computing image brightness (V)."""
    default = 0
    manual = 1
    net = 2

class Playing_mode(Enum):
    """Playback states."""
    Per_Frame = -1
    Stop = 0
    Playing = 1
    Loop = 2

class LabelMaster_mode(Enum):
    """LabelMaster operating modes."""
    Armor = 0
    Wind = 1
    Engineer = 2
    Anti_wind = 3

class Data_enhancement_sign(Enum):
    """Kinds of data enhancement; values index DATA_ENHANCEMENT_DEFAULT_ARGS
    and name the attributes of Data_enhancement_configure."""
    H = 0
    S = 1
    V = 2
    TVA = 3
    scale = 4
    rotate = 5
    down_cover = 6
    random_geometry = 7

class Data_enhancement_configure:
    """Container for data-enhancement (augmentation) parameters.

    Attribute order matches Data_enhancement_sign and the defaults in
    DATA_ENHANCEMENT_DEFAULT_ARGS.
    """
    def __init__(self) -> None:
        self.H = DATA_ENHANCEMENT_DEFAULT_ARGS[0]  # hue shift (additive)
        self.S = DATA_ENHANCEMENT_DEFAULT_ARGS[1]  # saturation rate
        self.V = DATA_ENHANCEMENT_DEFAULT_ARGS[2]  # brightness rate
        self.TVA = DATA_ENHANCEMENT_DEFAULT_ARGS[3]  # target-brightness anchor; None disables it
        self.scale = DATA_ENHANCEMENT_DEFAULT_ARGS[4]  # geometric scale rate
        self.rotate = DATA_ENHANCEMENT_DEFAULT_ARGS[5]  # rotation (additive)
        self.down_cover = DATA_ENHANCEMENT_DEFAULT_ARGS[6]
        self.random_geometry = DATA_ENHANCEMENT_DEFAULT_ARGS[7]

    def reverse(self):
        """Build the inverse configuration: additive values are mirrored around
        their defaults, rates are inverted.

        Returns:
            Data_enhancement_configure: the inverted parameters.
        """
        rc = Data_enhancement_configure()
        rc.H = self.H * 2 - DATA_ENHANCEMENT_DEFAULT_ARGS[0]  # additive value
        rc.S = 1 / (self.S + EPS)  # rate
        # NOTE(review): unlike S, the next two have no EPS guard and raise
        # ZeroDivisionError when 0 — confirm whether 0 is a legal value here.
        rc.V = 1 / self.V  # rate
        rc.scale = 1 / self.scale  # rate
        rc.rotate = self.rotate * 2 - DATA_ENHANCEMENT_DEFAULT_ARGS[5]  # additive value
        return rc

    def __getitem__(self, key):
        """
        Fetch a parameter by its Data_enhancement_sign (or the sign's int value).

        Args:
            key (Data_enhancement_sign | int): which parameter to read.

        Returns:
            float: the parameter value.
        """
        # getattr replaces the original eval("self." + name): same attribute
        # lookup, without executing dynamically-built code.
        return getattr(self, Data_enhancement_sign(key).name)
    
class Recoder:
    """Records frames to video/image files and displays them in an OpenCV window."""

    def __init__(self, project_dir:str, name:str, FPS=30) -> None:
        """
        Args:
            project_dir (str): directory every output file is written under.
            name (str): recorder name, appended to the window title.
            FPS (int, optional): frame rate used when writing video. Defaults to 30.
        """
        self.img = None            # most recent frame handed to update()
        self.name = name
        self.frame = None          # frame index of the current image (may be None)
        self.project_dir = project_dir
        self.FPS = FPS
        self.path = None
        self.video_writer = None   # cv2.VideoWriter while video recording is active
        self.save_path = None      # target directory while image-sequence saving is active
        self.save_code = 0         # running index for image-sequence file names
        self.shot_code = 0         # running index for screenshot windows
        self.shot_images = []      # screenshots waiting for save_shot()
        self.window_name = None
        self.updated = False       # True once update() has delivered a frame
        self.showing = False       # whether show() should display the window
        self.showed = True         # True while an OpenCV window may still exist
        self.info = ''             # overlay text drawn by show()

    def __del__(self):
        # Best effort: release the writer and close the window on destruction.
        self.stop_save()
        self.set_showing(False)

    def setup_save(self, suffix=None, as_video=True):
        """
        Configure saving; call update() at least once first so the frame size is known.

        Args:
            suffix (str, optional): file-name suffix. Defaults to None.
            as_video (bool, optional): save as a video instead of an image sequence. Defaults to True.
        """
        save_path = f'{self.project_dir}/{self.window_name}{f"_{suffix}" if suffix is not None else ""}'
        if as_video:
            if self.video_writer is not None:
                log('please stop last recording before start a new one', Log_sign.Warning)
                return
            # Frame size comes from the last updated image.
            self.video_writer = cv2.VideoWriter(get_unique_path(f'{save_path}.avi'), FOURCC, self.FPS, (self.img.shape[1], self.img.shape[0]))
        else:
            if self.save_path is not None:
                log('please stop last recording before start a new one', Log_sign.Warning)
                return
            self.save_path = save_path
            self.save_code = 0
            mkdir(save_path)

    def stop_save(self):
        """Stop any active video or image-sequence recording."""
        self.save_path = None
        self.save_code = None
        if self.video_writer is not None:
            self.video_writer.release()
            self.video_writer = None

    def save(self):
        """Write the current frame to the active video and/or image sequence."""
        if self.video_writer is not None:
            self.video_writer.write(self.img)
        if self.save_path is not None:
            if self.frame is None:
                # No source frame index: fall back to the running counter.
                path = f'{self.save_path}/{self.save_code}.png'
                self.save_code += 1
            else:
                path = f'{self.save_path}/{self.frame}.png'
            cv2.imwrite(path, self.img)

    def update(self, img:np.array, source_name=None, frame=None):
        """
        Store a copy of *img* as the current frame.

        Args:
            img (np.array): new frame.
            source_name (str, optional): dataset name; updates the window title. Defaults to None.
            frame (int, optional): frame index of *img*. Defaults to None.
        """
        if source_name is not None:
            self.set_window_name(source_name)
        self.img = img.copy()
        self.frame = frame
        self.updated = True

    def show(self, size_mul=1, vision_enhancement=False):
        """
        Display the current frame, or close the window when not showing.

        Args:
            size_mul (float, optional): display scale factor. Defaults to 1.
            vision_enhancement (bool, optional): also outline the manual rectangle. Defaults to False.
        """
        if self.window_name is None:
            log('please set source name before showing', Log_sign.Error)
            return
        if not self.showing:
            self.destroyWindow()
            return
        img = self.get_image()
        img = cv2.resize(img, (int(img.shape[1] * size_mul), int(img.shape[0] * size_mul)))
        cv2.putText(img, self.info, (0, 15), FONT_FACE, 0.5, (255, 255, 255))
        if vision_enhancement and manual_rectangle is not None:
            # manual_rectangle holds (x, y, half_w, half_h); draw one pixel larger,
            # clamped to the image bounds (same arithmetic as the original one-liner).
            x, y, hw, hh = manual_rectangle[0], manual_rectangle[1], manual_rectangle[2], manual_rectangle[3]
            top_left = (int(max(0, x - hw - 1) * size_mul), int(max(0, y - hh - 1) * size_mul))
            bottom_right = (int(min(img.shape[1], x + hw + 1) * size_mul), int(min(img.shape[0], y + hh + 1) * size_mul))
            cv2.rectangle(img, top_left, bottom_right, (255, 255, 255), 1)
        cv2.imshow(self.window_name, img)
        self.showed = True

    def get_image(self):
        """
        Get a copy of the current frame.

        Returns:
            np.array: copy of the current frame.
        """
        return self.img.copy()

    def set_window_name(self, source_name:str):
        """
        Retitle the window for *source_name*; closes the old window when the name changes.

        Args:
            source_name (str): dataset name.
        """
        if self.window_name is None or not self.window_name.startswith(source_name):
            self.destroyWindow()
            self.window_name = f'{source_name}{f"_{self.name}" if len(self.name) else ""}'

    def set_showing(self, showing:bool):
        """
        Set whether show() should display the window; always closes the current window.

        Args:
            showing (bool): new display state.
        """
        self.showing = showing
        self.destroyWindow()

    def destroyWindow(self):
        """Close the OpenCV window if one may still be open."""
        if self.showed and self.window_name is not None:
            try:
                cv2.destroyWindow(self.window_name)
            except Exception:
                # The window may already be gone; ignore.
                pass
            self.showed = False

    def save_image(self, suffix:str):
        """
        Save the current frame as a single image.

        Args:
            suffix (str): file-name suffix.
        """
        cv2.imwrite(get_unique_path(f'{self.project_dir}/{self.window_name}{f"_{suffix}" if suffix is not None else ""}.png'), self.img)

    def get_saving_state(self):
        """
        Report whether a recording is active.

        Returns:
            bool: True when video or image-sequence saving is in progress.
        """
        return self.video_writer is not None or self.save_path is not None

    def shot(self):
        """Take a screenshot of the current frame and display it in its own window."""
        cv2.imshow(f'<{self.shot_code}>{self.window_name}', self.img)
        self.shot_code += 1
        self.shot_images.append(self.get_image())

    def save_shot(self):
        """Write all pending screenshots to disk and clear the pending list."""
        for img in self.shot_images:
            # Bug fix: the '[shot]' tag used to be prepended to the directory
            # (producing a non-existent '[shot]<dir>/...' path, so imwrite
            # silently failed); keep the tag inside the file name instead.
            cv2.imwrite(get_unique_path(f'{self.project_dir}/[shot]{self.window_name}.png'), img)
        self.shot_images.clear()


class Predictor(Recoder):
    """A Recoder whose frames carry model predictions drawn over the source image."""
    def show(self, size_mul=1, vision_enhancement=False):
        """
        Save then display the current frame.

        Args:
            size_mul (float, optional): display scale factor. Defaults to 1.
            vision_enhancement (bool, optional): also outline the manual rectangle. Defaults to False.
        """
        self.save()
        return super().show(size_mul, vision_enhancement)

    def update(self, img: np.array, source_name=None, frame=None):
        """
        Store a new frame; the window stays hidden until predict()/draw_result() runs.

        Args:
            img (np.array): new frame.
            source_name (str, optional): dataset name. Defaults to None.
            frame (int, optional): frame index. Defaults to None.
        """
        super().update(img, source_name, frame)
        # Reset the flags so only frames that actually receive a prediction are shown.
        self.showing = False
        self.updated = False

    def predict(self, img:np.array, net, show_V=True, open_fix_prediction=True, label=None):
        """
        Run *net* on *img* and draw the (optionally fixed) result onto the frame.

        Args:
            img (np.array): frame to predict on.
            net (Net): model wrapper whose predict() fills a dataset in place.
            show_V (bool, optional): append the brightness value to each label. Defaults to True.
            open_fix_prediction (bool, optional): also draw the fix_prediction() result. Defaults to True.
            label (dataset, optional): ground-truth labels; when given (and both sides
                have key points) the label/prediction distance is appended to info. Defaults to None.
        """
        pd = dataset(img, 'temp')  # throw-away dataset holding only this frame
        pd.clear_label()
        net.predict(pd)
        self.updated = True
        self.draw_result(pd, (0, 200, 0), show_V=show_V)  # raw prediction in green
        if open_fix_prediction:
            fix_prediction(pd)
            self.draw_result(pd, (200, 200, 200), show_V=show_V)  # fixed prediction in grey
        if label is not None:
            if len(pd.keyPoints) and len(label.keyPoints):
                self.info += f" Dis:{calculate_distance(label, pd, img.shape):.2f}"

    def draw_result(self, data, color, scale_rate=1, show_V=True):
        """
        Draw key points, outlines and class labels of *data* onto the frame.

        Args:
            data (dataset): prediction result (a str means an error — nothing to draw).
            color (tuple): BGR drawing color.
            scale_rate (float, optional): extra scale applied to key points. Defaults to 1.
            show_V (bool, optional): append the brightness value to each label. Defaults to True.
        """
        self.showing = True
        # Nothing to draw: empty dataset, or a string instead of a dataset.
        # Precedence is (isinstance-and-empty) or isinstance-str.
        if isinstance(data, dataset) and len(data.cls) == 0 or isinstance(data, str):
            return
        # keyPoints are stored as fractions of the frame: multiplied by the image
        # width/height here to obtain integer pixel coordinates.
        kp = (data.keyPoints * torch.tensor([self.img.shape[1], self.img.shape[0]], device=data.keyPoints.device).repeat(data.keyPoints.shape[1] // 2) * scale_rate).round().int().tolist()
        for i in range(len(data.cls)):
            nk = len(kp[i]) // 2  # number of key points of this detection
            for kpi in range(nk):
                cv2.circle(self.img, (kp[i][kpi * 2], kp[i][kpi * 2 + 1]), 3, color, -1)
                # Connect consecutive key points into a closed outline.
                cv2.line(self.img, (kp[i][kpi * 2], kp[i][kpi * 2 + 1]), (kp[i][((kpi + 1) % nk) * 2], kp[i][((kpi + 1) % nk) * 2 + 1]), color, 1)
            # Label text: color-[size]class<conf>(brightness), drawn at the first key point.
            cv2.putText(self.img, f'{color_name[int(data.color[i])]}-[{tsize_name[int(data.tsize[i])]}]{class_name[int(data.cls[i])]}{f"<{data.conf[i]:.2f}>" if len(data.conf) else ""}{f"({Cal_V(self.img, cal_mode=Cal_V_mode.default, kps=data.keyPoints):.1f})" if show_V else ""}', (kp[i][0], kp[i][1]), FONT_FACE, 0.5, (255, 255, 255))


class Source(Recoder):
    """A Recoder for one data stream that also owns one Predictor per net."""
    def __init__(self, save_dir: str, name: str, FPS=30) -> None:
        """
        Args:
            save_dir (str): directory outputs are written under.
            name (str): stream name (e.g. 'raw', 'E-V').
            FPS (int, optional): recording frame rate. Defaults to 30.
        """
        super().__init__(save_dir, name, FPS)
        self.predictions = []       # Predictor instances (one per net, plus 'label')
        self.prediction_names = []  # names parallel to self.predictions

    def append_prediction(self, prediction):
        """
        Register a predictor and index it by its name.

        Args:
            prediction (Predictor): predictor to track.
        """
        self.predictions.append(prediction)
        self.prediction_names.append(prediction.name)

    def update(self, img: np.array, source_name=None, frame=None, nets=None):
        """
        Update the current frame and fan it out to the per-net predictors.

        Args:
            img (np.array): new frame.
            source_name (str, optional): dataset name. Defaults to None.
            frame (int, optional): frame index. Defaults to None.
            nets (list, optional): nets to predict with; when None a single
                'label' predictor is used instead. Defaults to None.
        """
        super().update(img, source_name, frame)
        self.save()
        self.showing = True
        if nets is not None:
            for net in nets:
                try:
                    i = self.prediction_names.index(f'{self.name}_[{net.name}]')
                except ValueError:
                    # First time this net is seen: create its predictor.
                    self.append_prediction(Predictor(self.project_dir, f'{self.name}_[{net.name}]', self.FPS))
                    i = -1
                self.predictions[i].update(img, source_name, frame)
        else:
            try:
                i = self.prediction_names.index('label')
            except ValueError:
                self.append_prediction(Predictor(self.project_dir, 'label', self.FPS))
                i = -1
            self.predictions[i].update(img, source_name, frame)

    def predict(self, nets, show_V=True, open_fix_prediction=True, label=None):
        """
        Run every net on the current frame via its predictor.

        Args:
            nets (list): nets to predict with (None means nothing to do).
            show_V (bool, optional): append brightness values to labels. Defaults to True.
            open_fix_prediction (bool, optional): also draw fixed predictions. Defaults to True.
            label (dataset, optional): ground-truth labels. Defaults to None.
        """
        if nets is None:
            return
        self.showing = False
        for net in nets:
            try:
                i = self.prediction_names.index(f'{self.name}_[{net.name}]')
            except ValueError:
                # Bug fix: a missing predictor used to raise an uncaught
                # ValueError; log the error (as originally intended) and skip.
                log(f'can\'t find {net.name} in {self.name}\' predictions', Log_sign.Error)
                continue
            self.predictions[i].predict(self.get_image(), net, show_V, open_fix_prediction, label)

    def show(self, size_mul=1, vision_enhancement=False):
        """
        Show the raw frame and every predictor window.

        Args:
            size_mul (float, optional): display scale factor. Defaults to 1.
            vision_enhancement (bool, optional): also outline the manual rectangle. Defaults to False.
        """
        super().show(size_mul, vision_enhancement)
        for prediction in self.predictions:
            prediction.show(size_mul, vision_enhancement)

    def set_source_name(self, source_name:str):
        """
        Set the dataset name on this source and its predictors.

        Args:
            source_name (str): dataset name.
        """
        self.source_name = source_name
        for prediction in self.predictions:
            prediction.source_name = source_name

    def reset_updating(self):
        """Clear the updated/showing flags here and on every predictor."""
        self.updated = False
        self.showing = False
        for prediction in self.predictions:
            prediction.updated = False
            prediction.showing = False

    def draw_result(self, data, color, scale_rate, show_V):
        """
        Forward a draw request to every predictor (hides the raw window).

        Args:
            data (dataset): prediction result.
            color (tuple): BGR drawing color.
            scale_rate (float): scale applied to key points.
            show_V (bool): append brightness values to labels.
        """
        self.showing = False
        for prediction in self.predictions:
            prediction.draw_result(data, color, scale_rate, show_V)

    def set_info(self, info:str):
        """
        Set the overlay text on this source and its predictors.

        Args:
            info (str): overlay text.
        """
        self.info = info
        for prediction in self.predictions:
            prediction.info = info

    def stop_save(self):
        """Stop recording on this source and every predictor."""
        super().stop_save()
        for prediction in self.predictions:
            prediction.stop_save()

    def save(self):
        """Save the current frame of this source and every predictor."""
        super().save()
        for prediction in self.predictions:
            prediction.save()

    def setup_save(self, suffix=None, as_video=True, source=True, prediction=True):
        """
        Configure saving for the raw stream and/or its predictions.

        Args:
            suffix (str, optional): file-name suffix. Defaults to None.
            as_video (bool, optional): save as video instead of images. Defaults to True.
            source (bool, optional): save the raw stream. Defaults to True.
            prediction (bool, optional): save the prediction streams. Defaults to True.
        """
        if source:
            super().setup_save(suffix, as_video)
        if prediction:
            # Loop variable renamed so it no longer shadows the 'prediction' flag.
            for pred in self.predictions:
                pred.setup_save(suffix, as_video)

    def save_image(self, suffix):
        """
        Save the current frame of this source and every predictor as images.

        Args:
            suffix (str): file-name suffix.
        """
        super().save_image(suffix)
        for pred in self.predictions:
            # Bug fix: the suffix used to be dropped here, making the call a
            # TypeError (Recoder.save_image requires it).
            pred.save_image(suffix)

    def get_saving_state(self):
        """
        Report whether this source or any predictor is recording.

        Returns:
            bool: True when any recording is in progress.
        """
        state = super().get_saving_state()
        for prediction in self.predictions:
            state = state or prediction.get_saving_state()
        return state

    def shot(self):
        """Take a screenshot of this source and every predictor."""
        super().shot()
        for prediction in self.predictions:
            prediction.shot()

    def save_shot(self):
        """Write pending screenshots of this source and every predictor."""
        super().save_shot()
        for prediction in self.predictions:
            prediction.save_shot()


class Sources:
    """Manages the collection of Source streams produced during playback."""
    def __init__(self, save_dir:str, FPS=30) -> None:
        """
        Args:
            save_dir (str): directory every Source writes its outputs under.
            FPS (int, optional): recording frame rate. Defaults to 30.
        """
        self.sources = []         # managed Source objects
        self.source_names = []    # names parallel to self.sources
        self.save_dir = save_dir
        self.FPS = FPS
        self.source_name = None   # current dataset name (set in setup_update)
        self.source_frame = None  # current frame index (set in setup_update)
        self.nets = None          # nets forwarded to Source.update / predict

    def append(self, source:'Source'):
        """
        Track a new source.

        Args:
            source (Source): source to add.
        """
        self.sources.append(source)
        self.source_names.append(source.name)

    def update_source_names(self):
        """Rebuild the name index from the current sources."""
        self.source_names = [source.name for source in self.sources]

    def setup_update(self, source_name:str, source_frame, nets):
        """
        Prepare every source for a new frame.

        Args:
            source_name (str): dataset name.
            source_frame (int): current frame index.
            nets (list): nets to predict with (or None).
        """
        self.source_name = source_name
        self.source_frame = source_frame
        self.nets = nets
        for source in self.sources:
            source.reset_updating()

    def update(self, img:np.array, name:str):
        """
        Push a frame into the source called *name*, creating it on first use.

        Args:
            img (np.array): frame to store.
            name (str): source name (e.g. 'raw', 'E-V').
        """
        try:
            i = self.source_names.index(name)
        except ValueError:
            self.append(Source(self.save_dir, name, self.FPS))
            i = -1
        self.sources[i].update(img, self.source_name, self.source_frame, self.nets)

    def __getitem__(self, key):
        """
        Look a source up by name.

        Args:
            key (str): source name.

        Returns:
            Source | None: the matching source, or None when no source has that name.
        """
        try:
            return self.sources[self.source_names.index(key)]
        except ValueError:
            return None

    def show(self, size_mul=1, vision_enhancement=False):
        """
        Show every source updated this round and drop the stale ones.

        Args:
            size_mul (float, optional): display scale factor. Defaults to 1.
            vision_enhancement (bool, optional): also outline the manual rectangle. Defaults to False.
        """
        # Bug fix: the old code popped from the lists while iterating them with
        # enumerate, which skipped elements and removed wrong indices once more
        # than one source was stale. Rebuild the lists instead.
        kept_sources = []
        kept_names = []
        for source in self.sources:
            if source.updated:
                source.show(size_mul, vision_enhancement)
                kept_sources.append(source)
                kept_names.append(source.name)
        self.sources = kept_sources
        self.source_names = kept_names

    def set_source_name(self, source_name:str):
        """
        Set the dataset name on every source.

        Args:
            source_name (str): dataset name.
        """
        for source in self.sources:
            source.set_source_name(source_name)

    def draw_result(self, data, color, scale_rate, show_V):
        """
        Forward a draw request to every source.

        Args:
            data (dataset): prediction result.
            color (tuple): BGR drawing color.
            scale_rate (float): scale applied to key points.
            show_V (bool): append brightness values to labels.
        """
        for source in self.sources:
            source.draw_result(data, color, scale_rate, show_V)

    def predict(self, show_V=True, open_fix_prediction=True, label=None):
        """
        Run the configured nets on every source.

        Args:
            show_V (bool, optional): append brightness values to labels. Defaults to True.
            open_fix_prediction (bool, optional): also draw fixed predictions. Defaults to True.
            label (dataset, optional): ground-truth labels. Defaults to None.
        """
        for source in self.sources:
            source.predict(self.nets, show_V, open_fix_prediction, label)

    def set_info(self, info:str):
        """
        Set the overlay text on every source.

        Args:
            info (str): overlay text.
        """
        for source in self.sources:
            source.set_info(info)

    def get_main_window_name(self):
        """
        Get the name of the main (raw) window.

        Returns:
            str | None: the first raw prediction window if it is showing,
            otherwise the raw window itself; None when there is no raw source.
        """
        raw = self.__getitem__('raw')
        if raw is None:
            log('no raw', Log_sign.Error)
            return None
        if len(raw.predictions) and raw.predictions[0].showing:
            return raw.predictions[0].window_name
        else:
            return raw.window_name

    def save(self, source=True, prediction=True):
        """
        Save the current frame of every source.

        Args:
            source (bool, optional): save the raw streams. Defaults to True.
            prediction (bool, optional): save the prediction streams. Defaults to True.
        """
        # Bug fix: the loop variable used to shadow the 'source' flag, and the
        # Source object plus a flag were passed to Source.save(), which takes no
        # arguments (TypeError). NOTE(review): Source.save() always saves both
        # streams; the flags cannot be honored until it grows matching parameters.
        for src in self.sources:
            src.save()

    def save_image(self, suffix):
        """
        Save the current frame of every source as images.

        Args:
            suffix (str): file-name suffix.
        """
        for source in self.sources:
            source.save_image(suffix)

    def get_saving_state(self):
        """
        Report whether any recording is in progress.

        Returns:
            bool: the saving state of the first source (False when empty).
        """
        # Guard: an empty source list used to raise IndexError here.
        return bool(self.sources) and self.sources[0].get_saving_state()

    def setup_save(self, suffix=None, as_video=True, source=True, prediction=True):
        """
        Configure saving on every source.

        Args:
            suffix (str, optional): file-name suffix. Defaults to None.
            as_video (bool, optional): save as video instead of images. Defaults to True.
            source (bool, optional): save the raw streams. Defaults to True.
            prediction (bool, optional): save the prediction streams. Defaults to True.
        """
        # Bug fix: the loop variable used to shadow the 'source' flag, so a
        # truthy Source object was always passed and the flag was ignored.
        for src in self.sources:
            src.setup_save(suffix, as_video, source, prediction)

    def stop_save(self):
        """Stop recording on every source."""
        for source in self.sources:
            source.stop_save()

    def shot(self):
        """Take a screenshot of every source."""
        for source in self.sources:
            source.shot()

    def save_shot(self):
        """Write pending screenshots of every source."""
        for source in self.sources:
            source.save_shot()

class Player_Core:
    """数据处理的核心类,Datasets,Video,Camera都继承自它"""
    def __init__(self, net, name='Unknown') -> None:
        """Set up playback state and load *net*.

        Args:
            net: model path or list of ready Net objects; forwarded to load_net().
            name (str, optional): display name of this player. Defaults to 'Unknown'.
        """
        self.nets = []        # loaded Net objects
        self.data = []        # loaded samples (filled by subclasses)
        self.stage = []       # selected data indices (see check_stage)
        self.name = name
        self.acc_rate = None
        self.last_image = None
        self.playing = Playing_mode.Playing  # current playback state
        self.show_label = True
        self.video_code = 0
        self.project_dir = None              # output directory, set in show()
        self.player_sign = Player_sign.Null  # data-source type, set by subclasses
        self.load_net(net)
        mkdir(PLAYER_DIR)
        

    def get_img(self, frame):
        """Subclass hook: return (image, frame, source_name) for *frame* (see show())."""
        pass

    def load(self, path, label_format=Label_Format.Auto):
        """Subclass hook: load data from *path*; no-op in the base class."""
        pass

    def open_current_file(self, choice=0, frame=0):
        """Subclass hook: open the currently selected file; no-op in the base class."""
        pass
    
    def load_net(self, path, label_format=Label_Format.Auto, net_format=Net_Format.Auto, conf=0.7, iou=0.4):
        """
        Load a model, or adopt an already-built list of Net objects.

        Args:
            path (str | list): model path, or a list of ready Net instances.
            label_format (Label_Format, optional): label format. Defaults to Label_Format.Auto.
            net_format (Net_Format, optional): model format. Defaults to Net_Format.Auto.
            conf (float, optional): confidence threshold. Defaults to 0.7.
            iou (float, optional): IOU threshold. Defaults to 0.4.
        """
        if isinstance(path, list):
            # A non-empty list whose first element is a Net is adopted as-is;
            # anything else is silently ignored (original behavior).
            if len(path) and isinstance(path[0], Net):
                self.nets.extend(path)
            return
        candidate = Net(path, label_format, net_format, conf, iou)
        if candidate.net is not None:
            self.nets.append(candidate)

    def clear_nets(self):
        """Empty self.nets in place (the list object stays shared with any aliases)."""
        del self.nets[:]
    
    def check_stage(self):
        """Populate self.stage with every data index when it is empty, then return it."""
        if self.stage == []:
            self.stage = list(range(len(self.data)))
        return self.stage
    
    def check_net(self):
        """Remove any None entries from self.nets (e.g. nets that failed to load)."""
        # Comprehension replaces the manual filter loop; order and behavior unchanged.
        self.nets = [net for net in self.nets if net is not None]
    
    def set_conf(self, conf):
        """Apply *conf* as the confidence threshold of every loaded net."""
        for loaded_net in self.nets:
            loaded_net.conf = conf

    def local_key(self, key, frame):
        """
        Handle a key press for the current frame.

        NOTE(review): the base implementation ignores *key* and just advances
        to the next frame; subclasses presumably override this with real key
        handling — confirm.

        Args:
            key (int): ASCII code of the pressed key.
            frame (int): current frame index.

        Returns:
            int: the next frame index (frame + 1).
        """
        return frame + 1
    
    def load_project_dir(self, player_sign):
        """
        Re-open this player's output directory as a new player.

        Args:
            player_sign (Player_sign): which player type to open the directory with.

        Returns:
            Datasets | Video | None: a new player over project_dir, or None when
            no project exists or *player_sign* is unsupported.
        """
        if self.project_dir is not None and Path(self.project_dir).exists():
            if player_sign == Player_sign.Datasets:
                return Datasets(self.project_dir)
            elif player_sign == Player_sign.Video:
                return Video(self.project_dir)
            # Neither branch returned: any other sign is unsupported; falls
            # through to the final `return None`.
            log('unsupported player sign', Log_sign.Error)
        else:
            log('no project has been created', Log_sign.Error)
        return None

    def show(self, show_pred=True, recording_fps=30, project_dir=None, show_V_img=False, frame=0, TV_anchor=40, open_fix_prediction=False):
        """
        显示当前数据集。

        Args:
            show_pred (bool, optional): 是否显示预测结果。默认为True。
            recording_fps (int, optional): 录制的帧率。默认为30。
            project_dir (str, optional): 项目目录。默认为None。
            show_V_img (bool, optional): 是否显示亮度。默认为False。
            frame (int, optional): 当前帧。默认为0。
            TV_anchor (int, optional): 亮度锚点。默认为40。
            open_fix_prediction (bool, optional): 是否开启修正预测。默认为False。
        """
        img = None
        TV = None
        auto_recording = False
        size_mul = 1
        V_rate = 1
        scale_rate = 1
        show_V = False
        save_source = True
        save_prediction = True
        save_as_video = True
        cal_distance = False
        mix_enhancement_mode = False
        data_enhancement_args = Data_enhancement_configure()
        data_enhancement_choice = Data_enhancement_sign(0)
        self.project_dir = PLAYER_DIR + ('' if project_dir is None else '/' + project_dir)
        debug_mode = False
        st_frame = 0
        ed_frame = 0
        last_bar_value = None
        cal_V_mode = Cal_V_mode.default
        arrow_mode = Arrow_mode.Player_Control
        TV_anchor_deta = 0
        global vision_enhancement
        global manual_rectangle
        manual_rectangle_Hheight = 20
        manual_rectangle_Hwidth = 20
        hide_name = False
        FPS_limiter = DEFAULT_FPS
        FPS_timer = time.time()

        mkdir(self.project_dir)
        sources = Sources(self.project_dir, recording_fps)


        def trackbar_callback(x):
            pass

        if len(self.nets) == 0:
            log('The prediction would not be show cause no net has been loaded', Log_sign.Warning)
            show_pred = False
        
        if self.player_sign in {Player_sign.Datasets, Player_sign.Video} and len(self.data) == 0:
            log('nothing can show case the dataset is empty!', Log_sign.Error)
            return            
        
        while True:
            image, frame, source_name = self.get_img(frame)
            if image is not None:
                sources.setup_update(source_name if self.player_sign is not Player_sign.Datasets else self.name, frame if self.player_sign is not Player_sign.Camera else None, self.nets if show_pred else None)
                
                #改变亮度
                if V_rate != 1 or TV_anchor_deta:
                    Vr = (max(0, TV_anchor + TV_anchor_deta) / (Cal_V(image, self.nets, manual_rectangle, cal_V_mode, show_V_img, self.data[frame].keyPoints if self.player_sign == Player_sign.Datasets else None) + EPS)) if TV_anchor_deta else V_rate
                    #三通道分离
                    hue, sat, val = cv2.split(cv2.cvtColor(image, cv2.COLOR_BGR2HSV))
                    x = np.arange(0, 256)
                    #小于0的值修改为0 ，大于 255修改为 255
                    lut_val = np.clip(x * Vr, 0, 255).astype(image.dtype)
                    #将三个通道合并起来
                    im_hsv = cv2.merge((hue, sat, cv2.LUT(val, lut_val)))
                    cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=image)
                
                #改变缩放的大小
                if scale_rate != 1:
                    rimg_size = image.shape
                    dest = cv2.resize(image, (int(rimg_size[1] * scale_rate), int(rimg_size[0] * scale_rate)))
                    image = np.zeros(rimg_size, dtype=np.uint8)
                    if scale_rate < 1:
                        image[:dest.shape[0], :dest.shape[1]] = dest
                    else:
                        image = dest[:rimg_size[0], :rimg_size[1]]
                
                sources.update(image, 'raw')
                
                #进行数据增强
                if mix_enhancement_mode:
                    sources.update(data_enhancement_core(image, data_enhancement_args, self.nets, manual_rectangle, cal_V_mode, self.data[frame].keyPoints if self.player_sign == Player_sign.Datasets else None), 'ME')
                    sources.update(data_enhancement_core(image, data_enhancement_args.reverse(), self.nets, manual_rectangle, cal_V_mode, self.data[frame].keyPoints if self.player_sign == Player_sign.Datasets else None), 'MER')
                else:
                    for i in range(len(DATA_ENHANCEMENT_DEFAULT_ARGS)):
                        if data_enhancement_args[i] != DATA_ENHANCEMENT_DEFAULT_ARGS[i]:
                            tdea = Data_enhancement_configure()
                            sources.update(data_enhancement_core(image, set_data_enhancement(tdea, Data_enhancement_sign(i), data_enhancement_args[i]), self.nets, manual_rectangle, cal_V_mode, self.data[frame].keyPoints if self.player_sign == Player_sign.Datasets else None), f'E-{Data_enhancement_sign(i).name}')
                            sources.update(data_enhancement_core(image, set_data_enhancement(tdea, Data_enhancement_sign(i), data_enhancement_args[i]).reverse(), self.nets, manual_rectangle, cal_V_mode), f'ER-{Data_enhancement_sign(i).name}')
                            if i == 2 and data_enhancement_args.TVA is None:
                                tdea = Data_enhancement_configure()
                                tdea.TVA = DEFAULT_TVA
                                sources.update(data_enhancement_core(sources['E-V'].get_image(), tdea, self.nets, manual_rectangle, cal_V_mode, self.data[frame].keyPoints if self.player_sign == Player_sign.Datasets else None), 'E-V-A')
                                sources.update(data_enhancement_core(sources['ER-V'].get_image(), tdea, self.nets, manual_rectangle, cal_V_mode, self.data[frame].keyPoints if self.player_sign == Player_sign.Datasets else None), 'ER-V-A')

                info = f'{f"[{self.video_code}] " if self.player_sign == Player_sign.Video else ""}{"" if source_name is None or hide_name else source_name}{f"<{frame}>" if self.player_sign != Player_sign.Camera else ""} P:{PLAYING_MODE_ABBREVIATION[self.playing.value]}{f" B:{V_rate:.1f}" if V_rate != 1 else ""}{f" sm:{size_mul:.1f}" if size_mul != 1 else ""}{" UMR" if manual_rectangle is not None else ""}{" DM" if debug_mode else ""}{" AR" if auto_recording else (" RT" if frame >= st_frame and frame <= ed_frame and ed_frame != 0 else "")}{" S" if sources.get_saving_state() else ""}{" VE" if vision_enhancement else ""}{" ME" if mix_enhancement_mode else " E"}-{Data_enhancement_sign(data_enhancement_choice).name}:{print_None(data_enhancement_args[data_enhancement_choice], 1)}{f" AM:{ARROW_MODE_ABBREVIATION[arrow_mode.value]}" if arrow_mode != Arrow_mode.Player_Control else ""}{f" CVM:{CAL_V_MODE_ABBREVIATION[cal_V_mode.value]}" if cal_V_mode != Cal_V_mode.default else ""}{f" TVA:{(TV_anchor + TV_anchor_deta):.1f}" if abs(TV_anchor_deta) > 1e-7 else ""}{f" SR:{scale_rate:.1f}" if scale_rate != 1 else ""}{f" FL:{FPS_limiter:.1f}" if FPS_limiter != DEFAULT_FPS else ""}{((" F" if self.player_sign == Player_sign.Video and self.for_acc else " ") + f"acc:{self.acc_rate}") if self.player_sign != Player_sign.Camera else ""}'
                
                #相机模式
                if self.player_sign == Player_sign.Camera:
                    info += f'{f" et:{self.exposure_time}" if self.exposure_time != DEFAULT_EXPOSURE_TIME else ""}{f" ga:{self.gain:.1f}" if self.gain != DEFAULT_GAIN else ""}'
                #数据集模式(frame代表当前帧)
                elif self.data is not None and self.player_sign == Player_sign.Datasets:
                    if self.show_label:
                        sources.draw_result(self.data[frame], (200, 0, 200), scale_rate, show_V)
                    if vision_enhancement:
                        tda = dataset(self.data[frame])
                        kps = self.data[frame].keyPoints.clone()
                        tda.keyPoints = kpts2inside(kps.reshape(kps.shape[0], -1, 2)).reshape(kps.shape[0], -1)
                        sources.draw_result(img, tda, (255, 255, 255), scale_rate=scale_rate)
                    if show_V:
                        TV = Cal_V(image, self.nets, manual_rectangle, cal_V_mode, show_V_img, (self.data[frame].keyPoints) if self.player_sign == Player_sign.Datasets else None)
                        info += f' TV:{TV:.2f}'
                if show_pred:
                    if len(self.nets):
                        sources.predict(show_V, open_fix_prediction, self.data[frame] if self.player_sign == Player_sign.Datasets and cal_distance else None)
                    else:
                        log('can\'t draw result from net cause net is not loaded!', Log_sign.Warning)

                sources.set_info(info)
                sources.show(size_mul, vision_enhancement)

                main_window_name = sources.get_main_window_name()
                if main_window_name is not None:
                    #播放模式为Video
                    if self.player_sign == Player_sign.Video:
                        if self.vi_frames > 0:
                            current_value = cv2.getTrackbarPos('player_control', main_window_name)
                            if current_value == -1:
                                cv2.createTrackbar('player_control', main_window_name, frame, self.vi_frames, trackbar_callback)
                            else:
                                if last_bar_value != current_value:
                                    frame = current_value
                                else:
                                    cv2.setTrackbarPos('player_control', main_window_name, frame)
                            last_bar_value = frame
                    #播放模式为Datasets
                    elif self.player_sign == Player_sign.Datasets:
                        prop = cv2.getWindowProperty(main_window_name, cv2.WND_PROP_VISIBLE)
                        if prop == -1:
                            cv2.createTrackbar('player_control', main_window_name, frame, len(self.data), trackbar_callback)
                        else:
                            current_value = cv2.getTrackbarPos('player_control', main_window_name)
                            if last_bar_value != current_value:
                                frame = current_value
                            else:
                                cv2.setTrackbarPos('player_control', main_window_name, frame)
                        last_bar_value = frame
            else:
                break

            if auto_recording:
                cv2.waitKey(1)
                key = -1
                if frame == ed_frame:
                    auto_recording = False
                    if self.player_sign == Player_sign.Datasets:
                        self.playing = Playing_mode.Per_Frame
                    elif self.player_sign == Player_sign.Video:
                        self.playing = Playing_mode.Stop
                    sources.stop_save()
            else:
                if self.playing not in {Playing_mode.Playing, Playing_mode.Loop}:  #  and frame > 0
                    key = cv2.waitKey(0)
                else:
                    if FPS_limiter != DEFAULT_FPS:
                        key = cv2.waitKey(max(1, int(1000 / FPS_limiter - (FPS_timer - time.time()) * 1000)))
                        FPS_timer = time.time()
                    else:
                        key = cv2.waitKey(1)
                    if self.playing == Playing_mode.Loop and key == -1:
                        key = 96
            # key = cv2.waitKey(1000)
            # key = 54 if key != 32 else 32
            if key == 52:  # 4
                if arrow_mode == Arrow_mode.Manual_Rectangle:
                    #将手动框选的区域的高度减小5个像素点
                    manual_rectangle_Hheight = max(5, manual_rectangle_Hheight - 5)
                    if manual_rectangle is not None:
                        manual_rectangle[2] = manual_rectangle_Hheight
                else:
                    #将当前显示的图像回退self.acc_rate帧(加速后退)
                    frame -= self.acc_rate
            elif key == 54:  # 6
                if arrow_mode == Arrow_mode.Manual_Rectangle:
                    #将手动框选的区域的高度增加5个像素点
                    manual_rectangle_Hheight+= 5
                    if manual_rectangle is not None:
                        manual_rectangle[2] = manual_rectangle_Hheight
                else:
                    #将当前显示的图像前进self.acc_rate帧
                    frame += self.acc_rate
            elif key == 50:  # 2
                if arrow_mode == Arrow_mode.Player_Control: 
                    #将亮度变化的比率减小0.1   
                    V_rate = max(0.1, V_rate - 0.1)
                elif arrow_mode == Arrow_mode.Manual_Rectangle:
                    #将手动框选的区域的宽度减小5个像素点
                    manual_rectangle_Hwidth = max(5, manual_rectangle_Hwidth - 5)
                    if manual_rectangle is not None:
                        manual_rectangle[3] = manual_rectangle_Hwidth
            elif key == 56:  # 8
                if arrow_mode == Arrow_mode.Player_Control:
                    #增加亮度
                    V_rate += 0.1
                elif arrow_mode == Arrow_mode.Manual_Rectangle:
                    #将手动框选的区域的宽度增加5个像素点
                    manual_rectangle_Hwidth += 5
                    if manual_rectangle is not None:
                        manual_rectangle[3] = manual_rectangle_Hwidth
            elif key == 51:  # 3
                if arrow_mode == Arrow_mode.Player_Control:
                    #增加加速速率
                    self.acc_rate += 2
                elif arrow_mode == Arrow_mode.Player_Control2:
                    #将帧率限制增加1
                    if FPS_limiter < 1:
                        FPS_limiter = FPS_limiter / (1 - FPS_limiter)
                    else:
                        FPS_limiter += 1
            elif key == 49:  # 1
                if arrow_mode == Arrow_mode.Player_Control:
                    #减少加速速率
                    self.acc_rate = max(1, self.acc_rate - 2)
                elif arrow_mode == Arrow_mode.Player_Control2:
                    #将帧率限制减少1
                    if FPS_limiter < 2:
                        FPS_limiter = FPS_limiter / (FPS_limiter + 1)
                    else:
                        FPS_limiter -= 1
            elif key == 57:  # 9
                if arrow_mode == Arrow_mode.Player_Control:
                    #将显示窗口的尺寸倍数增加0.1
                    size_mul += 0.1
                elif arrow_mode == Arrow_mode.Player_Control2:
                    #将缩放比例增加0.1
                    scale_rate += 0.1
            elif key == 55:  # 7
                if arrow_mode == Arrow_mode.Player_Control:
                    #将显示窗口的尺寸倍数减少0.1
                    size_mul = max(0.1, size_mul - 0.1)
                elif arrow_mode == Arrow_mode.Player_Control2:
                    #将缩放比例减少0.1
                    scale_rate = max(0.1, scale_rate - 0.1)
            elif key == 53:  # 5
                #重置改变亮度的比率
                V_rate = 1
            elif key == 27:  # ESC
                #按ESC退出
                break
            elif key == 32 and self.playing != Playing_mode.Per_Frame and self.player_sign != Player_sign.Datasets:  # SPACE
                #切换Playing模式或者Stop模式
                self.playing = Playing_mode(not self.playing.value)
            elif key == 112 and self.player_sign != Player_sign.Datasets:  # P
                #切换Per_Frame模式或者Playing模式
                self.playing = Playing_mode.Per_Frame if self.playing != Playing_mode.Per_Frame else Playing_mode.Playing
            elif key == 111:  # O
                #切换Loop模式或者Per_Frame模式
                self.playing = Playing_mode.Loop if self.playing != Playing_mode.Loop else Playing_mode.Per_Frame
            elif key == 110:  # N
                #切换是否显示预测值
                show_pred = not show_pred
            elif key == 120:  # X
                #保存当前图片
                sources.save_image(frame)
            elif key == 99:  # C
                #开始录制
                if self.player_sign != Player_sign.Camera:
                    if not sources.get_saving_state():
                        if st_frame + ed_frame > 0 and not auto_recording:
                            auto_recording = True
                            frame = max(0, st_frame - 1)
                            self.playing = Playing_mode.Playing
                            sources.setup_save(frame, save_as_video, save_source, save_prediction)
                    else:
                        log('please stop recording before auto-split!', Log_sign.Warning)
            elif key == 122:  # Z
                #结束录制并且保存
                if not sources.get_saving_state():
                    sources.setup_save(frame if self.player_sign != Player_sign.Camera else None, save_as_video, save_source, save_prediction)
                else:
                    sources.stop_save()
            elif key == 118:  # V
                #是否展示V值
                if not show_V and len(self.data) and self.player_sign == Player_sign.Video and len(self.nets) == 0:
                    log('you may play video, please load net before calculate Vals!', Log_sign.Warning)
                show_V = not show_V
            elif key == 105:  # I
                #打开图片文件
                self.open_current_file(0, frame)
            elif key == 117:  # U
                #打开标签文件
                self.open_current_file(1, frame)
            elif key == 121:  # Y
                #打开所在文件夹
                self.open_current_file(2, frame)
            elif key == 103:  # G
                #在Labelmaster中打开
                self.open_current_file(3, frame)
            elif key == 109:  # M
                #打开PLAYER文件夹
                os.system(f'xdg-open {PLAYER_DIR}')
            elif key == 101:  # E
                #切换是否数据增强
                mix_enhancement_mode = not mix_enhancement_mode
            elif key == 100:  # D
                cal_distance = not cal_distance
            elif key == 33:  # !
                data_enhancement_choice = Data_enhancement_sign.H
            elif key == 64:  # @
                data_enhancement_choice = Data_enhancement_sign.S
            elif key == 35:  # #
                data_enhancement_choice = Data_enhancement_sign.V
            elif key == 36:  # $
                data_enhancement_choice = Data_enhancement_sign.TVA
            elif key == 37:  # %
                data_enhancement_choice = Data_enhancement_sign.scale
            elif key == 94:  # ^
                data_enhancement_choice = Data_enhancement_sign.down_cover
            elif key == 38:  # &
                data_enhancement_choice = Data_enhancement_sign.random_geometry
            elif key == 119:  # W
                #将data_enhancement_args.TVA加1
                if data_enhancement_choice == Data_enhancement_sign.TVA:
                    if data_enhancement_args.TVA is None:
                        data_enhancement_args.TVA = DEFAULT_TVA
                    data_enhancement_args.TVA += 1
                else:
                    data_enhancement_args = set_data_enhancement(data_enhancement_args, data_enhancement_choice, data_enhancement_args[data_enhancement_choice] + 0.1)
            elif key == 115:  # S
                #将data_enhancement_args.TVA减1
                if data_enhancement_choice == Data_enhancement_sign.TVA:
                    if data_enhancement_args.TVA is None:
                            data_enhancement_args.TVA = DEFAULT_TVA
                    data_enhancement_args.TVA = max(0, data_enhancement_args.TVA - 1)
                else:
                    data_enhancement_args = set_data_enhancement(data_enhancement_args, data_enhancement_choice, max(0, data_enhancement_args[data_enhancement_choice.value] - 0.1))
            elif key == 113:  # Q
                #将data_enhancement_args.TVA设置为None
                if data_enhancement_choice == Data_enhancement_sign.TVA:
                    data_enhancement_args.TVA = None
                else:
                    data_enhancement_args = set_data_enhancement(data_enhancement_args, data_enhancement_choice, DATA_ENHANCEMENT_DEFAULT_ARGS[data_enhancement_choice.value])
            elif key == 114:  # R
                #销毁窗口
                cv2.destroyAllWindows()
            elif key == 116:  # T
                #截图
                sources.shot()
            elif key == 95:  # _
                #保存截图
                sources.save_shot()
            elif key == 225:  # shift
                pass
            elif key == 43:  # +
                #切换是否是debug模式,如果是则开启展示计算V值的图片
                debug_mode = not debug_mode
                if debug_mode:
                    show_V_img = True
                else:
                    show_V_img = False
            elif key == 44:  # ,
                #将当前图片设置为第一张图片
                st_frame = frame
            elif key == 46:  # .
                #将当前图片设置为最后一张图片
                ed_frame = frame
            elif key == 47:  # /
                #重置起始和终止的图片
                st_frame = 0
                ed_frame = 0
            elif key == 45:  # -
                #小编也很好奇这个是干啥的
                TV_anchor_deta -= 0.5
            elif key == 61:  # =
                TV_anchor_deta += 0.5
            elif key == 96:  # `
                pass
            elif key == 124:  # |
                #切换是否开启图像增强
                vision_enhancement = not vision_enhancement
            elif key == 42:  # *
                #切换小键盘模式
                arrow_mode = Arrow_mode((arrow_mode.value + 1) % len(ARROW_MODE_ABBREVIATION))
            elif key == 97:  # A
                #切换计算V值的模式
                cal_V_mode = Cal_V_mode((cal_V_mode.value + 1) % len(CAL_V_MODE_ABBREVIATION))
            elif key == 104:  # H
                #切换是否隐藏名字(?)
                hide_name = not hide_name
            elif key == 106:  # J
                if manual_rectangle is None:
                    cv2.setMouseCallback(self.main_window_name, mouse_callback)
                    manual_rectangle = [0, 0, manual_rectangle_Hwidth, manual_rectangle_Hheight]
                else:
                    manual_rectangle = None
            else:
                frame = self.local_key(key, frame)
        
        if self.player_sign == Player_sign.Camera:
            self.stop()


class Datasets(Player_Core):
    def __init__(self, datasets_path=DEFAULT_DATASET, label_format=Label_Format.Auto, skip_imgs=3, net=None, reload_label_format=Label_Format.Auto, force_load=False, name='Unknown'):
        """
        Create a dataset player and optionally load data right away.

        Args:
            datasets_path: source handed to load() (path, Video, Datasets,
                list or image array); None skips loading entirely.
            label_format (Label_Format, optional): label format used when
                loading. Defaults to Label_Format.Auto.
            skip_imgs (int, optional): frames skipped between kept video
                frames. Defaults to 3.
            net: network(s) forwarded to the player core.
            reload_label_format (Label_Format, optional): format used when
                the written dataset is re-read; Auto falls back to
                label_format. Defaults to Label_Format.Auto.
            force_load (bool, optional): forwarded to load(). Defaults to False.
            name (str, optional): display name. Defaults to 'Unknown'.
        """
        super().__init__(net, name)
        self.player_sign = Player_sign.Datasets
        self.playing = Playing_mode.Per_Frame
        self.acc_rate = 1
        if reload_label_format == Label_Format.Auto:
            self.reload_label_format = label_format
        else:
            self.reload_label_format = reload_label_format
        if datasets_path is not None:
            self.load(datasets_path, label_format, skip_imgs, force_load)
        
            
    def load(self, path, label_format=Label_Format.Auto, skip_imgs=3, force_load=False):
        """
        Load data into the dataset from one of several source kinds.

        Args:
            path: the source. Accepts a Video (frames are extracted), another
                Datasets (merged in), a list of image paths or dataset
                objects, a numpy image array, or a directory / single-image
                path.
            label_format (Label_Format, optional): label format. Defaults to
                Label_Format.Auto.
            skip_imgs (int, optional): frames skipped between two kept video
                frames. Defaults to 3.
            force_load (bool, optional): force loading. Defaults to False.
        """
        if isinstance(path, Video):
            # Convert staged videos to dataset items, keeping one frame out
            # of every (skip_imgs + 1).
            self.name = path.name
            lens = len(path.check_stage())
            for si, i in enumerate(path.check_stage()):
                print(f'\nConverting videos to datasets [{si + 1}/{lens}]...')
                path.load_video(i)
                frames = int(path.vi.get(cv2.CAP_PROP_FRAME_COUNT))
                if frames == -1:
                    # Frame count unknown: read sequentially until exhausted.
                    ret, image = path.vi.read()
                    frame = 0
                    name = Path(path.data[i]).stem
                    while ret:
                        self.data.append(dataset(image, f'{name}_{frame}'))
                        # Step by skip_imgs + 1 so the recorded index matches
                        # the seek-based branch below; the old counter only
                        # added skip_imgs, so the names drifted away from the
                        # real frame numbers.
                        frame += skip_imgs + 1
                        for _ in range(skip_imgs + 1):
                            ret, image = path.vi.read()
                else:
                    for frame in tqdm(range(0, frames, skip_imgs + 1)):
                        path.vi.set(cv2.CAP_PROP_POS_FRAMES, frame)
                        ret, image = path.vi.read()
                        # Skip unreadable frames instead of storing a None image.
                        if ret:
                            self.data.append(dataset(image, f'{Path(path.data[i]).stem}_{frame}'))
        elif isinstance(path, Datasets):
            # Merge another Datasets instance (data and staged indices).
            self.name = path.name
            self.data += path.data
            self.stage += path.check_stage()
        elif isinstance(path, list):
            # A list of image paths or of dataset objects.
            if len(path):
                if isinstance(path[0], str):
                    for p in path:
                        if Path(p).exists():
                            self.data.append(p)
                        else:
                            log(f'skip {p} when load it to Datasets cause it\'s not exists', Log_sign.Warning)
                elif isinstance(path[0], dataset):
                    self.data += path
        elif isinstance(path, np.ndarray):
            # A single in-memory image.
            self.data.append(dataset(path))
        else:
            # A filesystem path: auto-detect the label format, then load a
            # single image or every image found in the directory.
            label_format = auto_format(path, label_format)
            self.name, path = get_name(path)
            if not os.path.isdir(path):
                if Path(path).suffix in IMAGE_FORMATS:
                    self.data += [dataset(path, label_format)]
                    return
                log(f'skip path\n{path}\ncause the path given isn\'t a dir or image', Log_sign.Warning)
                return
            imgs_path = get_files(path, IMAGE_FORMATS)
            print(f'\nLoading datasets from {path} ...')
            for img in tqdm(imgs_path):
                self.data += [dataset(img, label_format, force_load)]

    def output(self, save_path, label_format=Label_Format.Standard, val_rate=0.1, add=False, reserve_name=False, move=False, sum=-1, pop=False, show_log=True, toDatasets=True, generate_hdt=True, same_dir=False, reset_name=False, data_name=None):
        """
        Write the dataset (or its staged subset) to the given path.

        Args:
            save_path (str): output path; None writes nothing (useful with pop).
            label_format (Label_Format, optional): label format. Defaults to Label_Format.Standard.
            val_rate (float, optional): fraction sent to the validation split. Defaults to 0.1.
            add (bool, optional): append to an existing dataset instead of rebuilding. Defaults to False.
            reserve_name (bool, optional): keep the original file names. Defaults to False.
            move (bool, optional): move files instead of copying. Defaults to False.
            sum (int, optional): number of staged items to output; -1 outputs all. Defaults to -1.
            pop (bool, optional): remove the written items from this dataset. Defaults to False.
            show_log (bool, optional): reload the result and print statistics. Defaults to True.
            toDatasets (bool, optional): prefix relative paths with 'Datasets/'. Defaults to True.
            generate_hdt (bool, optional): generate HDT metadata files. Defaults to True.
            same_dir (bool, optional): write next to this dataset's grandparent directory. Defaults to False.
            reset_name (bool, optional): reset source names before writing. Defaults to False.
            data_name (str, optional): prefix used when renaming output files. Defaults to None.

        return:
            Datasets: the reloaded output dataset (None when not reloaded).
        """
        if len(self.data) == 0:
            log('There is no data to output!', Log_sign.Warning)
            return
        
        # Pick the indices to write: either `sum` random entries popped off
        # the stage, or the whole stage.
        if sum != -1 and sum < len(self.check_stage()):
            stage = [self.stage.pop(int(len(self.stage) * random.random())) for _ in range(sum)]
        else:
            stage = self.check_stage()
        
        # Lazily compute TV metadata the first time it is needed
        # (TV == -1 marks "not yet computed").
        if generate_hdt and len(self.data) and self.data[0].hdt.TV == -1 and save_path is not None:
            self.build_TV()
        if reset_name:
            self.set_source_name()

        if same_dir and save_path is not None:
            save_path = self.get_grandparent_dir() + '/' + save_path
        if save_path is not None:
            # Relative paths are normalised to live under Datasets/.
            if toDatasets and (not same_dir) and save_path[:9] != 'Datasets/' and save_path[0] != '/':
                save_path = f'Datasets/{save_path}'
                log(f'auto update the save_path to {save_path}', Log_sign.Info)
            self.build_output_dir(save_path, label_format, add, generate_hdt)
            dir = ['/train', '/val']
            # When appending, continue numbering after the existing files.
            file_code = get_file_code(save_path) + 1 if add else 0
            print(f'\nBuilding datasets to {save_path} ...')
            if label_format == Label_Format.LabelMaster:
                # LabelMaster layout has no train/val split.
                for i in tqdm(range(len(stage))):
                    self.data[stage[i]].output(save_path, label_format, None if reserve_name else f'{f"{data_name}_" if data_name is not None else ""}{file_code + i}', move, generate_hdt)
            else:
                # Each item is randomly routed to /train or /val by val_rate.
                for i in tqdm(range(len(stage))):
                    self.data[stage[i]].output(save_path + dir[random.random() < val_rate], label_format, None if reserve_name else f'{f"{data_name}_" if data_name is not None else ""}{file_code + i}', move, generate_hdt)
            if show_log:
                nda = Datasets(save_path, label_format=self.reload_label_format)
                nda.statistics()
            else:
                nda = None
        else:
            nda = None
        
        # Optionally drop the written items from this dataset (pop in
        # descending index order so earlier indices stay valid).
        if pop or save_path is None:
            stage.sort(reverse=True)
            for p in stage:
                self.data.pop(p)
            stage.clear()
        return nda
    
    def statistics(self, show_log=True, show_TV=False):
        """
        Print the composition of the current dataset.

        Args:
            show_log (bool, optional): build and print the per-class and
                per-source summary. Defaults to True.
            show_TV (bool, optional): additionally plot a histogram of the
                per-image TV values. Defaults to False.

        Returns:
            list: the summary strings (empty when the dataset is empty or
            show_log is False).
        """
        # Bind the return value up front: the original only assigned `info`
        # inside the non-empty branch, so an empty dataset raised
        # UnboundLocalError at the final `return info`.
        info = []
        if len(self.data):
            print('\nStatisticsing datasets ...')
            # 4 colors x 12 class23 slots -> 48 counting buckets.
            self.sum_classes = [0 for _ in range(48)]
            sources_name = ['None']
            sources_sum = [0]
            TVs = []
            sum_background = 0
            for da in tqdm(self.data):
                # An image with no labelled targets counts as background.
                sum_background += len(da.cls) == 0
                for i in range(len(da.cls)):
                    self.sum_classes[int(da.class23[i] + da.color[i] * 12)] += 1
                if da.hdt.source is None:
                    sources_sum[0] += 1
                elif da.hdt.source in sources_name:
                    sources_sum[sources_name.index(da.hdt.source)] += 1
                else:
                    sources_name.append(da.hdt.source)
                    sources_sum.append(1)
                TVs.append(da.hdt.TV)
            if show_log:
                print()
                for i in range(48):
                    if self.sum_classes[i] > 0:
                        info.append(f'{color_name[i // 12]}-[{tsize_name[class23_to_tsize(i % 12)]}]{class_name[class23_to_cls(i % 12)]}:{self.sum_classes[i]}')
                info.append(f'background:{sum_background}\n\nGot {len(self.data)} images and {torch.tensor(self.sum_classes).sum()} targets in total\n\nSource:')
                for i, s in enumerate(sources_name):
                    # Only the 'None' placeholder (i == 0) can have a zero
                    # count; skip it when unused.
                    if i + sources_sum[i] == 0:
                        continue
                    info.append(f'{s}:{sources_sum[i]}')
                if show_TV and len(TVs):
                    TVs = torch.tensor(TVs).int()
                    if TVs.max() > 0:
                        # Histogram of TV values, one bucket per integer value.
                        TVs_sum = torch.zeros(TVs.max() + 1, dtype=torch.int)
                        for TV in TVs:
                            TVs_sum[TV] += 1
                        plot(TVs_sum, title='TVs')
        else:
            log('the dataset is empty!', Log_sign.Info)

        return info

    def fetch_datastr(self, name, sum=-1, rate=None):
        """
        Stage data matching a class described by a string.

        Args:
            name (str): textual description of the class to extract.
            sum (int, optional): number of items to extract. Defaults to -1 (all).
            rate (float, optional): fraction of matches to extract. Defaults to None.
        """
        # str2cls yields (color, tsize, cls); forward them straight through.
        self.fetch_data(*str2cls(name, False, False), sum, rate)

    def fetch_data23(self, color, class23, sum=-1, rate=None):
        """
        Stage data matching a (color, class23) pair.

        Args:
            color (int): color index.
            class23 (int): combined class index, split into tsize and class.
            sum (int, optional): number of items to extract. Defaults to -1 (all).
            rate (float, optional): fraction of matches to extract. Defaults to None.
        """
        # Decompose class23 into its target-size and class components first.
        tsize = class23_to_tsize(class23)
        cls = class23_to_cls(class23)
        self.fetch_data_core(color, tsize, cls, sum, rate)
    
    def fetch_data(self, color, tsize, cls, sum=-1, rate=None):
        """
        Stage data matching the given color/tsize/class selection.

        Each of color, tsize and cls may be a single int (-1 meaning "every
        option") or an explicit list of options.

        Args:
            color (int): color index or -1 for all four colors.
            tsize (int): target-size index or -1 for both small/big.
            cls (int): class index or -1 for all eight classes.
            sum (int, optional): number of items to extract per combination.
                Defaults to -1 (all).
            rate (float, optional): fraction of matches to extract.
                Defaults to None.
        """
        def as_choices(value, everything):
            # Normalise a scalar to a list; -1 expands to every option.
            if not isinstance(value, int):
                return value
            return everything if value == -1 else [value]

        color = as_choices(color, list(range(4)))
        tsize = as_choices(tsize, [0, 1])
        cls = as_choices(cls, list(range(8)))

        cls23_list = ct2cls23(cls, tsize)
        for co in color:
            for c23 in cls23_list:
                self.fetch_data23(co, c23, sum, rate)
    
    def fetch_data_background(self):
        """
        Stage every background image (images with no labelled target).

        cls=-1 selects the background branch of fetch_data_core; the color
        and tsize arguments (0, 0) are ignored on that path.
        """
        self.fetch_data_core(0, 0, -1)
    
    def fetch_data_core(self, color, tsize, cls, sum=-1, rate=None):
        """
        Core routine staging images that contain a given target class.

        Args:
            color (int): color index.
            tsize (int): target-size index.
            cls (int): class index; -1 stages background images instead.
            sum (int, optional): number of images to stage; -1 stages all
                matches. Defaults to -1.
            rate (float, optional): fraction of matches to stage; overrides
                sum when given. Defaults to None.
        """
        idatas = []
        if cls != -1:
            # Collect every image containing at least one matching target.
            print(f'\nScaning datasets for {color_name[color]}-[{tsize_name[tsize]}]{class_name[cls]} ...')
            for i in tqdm(range(len(self.data))):
                for p in range(len(self.data[i].cls)):
                    if self.data[i].cls[p] == cls and self.data[i].tsize[p] == tsize and self.data[i].color[p] == color:
                        idatas.append(i)
                        break
        else:
            # Background pass: images without any labelled target.
            print(f'\nScaning datasets for background ...')
            for i in tqdm(range(len(self.data))):
                if len(self.data[i].cls) == 0:
                    idatas.append(i)

        if rate is not None:
            # int() keeps the countdown below exact: the old float `sum`
            # stepped past zero (e.g. 2.5 -> -0.5, still truthy), so `while
            # sum` drained every match instead of the requested fraction.
            sum = int(rate * len(idatas))
        if sum == -1 or sum >= len(idatas):
            # Take everything, skipping indices already staged.
            for ti in idatas:
                if ti not in self.stage:
                    self.stage.append(ti)
        else:
            # Randomly sample `sum` previously-unstaged indices.
            while sum > 0 and len(idatas):
                ti = idatas.pop(int(random.random() * len(idatas)))
                if ti not in self.stage:
                    self.stage.append(ti)
                    sum -= 1
    def fetch_data_dis(self, thred=5, net=0):
        """
        Stage images whose prediction disagrees with the label beyond a threshold.

        Args:
            thred: distance threshold above which an image is staged.
            net: a Net instance, or an index into self.nets.
        """
        # An integer selects one of the loaded nets.
        if isinstance(net, int) and net < len(self.nets):
            net = self.nets[net]
        if not isinstance(net, Net):
            log('there is no net for predicting', Log_sign.Error)
            return
        log('calculating distances ...')
        for i, da in enumerate(tqdm(self.data)):
            if da.source is not None:
                continue
            img = cv2.imread(str(da.img_path))
            pred = net.predict(img).keyPoints
            n_true = da.keyPoints.shape[0]
            n_pred = pred.shape[0]
            if n_true == 0 and n_pred == 0:
                # Nothing labelled and nothing predicted: agreement.
                continue
            # Stage when one side is empty (miss / false positive) or the
            # keypoint distance exceeds the threshold.
            if n_true == 0 or n_pred == 0 or calculate_distance(da.keyPoints, pred, img.shape) > thred:
                self.stage.append(i)
    
    def fetch_data_V(self, thred=150):
        """
        Stage every image whose TV value exceeds the threshold.

        Args:
            thred: minimum TV value (exclusive) for an image to be staged.
        """
        if not len(self.data):
            return
        if self.data[0].hdt.TV == -1:
            # TV metadata not computed yet (TV == -1 marks "missing").
            self.build_TV()
        self.stage.extend(i for i, da in enumerate(self.data) if da.hdt.TV > thred)
    
    def fetch_data_source(self, source):
        """
        Stage every image coming from the given source.

        Args:
            source (str): source name matched against each item's hdt.source.
        """
        matches = [i for i, da in enumerate(self.data) if da.hdt.source == source]
        self.stage.extend(matches)
                
    def show_stage(self):
        """
        Display the currently staged images in a temporary player.
        """
        staged = self.get_stage(False)
        viewer = Datasets(staged, net=self.nets, name=self.name)
        viewer.show()

    def cal_distance(self, thred=5, net=0):
        """
        Compare network predictions against labels and collect the distances.

        Args:
            thred: maximum accepted distance; larger values (and images where
                the label/prediction keypoint counts differ) are logged and
                skipped.
            net: a Net instance, or an index into self.nets.

        Returns:
            list: distances (<= thred) for images whose label and prediction
            keypoint counts match, or None when no usable net is available.
        """
        if isinstance(net, int) and len(self.nets) > net:
            net = self.nets[net]
        if not isinstance(net, Net):
            log('there is no net for predicting', Log_sign.Error)
            return
        distances = []
        log('calculating distances ...')
        for da in tqdm(self.data):
            if da.source is not None:
                continue
            img = cv2.imread(str(da.img_path))
            # NOTE: the original re-bound `da = dataset()` here, which
            # discarded the item's labels and compared predictions against an
            # empty dataset; removed so the real keyPoints are used.
            pred = net.predict(img).keyPoints
            # a ^ b == 0 iff the two keypoint counts are equal.
            if not (da.keyPoints.shape[0] ^ pred.shape[0]):
                if da.keyPoints.shape[0] + pred.shape[0] > 0:
                    distance = calculate_distance(da.keyPoints, pred, img.shape)
                    if distance <= thred:
                        distances.append(distance)
                        continue
                else:
                    # Both empty: agreement, nothing to measure.
                    continue
            log(f'skip data{"" if da.keyPoints.shape[0] ^ pred.shape[0] else f" {distance}"} from {da.img_path} > {da.label_path}', Log_sign.Warning)
        # Return the collected distances (previously computed but discarded).
        return distances
        
    def build_output_dir(self, dataset_path=None, label_format=Label_Format.Standard, add=False, build_hdt=False):
        """
        Create the output directory tree for a dataset export.

        Args:
            dataset_path (str, optional): output path. Defaults to None.
            label_format (Label_Format, optional): label format; LabelMaster gets a flat
                directory, everything else gets train/val subtrees. Defaults to Label_Format.Standard.
            add (bool, optional): append to an existing output instead of rebuilding. Defaults to False.
            build_hdt (bool, optional): also create hdt/ subdirectories. Defaults to False.
        """
        def _remove_old(path):
            # Deduplicated delete-confirm logic (was copy-pasted in both branches).
            # NOTE: the confirmation is currently auto-answered with 'y'.
            log(f'deleting old datasets:\n{path}\nPlease enter y to continue\n', Log_sign.Warning, False)
            key = 'y'
            if key == 'y':
                shutil.rmtree(path)
            else:
                raise Exception(log('Please check your save_path', Log_sign.Error))

        if label_format == Label_Format.LabelMaster:
            if not add:
                if os.path.exists(dataset_path):
                    _remove_old(dataset_path)
            elif os.path.exists(dataset_path):
                return
            os.mkdir(dataset_path)
        else:
            if not add:
                if os.path.exists(dataset_path):
                    _remove_old(dataset_path)
            else:
                if not val_dataset_structure(dataset_path):
                    # incomplete structure: rebuild from scratch
                    shutil.rmtree(dataset_path)
                else:
                    return
            os.mkdir(dataset_path)
            subdirs = ['images', 'labels'] + (['hdt'] if build_hdt else [])
            for split in ('val', 'train'):
                os.mkdir(os.path.join(dataset_path, split))
                for sub in subdirs:
                    os.mkdir(os.path.join(dataset_path, split, sub))

    def predict_label(self, net_format=Net_Format.Auto, conf=0.7, iou=0.4, background_only=True, net_code=0, net_path=None, V_range=None, merge=True, dis_thred=10, shrink=1):
        """
        Predict labels with a network, scanning brightness variants and horizontal crops.

        Args:
            net_format (Net_Format, optional): network format. Defaults to Net_Format.Auto.
            conf (float, optional): confidence threshold. Defaults to 0.7.
            iou (float, optional): iou threshold. Defaults to 0.4.
            background_only (bool, optional): only predict on background samples. Defaults to True.
            net_code (int, optional): index into self.nets. Defaults to 0.
            net_path (str, optional): load a net from this path instead. Defaults to None.
            V_range (tuple, optional): range of V multipliers (tenths). Defaults to None → (10, 11).
            merge (bool, optional): merge with existing labels instead of replacing. Defaults to True.
            dis_thred (int, optional): distance threshold used to drop duplicate targets. Defaults to 10.
            shrink (float, optional): shrink rate. Defaults to 1. (deprecated)
        """
        if net_path is None:
            if net_code >= len(self.nets):
                log(f'Can\'t find net {net_code}', Log_sign.Error)
                return
            net = self.nets[net_code]
        else:
            net = Net(net_path, net_format, conf, iou)
        # BUG fix: `self.stage is not []` compared identity against a fresh literal and
        # was always True; only warn/clear when the stage actually holds entries.
        if self.stage:
            log('the stage has been clear!', Log_sign.Warning)
            self.stage.clear()
        if background_only:
            self.fetch_data_core(0, 0, -1)
        else:
            # BUG fix: was a bare `range(...)`, which has no .clear() for the cleanup below
            self.stage = list(range(len(self.data)))
        if V_range is None:
            V_range = (10, 11)
        print('\nPredicting labels...')
        for i in tqdm(self.stage):
            if not merge:
                self.data[i].clear_label()
            for trans_rate in range(0, 3):
                trans_rate = trans_rate / 4
                for V_rate in range(*V_range):
                    img = self.data[i].get_image()
                    # crop the horizontal window [trans_rate, trans_rate + 0.5]
                    img = img[:, int(img.shape[1] * trans_rate):int(img.shape[1] * (trans_rate + 0.5))]
                    # scale the V channel by V_rate / 10 via a LUT
                    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
                    lut_val = np.clip(np.arange(0, 256) * V_rate / 10, 0, 255).astype(img.dtype)
                    im_hsv = cv2.merge((hue, sat, cv2.LUT(val, lut_val)))
                    cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=img)
                    pred = net.predict(img)
                    if len(pred.keyPoints):
                        # map crop-local x coordinates back to full-image space
                        pred.keyPoints[:, ::2] /= 2
                        pred.keyPoints[:, ::2] += trans_rate
                    for pred_i in range(len(pred.keyPoints)):
                        dis = calculate_distance(self.data[i], pred.keyPoints[pred_i], img.shape, False)
                        # keep predictions far from any existing target (or when none exist)
                        if dis > dis_thred or dis == -1:
                            self.data[i].add(pred, pred_i)
            # final pass on the untouched full image
            img = self.data[i].get_image()
            pred = net.predict(img)
            for pred_i in range(len(pred.keyPoints)):
                dis = calculate_distance(self.data[i], pred.keyPoints[pred_i], img.shape, False)
                if dis > dis_thred or dis == -1:
                    self.data[i].add(pred, pred_i)

        self.stage.clear()

    def distribute(self, path, sum_datasets=-1, sum_each=-1, sum_all=-1, move=False, add=False, reserve_name=True, same_dir=False, toDatasets=True):
        """
        Spread the staged samples over several numbered sub-datasets under `path`.

        Args:
            path (str): output path
            sum_datasets (int, optional): number of sub-datasets. Defaults to -1 (derived).
            sum_each (int, optional): samples per sub-dataset. Defaults to -1 (derived).
            sum_all (int, optional): total sample budget. Defaults to -1.
            move (bool, optional): move files instead of copying. Defaults to False.
            add (bool, optional): append to an existing output. Defaults to False.
            reserve_name (bool, optional): keep original file names. Defaults to True.
            same_dir (bool, optional): place the output next to this dataset. Defaults to False.
            toDatasets (bool, optional): output into the Datasets folder. Defaults to True.
        """
        # derive the number of sub-datasets from the per-dataset quota when needed
        if sum_datasets == -1 and sum_each > 0:
            sum_datasets = math.ceil(len(self.check_stage()) / sum_each)
        if sum_datasets < 0:
            log('the sum of the datasets is wrong!', Log_sign.Error)
            return
        # derive the per-dataset quota from either the total budget or the stage size
        if sum_all != -1:
            sum_each = math.ceil(sum_all / sum_datasets)
        elif sum_each == -1:
            sum_each = math.ceil(len(self.check_stage()) / sum_datasets)
        if same_dir:
            path = self.get_grandparent_dir() + '/' + path
        self.build_output_dir(path, Label_Format.LabelMaster, add)
        for part in range(sum_datasets):
            self.output(path + f'/{part}', Label_Format.LabelMaster, add=add, reserve_name=reserve_name, sum=sum_each, move=move, pop=False, show_log=False, toDatasets=toDatasets, generate_hdt=False)
        self.stage.clear()
    
    def local_key(self, key, frame):
        """
        Handle a key press while browsing.

        Args:
            key (int): key code from cv2.waitKey
            frame (int): current frame index

        Returns:
            int: the frame to show next
        """
        if key == ord('k'):  # move the current sample (image + label) into del_image/
            mkdir('del_image')
            da = self.data[frame]
            if da.img_path.exists():
                shutil.move(da.img_path, f'del_image/{da.img_path.name}')
                if os.path.exists(str(da.label_path)):
                    shutil.move(da.label_path, f'del_image/{da.label_path.name}')
            self.data.pop(frame)
            return frame
        if key == ord('l'):  # toggle the label overlay
            self.show_label = not self.show_label
            return frame
        # any other key advances to the next frame
        return frame + 1
            
    
    def get_img(self, frame):
        """
        Get the image at the given frame.

        Args:
            frame (int): frame index (clamped into the valid range)

        Returns:
            tuple: (image or None, clamped frame index, image name or 'Empty')
        """
        if not self.data:
            log('nothing can show case the dataset is empty!', Log_sign.Error)
            return None, frame, 'Empty'
        frame = max(min(frame, len(self.data) - 1), 0)
        da = self.data[frame]
        # read from disk unless the sample holds an in-memory image
        img = cv2.imread(str(da.img_path)) if da.source is None else da.source.copy()
        return img, frame, da.img_path.name
    
    def get_stage(self, clear=True):
        """
        Return the samples currently referenced by the stage.

        Args:
            clear (bool, optional): clear the stage afterwards. Defaults to True.

        Returns:
            list: the staged samples
        """
        staged = [self.data[idx] for idx in self.stage]
        if clear:
            self.stage.clear()
        return staged
    
    def fill(self, material, low_limit):
        """
        Top up under-represented classes with samples from another dataset.

        Args:
            material (Datasets): dataset to draw extra samples from
            low_limit (int): minimum number of samples per class after filling
        """
        for cls_id in range(48):
            # recount after every top-up since self.data grows
            self.statistics(False)
            missing = low_limit - self.sum_classes[cls_id]
            if missing > 0:
                material.fetch_data23(cls_id // 12, cls_id % 12, missing)
                self.data += material.get_stage()
    
    def repeat_fill(self, low_limit, unique=True, class_limit=None):
        """
        Top up under-represented classes by duplicating samples already in the dataset.

        Args:
            low_limit (int): minimum number of samples per class after filling
            unique (bool, optional): deduplicate the staged samples first. Defaults to True.
            class_limit (str | list, optional): restrict filling to these classes. Defaults to None.
        """
        if class_limit is None:
            class_limits = list(range(48))
        else:
            if isinstance(class_limit, str):
                class_limit = [class_limit]
            class_limits = []
            for cl in class_limit:
                class_limits.extend(str2cls(cl))

        for cls_id in class_limits:
            self.statistics(False)
            count = self.sum_classes[cls_id]
            # only duplicate classes that exist but are below the limit
            if 0 < count < low_limit:
                self.stage.clear()
                self.fetch_data23(cls_id // 12, cls_id % 12)
                if unique:
                    self.unique()
                for _ in range(low_limit - count):
                    if len(self.stage):
                        pick = self.stage[int(len(self.stage) * random.random())]
                        self.data.append(self.data[pick])
        self.stage.clear()

    def unique(self, hash_list=None):
        """
        Deduplicate the staged samples by image hash.

        Args:
            hash_list (list, optional): hashes already seen; staged samples whose hash
                appears in it are dropped. The list is cleared on return. Defaults to None.
        """
        # BUG fix: the default was a mutable [] shared across calls; even though it was
        # cleared at the end, a sentinel default is the safe idiom.
        if hash_list is None:
            hash_list = []
        pre_len = len(self.data)
        print('\nCalculating hash value of images ...')
        for da in tqdm(self.check_stage()):
            self.data[da].hash()
        stage_set = set(self.stage)  # O(1) membership instead of O(n) list scans
        unique_data = []
        for i, da in enumerate(self.data):
            if i not in stage_set:
                # unstaged samples are always kept
                unique_data.append(i)
            elif da.img_hash not in hash_list:
                hash_list.append(da.img_hash)
                unique_data.append(i)
        self.data = [self.data[i] for i in unique_data]
        self.stage.clear()
        hash_list.clear()
        log(f'the length of data changed from {pre_len} to {len(self.data)}', Log_sign.Info)
    
    def get_hash_list(self):
        """
        Compute the image hash of every sample.

        Returns:
            list: one hash per sample, in dataset order
        """
        print('\nCalculating hash value of images from template ...')
        return [da.hash() for da in tqdm(self.data)]
        
    def sub(self, dataset):
        """
        Remove samples that also appear in the given dataset (matched by hash).

        Args:
            dataset (Datasets | str): the dataset, or its path, to subtract
        """
        pre_len = len(self.data)
        if isinstance(dataset, str):
            dataset = Datasets(dataset)
        self.unique(dataset.get_hash_list())
        log(f'the length of data changed from {pre_len} to {len(self.data)}', Log_sign.Info)
        
    def verify_kps(self, bounds_expand=0):
        """
        Check that key points fall inside the (optionally expanded) image bounds
        and plot a histogram of how far the violators overshoot.

        Args:
            bounds_expand (int, optional): extra margin around [0, 1]. Defaults to 0.

        Returns:
            dict: {'data_sum': total targets, 'error_sum': targets out of bounds}
        """
        data_sum = 0
        error_sum = 0
        overflow = torch.tensor([])
        for da in self.data:
            data_sum += da.keyPoints.shape[0]
            too_big = da.keyPoints > (1 + bounds_expand)
            too_small = da.keyPoints < (-bounds_expand)
            # a target counts as an error when any of its coordinates violates a bound
            error_sum += ((too_big | too_small).sum(-1) > 0).sum()
            if too_big.any():
                overflow = torch.concat([overflow, da.keyPoints[too_big] - 1 - bounds_expand], dim=-1)
            if too_small.any():
                overflow = torch.concat([overflow, bounds_expand - da.keyPoints[too_small]], dim=-1)

        # bucket the overshoots into 1% bins, clamping into the last bin
        hist = torch.zeros(100, dtype=torch.long)
        overflow = (overflow * 100).round().long()
        last_bin = hist.shape[0] - 1
        for d in overflow:
            if d > last_bin:
                hist[last_bin] += 1
            else:
                hist[d] += 1
        log(f'Got {data_sum} targets in total, {error_sum} targets out of bounds, {data_sum - error_sum} targets in confirm.')
        plot(hist, index_mul=0.01)
        cv2.waitKey(0)

        return {'data_sum': data_sum, 'error_sum': error_sum}
        
    def open_current_file(self, choice=0, frame=0):
        """
        Open the current file with the system handler (xdg-open) or LabelMaster.

        Only works for on-disk samples (source is None); in-memory samples are ignored.

        Args:
            choice (int, optional): 0 for image, 1 for label, 2 for folder, 3 for LabelMaster. Defaults to 0.
            frame (int, optional): frame index. Defaults to 0.
        """
        # NOTE(review): os.system with an unquoted f-string breaks on paths containing
        # spaces or shell metacharacters — consider subprocess.run([...], shell=False).
        if self.data[frame].source is None:
            if choice == 0:
                # open the image file
                os.system(f'xdg-open {self.data[frame].img_path}')
            elif choice == 1:
                # open the label file
                os.system(f'xdg-open {self.data[frame].label_path}')
            elif choice == 2:
                # open the containing folder (the dataset root when under an images/ tree)
                path = self.data[frame].img_path.parent
                if path.stem == 'images':
                    path = path.parent.parent
                os.system(f'xdg-open {str(path)}')
            elif choice == 3:
                # open in LabelMaster; when the image lives in an images/ tree, export a
                # temporary flat dataset first because LabelMaster expects that layout
                path = self.data[frame].img_path
                tmp_datasets_path = PLAYER_DIR + "/Temp_Datasets/"
                if path.parent.stem == 'images':
                    log('the file structure is wrong, rebuilding to open', Log_sign.Info)
                    if Path(tmp_datasets_path).exists():
                        shutil.rmtree(tmp_datasets_path)
                    self.output(tmp_datasets_path, label_format=Label_Format.LabelMaster, toDatasets=False, generate_hdt=False, reserve_name=True, show_log=False)
                    path = tmp_datasets_path + path.name
                os.system(f'{LABELMASTER} {str(path)} {self.get_LabelMaster_mode().value}')
                # reload the sample in case its label was edited in LabelMaster
                if self.data[frame].label_path.exists():
                    self.data[frame] = dataset(self.data[frame].img_path)
                if Path(tmp_datasets_path).exists():
                    shutil.rmtree(tmp_datasets_path)
                
    def reload(self, label_format=Label_Format.Auto, load_all=False):
        """
        Reload the dataset from disk.

        Args:
            label_format (Label_Format, optional): label format. Defaults to Label_Format.Auto.
            load_all (bool, optional): reload every source directory instead of only
                re-reading the staged labels. Defaults to False.
        """
        if load_all:
            parents = []
            in_memory = []
            for da in self.data:
                if da.source is not None:
                    # in-memory samples have no file to reload; keep them as-is
                    in_memory.append(da)
                elif Path(da.img_path).parent not in parents:
                    parents.append(Path(da.img_path).parent)
            self.data = in_memory
            for parent in parents:
                self.load(str(parent), label_format)
        else:
            for idx in self.check_stage():
                self.data[idx].read_label(self.data[idx].label_path, label_format, clear=True)

    def build_TV(self):
        """
        Compute the TV value of every staged image.
        """
        log('building TV...')
        for idx in tqdm(self.check_stage()):
            da = self.data[idx]
            da.hdt.TV = da.cal_V(show_log=False)

    def set_source_name(self, remain=True):
        """
        Stamp the dataset name onto each sample as its hdt source.

        Args:
            remain (bool, optional): keep an already-set source name. Defaults to True.
        """
        if self.name is None:
            log('the name of datasets is None', Log_sign.Error)
            return
        log(f'set the source as {self.name}', Log_sign.Info)
        for da in self.data:
            if da.source is None or not remain:
                da.hdt.source = self.name
                    
    def generate_hdt(self, autoset=True):
        """
        Write an HDT file for every sample.

        Args:
            autoset (bool, optional): auto-fill the source name and TV values first
                when they have never been set. Defaults to True.
        """
        need_autoset = (
            autoset
            and len(self.data)
            and self.data[0].hdt.TV == -1
            and self.data[0].hdt.source is None
        )
        if need_autoset:
            self.set_source_name()
            self.build_TV()
        for da in self.data:
            parent = da.img_path.parent
            if parent.stem == 'images':
                # standard layout: hdt/ lives next to images/ under the split root
                hdt_dir = parent.parent.joinpath('hdt')
                mkdir(str(hdt_dir))
                da.hdt.output(hdt_dir.joinpath(f'{da.img_path.stem}.hdt'))
            else:
                da.hdt.output(da.img_path)
                
    def get_hdt(self, dataset='Datasets/prompt', statis=True):
        """
        Pull HDT info (source / TV) from a prompt dataset by matching image hashes.

        Args:
            dataset (str, optional): path of the prompt dataset. Defaults to 'Datasets/prompt'.
            statis (bool, optional): print composition statistics afterwards. Defaults to True.
        """
        dataset = Datasets(dataset)
        hash_list = dataset.get_hash_list()
        log('getting hdt ...')
        for da in tqdm(self.data):
            if da.hash() in hash_list:
                # hoisted: the O(n) .index() lookup was previously performed up to twice
                match = dataset.data[hash_list.index(da.img_hash)]
                da.hdt.source = match.hdt.source
                if da.hdt.TV == -1:
                    da.hdt.TV = match.hdt.TV
        if statis:
            self.statistics()
                
    def save(self, label_format=Label_Format.LabelMaster):
        """
        Save the dataset back to where it was loaded from (via a temp directory).

        Args:
            label_format (Label_Format, optional): label format. Defaults to Label_Format.LabelMaster.

        Returns:
            Datasets: the re-saved dataset, or None when empty / not confirmed
        """
        if not len(self.data):
            log('the new datasets is empty!', Log_sign.Error)
            return
        save_path = self.data[0].img_path.parent
        if save_path.stem == 'images':
            save_path = save_path.parent.parent
        save_path = str(save_path)
        # does every sample actually live under save_path?
        origin_flag = all(str(da.img_path).find(save_path) != -1 for da in self.data)
        if log(f'the dataset will been saved to\n{save_path}\n- {"" if origin_flag else "NOT "}all of the data is from there\n- the label format is {label_format.name}\nThis command may cause the lost of origin data, please enter y to continue.', Log_sign.Require) == 'y':
            # write to a sibling temp dir first, then swap it into place
            tmp_dataset_path = save_path + '_save_tmp'
            nda = self.output(tmp_dataset_path, label_format=label_format, toDatasets=False)
            shutil.rmtree(save_path)
            shutil.move(tmp_dataset_path, save_path)
            return nda
    
    def in_image(self, show_log=True):
        """
        Drop targets whose key points fall outside the [0, 1] image range, and drop
        images that lose every target (label-less images are always kept).

        Args:
            show_log (bool, optional): print statistics afterwards. Defaults to True.
        """
        keep_images = []
        len_data = len(self.data)
        pre_targets_sum = 0
        targets_sum = 0
        for ida, da in enumerate(self.data):
            if len(da.keyPoints) > 0:
                pre_targets_sum += len(da.keyPoints)
                # BUG fix: the old code deleted rows while iterating the same tensor,
                # which misindexed/skipped rows after a removal. Build one keep-mask
                # and filter every per-target tensor with it instead.
                keep = ~(((da.keyPoints < 0) | (da.keyPoints > 1)).sum(-1) > 0)
                if not bool(keep.all()):
                    da.keyPoints = da.keyPoints[keep]
                    da.cls = da.cls[keep]
                    da.tsize = da.tsize[keep]
                    da.color = da.color[keep]
                    da.frame = da.frame[keep]
                    da.class23 = da.class23[keep]
                    if len(da.conf):
                        da.conf = da.conf[keep]
                if len(da.keyPoints) > 0:
                    keep_images.append(ida)
                    targets_sum += len(da.keyPoints)
            else:
                keep_images.append(ida)
        self.data = [self.data[i] for i in keep_images]
        log(f'\nimages changed from {len_data} to {len(self.data)}\ntargets changed from {pre_targets_sum} to {targets_sum}', Log_sign.Info)
        if show_log:
            self.statistics()

    def save_to(self, dest, label_format=Label_Format.Auto):
        """
        Write the labels of the staged samples into a destination dataset,
        matching samples by image hash.

        Args:
            dest (str): destination dataset path
            label_format (Label_Format, optional): label format. Defaults to Label_Format.Auto.
        """
        changes = 0
        dest = Datasets(dest)
        hash_list = dest.get_hash_list()
        log('Saving ...')
        for da in tqdm(self.check_stage()):
            if self.data[da].hash() in hash_list:
                self.data[da].output(dest.data[hash_list.index(self.data[da].img_hash)].label_path, label_format)
                changes += 1
        # BUG fix: the second argument was the Log_sign class itself, not a member
        log(f'{changes} has been rewritten', Log_sign.Info)

    def get_grandparent_dir(self):
        """
        Get the directory containing the dataset's root.

        Returns:
            str: the parent of the dataset root
        """
        first = self.data[0].img_path.parent
        # for the images/ layout the root is two levels up
        root = first.parent.parent if first.stem == 'images' else first
        return str(root.parent)
    
    def get_label_format(self):
        """
        Detect the label format of the dataset.

        Returns:
            Label_Format: the detected format (Auto when nothing decisive is found)
        """
        detected = Label_Format.Auto
        for da in self.data:
            detected = da.current_format
            # stop at the first sample with an explicit format or any labels
            if detected != Label_Format.Auto or len(da.keyPoints):
                break
        log('get label format ' + detected.name, Log_sign.Info)
        return detected
    
    def get_LabelMaster_mode(self):
        """
        Map the dataset's label format to the matching LabelMaster mode.

        Returns:
            LabelMaster_mode: the mode to launch LabelMaster with
        """
        cf = self.get_label_format()
        if cf in {Label_Format.Wind, Label_Format.LabelMaster_wind}:
            return LabelMaster_mode.Wind
        if cf == Label_Format.Engineer:
            return LabelMaster_mode.Engineer
        if cf == Label_Format.Anti_wind:
            return LabelMaster_mode.Anti_wind
        # Label_Format.LabelMaster and every unrecognized format default to Armor
        return LabelMaster_mode.Armor
    
    def clear_tsize(self):
        """
        Reset every sample's tsize to zeros (shape preserved).
        """
        for sample in self.data:
            sample.tsize = torch.zeros_like(sample.tsize)

    def clear_label(self):
        """
        Clear the labels of every sample.
        """
        for sample in self.data:
            sample.clear_label()


    def rotation_fixing(self):
        """
        Fix rotated anti-wind labels by rolling the key-point order by one pair
        when the left-most x belongs to the first or second point.
        """
        cf = self.get_label_format()
        for idx in self.check_stage():
            kps = self.data[idx].keyPoints
            if len(kps) and cf == Label_Format.Anti_wind:
                lowest_x = kps[0, ::2].min()
                if lowest_x == kps[0, 0] or lowest_x == kps[0, 2]:
                    self.data[idx].keyPoints = torch.concat([kps[:, 4:], kps[:, :2], kps[:, 2:4]], dim=-1)
    
    def fliplr(self):
        """
        Append horizontally-flipped copies of the staged anti-wind samples.
        """
        cf = self.get_label_format()
        log('fliplr data...')
        for idx in tqdm(self.check_stage()):
            kps = self.data[idx].keyPoints.clone()
            if not len(kps) or cf != Label_Format.Anti_wind:
                continue
            flipped = dataset(self.data[idx])
            # mirror x coordinates; the mirror also reverses the point order
            kps[:, ::2] = 1 - kps[:, ::2]
            flipped.keyPoints = torch.cat([kps[:, 6:], kps[:, 4:6], kps[:, 2:4], kps[:, :2]], dim=-1)
            flipped.source = np.fliplr(cv2.imread(str(flipped.img_path)))
            self.data.append(flipped)
        self.stage.clear()
        
    def fix_label(self, new_mine_rule=False):
        """
        Fix labels (engineer datasets only): zero out tsize and, under the new
        mine rule, normalize the point order of non-zero classes.

        Args:
            new_mine_rule (bool, optional): apply the new mine labelling rule. Defaults to False.
        """
        cf = self.get_label_format()
        for idx in self.check_stage():
            da = self.data[idx]
            if not len(da.keyPoints) or cf != Label_Format.Engineer:
                continue
            da.tsize = torch.zeros_like(da.tsize)
            if new_mine_rule:
                for i, cls in enumerate(da.cls):
                    if int(cls) != 0:
                        kps = da.keyPoints[i]
                        # swap halves when point 0 sits below point 2
                        if kps[1] > kps[5]:
                            da.keyPoints[i] = torch.concat([kps[4:], kps[:4]], dim=-1)
                                    
    def enhance_V(self, rate=1):
        """
        Brighten the staged images in place by scaling the V channel in HSV space.

        Args:
            rate (int, optional): brightness multiplier. Defaults to 1.
        """
        print('enhancing V...')
        for idx in tqdm(self.check_stage()):
            da = self.data[idx]
            image = da.get_image()
            hue, sat, val = cv2.split(cv2.cvtColor(image, cv2.COLOR_BGR2HSV))
            # LUT scaling the V channel, clamped to the valid range
            lut = np.clip(np.arange(0, 256) * rate, 0, 255).astype(image.dtype)
            boosted = cv2.merge((hue, sat, cv2.LUT(val, lut)))
            cv2.cvtColor(boosted, cv2.COLOR_HSV2BGR, dst=da.source)
        self.stage.clear()
            
    def exchange_BR(self):
        """
        Append copies of the staged images with their blue and red channels swapped.
        """
        print('exchange BR...')
        for idx in tqdm(self.check_stage()):
            # channels are B, G, R (the old hue/sat/val names were misleading)
            blue, green, red = cv2.split(self.data[idx].get_image())
            self.data.append(dataset(cv2.merge((red, green, blue))))
        self.stage.clear()

class dataset():
    """Container for one image together with all of its label information.

    A sample can be built from a raw image (``np.ndarray``), cloned from
    another ``dataset`` instance, or loaded from an image file path. In the
    file case the matching ``.txt`` label file is looked up next to the
    image, or in a sibling ``labels`` directory when the image lives in an
    ``images`` directory.
    """
    def __init__(self, img, label_format=Label_Format.Auto, force_load=False):
        """
        Args:
            img: np.ndarray (raw image), another ``dataset`` (cloned), or a
                path (str/Path) to an image file.
            label_format (Label_Format, optional): format used to parse the
                label file. When ``img`` is an ndarray this argument doubles
                as the pseudo file name (a pre-existing quirk of this API).
            force_load (bool, optional): keep labels even when they fail
                verification. Defaults to False.
        """
        # Per-target tensors: row i of each tensor describes target i.
        self.keyPoints = torch.tensor([])   # flattened corner coordinates
        self.cls = torch.tensor([])         # class index
        self.tsize = torch.tensor([])       # target size index
        self.color = torch.tensor([])       # color index
        self.frame = torch.tensor([])       # bounding box (cx, cy, w, h)
        self.class23 = torch.tensor([])     # combined class/size index
        self.conf = torch.tensor([])        # detection confidence (optional)
        self.hdt = HDT()
        self.source = None                  # in-memory image, if any
        self.img_path = None
        self.label_path = None              # always defined so clone()/output() can't hit AttributeError
        self.img_hash = None                # cached md5 of the image content
        self.current_format = Label_Format.Auto
        self.LABEL_FORMAT_ARMOR = {Label_Format.Standard, Label_Format.LabelMaster}

        if isinstance(img, np.ndarray):
            # Raw image: ``label_format`` is reused as a pseudo file name.
            if not isinstance(label_format, str):
                label_format = 'hh'
            self.source = img
            self.img_path = Path(label_format)
        elif isinstance(img, dataset):
            self.clone(img)
        else:
            if not isinstance(img, Path):
                img = Path(img)
            if img.is_file():
                self.img_path = img
                self.hdt.load(self.img_path)
                self.label_path = self.img_path.with_suffix('.txt')
                if not self.label_path.exists() and 'images' == self.img_path.parent.stem:
                    self.label_path = self.label_path.parent.parent.joinpath(f'labels/{self.img_path.stem}.txt')
                if self.label_path.exists():
                    self.read_label(self.label_path, label_format, force_load=force_load)
            else:
                # ``self.img_path`` is still None on this branch, so report
                # the path that was actually passed in.
                log(f'the path {img} for dataset is not a file!', Log_sign.Warning)
                return

    def read_label(self, path, label_format=Label_Format.Auto, clear=False, force_load=False):
        """Read a label file, adding one target per line.

        Args:
            path: label file path.
            label_format (Label_Format, optional): format of the file; Auto
                lets ``add_label`` guess from the field count.
            clear (bool, optional): drop existing labels first.
            force_load (bool, optional): keep labels that fail verification.
        """
        if label_format != Label_Format.Auto:
            self.current_format = label_format
        if clear:
            self.clear_label()
        with open(path, 'r') as f:
            for line in f.readlines():
                line = list(map(float, line.split()))
                self.add_label(line, label_format, force_load=force_load)

    def output(self, output_path=None, label_format=Label_Format.Standard, name=None, move=False, generate_hdt=False):
        """
        Write this sample's image, label and optionally HDT file.

        Args:
            output_path (str, optional): destination. Either a directory, or
                a file path ending in '.txt'/an image suffix — in that case
                its stem becomes the file name, its directory the target, and
                the label format is inferred from the directory layout.
            label_format (Label_Format, optional): output label format.
                Defaults to Label_Format.Standard.
            name (str, optional): output file stem. Defaults to the label
                file stem.
            move (bool, optional): move the image instead of copying.
                Defaults to False.
            generate_hdt (bool, optional): also write the HDT file.
                Defaults to False.
        """
        if self.source is None and self.img_path is None:
            log('the dataset is empty!', Log_sign.Warning)
            return
        if Path(output_path).suffix in set.union({'.txt'}, IMAGE_FORMATS):
            # A concrete file path was given: split stem/directory and infer
            # the label format from the directory layout. NOTE(review): on
            # this branch only the label is written, never the image —
            # presumably the caller has already placed it; confirm.
            output_path = Path(output_path)
            file_name = output_path.stem
            output_path = output_path.parent
            if output_path.stem in {'images', 'labels'}:
                output_path = output_path.parent
                if label_format == Label_Format.Auto:
                    label_format = Label_Format.Standard
            elif label_format == Label_Format.Auto:
                label_format = Label_Format.LabelMaster
            output_path = str(output_path)
        else:
            file_name = self.label_path.stem if name is None else name
            if self.source is not None:
                cv2.imwrite(output_path + ('/' if label_format == Label_Format.LabelMaster else '/images/') + file_name + '.png', self.source)
            else:
                output_img_path = output_path + ('/' if label_format == Label_Format.LabelMaster else '/images/') + file_name + self.img_path.suffix
                # Compare as Path objects: ``output_img_path`` is a str while
                # ``self.img_path`` is a Path, so a raw ``!=`` was always
                # True and could self-copy (SameFileError).
                if Path(output_img_path) != self.img_path:
                    if move:
                        shutil.move(self.img_path, output_img_path)
                    else:
                        shutil.copy(self.img_path, output_img_path)
        if generate_hdt:
            self.hdt.output(output_path + ('/' if label_format == Label_Format.LabelMaster else '/hdt/') + file_name + '.hdt')
        with open(output_path + ('/' if label_format == Label_Format.LabelMaster else '/labels/') + file_name + '.txt', 'w') as f:
            if label_format in {Label_Format.Standard, Label_Format.LabelMaster, Label_Format.Engineer, Label_Format.Anti_wind, Label_Format.Wind}:
                # 8 key points then cls/tsize/color.
                for i in range(self.cls.shape[0]):
                    for kp in self.keyPoints[i]:
                        f.write(f'{kp} ' )
                    f.write(f'{int(self.cls[i])} {int(self.tsize[i])} {int(self.color[i])}\n')
            elif label_format == Label_Format.Old_Standard:
                # frame, key points, class23/color.
                for i in range(self.cls.shape[0]):
                    for fr in self.frame[i]:
                        f.write(f'{fr} ')
                    for kp in self.keyPoints[i]:
                        f.write(f'{kp} ')
                    f.write(f'{self.class23[i]} {self.color[i]}\n')
            elif label_format == Label_Format.Yolov5:
                for i in range(self.cls.shape[0]):
                    f.write(f'{self.cls[i]} ')
                    for fr in self.frame[i]:
                        f.write(f'{fr} ')
                    for kp in self.keyPoints[i]:
                        f.write(f'{kp} ')
                    f.write(f'-1 -1 {self.color[i]}\n')
            elif label_format == Label_Format.SJ or label_format == Label_Format.SJ_old:
                # SJ encodes color into the class index.
                classes = 9 if label_format == Label_Format.SJ_old else 12
                for i in range(self.cls.shape[0]):
                    f.write(f'{self.class23[i] + self.color[i] * classes} ')
                    for kp in self.keyPoints[i]:
                        f.write(f'{kp} ')
                    f.write('\n')
            elif label_format == Label_Format.Wind_4P:
                # NOTE(review): drops key points -4/-3 and keeps the last
                # pair — assumed to be the 4-point wind convention; confirm.
                for i in range(self.cls.shape[0]):
                    for kp in self.keyPoints[i][:-4]:
                        f.write(f'{kp} ')
                    f.write(f'{self.keyPoints[i][-2]} {self.keyPoints[i][-1]} ')
                    f.write(f'{int(self.cls[i])} {int(self.tsize[i])} {int(self.color[i])}\n')
            elif label_format == Label_Format.Wind_v5:
                for i in range(self.cls.shape[0]):
                    f.write(f'{int(self.cls[i])} ')
                    for fr in self.frame[i]:
                        f.write(f'{fr} ')
                    for kp in self.keyPoints[i]:
                        f.write(f'{kp} ')
                    f.write(f'{int(self.color[i])}\n')

    def add_label(self, label, label_format=Label_Format.Auto, conf=None, force_load=False):
        """
        Parse one label line and append the target to this sample.

        Args:
            label (list): numeric label fields.
            label_format (Label_Format, optional): format of the fields;
                Auto guesses from the field count (ambiguous counts such as
                11 and 16 resolve to Standard/Yolov5 respectively).
            conf (float, optional): detection confidence. Defaults to None.
            force_load (bool, optional): append even if verification fails.
        """
        if label == []:
            return
        flag = False
        if label_format == Label_Format.Auto:
            if len(label) == 11:
                label_format = Label_Format.Standard
            elif len(label) == 14:
                label_format = Label_Format.Old_Standard
            elif len(label) == 16:
                label_format = Label_Format.Yolov5
            elif len(label) == 9:
                label_format = Label_Format.SJ
            elif len(label) == 10:
                label_format = Label_Format.LabelMaster_old
            elif len(label) == 15:
                label_format = Label_Format.SJ_wind
            elif len(label) == 13:
                label_format = Label_Format.Wind
        if label_format in {Label_Format.Standard, Label_Format.LabelMaster, Label_Format.Anti_wind, Label_Format.Engineer}:
            if(len(label) == 11):
                kp = label[:8]
                cl = int(label[-3])
                ts = int(label[-2])
                co = int(label[-1])
                cl23 = get_class23(cl, ts, label_format)
                fr = self.build_frame(kp)
                flag = True
        elif label_format == Label_Format.Old_Standard:
            if(len(label) == 14):
                fr = label[:4]
                kp = label[4:12]
                cl23 = label[-2]
                cl = class23_to_cls(label[-2])
                ts = class23_to_tsize(label[-2])
                co = label[-1]
                flag = True
        elif label_format == Label_Format.Yolov5:
            if(len(label) == 16):
                cl = class23_to_cls(label[0])
                ts = class23_to_tsize(label[0])
                cl23 = label[0]
                co = label[-1]
                fr = label[1:5]
                kp = label[5:13]
                flag = True
        elif label_format == Label_Format.SJ or label_format == Label_Format.SJ_old:
            # SJ encodes color into the class index.
            classes = 9 if label_format == Label_Format.SJ_old else 12
            if (len(label) == 9):
                cl23 = label[0] % classes
                co = label[0] // classes
                cl = class23_to_cls(cl23)
                ts = class23_to_tsize(cl23)
                kp = label[1:]
                fr = self.build_frame(kp)
                flag = True
        elif label_format == Label_Format.SJ_wind:
            if (len(label) == 15):
                cl23 = label[0] % 3
                co = label[0] // 3
                cl = cl23
                ts = 0
                kp = label[5:]
                fr = label[1:5]
                flag = True
        elif label_format == Label_Format.Wind:
            if (len(label) == 13):
                kp = label[:10]
                fr = self.build_frame(kp)
                cl23 = label[10]
                cl = cl23
                ts = label[-2]
                co = label[-1]
                flag = True
        elif label_format == Label_Format.Wind_v5:
            if (len(label) == 16):
                cl = label[0]
                ts = 0
                cl23 = label[0]
                co = label[-1]
                fr = label[1:5]
                kp = label[5:15]
                flag = True
        elif label_format == Label_Format.LabelMaster_wind:
            if (len(label) == 11):
                cl23 = label[0] % 3
                co = label[0] // 3
                cl = cl23
                ts = 0
                kp = label[1:]
                fr = self.build_frame(kp)
                flag = True
        elif label_format == Label_Format.Yolov5_wind:
            if (len(label) == 15):
                cl23 = label[-1] % 4
                co = label[-1] // 4
                cl = cl23
                ts = 0
                kp = label[4:-1]
                fr = self.build_frame(label[:4])
                flag = True
        elif label_format == Label_Format.LabelMaster_old:
            if (len(label) == 10):
                kp = label[:8]
                cl23 = label[-2]
                cl = class23_to_cls(label[-2])
                ts = class23_to_tsize(label[-2])
                co = label[-1]
                fr = self.build_frame(kp)
                flag = True
        if flag:
            cl23 = int(cl23)
            cl = int(cl)
            co = int(co)
            ts = int(ts)
        # ``force_load`` only skips verification; it cannot rescue a line
        # that was never parsed (flag False), since kp/fr would be undefined.
        # The original `flag and verify(...) or force_load` precedence made
        # exactly that NameError possible.
        if flag and (self.verify_label(label_format, kp, cl, co, ts, cl23, fr) or force_load):
            self.frame = torch.concat([self.frame, torch.tensor([fr])])
            self.keyPoints = torch.concat([self.keyPoints, torch.tensor([kp])])
            self.cls = torch.concat([self.cls, torch.tensor([cl])])
            self.tsize = torch.concat([self.tsize, torch.tensor([ts])])
            self.color = torch.concat([self.color, torch.tensor([co])])
            self.class23 = torch.concat([self.class23, torch.tensor([cl23])])
            if conf is not None:
                self.conf = torch.concat([self.conf, torch.tensor([conf])])
        else:
            if self.source is None:
                log(f'The target\n{label}\nin\n{self.label_path} > {self.img_path}\nis skiped when loading as {label_format.name}.', Log_sign.Warning)

    def verify_label(self, label_format, kp, cl, co, ts, cl23, fr):
        """
        Verify that a parsed label is plausible for its format.

        Args:
            label_format (Label_Format): label format.
            kp (list): key points (normalized coordinates).
            cl (int): class index.
            co (int): color index.
            ts (int): size index.
            cl23 (int): combined class index.
            fr (list): bounding box.

        Returns:
            bool: True when every coordinate is roughly inside the image and
            the class/size/color combination is valid for the format.
        """
        # Allow a half-image margin outside [0, 1] before rejecting.
        if (-0.5 < torch.tensor(fr + kp)).all() and (1.5 > torch.tensor(fr + kp)).all():
            if label_format in self.LABEL_FORMAT_ARMOR:
                # Armor: class 1 must be big; classes 0/2/6 must be small.
                return cl in range(8) and ts in range(2) and co in range(4) and not (cl == 1 and ts == 0 or cl in {0, 2, 6} and ts == 1)
            elif label_format in {Label_Format.Wind, Label_Format.Yolov5_wind, Label_Format.Wind_v5, Label_Format.LabelMaster_wind}:
                return cl in range(3) and ts == 0 and co in range(3)
            elif label_format == Label_Format.Engineer:
                return cl in range(3) and ts == 0 and co == 0
            elif label_format == Label_Format.Anti_wind:
                return cl == 0 and ts == 0 and co in range(2)
            else:
                log('the label format is not supported', Log_sign.Warning)
        else:
            log('the label is out of image too much', Log_sign.Warning)

        return False


    def build_frame(self, keyPoints):
        """
        Build an axis-aligned bounding box from key points.

        Args:
            keyPoints (list): flattened (x, y) key points.

        Returns:
            list: [center_x, center_y, width, height] as plain floats
            (None when ``keyPoints`` is None).
        """
        if keyPoints is None:
            return None
        kp = torch.tensor(keyPoints).reshape(-1, 2)
        max_x = kp[:, 0].max()
        max_y = kp[:, 1].max()
        min_x = kp[:, 0].min()
        min_y = kp[:, 1].min()
        center_x = (max_x + min_x) / 2
        center_y = (max_y + min_y) / 2
        width = max_x - min_x
        hight = max_y - min_y
        # Return plain floats so callers can feed the list straight into
        # torch.tensor without mixing tensors and numbers.
        frame = [center_x.item(), center_y.item(), width.item(), hight.item()]
        return frame

    def get_classmin(self):
        """Return the combined class index (class23 + 12 * color)."""
        if len(self.class23) == 0:
            # NOTE(review): set_class23 is defined elsewhere in this file —
            # assumed to populate self.class23 from cls/tsize; confirm.
            self.set_class23()
        return self.class23 + self.color * 12

    def clear_label(self):
        """Drop every label tensor and reset the format to Auto."""
        self.keyPoints = torch.tensor([])
        self.cls = torch.tensor([])
        self.tsize = torch.tensor([])
        self.color = torch.tensor([])
        self.frame = torch.tensor([])
        self.class23 = torch.tensor([])
        self.conf = torch.tensor([])
        self.current_format = Label_Format.Auto

    def add(self, data:'dataset', i=None):
        """Append all targets of ``data`` (or only target ``i``) to this sample."""
        if i is None:
            self.keyPoints = torch.concat([self.keyPoints, data.keyPoints], dim=0)
            self.cls = torch.concat([self.cls, data.cls], dim=0)
            self.tsize = torch.concat([self.tsize, data.tsize], dim=0)
            self.color = torch.concat([self.color, data.color], dim=0)
            self.frame = torch.concat([self.frame, data.frame], dim=0)
            self.class23 = torch.concat([self.class23, data.class23], dim=0)
            self.conf = torch.concat([self.conf, data.conf], dim=0)
        else:
            self.keyPoints = torch.concat([self.keyPoints, data.keyPoints[i].unsqueeze(0)], dim=0)
            self.cls = torch.concat([self.cls, data.cls[i].unsqueeze(0)], dim=0)
            self.tsize = torch.concat([self.tsize, data.tsize[i].unsqueeze(0)], dim=0)
            self.color = torch.concat([self.color, data.color[i].unsqueeze(0)], dim=0)
            self.frame = torch.concat([self.frame, data.frame[i].unsqueeze(0)], dim=0)
            self.class23 = torch.concat([self.class23, data.class23[i].unsqueeze(0)], dim=0)
            # Only copy a confidence when the source has one; the original
            # condition compared a tensor (self.keyPoints) to 1, which is
            # not a valid boolean test.
            if len(data.conf):
                self.conf = torch.concat([self.conf, data.conf[i].unsqueeze(0)], dim=0)
        if self.current_format == Label_Format.Auto:
            self.current_format = data.current_format

    def clone(self, dest):
        """Shallow-copy every field from another dataset instance."""
        self.source = dest.source
        self.label_path = dest.label_path
        self.keyPoints = dest.keyPoints
        self.cls = dest.cls
        self.tsize = dest.tsize
        self.color = dest.color
        self.frame = dest.frame
        self.class23 = dest.class23
        self.conf = dest.conf
        self.hdt = dest.hdt
        self.img_path = dest.img_path
        self.img_hash = dest.img_hash
        self.current_format = dest.current_format

    def hash(self, Bytes=1024):
        """Return (and cache) the md5 of the image content, read in chunks.

        Args:
            Bytes (int, optional): chunk size for reading. Defaults to 1024.
        """
        if self.img_hash is None:
            if self.source is None:
                path = self.img_path
            else:
                # In-memory image: dump it to a temporary file to hash it.
                cv2.imwrite('tmp.png', self.source)
                path = 'tmp.png'
            Md5 = hashlib.md5()
            with open(path, 'rb') as f:
                while True:
                    data = f.read(Bytes)
                    if data:
                        Md5.update(data)
                    else:
                        break
            self.img_hash = Md5.hexdigest()
            if self.source is not None:
                # Remove the temporary dump so it doesn't litter the cwd.
                os.remove('tmp.png')
        return self.img_hash

    def cal_V(self, img=None, nets=None, show_img=False, manual_rectangle=None, cal_mode=Cal_V_mode.default, show_log=False):
        """Average brightness (HSV V) over the labeled regions, or delegate
        to the global Cal_V when no key points / non-default mode."""
        if cal_mode == Cal_V_mode.default and len(self.keyPoints):
            V = 0
            if img is None:
                img = cv2.imread(str(self.img_path)) if self.source is None else self.source
            mv = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))[2]
            # Scale normalized key points to pixel coordinates.
            kps = (kpts2inside(self.keyPoints.clone().reshape(self.keyPoints.shape[0], -1, 2)) * torch.tensor(img.shape[:2]).unsqueeze(0).unsqueeze(0)).numpy()
            for kp in kps:
                # Warp each labeled quadrilateral to a uniform patch and
                # accumulate its brightness.
                V += get_V(cv2.warpPerspective(mv, cv2.getPerspectiveTransform(kp, np.array([[0, 0], [0, UNIFORM_IMAGE_SIZE[1]], [UNIFORM_IMAGE_SIZE[0], UNIFORM_IMAGE_SIZE[1]], [UNIFORM_IMAGE_SIZE[0], 0]],dtype='float32')), UNIFORM_IMAGE_SIZE), show_img)
            return V / len(self.keyPoints)
        else:
            return Cal_V(img, nets=nets, manual_rectangle=manual_rectangle, cal_mode=Cal_V_mode.net, show_img=show_img, show_log=show_log)

    def get_image(self, mat=True):
        """Return the image: the in-memory array if present, otherwise read
        from disk (or return the path when ``mat`` is False)."""
        if self.source is None:
            if mat:
                return cv2.imread(str(self.img_path))
            return self.img_path
        else:
            return self.source

class Video(Player_Core):
    """Player backed by one or more video files."""
    def __init__(self, path=DEFAULT_VIDEO, for_acc=False, net=None):
        super().__init__(net)
        self.vi = None              # current cv2.VideoCapture
        self.vi_frames = -1         # frame count of the current video
        self.last_frame = -1        # index of the last decoded frame (-1 = none)
        self.for_acc = for_acc      # sequential-read mode (for broken headers)
        self.acc_rate = 5
        self.fixed_fps = 30
        self.player_sign = Player_sign.Video
        self.video_suffer = None
        self.suffer_frames = 0
        self.tmp_code = 0           # counter to keep temporary fix files unique
        self.load(path)

    def load(self, path, show=False):
        """Collect the video file(s) found at ``path`` (file or directory)."""
        self.name, path = get_name(path)
        if os.path.isfile(path):
            self.data += [path]
        elif os.path.isdir(path):
            self.data += get_files(path, VIDEO_FORMATS)
        if self.data == []:
            log(f'can\'t catch any video from {path}', Log_sign.Warning)

    def local_key(self, key, frame):
        """Handle player key presses; return the (possibly advanced) frame index."""
        if key == 91:  # [ : previous video
            self.video_code = max(self.video_code - 1, 0)
            self.last_frame = -1
        elif key == 93:  # ] : next video
            self.video_code = min(self.video_code + 1, len(self.data) - 1)
            self.last_frame = -1
        elif key == 107:  # K : move the current video into del_videos
            mkdir('del_videos')
            shutil.move(self.data[self.video_code], f'del_videos/{Path(self.data[self.video_code]).name}')
            self.data.pop(self.video_code)
            self.load_video()
        elif key == 102:  # F : rewrite the current video to fix its header
            cv2.destroyAllWindows()
            self.fix_head_core()
            self.load_video()
        elif key == 98:  # B : toggle sequential-read mode
            self.for_acc = not self.for_acc
        elif key == 59:  # ; : reload labels for the current entry
            # NOTE(review): here self.data holds file paths, yet this branch
            # treats entries as dataset objects — looks inherited from the
            # picture player; confirm whether it is reachable.
            if self.data[frame].label_path.exists():
                self.data[frame] = dataset(self.data[frame].img_path)
        else:
            return frame + 1
        return frame

    def get_img(self, frame):
        """Return (image, frame index, video stem) for the requested frame,
        decoding sequentially or seeking depending on ``for_acc``."""
        image = None
        if self.last_frame == -1:
            self.load_video()
            frame = 0
        else:
            frame = max(0, frame)
        if self.last_frame != frame:
            ret = False
            if self.last_frame + 1 == frame:
                # Common case: the very next frame.
                ret, image = self.vi.read()
                if self.video_suffer is not None and ret:
                    self.video_suffer.write(image)
                    self.suffer_frames += 1
            elif not self.for_acc:
                # Healthy header: random-access seek.
                self.vi.set(cv2.CAP_PROP_POS_FRAMES, frame)
                ret, image = self.vi.read()
            else:
                # Broken header: only sequential reads are reliable.
                if frame >= self.last_frame:
                    for _ in range(frame - self.last_frame):
                        ret, image = self.vi.read()
                else:
                    self.vi = cv2.VideoCapture(self.data[self.video_code])
                    if frame > 2000:
                        log('reloading video ...')
                        for _ in tqdm(range(frame)):
                            ret, image = self.vi.read()
                    else:
                        for _ in range(frame):
                            ret, image = self.vi.read()
            if ret:
                self.last_frame = frame
                self.last_image = image.copy()
        else:
            image = self.last_image.copy()
        if self.last_frame != frame and not ret and frame > 0:
            # Reached the end of this video: advance to the next one.
            self.local_key(93, frame)
            image, frame, _ = self.get_img(frame)
        return image, frame, Path(self.data[self.video_code]).stem

    def load_video(self, video_code=None):
        """Open the video at ``video_code`` (index or path) and reset playback state."""
        if video_code is None:
            video_code = self.video_code
        elif isinstance(video_code, str):
            if Path(video_code).exists():
                if video_code in self.data:
                    video_code = self.data.index(video_code)
                else:
                    self.data.append(video_code)
                    video_code = len(self.data) - 1
            else:
                log(f'can\'t find the video {video_code}', Log_sign.Warning)
                return
        self.video_code = min(max(video_code, 0), len(self.data) - 1)
        assert self.video_code != -1, log('there is no video loaded!', Log_sign.Error)
        self.vi = cv2.VideoCapture(self.data[self.video_code])
        self.last_frame = -1
        self.vi_frames = int(self.vi.get(cv2.CAP_PROP_FRAME_COUNT))
        if self.vi_frames == 0:
            # A zero frame count means the container header is unreadable,
            # so fall back to sequential reads.
            if not self.for_acc:
                self.for_acc = True
                log('the head of the video may be broken, auto set for_acc as True', Log_sign.Info)
        else:
            if self.for_acc:
                self.for_acc = False

    def fix_head(self, video_code=None, fps=30, stage=False):
        """Rewrite one video (or every staged video) to repair its header."""
        if stage:
            for i in self.check_stage():
                self.fix_head_core(i, fps)
        else:
            self.fix_head_core(video_code, fps)

    def fix_head_core(self, video_code=None, fps=None, thread=False):
        """Re-encode the current video into a fresh container, optionally
        in a background process."""
        self.load_video(video_code)
        cv2.destroyAllWindows()
        if fps is None:
            fps = self.fixed_fps
        ret, image = self.vi.read()
        tmp_path = f'[{self.tmp_code}]fixing_head.avi'
        self.tmp_code = self.tmp_code + 1
        vio = cv2.VideoWriter(tmp_path, FOURCC, fps, (image.shape[1], image.shape[0]))
        print(f'Fixing head of {self.data[self.video_code]} {"in background" if thread else ""}...')
        if thread:
            Process(target=rewrite_video, args=[self.vi, vio, tmp_path, self.data[self.video_code], image, True]).start()
        else:
            rewrite_video(self.vi, vio, tmp_path, self.data[self.video_code], image)

    def split(self, num, video_code=None, fps=None):
        """Split the current video into ``num`` roughly equal parts; any
        leftover frames are appended to the last part."""
        if num <= 0:
            return
        self.load_video(video_code)
        cv2.destroyAllWindows()
        frames = int(self.vi.get(cv2.CAP_PROP_FRAME_COUNT))
        assert frames > 0, log(f'can\'t get any frame from\n{self.data[self.video_code]}\n, this may be caused when the head of video is crushed, please check it.', Log_sign.Error)
        sub_frames = frames // num
        video_name = Path(self.data[self.video_code]).stem
        dir_path = f'{PLAYER_DIR}/Split_{video_name}'
        mkdir(dir_path)

        fps = self.vi.get(cv2.CAP_PROP_FPS) if fps is None else fps
        ret, image = self.vi.read()
        for n in range(num):
            vio = cv2.VideoWriter(f'{dir_path}/{video_name}[{n}].avi', FOURCC, fps, (image.shape[1], image.shape[0]))
            print(f'[{n}/{num}] Writing {dir_path}/{video_name}[{n}].avi ...')
            for _ in tqdm(range(sub_frames)):
                vio.write(image)
                ret, image = self.vi.read()
            if n < num - 1:
                vio.release()
        # Append the remainder to the still-open last writer; the original
        # wrote to an already-released writer and dropped one frame here.
        while ret:
            vio.write(image)
            ret, image = self.vi.read()
        vio.release()

    def merge(self, fps=None, name=None):
        """Concatenate every staged video, in stage order, into one file."""
        assert self.check_stage(), log('There is no videos to merge!', Log_sign.Error)
        # Start from the first *staged* video (the original started from
        # data[0], which is not necessarily staged).
        self.load_video(self.stage[0])
        fps = self.vi.get(cv2.CAP_PROP_FPS) if fps is None else fps
        name = 'merge_' + Path(self.data[self.check_stage()[0]]).stem if name is None else name
        if name[-4:] not in VIDEO_FORMATS:
            name += '.avi'
        print('\nMerging videos ...')
        ret, image = self.vi.read()
        # ``name`` already carries its extension — don't append a second one.
        vio = cv2.VideoWriter(f'{PLAYER_DIR}/{name}', FOURCC, fps, (image.shape[1], image.shape[0]))
        for i in tqdm(self.stage[1:]):
            while ret:
                vio.write(image)
                ret, image = self.vi.read()
            self.load_video(i)
            ret, image = self.vi.read()
        # Flush the frames of the last staged video.
        while ret:
            vio.write(image)
            ret, image = self.vi.read()
        vio.release()
        self.stage.clear()

    def fetch_data_hass(self, sign, clear=False):
        """Stage every video whose path contains ``sign``."""
        if clear:
            self.stage.clear()
        for i, d in enumerate(self.data):
            if d.rfind(sign) != -1:
                self.stage.append(i)

    def fix_name_forU(self):
        """Rename videos so ':' becomes '_' (for filesystems that reject it)."""
        for vi in self.data:
            # str.find returns -1 (truthy!) when absent, so the original
            # condition also fired for names without a colon.
            if ':' in vi:
                pvi = Path(vi)
                shutil.move(vi, str(pvi.parent) + '/' + pvi.stem.replace(':', '_') + pvi.suffix)

    def open_current_file(self, choice=0, frame=0):
        """Open the current video (choice 0) or its directory (choice 2)."""
        if choice == 0:
            os.system(f'xdg-open {self.data[self.video_code]}')
        elif choice == 2:
            os.system(f'xdg-open {str(Path(self.data[self.video_code]).parent)}')

    def __str__(self):
        info = ''
        for i, vi in enumerate(self.data):
            info += f'{i} > {vi}\n'
        return info
    

class Camera(Player_Core):
    """Player backed by a live camera (HIK industrial camera or a plain cv2 device)."""
    def __init__(self, camera=None, net=None, show=True) -> None:
        """
        Args:
            camera: None (try HK, fall back to cv2 device 0), an int
                (cv2 device index), or the string 'HK'.
            net: optional detection net forwarded to Player_Core.
            show (bool): start displaying immediately when a camera opened.
        """
        super().__init__(net)
        self.exposure_time = DEFAULT_EXPOSURE_TIME
        self.gain = DEFAULT_GAIN
        self.player_sign = Player_sign.Camera
        self.camera = None
        self.camera_type = Camera_type.Null
        self.saving_code = 0
        if camera is None:
            # Prefer the HIK camera; fall back to the default cv2 device.
            try:
                self.camera = HKCamera()
                assert self.camera.state != Camera_state.Null, 'find no device of HK camera!'
                self.camera_type = Camera_type.HKCamera
            except Exception:
                try:
                    self.camera = cv2.VideoCapture(0)
                    self.camera_type = Camera_type.camera
                except Exception:
                    log('Can\'t load the camera', Log_sign.Error)
                    self.camera_type = Camera_type.Null
        elif isinstance(camera, int):
            try:
                self.camera = cv2.VideoCapture(camera)
                # Success → plain cv2 camera; failure → Null. (The original
                # code had these two assignments swapped.)
                self.camera_type = Camera_type.camera
            except Exception:
                log('Can\'t open the camera', Log_sign.Error)
                self.camera_type = Camera_type.Null
        elif camera == 'HK':
            try:
                self.camera = HKCamera()
                assert self.camera.state != Camera_state.Null, 'find no device of HK camera!'
                self.camera_type = Camera_type.HKCamera
            except Exception:
                log('Can\'t load the camera', Log_sign.Error)
                self.camera_type = Camera_type.Null
        if self.camera_type != Camera_type.Null:
            if self.camera_type == Camera_type.HKCamera:
                self.camera.set(DEFAULT_EXPOSURE_TIME, DEFAULT_GAIN)
            log(f'using camera {self.camera_type.name}', Log_sign.Info)
            if show:
                self.show()

    def __del__(self):
        self.camera = None

    def get_img(self, frame):
        """Grab the next frame (or replay the cached one when paused/looping).

        Returns:
            (image, frame, source name) where the name is 'HK' or 'camera'.
        """
        if (frame == 0 or self.playing in {Playing_mode.Loop, Playing_mode.Stop}) and self.last_image is not None:
            return self.last_image.copy(), frame, 'HK' if self.camera_type == Camera_type.HKCamera else 'camera'
        assert self.camera is not None, log('the camera has\'t load', Log_sign.Error)
        if self.camera_type == Camera_type.HKCamera:
            img = self.camera.get_image()
            if img is None:
                log('can\'t get any image from camera which may be disconnected', Log_sign.Error)
            else:
                self.last_image = img.copy()
            return img, frame, 'HK'
        else:
            self.last_image = self.camera.read()[1]
            return self.last_image.copy(), frame, 'camera'

    def local_key(self, key, frame):
        """Adjust exposure/gain from key presses; returns 1 for unhandled keys."""
        if key == 91:  # [ : decrease exposure (floor 100)
            self.exposure_time = max(100, self.exposure_time - 100)
            self.camera.set(exposure_time=self.exposure_time)
        elif key == 93: # ] : increase exposure
            self.exposure_time += 100
            self.camera.set(exposure_time=self.exposure_time)
        elif key == 59: # ; : decrease gain (floor 1)
            self.gain = max(1, self.gain - 1)
            self.camera.set(gain=self.gain)
        elif key == 39: # ' : increase gain
            self.gain += 1
            self.camera.set(gain=self.gain)
        else:
            return 1
        return 0

    def stop(self):
        """Stop the grabbing loop of an HK camera (no-op otherwise)."""
        if self.camera_type == Camera_type.HKCamera:
            self.camera.stop()

    def release(self):
        """Drop the camera handle."""
        self.camera = None



class Net:
    """Wrapper that loads a YOLOv5 or YOLOv8 model and runs it on datasets."""

    def __init__(self, path='best.pt', label_format=Label_Format.Auto, net_format=Net_Format.Auto, conf=0.5, iou=0.4):
        """Load a model.

        Args:
            path: weight file, run directory, another Net (cloned), or None
            label_format: label format of the model's training data
            net_format: Yolov5 / Yolov8 / Auto (probe which one is importable)
            conf: detection confidence threshold
            iou: NMS IoU threshold
        """
        self.net = None
        self.conf = conf
        self.iou = iou
        self.name = None
        self.label_format = label_format
        
        if path is None:
            return
        elif isinstance(path, Net):
            # copy-construct from an existing Net (shares the model object)
            self.clone_from(path)
            return
        if not Path(path).exists():
            # auto path
            path = auto_path(path)
            # net_format = auto_format(path, net_format)
            self.label_format = auto_format(path, label_format)
            # look for weights in the usual training-output locations
            if Path(path + '/best.pt').exists():
                path += '/best.pt'
            elif Path(path + '/weights/best.pt').exists():
                path += '/weights/best.pt'
            else:
                # otherwise pick the first .pt file found anywhere below path
                pts = get_files(path, suf='.pt')
                if len(pts):
                    path = pts[0]
                else:
                    log(f'Can\'t find model in {path}!', Log_sign.Warning)
                    return
            log(f'auto update model path to {path} ...', Log_sign.Info)
        self.name = get_name(path)[0]
        if net_format == Net_Format.Auto:
            # probe which YOLO implementation is importable (v8 preferred)
            try:
                from ultralytics import YOLO
                net_format = Net_Format.Yolov8
            except:
                try:
                    sys.path.append(YOLOV5_PATH)
                    from yolov5_detector import yolov5
                    net_format = Net_Format.Yolov5
                except:
                    net_format = Net_Format.Null

        if net_format == Net_Format.Yolov8:
            try:
                from ultralytics import YOLO
                print('\nLoading net model in yolov8...')
                self.net = YOLO(path)
            except:
                # loading failed: fall through with no model
                net_format = Net_Format.Null
        elif net_format == Net_Format.Yolov5:
            try:
                sys.path.append(YOLOV5_PATH)
                from yolov5_detector import yolov5
                print('\nLoading net model in yolov5...')
                self.net = yolov5(path)
            except:
                net_format = Net_Format.Null

        self.net_format = net_format
        if net_format == Net_Format.Null:
            log('Can\'t load model in Null format!', Log_sign.Warning)

    def predict(self, data):
        """Run detection and append the predictions as labels.

        Args:
            data: a dataset, or a raw image (wrapped into a dataset)

        return:
            the new dataset when *data* was a raw image; otherwise None
            (labels are added to *data* in place)
        """
        is_img = not isinstance(data, dataset)
        if is_img:
            data = dataset(data)
        if self.net is None:
            log('Can\'t predict without model!', Log_sign.Error)
            return
        if data.source is not None:
            img = data.source.copy()
        else:
            img = cv2.imread(str(data.img_path))
        if self.net_format == Net_Format.Yolov8:
            pred = self.net(source=img, conf=self.conf, iou=self.iou, show=False, verbose=False)
            if len(pred):
                # nk = number of keypoints declared by the model yaml
                nk = self.net.model.yaml['nk']
                ps = pred[0].pred.preds
                # normalize pixel keypoints to [0,1] by image (w, h)
                norm_kps = ps[:, :nk * 2] / torch.tensor([img.shape[1], img.shape[0]], device=ps.device).repeat(nk)
                preds = torch.concat([norm_kps, ps[:, nk * 2:]], dim=-1).tolist()
                for p in preds:
                    # last element is the confidence
                    data.add_label(p[:-1], conf=p[-1], label_format=self.label_format)
        elif self.net_format == Net_Format.Yolov5:
            pred = self.net(source=img, conf=self.conf, iou=self.iou)
            if len(pred):
                for p in pred:
                    data.add_label(p[:-1].tolist(), label_format=self.label_format, conf=p[-1])
        else:
            # NOTE(review): log() returns a str; raising it will itself raise
            # TypeError rather than a proper exception — confirm intent
            raise log('Unknown model!', Log_sign.Error)
        data.current_format = self.label_format
        if is_img:
            return data
        
    def export(self):
        """Export the loaded model to OpenVINO and ONNX (YOLOv8 only)."""
        if self.net is None:
            log('Can\'t export without model!', Log_sign.Error)
            return
        if self.net_format == Net_Format.Yolov8:
            self.net.export(format="openvino")
            self.net.export(format="onnx")
        else:
            # NOTE(review): same raise-on-str pattern as predict() — confirm intent
            raise log('Unknown model!', Log_sign.Error)
        
    def val(self):
        """Run the model's own validation routine."""
        if self.net is None:
            log('Can\'t val without model!', Log_sign.Error)
            return
        self.net.val()

    def clone_from(self, source):
        """Shallow-copy another Net's model and settings (model object is shared)."""
        self.net_format = source.net_format
        self.net = source.net
        self.conf = source.conf
        self.iou = source.iou
        self.name = source.name
        self.label_format = source.label_format
        
        
class HDT():
    """Helper for .hdt sidecar files storing a target brightness (TV) and its source."""

    def __init__(self, path=None, TV=-1, source=None) -> None:
        """Load from *path* when given, otherwise initialise from TV/source."""
        if path is None:
            self.TV = TV
            self.source = source
        else:
            self.load(path)
    
    def load(self, path):
        """Read TV and source from *path*.

        The suffix is forced to '.hdt', with a fallback to
        ../hdt/<stem>.hdt.  When no file is found the defaults TV=-1,
        source=None are kept (the original left the attributes unset,
        causing AttributeError on later access).
        """
        # defaults so an absent file still yields a usable object
        self.TV = -1
        self.source = None
        if isinstance(path, str):
            path = Path(path)
        if path.suffix != '.hdt':
            path = path.with_suffix('.hdt')
            if not path.exists():
                path = path.parent.parent.joinpath(f'hdt/{path.stem}.hdt')
        if path.exists():
            with open(str(path), 'r') as f:
                lines = f.readlines()
                self.TV = float(lines[0])
                # tolerate a trailing newline after the source line
                self.source = lines[1].rstrip('\n')
                if self.source == 'None':
                    self.source = None
                    
    def output(self, path):
        """Write TV and source to *path* (suffix forced to '.hdt').

        Skipped with a warning when TV was never calculated (TV == -1).
        """
        if self.TV == -1:
            log('skip the HDT while outputing, cause the TV has not been calculate', Log_sign.Warning)
            return
        if isinstance(path, str):
            path = Path(path)
        if path.suffix != '.hdt':
            path = path.with_suffix('.hdt')
        with open(str(path), 'w') as f:
            f.write(f'{self.TV}\n{self.source}')


def get_files(path, suf=None, deep=True, first=True):
    """
    Collect file paths under a directory.

    Args:
        path: directory (or single file) path
        suf: allowed suffix container (e.g. {'.pt'}); None accepts all
        deep: recurse into subdirectories
        first: internal flag marking the top-level call

    Returns:
        files: list of matching file paths
    """
    files = []
    if os.path.isdir(path):
        # the top level is always scanned; subdirectories only when deep=True
        if deep or first:
            for f in glob.glob(path + '/*'):
                # fix: propagate `deep` — the original always recursed with
                # the default deep=True, so deep=False was ignored
                files += get_files(f, suf, deep, first=False)
    elif os.path.isfile(path) and (suf is None or Path(path).suffix in suf):
        files = [path]
    return files

def get_class23(cls, tsize, label_format=Label_Format.Standard):
    """
    Map (class, target size) to the combined Class23 id.

    Args:
        cls: class index (int or tensor)
        tsize: target-size flag (0 small, 1 big)
        label_format: label format; wind/engineer formats pass through

    return:
        Class23 id as a long tensor
    """
    # these formats do not encode target size in the class id
    if label_format in {Label_Format.Wind, Label_Format.Engineer, Label_Format.Anti_wind}:
        return cls
    if not isinstance(cls, torch.Tensor):
        cls = torch.Tensor([cls])
    # size-dependent offset applies only to classes flagged in CLASS23_TRANS
    offset = tsize * (6 + torch.div(6 - cls, 7, rounding_mode='floor') * 5) * CLASS23_TRANS[cls.long()]
    return (cls + offset).long()

def class23_to_tsize(class23):
    """
    Recover the target-size flag from a Class23 id.

    Args:
        class23: Class23 id

    return:
        True for big targets (id 1 or id > 7), False otherwise
    """
    if class23 == 1:
        return True
    return class23 > 7

def class23_to_cls(class23):
    """
    Recover the base class index from a Class23 id.

    Args:
        class23: Class23 id

    return:
        cls: base class index
    """
    if class23 == 8:
        return 7
    if class23 < 8:
        return class23
    return class23 - 6

def get_file_code(path, suffix='.jpg'):
    """
    Find the highest numeric file stem under a directory (recursively).

    Args:
        path: directory path
        suffix: file suffix to consider

    return:
        file_code: the largest integer stem found, 0 when none
    """
    file_code = 0
    for file in glob.glob(path + '/*'):
        if os.path.isdir(file):
            # fix: take the max with the recursive result (the original
            # overwrote file_code) and propagate the suffix (it was dropped)
            file_code = max(file_code, get_file_code(file, suffix))
        else:
            file = Path(file)
            if file.suffix == suffix:
                try:
                    file_code = max(int(file.stem), file_code)
                except ValueError:
                    # non-numeric stem: ignore
                    pass
    return file_code

def val_dataset_structure(dataset_path):
    """
    Check that a dataset directory has the complete train/val layout.

    Args:
        dataset_path: dataset root directory

    return:
        True when the root, train/val and their images/labels folders all exist
    """
    required = ('', '/val', '/val/images', '/val/labels',
                '/train', '/train/images', '/train/labels')
    return all(os.path.exists(dataset_path + sub) for sub in required)
    
# def draw_result(img, data, color, show_V=False, scale_rate=1):
#     if isinstance(data, dataset) and len(data.cls) == 0 or isinstance(data, str):
#         return img
#     kp = (data.keyPoints * torch.tensor([img.shape[1], img.shape[0]], device=data.keyPoints.device).repeat(data.keyPoints.shape[1] // 2) * scale_rate).round().int().tolist()
#     for i in range(len(data.cls)):
#         nk = len(kp[i]) // 2
#         for kpi in range(nk):
#             cv2.circle(img, (kp[i][kpi * 2], kp[i][kpi * 2 + 1]), 3, color, -1)
#             cv2.line(img, (kp[i][kpi * 2], kp[i][kpi * 2 + 1]), (kp[i][((kpi + 1) % nk) * 2], kp[i][((kpi + 1) % nk) * 2 + 1]), color, 1)
#         cv2.putText(img, f'{color_name[int(data.color[i])]}-[{tsize_name[int(data.tsize[i])]}]{class_name[int(data.cls[i])]}{f"<{data.conf[i]:.2f}>" if len(data.conf) else ""}{f"({Cal_V(img, cal_mode=Cal_V_mode.default, kps=data)})" if show_V else ""}', (kp[i][0], kp[i][1]), FONT_FACE, 0.5, (255, 255, 255))
#     return img

def get_window_code():
    """
    Allocate the next display-window id.

    return:
        window_code: the incremented module-level counter
    """
    global window_code
    window_code = window_code + 1
    return window_code

def mkdir(path: str, check=True, append=False):
    """
    Create a directory.

    Args:
        path: directory path
        check: when True, silently skip creation if the path already exists
        append: when True, always create a fresh directory with a unique name
                (overrides check)
    """
    if append:
        # always create a brand-new directory with a unique suffix
        os.mkdir(get_unique_path(path))
        return
    if check:
        if not os.path.exists(path):
            os.mkdir(path)
        return
    # no check: an existing directory must be confirmed for deletion first
    if os.path.exists(path):
        assert log(f'The path\n{path}\nis exists,do you want to delete it?(Please enter y to delete it)', Log_sign.Require, False) == 'y', 'Please check the path'
        shutil.rmtree(path)
    os.mkdir(path)
                
def auto_path(path, within=False, chose=True):
    """
    Resolve a file path, searching a list of well-known project directories.

    :param path: file path; a leading '*' switches to regex search mode
    :type path: Union[str, Path]
    :param within: search by regex inside the known directories
    :type within: bool
    :param chose: when several files match, ask the user to pick one
    :type chose: bool
    :return: the resolved path (returned unchanged when nothing is found)
    :rtype: str
    """
    if isinstance(path, Path):
        path = str(path)
    # non-strings and already-existing paths pass through untouched
    if not isinstance(path, str) or os.path.exists(path):
        return path
    if path[0] == '*':
        within = True
        path = path[1:]
    pd = [
        'Datasets/prompt/armor/',
        'Datasets/prompt/wind/',
        'Datasets/prompt/',
        '/home/ubuntu/datasets/',
        'Datasets/',
        'Datasets/Raws/',
        'Videos/',
        'Player/',
        'RM-test/',
        'initial_weight/',
        '/home/hh/recording/'
    ] + LOACL_FASTPATH
    if within:
        finds = []
        for p in pd:
            len_p = len(p)
            for f in glob.glob(p + '*'):
                try:
                    if re.match(path, f[len_p:]) is not None:
                        if chose:
                            finds.append(f)
                        else:
                            log(f'auto update path {path} to {f}', Log_sign.Info)
                            return f
                except:
                    # invalid regex: stop scanning this directory
                    break
        if len(finds) == 1:
            # fix: return the single match — the original returned the stale
            # loop variable `f`, i.e. whatever file the last glob ended on
            log(f'auto update path {path} to {finds[0]}', Log_sign.Info)
            return finds[0]
        elif len(finds) > 1:
            log('These files are found:\n')
            for i, f in enumerate(finds):
                print(f'{i} > {f}')
            try:
                answer = int(log('choice:', Log_sign.Require))
                if answer >= 0 and answer < len(finds):
                    log(f'auto update path {path} to {finds[answer]}', Log_sign.Info)
                    return finds[answer]
            except:
                # non-numeric / out-of-range answer: fall through unresolved
                pass
    else:
        for p in pd:
            if os.path.exists(p + path):
                log(f'auto update path {path} to {p + path}', Log_sign.Info)
                return p + path
    return path

def set_font(txt, color=Font.Null):
    """
    Wrap text in ANSI colour escape codes.

    Args:
        txt: text to colour
        color: a Font value, or a list of Font values applied together

    return:
        the text wrapped in escape codes (unchanged for Font.Null)
    """
    if color == Font.Null:
        return txt
    if isinstance(color, Font):
        return f'\033[{color.value}m{txt}\033[0m'
    if isinstance(color, list):
        # prepend one escape code per Font item, then a single reset
        for item in color:
            if isinstance(item, Font):
                txt = f'\033[{item.value}m' + txt
        return txt + '\033[0m'
    return txt

def log(txt, sign=Log_sign.Null, show=True, master=''):
    """
    Print and return a colourised log line.

    Args:
        txt: message text
        sign: log level (Warning/Error/Info/Require/Debug)
        show: whether to print the message
        master: optional source tag, prepended as '[master]'

    return:
        the formatted message; for Log_sign.Require, the user's input()
        response (callers compare the return value against expected answers)
    """
    if len(master):
        master = f'[{master}]'
    if sign == Log_sign.Warning:
        txt = set_font(f'{sign.name}:' + txt, [Font.FYellow, Font.Highlight])
    elif sign == Log_sign.Error:
        txt = set_font(f'{sign.name}:' + txt, [Font.FRed, Font.Highlight])
    elif sign == Log_sign.Info:
        txt = set_font(f'{sign.name}:' + txt, Font.FBlue)
    elif sign == Log_sign.Require:
        # fix: Require is interactive — prompt and return what the user typed.
        # The original returned the prompt string itself, so confirmations
        # such as mkdir()'s "enter y" check and the int(log('choice:', ...))
        # selection in auto_path() could never succeed.
        return input('\n' + set_font(f'{master}{sign.name}:' + txt, Font.FGreen) + '\n')
    elif sign == Log_sign.Debug:
        txt = set_font(f'{sign.name}:' + txt, Font.FPurple)
    if show:
        print('\n' + master + txt)
    return '\n' + master + txt

def plot(data, index=None, title=None, xlabel=None, ylabel=None, legend=False, using_plt=True, img_hight=500, hist_wide=3, tags=None, show_txt=False, index_mul=1, index_b=0):
    """
    Plot data either with matplotlib (line plot) or as a cv2 histogram image.

    Args:
        data: values to plot (list / list of lists for plt; torch tensor for cv2)
        index: x coordinates (default: 0..len(data)-1 scaled by index_mul + index_b)
        title: plot title / cv2 window name
        xlabel: x axis label (plt only)
        ylabel: y axis label (plt only)
        legend: show a legend (plt only)
        using_plt: True -> matplotlib, False -> cv2 histogram image
        img_hight: histogram image height in pixels
        hist_wide: width of each histogram bar in pixels
        tags: bar indices highlighted in red (cv2 only)
        show_txt: draw each bar's value (cv2 only, needs hist_wide >= 8)
        index_mul: x multiplier
        index_b: x offset
    """
    if index is None:  # fix: identity check instead of '== None'
        index = [i * index_mul + index_b for i in range(len(data))]
    if len(data) != 0:
        if using_plt:
            from matplotlib import pyplot as plt
            if title is not None:
                plt.title(title)
            if xlabel is not None:
                plt.xlabel(xlabel)
            if ylabel is not None:
                plt.ylabel(ylabel)
            if not isinstance(data[0], list):
                data = [data]
            for d in data:
                plt.plot(index, d)
            if legend:
                plt.legend()
            plt.show()
        else:
            paper = np.zeros((img_hight, len(index) * hist_wide, 3), dtype=np.uint8)
            # normalize bar heights to the image height (expects a torch tensor)
            data = (data / data.max() * (img_hight - 10)).int().tolist()
            if tags is not None:
                for tag in tags:
                    tag = int(tag)
                    cv2.rectangle(paper, (tag * hist_wide, 0), ((tag + 1) * hist_wide - 1, img_hight - data[tag]), (0, 0, 255), -1)
            # fix: logical 'and' — the original's bitwise '&' gives the wrong
            # result for truthy non-bool values of show_txt (e.g. 2 & True == 0)
            if show_txt and hist_wide < 8:
                log(f'the {hist_wide} hist wide is too small, please set it up to 8', Log_sign.Warning)
                show_txt = False
            for i, da in enumerate(data):
                if da > 0:
                    cv2.rectangle(paper, (index[i] * hist_wide, img_hight - da), ((index[i] + 1) * hist_wide - 1, img_hight), (255, 255, 255), -1)
                    if show_txt:
                        cv2.putText(paper, f'{da}', (index[i] * hist_wide, img_hight - da), FONT_FACE, 0.5, (255, 255, 255))
            cv2.imshow(f'{f"[{get_window_code()}]plot" if title is None else title}', paper)
            

def str2cls(s, cls23=True, merge=True):
    """
    Parse a 3-character class string (color, size, class); '*' is a wildcard.

    Args:
        s: string like 'BS1'; '*' in any position expands to all options
        cls23: convert to Class23 format
        merge: merge colour and Class23 into a single id list (implies cls23)

    return:
        merged id list, (color, cls23) pair, or (color, tsize, cls) triple
    """
    if merge:
        cls23 = True
    color = [0, 1, 2, 3] if s[0] == '*' else [color_name.index(s[0])]
    tsize = [0, 1] if s[1] == '*' else [tsize_name.index(s[1])]
    cls = list(range(8)) if s[2] == '*' else [class_name.index(s[2])]
    if not cls23:
        return color, tsize, cls
    codes = ct2cls23(cls, tsize)
    if not merge:
        return color, codes
    # fold colour into the id: 12 Class23 slots per colour
    return [co * 12 + cl for co in color for cl in codes]

def ct2cls23(cls, tsize):
    """
    Expand class and size lists into the list of Class23 ids.

    Args:
        cls: list of class indices
        tsize: list of target-size flags

    return:
        list of unique Class23 ids, in first-seen order
    """
    codes = []
    for size_flag in tsize:
        for class_id in cls:
            code = get_class23(class_id, size_flag)
            if code not in codes:
                codes.append(code)
    return codes

def kpts2inside(ptrs, wide_scale=6, height_scale=3):
    """
    Shrink keypoint quadrilaterals toward their interior (in place).

    Args:
        ptrs: corner-point tensor; presumably (N, 4, 2) with corners ordered
              top-left, bottom-left, bottom-right, top-right — TODO confirm.
              (N, 5, ...) wind targets have the middle point dropped and get
              only a tiny inset.
        wide_scale: fraction of the top/bottom edge used as horizontal inset
        height_scale: fraction of the left/right edge used as vertical inset

    return:
        ptrs: the same tensor, corners moved inward
    """
    if ptrs.shape[1] == 5:  # wind target: drop the centre point, minimal inset
        wide_scale = 100
        height_scale = 100
        ptrs = torch.concat([ptrs[:, :3], ptrs[:, 4:]], dim=1)
    top_edge = ((ptrs[:, 3] - ptrs[:, 0]) / wide_scale).unsqueeze(1)
    bottom_edge = ((ptrs[:, 2] - ptrs[:, 1]) / wide_scale).unsqueeze(1)
    left_edge = ((ptrs[:, 1] - ptrs[:, 0]) / height_scale).unsqueeze(1)
    right_edge = ((ptrs[:, 2] - ptrs[:, 3]) / height_scale).unsqueeze(1)
    # vertical inset, then horizontal inset (same order as the original)
    ptrs += torch.cat([-left_edge, left_edge, right_edge, -right_edge], dim=1)
    ptrs += torch.cat([top_edge, bottom_edge, -bottom_edge, -top_edge], dim=1)
    return ptrs

def get_V(img, show_img=True):
    """
    Estimate the brightness value (V) of a single-channel image.

    Splits the pixel intensities into two k-means clusters and returns the
    brighter cluster centre.

    Args:
        img: single-channel image (e.g. the V plane of an HSV image)
        show_img: display the histogram plot and the analysed image

    return:
        V: the brighter cluster centre
    """
    # histogram is only needed for the optional diagnostic plot
    hist = cv2.calcHist([img], [0], None, [256], [0, 256]).squeeze()

    # two-cluster k-means over raw pixel intensities
    pixels = np.float32(img.reshape(img.shape[0] * img.shape[1]))
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 50, 0.05)
    _, _, centers = cv2.kmeans(pixels, 2, None, criteria, 3, cv2.KMEANS_PP_CENTERS)
    centers = centers.squeeze().tolist()

    if show_img:
        plot(torch.tensor(hist), None, 'V', 'value', 'count', using_plt=False, tags=centers)
        cv2.imshow('V_split_img', img)
    return max(centers)

def cal_list_range_mean(data, point, prange, cflag):
    """
    计算数据列表 data 中，以 point 为中心，长度为 prange 的子列表的平均值。
    
    Args:
        data:数字列表。
        point:子列表的中心。
        prange:子列表的长度。
        cflag:指示中心点是否改变的标志位。
    
    return:
        newp:新的中心点。
        sums:子列表的和。
        cflag:指示中心点是否改变的标志位。
    """
    #计算子列表的左端点下标，确保下标不会小于 0。
    drl = max(0, round(point - prange))
    #计算子列表的右端点下标，确保下标不会大于 data 的长度。
    drr = min(len(data), round(point + prange + 1))
    #计算子列表的和。
    sums = data[drl:drr].sum()
    #计算子列表的平均值。
    newp = (data[drl:drr] * np.arange(drl, drr)).sum() / (EPS if sums == 0 else sums)
    return newp, sums, cflag or (point != newp)

def Cal_V(img, nets=None, manual_rectangle=None, cal_mode=Cal_V_mode.net, show_img=False, kps=None, show_log=False):
    """
    Calculate the brightness value (V) of an image.

    Tries, in order: the dataset's keypoint-based estimate, a net
    prediction, then a manually selected rectangle.

    Args:
        img: image or dataset
        nets: net, or list of nets (first entry used), for target detection
        manual_rectangle: [cx, cy, half_w, half_h] manual region
        cal_mode: calculation mode
        show_img: display diagnostic windows
        kps: keypoints to attach to the image before estimating
        show_log: warn when no estimation source is available

    return:
        V value, or 0 when it cannot be calculated
    """
    if isinstance(nets, list):
        nets = nets[0] if len(nets) else None
    if kps is not None and kps.shape[0]:
        # wrap the raw image so the keypoint-based estimator can be used
        img = dataset(img)
        img.keyPoints = kps
    if isinstance(img, dataset):
        if cal_mode == Cal_V_mode.default:
            return img.cal_V(nets=nets, manual_rectangle=manual_rectangle, cal_mode=cal_mode, show_img=show_img)
        else:
            img = img.get_image()
    if nets is not None and cal_mode != Cal_V_mode.manual:
        tda = dataset(img)
        nets.predict(tda)
        if len(tda.keyPoints):
            return tda.cal_V(cal_mode=Cal_V_mode.default, show_img=show_img)
        
    if manual_rectangle is not None:
        # crop the manually selected region (centre +- half size, clamped to the image)
        rimg = img[max(0, manual_rectangle[1] - manual_rectangle[3]) : min(img.shape[0], manual_rectangle[1] + manual_rectangle[3]), max(0, manual_rectangle[0] - manual_rectangle[2]) : min(img.shape[1], manual_rectangle[0] + manual_rectangle[2])]
        return get_V(cv2.split(cv2.cvtColor(rimg, cv2.COLOR_BGR2HSV))[2], show_img=show_img)
    
    if show_log:
        # fix: message previously read "can' calculate"
        log('can\'t calculate V, cause there is no net loaded or position given.', Log_sign.Warning)
    return 0

def get_polygons_area(polygons):
    """
    Compute polygon areas with the shoelace (fan-triangulation) formula.

    Args:
        polygons: (N, 2*k) tensor of flattened vertex coordinates

    return:
        (N,) tensor of polygon areas
    """
    # reshape to (N, k, 2): one row of k vertices per polygon
    polygons = polygons.reshape(polygons.shape[0], -1, 2)
    # offsets of every vertex from each polygon's OWN first vertex.
    # fix: the original used unsqueeze(0).repeat, which lined the first
    # vertices up along the wrong axis and broke for N > 1 polygons;
    # broadcasting (N, 1, 2) is both correct and simpler.
    detaps = polygons - polygons[:, :1]
    # fan triangulation from vertex 0: sum the cross products of
    # consecutive offset vectors
    results = (detaps[:, 1:-1, 0] * detaps[:, 2:, 1] - detaps[:, 1:-1, 1] * detaps[:, 2:, 0]).sum(-1)
    return results.abs() / 2

def get_key_value():
    """
    Show a blank window named 'key_value' and echo the code of every key
    pressed to stdout; ESC (27) is printed too and then closes the window.
    """
    cv2.imshow('key_value', np.zeros((640, 640)))
    while True:
        key = cv2.waitKey(0)
        print(key)
        if key == 27:
            break
    cv2.destroyWindow('key_value')

def data_enhancement_core(image, config, nets=None, manual_rectangle=None, cal_V_mode=Cal_V_mode.default, kps=None):
    """
    Apply the configured data augmentations to an image.

    Args:
        image: BGR image
        config: Data_enhancement_configure with the requested changes
        nets: detection net(s) used for target-V estimation; a list, single
              net, or None (default changed from a mutable [] — Cal_V treats
              both [] and None identically)
        manual_rectangle: manually selected region for V estimation
        cal_V_mode: V calculation mode
        kps: normalized keypoints used for the down_cover occlusion

    return:
        image: augmented image (the caller's array is never modified)
    """
    default_config = Data_enhancement_configure()
    if config != default_config:
        image = image.copy()

        # target-V: derive a brightness gain so the image reaches config.TVA
        config_V = 0
        if config.TVA is not None:
            if config.V != default_config.V:
                log('the V value is not default, which will be ignore', Log_sign.Warning)
            tv = max(Cal_V(image, nets, manual_rectangle, cal_V_mode, kps=kps), 1e-9)
            config_V = config.TVA / tv
        if config_V == 0:
            config_V = config.V

        # hsv adjustment via lookup tables
        if config.H != default_config.H or config.S != default_config.S or config_V != default_config.V:
            hue, sat, val = cv2.split(cv2.cvtColor(image, cv2.COLOR_BGR2HSV))
            x = np.arange(0, 256)
            lut_hue = np.clip(x + config.H, 0, 255).astype(image.dtype)
            lut_sat = np.clip(x * config.S, 0, 255).astype(image.dtype)
            lut_val = np.clip(x * config_V, 0, 255).astype(image.dtype)
            im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))
            cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=image)
        
        # down_cover: paint a black quadrilateral over the lower part of each target
        if config.down_cover != default_config.down_cover and kps is not None and kps.shape[-1] == 8:
            EXPAND_RATE_HEIGHT = 0.45
            EXPAND_RATE_WIDTH = 0.2
            height_rate = config.down_cover
            kps = kps.reshape(kps.shape[0], -1, 2)
            up_deta = (kps[:, 3] - kps[:, 0]).unsqueeze(1)
            down_deta = (kps[:, 2] - kps[:, 1]).unsqueeze(1)
            left_detas = (kps[:, 1] - kps[:, 0]).unsqueeze(1)
            right_detas = (kps[:, 2] - kps[:, 3]).unsqueeze(1)
            cover = torch.cat([kps[:, 1], kps[:, 1], kps[:, 2], kps[:, 2]], dim=1).reshape(kps.shape[0], -1, 2)
            cover += torch.cat([-left_detas * height_rate, left_detas * EXPAND_RATE_HEIGHT, right_detas * EXPAND_RATE_HEIGHT, -right_detas * height_rate], dim=1)
            cover += torch.cat([-up_deta, -down_deta, down_deta, up_deta], dim=1) * EXPAND_RATE_WIDTH
            # de-normalize to pixel coordinates
            cover *= torch.tensor([image.shape[1], image.shape[0]], device=kps.device)
            for c in cover:
                cv2.fillPoly(image, [np.array(c.reshape(-1, 2).int())], (0, 0, 0))
        
        # scale
        if config.scale != default_config.scale:
            config.scale = max(config.scale, 0.1)
            # fix: cv2.resize needs an integer (width, height) pair — the
            # original passed image.shape[:2] * scale, which is invalid
            # (tuple * float) and would have the axes swapped anyway
            new_size = (max(1, int(round(image.shape[1] * config.scale))),
                        max(1, int(round(image.shape[0] * config.scale))))
            image = cv2.resize(image, new_size)

    return image

def auto_format(path, label_format):
    """
    Infer the dataset's label format from its name prefix.

    Args:
        path: dataset path (only the stem is inspected)
        label_format: current format; only Label_Format.Auto is replaced

    return:
        label_format: the inferred (or unchanged) format
    """
    stem = Path(path).stem
    if label_format != Label_Format.Auto:
        return label_format
    # prefix convention: N/D standard, W wind (optionally v5), E engineer, AW anti-wind
    if stem[:1] in {'N', 'D'}:
        log(f'auto update label format to {Label_Format.Standard.name}', Log_sign.Info)
        return Label_Format.Standard
    if stem[:1] == 'W':
        if 'v5' in stem:
            log(f'auto update label format to {Label_Format.Wind_v5.name}', Log_sign.Info)
            return Label_Format.Wind_v5
        log(f'auto update label format to {Label_Format.Wind.name}', Log_sign.Info)
        return Label_Format.Wind
    if stem[:1] == 'E':
        log(f'auto update label format to {Label_Format.Engineer.name}', Log_sign.Info)
        return Label_Format.Engineer
    if stem[:2] == 'AW':
        log(f'auto update label format to {Label_Format.Anti_wind.name}', Log_sign.Info)
        return Label_Format.Anti_wind
    return label_format

def calculate_distance(label, pred, img_size, info=True):
    """
    Mean pixel distance between two keypoint sets.

    Args:
        label (torch.Tensor | dataset): ground-truth normalized keypoints
        pred (torch.Tensor | dataset): predicted normalized keypoints
        img_size (tuple): image size used to de-normalize the coordinates
        info (bool, optional): warn when exactly one set is empty. Defaults to True.

    return:
        0 when both sets are empty, -1 when exactly one is empty, otherwise
        the mean per-keypoint distance (in pixels) from each prediction to
        its nearest label.
    """
    if isinstance(label, dataset):
        label = label.keyPoints
    if isinstance(pred, dataset):
        pred = pred.keyPoints
    if len(pred.shape) == 1:
        # single prediction vector -> (1, 2k); note: mutates the caller's tensor in place
        pred.unsqueeze_(0)
    if label.shape[0] + pred.shape[0] == 0:
        return 0
    elif label.shape[0] == 0 or pred.shape[0] == 0:
        if info:
            log('one of the length of data is 0 while calculating distance', Log_sign.Warning)
        return -1
    # (P,1,2k) - (1,L,2k): pairwise coordinate deltas, scaled to pixels by the
    # repeated (img_size[1], img_size[0]) factors; squared, then amin over the
    # label axis picks the closest value per coordinate, reshaped to (P, k, 2)
    # for the per-keypoint Euclidean norm before the final mean.
    # NOTE(review): indexing img_size[1], img_size[0] assumes (height, width) — confirm.
    return ((pred.unsqueeze(1) - label.unsqueeze(0)) * torch.tensor([img_size[1], img_size[0]], device=pred.device).repeat(pred.shape[-1] // 2)).pow(2).amin(-2).reshape(pred.shape[0], -1, 2).sum(-1).pow(0.5).mean()

def set_data_enhancement(data_enhancement_args, choice, data):
    """
    Set one data-enhancement parameter selected by *choice*.

    Args:
        data_enhancement_args: data-enhancement parameter object
        choice: Data_enhancement_sign selecting the attribute to change
        data: new value

    return:
        data_enhancement_args: the (possibly updated) parameter object
    """
    # sign -> attribute name on the parameter object
    attr_by_sign = {
        Data_enhancement_sign.H: 'H',
        Data_enhancement_sign.S: 'S',
        Data_enhancement_sign.V: 'V',
        Data_enhancement_sign.TVA: 'TVA',
        Data_enhancement_sign.scale: 'scale',
        Data_enhancement_sign.rotate: 'rotate',
        Data_enhancement_sign.down_cover: 'down_cover',
        Data_enhancement_sign.random_geometry: 'random_geometry',
    }
    attr = attr_by_sign.get(choice)
    if attr is not None:
        setattr(data_enhancement_args, attr, data)
    return data_enhancement_args

def mouse_callback(event, x, y, flags, param):
    """
    cv2 mouse callback: on left click, store the click position in the
    first two elements of the module-level manual_rectangle (only while
    vision_enhancement is active).

    Args:
        event: mouse event code
        x: mouse x coordinate
        y: mouse y coordinate
        flags: mouse event flags (unused)
        param: user data (unused)
    """
    if event != cv2.EVENT_LBUTTONDOWN:
        return
    global vision_enhancement, manual_rectangle
    if vision_enhancement and manual_rectangle is not None:
        manual_rectangle[0] = x
        manual_rectangle[1] = y

def print_None(s, rounds=None):
    """
    Format a possibly-None value for display.

    Args:
        s: value to display
        rounds: number of decimal places to round to (None = no rounding)

    return:
        'None' when s is None, otherwise s (rounded when rounds is given)
    """
    if s is None:
        return 'None'
    # fix: 'is not None' so rounds=0 (round to integer) is honoured — the
    # original's truthiness test silently skipped rounding for 0
    if rounds is not None:
        return round(s, int(rounds))
    return s

def cal_image_quality_loss(raw_img, img):
    """
    Score how far an image has drifted from its original.

    Args:
        raw_img: original image
        img: image to evaluate (same shape as raw_img)

    return:
        mean absolute deviation of the per-pixel ratio from its maximum,
        or None (with an error logged) when the shapes differ
    """
    if raw_img.shape != img.shape:
        log('the shape of img and raw_img is different', Log_sign.Error)
        return
    # per-pixel brightness ratio; EPS avoids division by zero
    rates = img / (raw_img + EPS)
    rates = rates - rates.max()
    return abs(rates).mean()

def get_name(path, filter=[]):
    """
    Derive a run/model name from a path.

    Args:
        path: file or directory path (resolved through auto_path)
        filter: accepted for API compatibility; currently unused

    return:
        name: derived name
        path: resolved path as a string
    """
    if isinstance(filter, str):
        filter = [filter]  # NOTE: filter is normalised but never consumed
    if isinstance(path, Path):
        path = str(path)
    # a bare stem (no directory part, not a wildcard) already is the name
    name = path if (Path(path).stem == path and path[0] != '*') else None
    path = Path(auto_path(path))
    if name is None:
        if path.is_file():
            name = path.stem
            if name not in {'best', 'last'}:
                log('auto set name to ' + name, Log_sign.Info)
                return name, str(path)
            # weight files (best/last.pt) are named after their run directory
            path = path.parent
        name = path.stem
        # generic folder names point one level up for the real run name
        if name in {'weights', 'train', 'val'}:
            name = path.parent.stem
    log('auto set name to ' + name, Log_sign.Info)
    return name, str(path)

def opend(path):
    """
    Open a file or directory with the system default application.

    Args:
        path: file path (resolved through auto_path first)
    """
    import subprocess
    # fix: pass an argument list instead of interpolating into a shell
    # string, so paths containing spaces or shell metacharacters work
    subprocess.run(['xdg-open', str(auto_path(path))])
    
def init_local_configure():
    """
    Write the current path constants to tools_extension/configure.toml,
    asking for confirmation before overwriting an existing file.
    """
    mkdir(EXTENSIONS)
    config_file = EXTENSIONS + '/configure.toml'
    if Path(config_file).exists():
        answer = log(f'the configure file existed, do you want to overwrite it?\nenter y to continue', Log_sign.Require)
        if answer != 'y':
            return
    data = {
        'LABELMASTER': LABELMASTER,
        'LOACL_FASTPATH': LOACL_FASTPATH,
        'YOLOV5_PATH': YOLOV5_PATH
    }
    with open(config_file, 'w') as f:
        f.write(toml.dumps(data))
    
def fix_prediction(data:dataset):
    """
    Post-process wind-format predictions with the AutoPin keypoint fixer.

    Args:
        data (dataset): prediction data; modified in place
    """
    if not len(data.keyPoints):
        return
    if data.current_format != Label_Format.Wind:
        return
    # NOTE: the original carried a commented-out try/except around this
    # import — failures here propagate to the caller
    from tools_extension.AutoPin import wind_point_fixer
    data.keyPoints = wind_point_fixer(data.get_image(), data.keyPoints)

def get_unique_path(path: str):
    """
    Return a path that does not yet exist, by appending '_<n>' to the stem.

    Scans the siblings of *path* for entries of the same kind (file vs
    directory) named '<stem>_<number>' and returns the next number up.

    Args:
        path: desired file or directory path

    return:
        unique_path: *path* itself when free, otherwise '<stem>_<n>[.suffix]'
    """
    path = Path(path)
    if not path.exists():
        return str(path)
    stem_len = len(path.stem)
    file_code = 0
    is_dir = path.is_dir()
    for sibling in glob.glob(str(path.parent) + '/*'):
        candidate = Path(sibling)
        # only compare against entries of the same kind (dir vs file)
        if candidate.is_dir() != is_dir:
            continue
        try:
            file_code = max(file_code, int(candidate.stem[stem_len + 1:]))
        except ValueError:
            # sibling does not follow the '<stem>_<number>' pattern
            pass
    if is_dir:
        return f'{str(path)}_{file_code + 1}'
    # fix: build the name with with_name — the original sliced
    # str(path)[:-len(suffix)], which collapses to '' for suffix-less files
    return str(path.with_name(f'{path.stem}_{file_code + 1}{path.suffix}'))

def rewrite_video(vi, vio, tmp_path: str, dest: str, image: np.ndarray, info=False):
    """
    Repair a video by copying its remaining frames into a new file.

    Writes *image*, then drains every remaining frame from reader *vi*
    into writer *vio*, releases the writer, and moves the temp file to *dest*.

    Args:
        vi: opened video reader (cv2.VideoCapture-style: read() -> (ret, frame))
        vio: opened video writer (write(frame), release())
        tmp_path: path the writer is currently writing to
        dest: final path for the repaired video
        image: first frame to write
        info: log a message when finished
    """
    ret = True
    while ret:
        vio.write(image)
        ret, image = vi.read()
    vio.release()
    shutil.move(tmp_path, dest)
    if info:
        log(f'finish fixing {dest}', Log_sign.Info)

def imshow(img):
    """
    Show *img* in a window named 'img' and block until any key is pressed.

    Args:
        img: image to display
    """
    cv2.imshow('img', img)
    cv2.waitKey(0)