from ctypes import CDLL, byref, create_string_buffer
from ctypes import cdll
from collections import defaultdict
from genericpath import exists
import os, shutil, io, sys
import os.path as osp
import json
import numpy as np
import cv2
import copy
import base64, requests
from multiprocessing import Pool
from tqdm import tqdm
from PIL import Image
from tool import filesystem, via_tool, opencv_tool  # export PYTHONPATH=$PYTHONPATH:`pwd`


class MyEncoder(json.JSONEncoder):
    """JSON encoder that handles numpy scalars/arrays and raw bytes.

    numpy integers/floats become native int/float, ndarrays become nested
    lists, and bytes are decoded as UTF-8 text; anything else falls back to
    the base encoder (which raises TypeError for unsupported types).
    """

    def default(self, obj):
        # Map each supported type to its converter; first match wins.
        converters = (
            (np.integer, int),
            (np.floating, float),
            (np.ndarray, lambda a: a.tolist()),
            (bytes, lambda b: b.decode("utf-8")),
        )
        for kind, convert in converters:
            if isinstance(obj, kind):
                return convert(obj)
        return super().default(obj)


# Find identical images by matching file size.
def find_image_by_size(img_path, find_dir, save_dir):
    """Copy every .jpg/.png under *find_dir* whose byte size equals that of
    *img_path* into *save_dir*, flattening the relative path into the name."""
    target_size = osp.getsize(img_path)

    for candidate in filesystem.get_all_filepath(find_dir, [".jpg", ".png"]):
        candidate_size = osp.getsize(candidate)
        print(candidate_size)
        if candidate_size != target_size:
            continue
        # Flatten "sub/dir/name.jpg" into "sub_dir_name.jpg" under save_dir.
        flat_name = candidate.replace(find_dir, "").replace("/", "_")
        dst = save_dir + os.sep + flat_name
        shutil.copy(candidate, dst)
        print(dst)


def find_image_by_mean(img_path, find_dir, save_dir):
    """Search for near-duplicates of one image by mean pixel intensity.

    Every .jpg/.png under *find_dir* whose mean pixel value differs from the
    reference image's mean by less than 1.0 is copied into *save_dir* with a
    flattened name.
    """
    reference_mean = np.mean(cv2.imread(img_path))

    for candidate in tqdm(filesystem.get_all_filepath(find_dir, [".jpg", ".png"])):
        candidate_mean = np.mean(cv2.imread(candidate))
        if abs(reference_mean - candidate_mean) >= 1.:
            continue
        flat_name = candidate.replace(find_dir, "").replace("/", "_")
        shutil.copy(candidate, save_dir + os.sep + flat_name)
        print(candidate)


def find_similiar_img(data_dir, save_dir):
    """Group images whose mean pixel value agrees to the first 8 characters
    of its string form, and copy each group of size > 1 into *save_dir* for
    manual review."""
    os.makedirs(save_dir, exist_ok=True)

    # Bucket files by a cheap similarity key derived from the image mean.
    buckets = defaultdict(list)
    for path in tqdm(filesystem.get_all_filepath(data_dir, [".jpg", ".png"])):
        key = str(np.mean(cv2.imread(path)))[:8]
        buckets[key].append(path)

    group_id = 0
    for members in buckets.values():
        print(len(members))
        if len(members) <= 1:
            continue
        for member_idx, path in enumerate(members):
            # "name.jpg" -> "<group>_name_<idx>jpg" (original naming scheme,
            # which drops the dot, is preserved).
            parts = osp.basename(path).split(".")
            new_name = str(group_id) + "_" + "_{}".format(member_idx).join(parts)
            shutil.copy(path, osp.join(save_dir, new_name))
        group_id += 1


def extract_truck_pickup(data_dir, save_dir, via_name="via_region_data.json"):
    """Extract images annotated with 'truck' or 'pickup' for separate
    labeling, copying each image and its VIA record into *save_dir*."""
    retain_labels = ("truck", "pickup")

    for via_file in filesystem.get_all_filepath(data_dir, [via_name]):
        with open(via_file) as f:
            annotations = json.loads(f.read())

        src_dir = osp.dirname(via_file)
        # Mirror the leaf directory name under save_dir.
        dst_dir = osp.join(save_dir, osp.basename(src_dir))
        os.makedirs(dst_dir, exist_ok=True)

        kept = dict()
        for key, record in annotations.items():
            image_path = osp.join(src_dir, record["filename"])
            if not osp.exists(image_path):
                continue
            wanted = any(
                region["region_attributes"]["label"] in retain_labels
                for region in record["regions"]
            )
            if wanted:
                shutil.copy(image_path, osp.join(dst_dir, record["filename"]))
                kept[key] = record

        with open(dst_dir + os.sep + via_name, "w") as wf:
            wf.write(json.dumps(kept))


class Rec:
    """Thin ctypes wrapper around the Paddle text-recognition shared library.

    NOTE(review): ``init`` must be called explicitly before
    ``text_rec_lite_py`` — it is deliberately not ``__init__``.
    """

    def init(self):
        """Load the shared library and initialise the model.

        Returns the native return code (0 on success, presumably — verify
        against the C side).
        """
        lib_path = '/home/xc/work/code/paddle/pdmodel/build/src/libpdmodel_manager.so'
        self.optimize_recog_dll = cdll.LoadLibrary(lib_path)

        model_dir = "/home/xc/work/code/paddle/pdmodel/model"
        model_dir_buf = create_string_buffer(model_dir.encode('utf-8'), 65536)

        rc = self.optimize_recog_dll.init_model_py(byref(model_dir_buf))
        print("init_model out: ", rc)
        return rc

    def text_rec_lite_py(self, imgData):
        """Run OCR on base64-encoded image bytes.

        Returns (native_return_code, parsed_json_result).
        """
        input_buf = create_string_buffer(imgData, 65536 * 500)
        output_buf = create_string_buffer(65536 * 560)
        rc = self.optimize_recog_dll.text_rec_lite_py(byref(input_buf), byref(output_buf))
        return rc, json.loads(output_buf.value.decode('utf8'))


# Sort images by an OCR-read timestamp burned into the frame.
def sort_img_by_time(data_dir, via_name="via_region_data.json"):
    """Rename images in *data_dir* into "<time-rank>_<seq>.jpg" order.

    The HH:MM:SS overlay is cropped from each frame, binarized, OCR'd via
    the native ``Rec`` model, and converted to seconds-since-midnight.
    Files are moved into ``<data_dir>_t`` and the VIA annotation dict
    (keyed "<filename><filesize>") is rewritten with the new filenames.
    """
    rec = Rec()
    ret = rec.init()
    print("ret: ", ret)

    save_dir = data_dir + "_t"
    os.makedirs(save_dir, exist_ok=True)

    img_files = filesystem.get_all_filepath(data_dir, [".jpg"], False)
    img_time_dict = defaultdict(list)
    for f in img_files:
        img = cv2.imread(f)
        # Crop the on-screen timestamp region (pixel coords are camera-specific).
        img = img[108:178, 203:902, :]
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        ret, binary = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY)
        img = cv2.cvtColor(binary, cv2.COLOR_GRAY2BGR)

        buffer = io.BytesIO()
        rgb_img = img[:, :, ::-1]  # BGR -> RGB for PIL
        pil_image = Image.fromarray(rgb_img)
        pil_image.save(buffer, format="jpeg")
        # BUG FIX: ``base64.code`` does not exist (AttributeError on every
        # iteration); use b64encode as sort_img_by_date does.
        encoded = base64.b64encode(buffer.getvalue())
        ret, data = rec.text_rec_lite_py(encoded)
        if ret != 0: continue
        # Strip separators the OCR may emit; keep the trailing HHMMSS digits.
        time_text = data["text"].replace(":", "").replace("：", "").replace("；", "").replace("，", "")[-6:]
        try:
            time_int = int(time_text[0:2]) * 3600 + int(time_text[2:4]) * 60 + int(time_text[4:6])
        except ValueError:
            # Non-numeric / too-short OCR output — skip this frame.
            continue
        img_time_dict[time_int].append(f)

    with open(data_dir + os.sep + via_name, "r") as rf:
        data_dict = json.loads(rf.read())

    # Ascending timestamps; within one timestamp, order by the numeric
    # file stem.
    img_time_list = sorted(img_time_dict.items(), key=lambda x: x[0])

    for idx, (timeline, files) in enumerate(img_time_list):
        file_list = sorted(
            ((f, int(osp.basename(f).split(".")[0])) for f in files),
            key=lambda x: x[1])

        for i, (f, stem) in enumerate(file_list):
            ori_name = osp.basename(f)
            ori_size = str(osp.getsize(f))
            new_name = "{}_{}.jpg".format(idx, i)
            shutil.move(f, osp.join(save_dir, new_name))

            # Re-key the VIA record under the new filename.
            if data_dict.get(ori_name + ori_size, None):
                item_data = copy.copy(data_dict[ori_name + ori_size])
                item_data["filename"] = new_name
                data_dict[new_name + ori_size] = item_data

    with open(save_dir + os.sep + via_name, "w") as wf:
        wf.write(json.dumps(data_dict))


# Sort images by an OCR-read date stamp burned into the frame.
def sort_img_by_date(data_dir, save_dir, via_name="via_region_data.json"):
    """Rename images from *data_dir* into *save_dir* as "<date-rank>_<seq>.jpg".

    The on-screen date overlay is cropped, OCR'd via the native ``Rec``
    model, and reduced to an approximate day ordinal used only for relative
    ordering. The VIA annotation dict (keyed "<filename><filesize>") is
    rewritten with the new filenames into *save_dir*.
    """
    rec = Rec()
    ret = rec.init()
    print("ret: ", ret)

    img_files = filesystem.get_all_filepath(data_dir, [".jpg"], False)
    img_time_dict = defaultdict(list)  # day ordinal -> list of file paths
    for f in img_files:
        img = cv2.imread(f)
        # Crop the date overlay (pixel coords are camera-specific; earlier
        # crops kept for reference).
        # img = img[16:35, 190:262, :]
        # img = img[76:115, 339:482, :]
        img = img[97:185, 203:518, :]

        buffer = io.BytesIO()
        rgb_img = img[:, :, ::-1]  # BGR -> RGB for PIL
        pil_image = Image.fromarray(rgb_img)
        pil_image.save(buffer, format="jpeg")
        encoded = base64.b64encode(buffer.getvalue())
        ret, data = rec.text_rec_lite_py(encoded)
        if ret != 0: continue
        time_text = data["text"].replace("-", "")
        if len(time_text) != 8:  # expect YYYYMMDD after stripping dashes
            print("ERROR: ", data["text"])
            continue
        try:
            # Approximate ordinal: month*30 + day — NOT a real calendar
            # date, only used for sorting.
            time_int = int(time_text[4:6]) * 30 + int(time_text[6:8])
        except:
            continue
        img_time_dict[time_int].append(f)

    with open(data_dir + os.sep + via_name, "r") as rf:
        data_dict = json.loads(rf.read())

    # Ascending date ordinals.
    img_time_list = [[k, v] for k, v in img_time_dict.items()]
    img_time_list = sorted(img_time_list, key=lambda x: x[0])

    for idx, (timeline, files) in enumerate(img_time_list):
        # Order within one date by the numeric file stem; [:-1] drops the
        # stem's last character — assumes it is a non-numeric suffix, TODO
        # confirm against the capture naming scheme.
        file_list = [[f, int(osp.basename(f).split(".")[0][:-1])] for f in files]
        file_list = sorted(file_list, key=lambda x: x[1])

        for i, (f, stem) in enumerate(file_list):
            ori_name = osp.basename(f)
            ori_size = str(osp.getsize(f))
            new_name = "{}_{}.jpg".format(idx, i)
            save_path = osp.join(save_dir, new_name)
            shutil.move(f, save_path)

            # Re-key the VIA record under the new filename.
            if data_dict.get(ori_name + ori_size, None):
                item_data = copy.copy(data_dict[ori_name + ori_size])
                item_data["filename"] = new_name
                data_dict[new_name + ori_size] = item_data

    with open(save_dir + os.sep + via_name, "w") as wf:
        wf.write(json.dumps(data_dict))


# Crop a polygonal region out of every image.
def crop_by_poly(data_dir, xs, ys):
    """Crop the bounding box of polygon (xs, ys) from each .jpg in
    *data_dir*, black out pixels outside the polygon, and write the result
    into "<data_dir>_crop" under the same filename."""
    save_dir = data_dir + "_crop"
    os.makedirs(save_dir, exist_ok=True)

    left, right = min(xs), max(xs)
    top, bottom = min(ys), max(ys)
    # Polygon re-expressed relative to the crop origin (hoisted: identical
    # for every image).
    local_poly = np.array([[x - left, y - top] for x, y in zip(xs, ys)])

    for path in filesystem.get_all_filepath(data_dir, [".jpg"]):
        crop = cv2.imread(path)[top:bottom, left:right]
        mask = np.zeros(crop.shape[:2], dtype=np.uint8)
        cv2.fillConvexPoly(mask, local_poly, [255])
        result = np.zeros(crop.shape, dtype=np.uint8)
        cv2.copyTo(crop, mask, result)
        cv2.imwrite(osp.join(save_dir, osp.basename(path)), result)


def post_rec(img_b64, port=9999, uri="/api/rec"):
    """POST a base64-encoded image to the recognition service.

    Returns the decoded JSON response, or None when the service reports a
    non-zero ``code``.
    """
    # mdl_idx / algo_type per deployment:
    # 1 VihicleDetection det
    # 3 SlopeDetection   seg
    # 1 FireWarning      det
    # 1 TextRec          text_rec
    # 1 PedestrianDetection det
    # 1 WeatherDetection cls
    payload = {
        "img_b64": img_b64,
        "mdl_idx": 1,
        "algo_type": "VihicleDetection",
        "mdl_type": "det"
    }
    url = "http://192.168.200.125:{}{}".format(port, uri)
    # url = "http://192.168.10.167:9998/api/rec"
    headers = {'Content-Type': 'application/json;charset=UTF-8'}
    body = json.dumps(payload, cls=MyEncoder)
    response = requests.post(url, data=body, headers=headers)
    print("status_code: ", response.status_code)
    rec_data = response.json()
    return None if rec_data["code"] != 0 else rec_data


def http_rec_to_via(file_path, via_name="via_region_data.json"):
    """Run the detection service on one image and return a VIA-format
    record (filename/size/file_attributes/regions).

    Returns None if the file is missing; on a failed recognition the record
    is returned with an empty "regions" list.
    """
    if not osp.exists(file_path): return None

    one_data = {}
    one_data["filename"] = osp.basename(file_path)
    one_data["size"] = osp.getsize(file_path)
    one_data["file_attributes"] = {}

    # Re-encode through PIL so the service always receives a JPEG.
    buffer = io.BytesIO()
    bgr = cv2.imread(file_path)
    Image.fromarray(bgr[:, :, ::-1]).save(buffer, format="jpeg")
    rec_data = post_rec(base64.b64encode(buffer.getvalue()))

    regions = []
    if rec_data is not None and rec_data["code"] == 0:
        seen = {}
        for item in rec_data["items"]:
            x, y, w, h, label = item["x"], item["y"], item["w"], item["h"], item["category"]
            # Drop duplicate detections anchored at the same (x, y).
            anchor = "{}x{}".format(x, y)
            if seen.get(anchor, None): continue
            seen[anchor] = 1

            regions.append({
                "shape_attributes": {
                    "name": "rect",
                    "x": x,
                    "y": y,
                    "width": w,
                    "height": h
                },
                "region_attributes": {
                    "label": label
                }
            })

    one_data["regions"] = regions
    return one_data


def distribute(sub_dir, data_dict, start_cnt=0, per_img_in_dir=300):
    """Spread the files in *data_dict* round-robin across numbered
    sub-directories of *sub_dir* (about *per_img_in_dir* files each),
    writing each directory's VIA records next to its files.

    *data_dict* maps file path -> VIA record (must carry a "size" field);
    directory names start at *start_cnt*.
    """
    # Number of target directories, rounded up.
    total = len(data_dict)
    dir_cnt = total // per_img_in_dir
    if total % per_img_in_dir != 0:
        dir_cnt += 1

    save_dirs = []
    save_dicts = []
    for i in range(dir_cnt):
        target = osp.join(sub_dir, str(i + start_cnt))
        os.makedirs(target, exist_ok=True)
        save_dirs.append(target)
        save_dicts.append({})

    # Round-robin assignment keeps directory sizes balanced.
    for idx, (src, record) in enumerate(data_dict.items()):
        slot = idx % dir_cnt
        name = osp.basename(src)
        shutil.move(src, osp.join(save_dirs[slot], name))
        save_dicts[slot][name + str(record["size"])] = record

    for slot, target in enumerate(save_dirs):
        with open(osp.join(target, "via_region_data.rec.json"), "w") as wf:
            wf.write(json.dumps(save_dicts[slot]))


def merge_img_and_distribute3(data_dir, per_img_in_dir=300, via_name="via_region_data.json"):
    """Recognize all still-unannotated images three levels below *data_dir*
    and redistribute them: multi-object images into numbered dirs starting
    at 0, the rest starting at 1000."""
    for sub_dir in filesystem.get_last_dir(data_dir, 3):
        via_files = filesystem.get_all_filepath(sub_dir, [via_name, "via_region_data.rec.json"])

        # Directories that already carry annotations are left untouched.
        annotated_dirs = [osp.dirname(v) for v in via_files]
        img_files = [
            f for f in filesystem.get_all_filepath(sub_dir, [".jpg"])
            if not any(d in f for d in annotated_dirs)
        ]
        if len(img_files) == 0:
            print("contniue: ", sub_dir)
            continue

        single_dict = {}
        multi_dict = {}
        for f in tqdm(img_files):
            one_data = http_rec_to_via(f)
            if one_data is None: continue
            bucket = multi_dict if len(one_data["regions"]) >= 2 else single_dict
            bucket[f] = one_data

        distribute(sub_dir, multi_dict, 0, per_img_in_dir)
        distribute(sub_dir, single_dict, 1000, per_img_in_dir)


def merge_img_and_distribute4(data_dir, per_img_in_dir=300, via_name="via_region_data.json"):
    """Recognize all still-unannotated images two levels below *data_dir*
    and redistribute them: images containing a bus/big_truck into numbered
    dirs starting at 0, the rest starting at 1000."""
    for sub_dir in filesystem.get_last_dir(data_dir, 2):
        print("sub_dir: ", sub_dir)

        via_files = filesystem.get_all_filepath(sub_dir, [via_name, "via_region_data.rec.json"])

        # Directories that already carry annotations are left untouched.
        annotated_dirs = [osp.dirname(v) for v in via_files]
        img_files = [
            f for f in filesystem.get_all_filepath(sub_dir, [".jpg"])
            if not any(d in f for d in annotated_dirs)
        ]
        if not img_files:
            print("contniue: ", sub_dir)
            continue

        plain_dict = {}
        big_truck_dict = {}
        for f in tqdm(img_files):
            one_data = http_rec_to_via(f)
            if one_data is None: continue
            labels = (r["region_attributes"]["label"] for r in one_data["regions"])
            if any(lab in ("bus", "big_truck") for lab in labels):
                big_truck_dict[f] = one_data
            else:
                plain_dict[f] = one_data

        distribute(sub_dir, big_truck_dict, 0, per_img_in_dir)
        distribute(sub_dir, plain_dict, 1000, per_img_in_dir)


def merge_img_and_distribute5(data_dir, per_img_in_dir=300, via_name="via_region_data.json"):
    """Re-distribute already-recognized October data: images whose records
    contain a bus/big_truck region go to numbered dirs starting at 1000,
    the rest at 2000; consumed annotation files are removed afterwards."""
    for sub_dir in filesystem.get_last_dir(data_dir, 2):
        print("sub_dir: ", sub_dir)

        via_files = filesystem.get_all_filepath(sub_dir, ["via_region_data_rec.json"])
        if len(via_files) != 0:
            print("sub_dir: ", sub_dir)
            print("via_files: ", len(via_files))

        plain_dict = {}
        big_truck_dict = {}
        for rec_via in via_files:
            rec_dir = osp.dirname(rec_via)
            with open(rec_via) as rf:
                rec_data = json.loads(rf.read())

            for k, one_data in rec_data.items():
                f = osp.join(rec_dir, one_data["filename"])
                if not osp.exists(f): continue
                labels = [r["region_attributes"]["label"] for r in one_data["regions"]]
                if "bus" in labels or "big_truck" in labels:
                    big_truck_dict[f] = one_data
                else:
                    plain_dict[f] = one_data

        distribute(sub_dir, big_truck_dict, 1000, per_img_in_dir)
        distribute(sub_dir, plain_dict, 2000, per_img_in_dir)
        # Remove the source annotation files once their records are moved.
        for v in via_files: os.remove(v)


def remove_img_by_rec(data_dir):
    """Delete every .jpg in which the detection service finds no objects."""
    for f in tqdm(filesystem.get_all_filepath(data_dir, [".jpg"])):
        print(f)
        one_data = http_rec_to_via(f)
        keep = one_data is not None and len(one_data["regions"]) > 0
        if not keep:
            os.remove(f)


def distribute_for_cls(data_dir, save_root, per_img_in_dir=300, via_name="via_region_data.json"):
    """Cut labeled regions out of detection images for classifier training.

    For every VIA annotation file one level below *data_dir*, each region is
    cropped from its image and written under ``<save_root mirror>/<label>/``
    with a name that encodes the source directory, image and box:
    ``sub.dir#image-stem#x.y.w.h.jpg``. Intended to be run once per month
    of data. ``per_img_in_dir`` is accepted for signature compatibility but
    unused here.
    """
    sub_dirs = filesystem.get_last_dir(data_dir, 1)
    for sub_dir in sub_dirs:
        print("sub_dir: ", sub_dir)

        via_files = filesystem.get_all_filepath(sub_dir, [via_name])
        if len(via_files) == 0:
            print("continue: ", sub_dir)
            continue
        print("sub_dir: ", sub_dir)

        # Mirror the sub-directory structure under save_root.
        save_dir = osp.join(save_root, sub_dir.replace(data_dir + os.sep, ""))
        os.makedirs(save_dir, exist_ok=True)
        print("save_dir: ", save_dir)

        for rec_via in via_files:
            print("rec_via: ", rec_via)
            rec_dir = osp.dirname(rec_via)
            with open(rec_via) as rf:
                rec_data = json.loads(rf.read())

            for k, one_data in rec_data.items():
                basename = one_data["filename"]
                f = osp.join(rec_dir, basename)
                if not osp.exists(f): continue
                print(f)
                img = cv2.imread(f)
                for region in one_data["regions"]:
                    label = region["region_attributes"]["label"]
                    x = region["shape_attributes"]["x"]
                    y = region["shape_attributes"]["y"]
                    w = region["shape_attributes"]["width"]
                    h = region["shape_attributes"]["height"]
                    # BUG FIX: clamp BOTH coordinates. The original clamped
                    # only x, so a negative y silently produced a wrong crop
                    # via Python negative indexing.
                    x = max(x, 0)
                    y = max(y, 0)
                    sdir = osp.join(save_dir, label)
                    # exist_ok avoids a race / rerun crash (was a guarded mkdir).
                    os.makedirs(sdir, exist_ok=True)
                    # "<rel.dir.dots>#<image-stem>#x.y.w.h.jpg"
                    rel_dir = osp.dirname(f).replace(data_dir + os.sep, "").replace(os.sep, ".")
                    save_name = rel_dir + "#" + basename.replace(
                        ".jpg", "#{}.{}.{}.{}.jpg".format(x, y, w, h))
                    cv2.imwrite(osp.join(sdir, save_name), img[y:y + h, x:x + w])


def restore_label_to_via(data_dir, save_root, per_img_in_dir=300, via_name="via_region_data.json"):
    """Write manually re-classified crops back into their VIA annotations.

    Crop filenames encode their origin as
    ``<dir.with.dots>#<image-stem>#x.y.w.h.jpg`` (produced by
    distribute_for_cls); the folder a crop now lives in is taken as its
    corrected label. Updated annotation dicts are written as
    ``via_region_data.merge.json`` next to each source annotation file.
    ``per_img_in_dir`` is accepted for signature compatibility but unused.
    """
    via_files = filesystem.get_all_filepath(save_root, [via_name])
    via_dicts = {}
    for vf in via_files:
        with open(vf) as rf:
            rec_data = json.loads(rf.read())
        # Key annotation dicts by their directory relative to save_root.
        via_dicts[osp.dirname(vf).replace(save_root + os.sep, "")] = rec_data

    cls_dirs = filesystem.get_last_dir(data_dir, 1)
    for cls_dir in cls_dirs:
        # BUG FIX: the original printed the entire cls_dirs list on every
        # iteration; print the current directory instead.
        print("sub_dir: ", cls_dir)

        img_files = filesystem.get_all_filepath(cls_dir, ['.jpg'])
        for img_file in img_files:
            # e.g. 8.103.2#21918_192.168.103.64_01_..._2.mp4#476.140.140.92.jpg
            strs = osp.basename(img_file).split("#")
            sub_dir = strs[0].replace(".", os.sep)
            filename = strs[1] + ".jpg"
            rect = [int(v) for v in strs[2].replace(".jpg", "").split(".")]
            via_data = via_dicts[sub_dir]
            for key, value in via_data.items():
                if value["filename"] != filename: continue
                for region in value["regions"]:
                    box = [region["shape_attributes"]["x"],
                           region["shape_attributes"]["y"],
                           region["shape_attributes"]["width"],
                           region["shape_attributes"]["height"]]
                    if rect != box: continue
                    # The folder the crop was sorted into is the new label.
                    region["region_attributes"]["label"] = osp.basename(osp.dirname(img_file))

    for key, via_dict in via_dicts.items():
        with open(osp.join(save_root, key, "via_region_data.merge.json"), "w") as wf:
            wf.write(json.dumps(via_dict))


def rec_to_via2(data_dir, local_save=False, via_name="via_region_data.json"):
    """Auto-annotate every leaf directory of *data_dir* via the detection
    service and write the result as a VIA JSON file.

    When *local_save* is False, annotated images are copied into a sibling
    "<sub_dir>_two" directory; otherwise annotations are written in place.
    Directories already holding *via_name* are skipped.
    """
    sub_dirs = filesystem.get_last_dir(data_dir)

    for sub_dir in sub_dirs:
        # Skip directories that were already annotated.
        if osp.exists(osp.join(sub_dir, via_name)):
            print("continue ", sub_dir)
            continue
        if not local_save:
            save_dir = sub_dir + "_two"
            # BUG FIX: exist_ok=True so a rerun does not crash on a "_two"
            # directory left over from a previous pass (consistent with
            # makedirs usage elsewhere in this file).
            os.makedirs(save_dir, exist_ok=True)
        else:
            save_dir = sub_dir

        # Convert every image's detections into a VIA record.
        data_dict = {}
        files = filesystem.get_all_filepath(sub_dir, [".jpg"])
        for file_path in files:
            if not osp.exists(file_path): continue

            file_size = osp.getsize(file_path)
            filename = osp.basename(file_path)
            one_data = {
                "filename": filename,
                "size": file_size,
                "file_attributes": {},
            }

            # Re-encode through PIL so the service always receives a JPEG.
            buffer = io.BytesIO()
            img = cv2.imread(file_path)
            pil_image = Image.fromarray(img[:, :, ::-1])
            pil_image.save(buffer, format="jpeg")
            rec_data = post_rec(base64.b64encode(buffer.getvalue()))

            if rec_data is None or rec_data["code"] != 0: continue

            wh_threshold = 100
            big_flag = False  # kept for the (disabled) size filter below
            regions = []
            exist_dict = {}
            for item in rec_data["items"]:
                x, y, w, h, label = item["x"], item["y"], item["w"], item["h"], item["category"]
                # Drop duplicate detections anchored at the same (x, y).
                if exist_dict.get("{}x{}".format(x, y), None): continue
                exist_dict["{}x{}".format(x, y)] = 1

                if w > wh_threshold or h > wh_threshold: big_flag = True

                regions.append({
                    "shape_attributes": {
                        "name": "rect",
                        "x": x,
                        "y": y,
                        "width": w,
                        "height": h
                    },
                    "region_attributes": {
                        "label": label
                    }
                })

            # if (not big_flag) and len(rec_data["items"]) < 2: continue
            if not local_save:
                shutil.copy(file_path, osp.join(save_dir, filename))

            one_data["regions"] = regions
            data_dict[filename + str(file_size)] = one_data

        with open(osp.join(save_dir, "via_region_data.rec.tmp.json"), "w") as wf:
            wf.write(json.dumps(data_dict))


def __cls_weather_to_dir_imp(save_dir, data_dir, sub_dir, idx):
    """Worker: classify each .jpg in *sub_dir* by weather and copy it to
    ``save_dir/<class-name>/...``, preserving the path relative to data_dir.

    *idx* selects one of six service ports (9990-9995) to spread load.
    """
    class_names = {0: "fog", 1: "rain", 2: "snow", 3: "norm"}
    for file_path in filesystem.get_all_filepath(sub_dir, [".jpg"]):
        if not osp.exists(file_path): continue

        # Re-encode through PIL so the service always receives a JPEG.
        buffer = io.BytesIO()
        bgr = cv2.imread(file_path)
        Image.fromarray(bgr[:, :, ::-1]).save(buffer, format="jpeg")
        rec_data = post_rec(base64.b64encode(buffer.getvalue()), 9990 + idx % 6)
        if rec_data is None or rec_data["code"] != 0: continue

        rel_path = file_path.replace(data_dir + os.sep, "")
        save_path = osp.join(save_dir, class_names[rec_data["class_idx"]], rel_path)
        os.makedirs(osp.dirname(save_path), exist_ok=True)
        shutil.copy(file_path, save_path)


def cls_weather_to_dir(save_dir, data_dir):
    """Classify all leaf directories of *data_dir* by weather in parallel,
    one worker per directory (capped at 6 processes).

    NOTE(review): apply_async results are never collected, so worker
    exceptions are silently dropped — same as the original behavior.
    """
    sub_dirs = filesystem.get_last_dir(data_dir)

    worker_count = min(6, len(sub_dirs))
    pool = Pool(worker_count)
    for idx, one_dir in enumerate(sub_dirs):
        print(one_dir)
        pool.apply_async(__cls_weather_to_dir_imp, args=(save_dir, data_dir, one_dir, idx))

    pool.close()
    pool.join()


if __name__ == "__main__":
    # Usage examples kept for reference — uncomment the stage you need.
    # img_path = "/home/xc/work/data/car/data/image/04-09/Screenshot_20210409_170609_uni.UNIF0607A1.jpg"
    # find_dir = "/home/xc/work/code/paddle/train_data/det/car/images/real"
    # save_dir = "/home/xc/work/code/paddle/train_data/det/car/images/test"
    # # find_image_by_size(img_path, find_dir, save_dir)
    # find_image_by_mean(img_path, find_dir, save_dir)

    # data_dir = "/home/xc/work/code/paddle/train_data/det/car/images/real"
    # save_dir = "/home/xc/work/code/paddle/train_data/det/car/images/test"
    # find_similiar_img(data_dir, save_dir)

    # Extract truck/pickup classes for separate labeling
    # data_dir = "/home/xc/work/code/paddle/train_data/det/car/images/real"
    # save_dir = "/home/xc/work/code/paddle/train_data/det/car/images/copy"
    # extract_truck_pickup(data_dir, save_dir)

    # data_dir = sys.argv[1]
    # sort_img_by_time(data_dir)

    # data_dir = sys.argv[1]
    # # small
    # xs = [138,150,163,174,187,208,252,296,316,308,286,263,242,219,163,156,138]
    # ys = [194,214,231,249,266,267,260,249,246,230,210,193,183,172,183,186,194]
    # big
    # xs = [403,425,463,503,556,581,677,795,890,943,905,819,742,667,581,475]
    # ys = [581,617,658,716,790,805,797,775,759,737,672,608,552,516,519,556]

    # xs = [441,468,508,561,606,710,884,967,988,955,885,819,746,692,629,441]
    # ys = [636,693,742,816,859,845,810,780,748,703,649,592,569,546,548,636]

    # xs = [583,593,626,663,707,746,841,991,1093,1089,1026,962,916,855,795,583]
    # ys = [578,622,663,709,757,809,793,775,746,697,647,592,558,529,504,578]
    # crop_by_poly(data_dir, xs, ys)

    # data_dir = "/home/xc/work/code/paddle/train_data/det/car/images/2021-06-11/1"
    # save_dir = "/home/xc/work/code/paddle/train_data/det/car/images/2021-06-11/1_t"
    # sort_img_by_date(data_dir, save_dir)

    # data_dir = sys.argv[1]
    # merge_img_and_distribute3(data_dir)

    # data_dir = sys.argv[1]
    # merge_img_and_distribute4(data_dir)

    # data_dir = sys.argv[1]
    # merge_img_and_distribute5(data_dir)

    # data_dir = sys.argv[1]
    # remove_img_by_rec(data_dir)

    # data_dir = sys.argv[1]
    # data_dir1 = r"F:\work\code\paddle\train_data\det\car\images\2022\2\15\121\0"
    # rec_to_via2(data_dir1, True)

    # save_dir=r"F:\work\dataset\pic\changshun\car\weather"
    # data_dir = sys.argv[1]
    # cls_weather_to_dir(save_dir, data_dir)

    # Step 1: crop labeled regions, then sort the crops manually into class folders
    # data_dir = r"F:\work\code\paddle\train_data\det\car\images\2022\3"
    # save_root = r"f:\work\code\paddle\train_data\cls\car\images\2022\3"
    # distribute_for_cls(data_dir, save_root)


    # Step 2: restore the manually-sorted class labels back into the VIA annotations
    data_dir = r"f:\work\code\paddle\train_data\cls\car\images\2022\3"
    save_root = r"f:\work\code\paddle\train_data\det\car\images\2022\3"
    restore_label_to_via(data_dir, save_root)