from genericpath import exists, getsize
import os, sys
import os.path as osp
import shutil
import time
import numpy as np
import pickle
import struct
import re
import cv2
from tqdm import tqdm
from multiprocessing import Pool
import imageio
import json
import xml.etree.ElementTree as ET
from collections import defaultdict
import random
import copy
import requests

sys.path.append(osp.dirname(osp.dirname(osp.abspath(__file__))))
from tool import filesystem, opencv_tool # export PYTHONPATH=$PYTHONPATH:`pwd`


# Move images that are not referenced in the VIA annotation file into dst_dir.
def move_not_via_image(data_dir, dst_dir):
    with open(osp.join(data_dir, "via_region_data.json"), "r") as rf:
        data_dict = json.load(rf)

    # Names of every image actually present in the annotation file.
    annotated = {item["filename"] for item in data_dict.values()}

    os.makedirs(dst_dir, exist_ok=True)
    for name in os.listdir(data_dir):
        if name.endswith("jpg") and name not in annotated:
            shutil.move(osp.join(data_dir, name), osp.join(dst_dir, name))

# Drop entries from each via json whose image file no longer exists on disk.
def remove_none_image(data_dir, via_name="via_region_data.json"):
    for via_file in filesystem.get_all_filepath(data_dir, [via_name]):
        print(via_file)
        with open(via_file, "r") as rf:
            data_dict = json.loads(rf.read())

        # Keep only entries whose image is still present.
        kept = {
            key: item
            for key, item in data_dict.items()
            if os.path.exists(data_dir + os.sep + item["filename"])
        }

        with open(via_file, "w") as wf:
            wf.write(json.dumps(kept))


# Move out (or delete) images that carry no annotations in the VIA file.
def remove_not_label_img_dir(data_dir, save_dir="", via_name="via_region_data.json"):
    """For every directory containing `via_name`, move each .jpg that is either
    absent from the VIA file or has an empty "regions" list into the mirrored
    directory under `save_dir`; delete it instead when save_dir is empty."""
    via_files = filesystem.get_all_filepath(data_dir, [via_name])

    for via_file in via_files:
        cur_dir = osp.dirname(via_file)
        with open(via_file) as f:
            train_labels = json.loads(f.read())

        # Destination mirrors the source layout under save_dir.
        save_root = ""
        if save_dir != "":
            save_root = osp.join(save_dir, osp.dirname(via_file).replace(data_dir + os.sep, ""))
            os.makedirs(save_root, exist_ok=True)

        # filename -> whether it has at least one region.  Replaces the original
        # O(images * labels) inner scan, and avoids shadowing the `via_name`
        # parameter and the imported `exists` with loop-local names.
        # setdefault keeps first-occurrence semantics like the original `break`.
        has_regions = {}
        for entry in train_labels.values():
            has_regions.setdefault(entry["filename"], len(entry["regions"]) != 0)

        move_list = []
        for path in filesystem.get_all_filepath(cur_dir, [".jpg"]):
            name = os.path.basename(path)
            if has_regions.get(name, False):
                continue  # image is annotated with at least one box -> keep it
            print(path)
            move_list.append(path)
            if save_root != "":
                shutil.move(path, save_root + os.sep + name)
            else:
                os.remove(path)


# Remove every rect annotation whose box contains the given point.
def remove_box_by_point(data_dir, point, via_name="via_region_data.json"):
    """Rewrite the VIA file in data_dir, dropping each rect region whose box
    contains `point` (as tested by opencv_tool.point_in_rect)."""
    with open(data_dir + os.sep + via_name, "r") as rf:
        data_dict = json.loads(rf.read())

    for k, v in data_dict.items():
        regions = []
        for region in v["regions"]:
            sa = region["shape_attributes"]
            # Keep non-rect shapes (e.g. polygons) untouched.  The original
            # truthiness test (`not get("x", None)`) also misclassified valid
            # boxes at x == 0 and silently dropped them.
            if "x" not in sa:
                regions.append(region)
                continue
            x0, y0 = sa["x"], sa["y"]
            w, h = sa["width"], sa["height"]
            if opencv_tool.point_in_rect(point, [x0, y0, w, h]):
                continue  # box contains the point -> remove it
            regions.append(region)
        data_dict[k]["regions"] = regions

    with open(data_dir + os.sep + via_name, "w") as wf:
        wf.write(json.dumps(data_dict))

# Remove every region whose label is in `labels`.
def remove_box_by_label(data_dir, labels, via_name="via_region_data.json"):
    for via_path in filesystem.get_all_filepath(data_dir, [via_name]):
        with open(via_path, "r") as rf:
            data_dict = json.loads(rf.read())

        for entry in data_dict.values():
            entry["regions"] = [
                region for region in entry["regions"]
                if region["region_attributes"]["label"] not in labels
            ]

        with open(via_path, "w") as wf:
            wf.write(json.dumps(data_dict))

# Delete every VIA annotation file found under data_dir.
def remove_via_file(data_dir, via_name="via_region_data.json"):
    for via_path in filesystem.get_all_filepath(data_dir, [via_name]):
        os.remove(via_path)

def via_update_file_size(data_dir, via_name="via_region_data.json"):
    """Refresh each entry's "size" field from the file on disk and rebuild the
    keys as filename+size (the VIA key convention).  Entries whose image is
    missing are dropped."""
    via_path = data_dir + os.sep + via_name
    with open(via_path, "r") as rf:
        data_dict = json.loads(rf.read())

    refreshed = {}
    for entry in data_dict.values():
        img_path = osp.join(data_dir, entry["filename"])
        if not osp.exists(img_path):
            continue
        size = osp.getsize(img_path)
        entry["size"] = size
        refreshed[entry["filename"] + str(size)] = entry

    with open(via_path, "w") as wf:
        wf.write(json.dumps(refreshed))


# Rename every annotated image to "<idx>.jpg" and rewrite the VIA file to match.
def rename_file_and_update_json(data_dir, via_name="via_region_data.json"):
    with open(data_dir + os.sep + via_name, "r") as rf:
        entries = json.loads(rf.read())

    renamed = {}
    for idx, entry in enumerate(entries.values()):
        src_path = osp.join(data_dir, entry["filename"])
        new_name = "{}.jpg".format(idx)
        # Size is taken before the move; keys follow the filename+size convention.
        size = osp.getsize(src_path)
        entry["filename"] = new_name
        renamed[new_name + str(size)] = entry
        shutil.move(src_path, osp.join(data_dir, new_name))

    with open(data_dir + os.sep + via_name, "w") as wf:
        wf.write(json.dumps(renamed))

# Rename images to "<dirname>_<idx>.jpg" per directory and rewrite each VIA file.
def rename_file_and_update_json2(data_dir, via_name="via_region_data.json"):
    for via_file in filesystem.get_all_filepath(data_dir, [via_name]):
        cur_dir = osp.dirname(via_file)
        with open(via_file) as f:
            train_labels = json.loads(f.read())

        renamed = {}
        for idx, entry in enumerate(train_labels.values()):
            src_path = osp.join(cur_dir, entry["filename"])
            if not osp.exists(src_path):
                continue  # missing image: drop its entry entirely

            dst_path = osp.join(cur_dir, "{}_{}.jpg".format(osp.basename(cur_dir), idx))
            size = osp.getsize(src_path)
            entry["filename"] = osp.basename(dst_path)
            renamed[osp.basename(dst_path) + str(size)] = entry
            shutil.move(src_path, dst_path)

        with open(cur_dir + os.sep + via_name, "w") as wf:
            wf.write(json.dumps(renamed))
# Drop duplicate boxes (same x, y and label) within each image entry.
def remove_duplicate_box(data_dir, via_name="via_region_data.json"):
    via_path = data_dir + os.sep + via_name
    with open(via_path, "r") as rf:
        data_dict = json.loads(rf.read())

    for key, entry in data_dict.items():
        seen = set()
        unique_regions = []
        for region in entry["regions"]:
            signature = "{}_{}_{}".format(
                region["shape_attributes"]["x"],
                region["shape_attributes"]["y"],
                region["region_attributes"]["label"],
            )
            if signature in seen:
                print("duplicate_box... ", key)
                continue
            seen.add(signature)
            unique_regions.append(region)
        entry["regions"] = unique_regions

    with open(via_path, "w") as wf:
        wf.write(json.dumps(data_dict))


# Give every background image in the folder a dummy "retain" box so the images
# can be used as negative / distractor samples during training.
def generate_black_label_for_train(data_dir, via_name="via_region_data.json"):
    "Single-level folder only, not recursive."
    total_imgs = filesystem.get_all_filepath(data_dir, [".jpg"], recursive=False)

    via_dict = dict()
    for img_path in total_imgs:
        # Skip images OpenCV cannot decode.
        if cv2.imread(img_path) is None:
            print("None... ", img_path)
            continue

        name = osp.basename(img_path)
        dummy_region = {
            "shape_attributes": {
                "name": "rect",
                "x": 0,
                "y": 0,
                "width": 100,
                "height": 100,
            },
            "region_attributes": {"label": "retain"},
        }
        via_dict[name] = {
            "filename": name,
            "regions": [dummy_region],
            "file_attributes": {},
        }

    with open(data_dir + os.sep + via_name, "w") as wf:
        wf.write(json.dumps(via_dict))

# Crop every annotated rect out of its image and store the crops per-label,
# for use as a classification dataset.
def crop_img_for_cls_by_via(data_dir, save_dir, via_name="via_region_data.json"):
    """Crop filenames are "<rel.dir>#<x0.y0.x1.y1>#<original_name>.jpg" so the
    origin can be reconstructed later (see make_new_via_from_crop_img)."""
    via_files = filesystem.get_all_filepath(data_dir, [via_name])
    label_count = defaultdict(int)

    for f in via_files:
        with open(f, "r") as rf:
            data_dict = json.loads(rf.read())
        cur_dir = osp.dirname(f)
        # Encode the relative directory into the crop filename with dots.
        prefix = cur_dir.replace(data_dir + os.sep, "").replace(os.sep, ".")

        for k, data in tqdm(data_dict.items()):
            filename = data["filename"]
            img_path = osp.join(cur_dir, filename)
            if not osp.exists(img_path):
                print("error. not exists.", img_path)
                continue
            img = cv2.imread(img_path)
            if img is None:
                print("error. img.", img_path)
                continue

            for region in data["regions"]:
                label = region["region_attributes"]["label"]
                x0 = region["shape_attributes"]["x"]
                y0 = region["shape_attributes"]["y"]
                x1 = x0 + region["shape_attributes"]["width"]
                y1 = y0 + region["shape_attributes"]["height"]
                # Keep the raw (unclamped) coordinates in the filename stem.
                stem = "{}.{}.{}.{}".format(x0, y0, x1, y1)
                # Clamp the crop window to the image bounds.
                if x0 < 0: x0 = 0
                if y0 < 0: y0 = 0
                if x1 > img.shape[1]: x1 = img.shape[1]
                if y1 > img.shape[0]: y1 = img.shape[0]

                crop_img = img[y0:y1, x0:x1]
                if not isinstance(crop_img, np.ndarray) or crop_img.size == 0:
                    print("error. crop_img.", img_path)
                    # Fix: skip the write — cv2.imwrite fails on empty arrays.
                    # (.size == 0 also catches zero-width crops, not only
                    # zero-height ones as the original shape[0] check did.)
                    continue

                label_count[label] += 1
                cur_save_dir = osp.join(save_dir, label)
                os.makedirs(cur_save_dir, exist_ok=True)
                save_path = osp.join(cur_save_dir, "{}#{}#{}.jpg".format(prefix, stem, filename))
                cv2.imwrite(save_path, crop_img)


def make_new_via_from_crop_img(img_dir, data_dir, save_via_name, via_name="via_region_data.json"):
    """Rebuild VIA annotation files from classified crop images.

    Inverse of crop_img_for_cls_by_via: crops are named
    "<dir.prefix>#<x0.y0.x1.y1>#<original_name>.jpg" and live under
    img_dir/<label>/.  For each directory under data_dir that contains
    `via_name`, a new `save_via_name` file is written whose boxes come from
    the crop filenames and whose labels come from the crops' parent
    directory names.
    """
    via_files = filesystem.get_all_filepath(data_dir, [via_name])

    crop_img_files = filesystem.get_all_filepath(img_dir, [".jpg"])
    # original image path -> list of [label, x0, y0, x1, y1] (coords as strings)
    total_img_boxs = defaultdict(list)
    for crop_img_file in crop_img_files:
        base_name = osp.basename(crop_img_file)
        strs = base_name.split("#")
        # Dots in the first "#" field encode the sub-directory path.
        prefix = strs[0].replace(".", osp.sep)
        # The crop's parent directory name is its class label.
        label_str = osp.basename(osp.dirname(crop_img_file))
        # if label_str != "ringelman": continue
        # Strip the trailing ".jpg" that crop_img_for_cls_by_via appended to
        # the original filename.
        ori_img_path = osp.join(data_dir, prefix, ".".join(strs[2].split(".")[:-1]))
        total_img_boxs[ori_img_path].append([label_str] + strs[1].split("."))
    # print(total_img_boxs)

    
    for f in via_files:
        cur_dir = osp.dirname(f)
        cur_imgs = filesystem.get_all_filepath(cur_dir, [".jpg"])


        data_dict = {}
        for file_path in cur_imgs:
            item = {}
            regions = []
            # Images with no matching crops get an entry with empty regions.
            boxs = total_img_boxs.get(file_path, None)
            if boxs is not None:
                for box in boxs:
                    x1, y1= int(box[1]), int(box[2])
                    x2, y2= int(box[3]), int(box[4])
                    region = {
                        "region_attributes": {"label": box[0]},
                        "shape_attributes": {"name":"rect",
                                            "x": x1,
                                            "y": y1,
                                            "width": x2-x1,
                                            "height": y2-y1}
                    }
                    regions.append(region)
            item["regions"] = regions
            item["filename"] = osp.basename(file_path)

            # VIA keys follow the filename+filesize convention.
            size = osp.getsize(file_path)
            item["size"] = size
            item["file_attributes"] = {}
            data_dict[osp.basename(file_path)+str(size)] = item

        with open(osp.join(cur_dir, save_via_name), 'w') as f:
            output_json = json.dumps(data_dict)
            f.write(output_json)

def copy_via_dir(data_dir, save_dir, via_name, save_via_name="via_region_data.boat.json"):
    """Copy every annotated image (non-empty regions) plus a filtered VIA file
    into save_dir, mirroring the directory layout under data_dir."""
    for via_path in filesystem.get_all_filepath(data_dir, [via_name]):
        with open(via_path, "r") as rf:
            data_dict = json.loads(rf.read())
        cur_dir = osp.dirname(via_path)
        dst_dir = osp.join(save_dir, cur_dir.replace(data_dir + os.sep, ""))
        if not osp.exists(dst_dir):
            os.makedirs(dst_dir, exist_ok=True)

        save_dict = {}
        for key, value in data_dict.items():
            if len(value["regions"]) == 0:
                continue  # only copy images that actually carry annotations
            save_dict[key] = value
            shutil.copy(osp.join(cur_dir, value["filename"]),
                        osp.join(dst_dir, value["filename"]))

        with open(osp.join(dst_dir, save_via_name), 'w') as wf:
            wf.write(json.dumps(save_dict))

def convert_to_rect_json_and_save_img(save_dir, image_aug, polygon_aug, via_name="via_region_data.json"):
    """
    Save augmented images and their rect annotations (labels included) as a
    VIA json file.
    """
    via_dict = dict()
    for idx, (image, polygons) in enumerate(zip(image_aug, polygon_aug)):
        name = "imgaug_" + str(idx) + ".jpg"
        image_path = save_dir + os.sep + name
        imageio.imwrite(image_path, image)

        boxs = []
        for plg in polygons:
            box = [
                int(np.min([plg.x1_int, plg.x2_int])),
                int(np.min([plg.y1_int, plg.y2_int])),
                int(abs(plg.x1_int - plg.x2_int)),
                int(abs(plg.y1_int - plg.y2_int)),
                # Fix: gen_via_item_dict expects a list of [attribute, value]
                # pairs here.  Passing the bare label string made it iterate
                # the string per character, producing garbage attributes.
                [["label", plg.label]],
                ]
            boxs.append(box)
        name, item_dict = gen_via_item_dict(image_path, "rect", boxs)
        via_dict[name] = item_dict

    with open(save_dir + os.sep + via_name, "w") as rf:
        rf.write(json.dumps(via_dict))

def gen_via_item_dict(filepath, name_type, boxs):
    """
    Build one VIA annotation entry for an image file.

    boxs: each item is
    [
        x,
        y,
        w,
        h,
        [["label", "car"], ["occluded", "no"]]
    ]
    Returns (key, item_dict) where key is filename+filesize.
    """
    filename = osp.basename(filepath)
    filesize = osp.getsize(filepath)

    regions = []
    for box in boxs:
        # box[4] is a list of [attribute, value] pairs.
        region_attributes = {pair[0]: pair[1] for pair in box[4]}
        regions.append({
            "shape_attributes": {
                "name": name_type,
                "x": box[0],
                "y": box[1],
                "width": box[2],
                "height": box[3],
            },
            "region_attributes": region_attributes,
        })

    item_dict = {
        "filename": filename,
        "size": filesize,
        "file_attributes": {},
        "regions": regions,
    }
    return filename + str(filesize), item_dict

def convert_to_poloygon_json_and_save_img(save_dir, image_aug, polygon_aug):
    """Save augmented images as imgaug_<idx>.jpg and write their polygon
    annotations (label fixed to "text") to via_region_data_ori.json."""
    via_dict = dict()
    for idx, (image, polygons) in enumerate(zip(image_aug, polygon_aug)):
        name = "imgaug_" + str(idx) + ".jpg"
        imageio.imwrite(save_dir + os.sep + name, image)

        regions = [
            {
                "shape_attributes": {
                    "name": 'polygon',
                    "all_points_x": [int(px) for px in plg.xx],
                    "all_points_y": [int(py) for py in plg.yy],
                },
                "region_attributes": {"label": "text"},
            }
            for plg in polygons
        ]
        via_dict[name] = {
            "filename": name,
            "regions": regions,
            "file_attributes": {},
        }

    with open(save_dir + os.sep + "via_region_data_ori.json", "w") as wf:
        wf.write(json.dumps(via_dict))

def convert_to_poloygon_json_and_save_img_2(save_dir, images, polygons):
    """Save images as <idx>.jpg and write their labelled polygons to
    via_region_data_ori.json (keys are filename+filesize)."""
    via_dict = dict()
    for idx, (image, polygon) in enumerate(zip(images, polygons)):
        if image is None:
            continue
        name = str(idx) + ".jpg"
        image_path = save_dir + os.sep + name
        imageio.imwrite(image_path, image)
        size = osp.getsize(image_path)

        regions = []
        for plg in polygon:
            # plg is (points, label); skip empty point lists.
            if len(plg[0]) == 0:
                continue
            regions.append({
                "shape_attributes": {
                    "name": 'polygon',
                    "all_points_x": [int(px) for px, py in plg[0]],
                    "all_points_y": [int(py) for px, py in plg[0]],
                },
                "region_attributes": {"label": plg[1]},
            })

        via_dict[name + str(size)] = {
            "filename": name,
            "size": size,
            "regions": regions,
            "file_attributes": {},
        }

    with open(save_dir + os.sep + "via_region_data_ori.json", "w") as wf:
        wf.write(json.dumps(via_dict))


def convert_to_via(data_ori_path, size_path):
    """Merge file sizes from `size_path` into the annotations from
    `data_ori_path` and write the result as via_region_data.json next to
    `data_ori_path`.  Filenames missing from the original annotations are
    skipped."""
    with open(data_ori_path, "r") as rf:
        ori_dict = json.loads(rf.read())
    # filename -> key of the original annotation dict
    name_to_key = {item["filename"]: key for key, item in ori_dict.items()}

    with open(size_path, "r") as rf:
        size_dict = json.loads(rf.read())

    merged = dict()
    for entry in size_dict.values():
        filename = entry["filename"]
        size = entry["size"]

        key = name_to_key.get(filename, None)
        if key is None:
            continue
        ori_dict[key]["size"] = size
        merged[filename + str(size)] = ori_dict[key]

    with open(os.path.dirname(data_ori_path) + os.sep + "via_region_data.json", "w") as wf:
        wf.write(json.dumps(merged))

def read_via_rect_to_point_imp(regions, default_label=None):
    """Convert VIA rect regions into [(x0, y0, x1, y1), label] items.

    Non-rect regions (no "x" key) are skipped.  `default_label` is used when a
    region carries no "label" attribute.
    """
    xys = []
    for one_label in regions:
        # Fix: membership test instead of truthiness — the original
        # `not get("x", None)` wrongly discarded valid boxes at x == 0.
        if "x" not in one_label["shape_attributes"]:
            continue

        x0 = one_label["shape_attributes"]["x"]
        y0 = one_label["shape_attributes"]["y"]
        x1 = one_label["shape_attributes"]["width"] + x0
        y1 = one_label["shape_attributes"]["height"] + y0
        label = one_label["region_attributes"].get("label", default_label)
        xys.append([(x0, y0, x1, y1), label])
    return xys

def read_via_poloygon_to_list_imp(regions):
    """Convert VIA polygon regions into ([(x, y), ...], label) tuples."""
    result = []
    for region in regions:
        shape = region["shape_attributes"]
        points = list(zip(shape["all_points_x"], shape["all_points_y"]))
        label = region["region_attributes"].get("label", None)
        result.append((points, label))
    return result

def read_via_polygon_to_rect_imp(regions, default_label=False, return_rect_style=False):
    """
    Convert polygon regions to axis-aligned bounding rectangles.

    default_label: falsy -> use each region's own "label"; otherwise this
        value overrides every region's label.
    return_rect_style: True -> xy is [x, y, w, h]; False -> the four corner
        points [(minx,miny), (maxx,miny), (maxx,maxy), (minx,maxy)].
    Returns a list of (xy, label) tuples.
    """
    xys = []
    for one_label in regions:
        xs = one_label["shape_attributes"]["all_points_x"]
        ys = one_label["shape_attributes"]["all_points_y"]
        label = default_label if default_label else one_label["region_attributes"].get("label", None)
        # Fix: builtin min/max instead of np.min/np.max — numpy scalars are
        # not JSON-serializable, so callers that json.dumps the result
        # (e.g. via_polygon_to_rect) would raise TypeError.
        minx, maxx = min(xs), max(xs)
        miny, maxy = min(ys), max(ys)

        if return_rect_style:
            xy = [minx, miny, maxx - minx, maxy - miny]
        else:
            xy = [(minx, miny), (maxx, miny), (maxx, maxy), (minx, maxy)]
        xys.append((xy, label))
    return xys

def read_via_rect_to_point(data_dir, via_name="via_region_data.json", default_label=None):
    """
    Read rect annotations, returning {image_path: [[(x0,y0,x1,y1), label], ...]}.
    Entries with no usable rect regions or a missing image file are skipped.
    Returns None when the VIA file itself does not exist.
    """
    via_path = data_dir + os.sep + via_name
    if not os.path.exists(via_path):
        return None

    with open(via_path, "r") as rf:
        data_dict = json.loads(rf.read())

    data_new = {}
    for entry in data_dict.values():
        xys = read_via_rect_to_point_imp(entry["regions"], default_label)
        # Skip entries with empty "regions".
        if len(xys) == 0:
            continue
        img_path = data_dir + os.sep + entry["filename"]
        # Skip entries whose image is missing on disk.
        if not os.path.exists(img_path):
            continue
        data_new[img_path] = xys
    return data_new

def read_via_poloygon_to_list(data_dir, via_name="via_region_data.json"):
    """Read polygon annotations, returning
    {image_path: [([(x, y), ...], label), ...]}."""
    with open(data_dir + os.sep + via_name, "r") as rf:
        data_dict = json.loads(rf.read())

    data_new = {}
    for entry in data_dict.values():
        xys = read_via_poloygon_to_list_imp(entry["regions"])
        data_new[data_dir + os.sep + entry["filename"]] = xys
    return data_new

def via_to_db(data_root, via_name='', gen_type="", ratio=0.8):
    """
    Convert VIA polygon annotations into DB-style text-detection label files,
    split into train_label.txt / test_label.txt by `ratio`.
    """
    via_files = filesystem.get_all_filepath(data_root + os.sep + "images", [via_name])

    total_data = []
    for via_path in via_files:
        path_dir = os.path.dirname(via_path)
        if not os.path.isdir(path_dir):
            continue
        print(via_path)

        with open(via_path) as rf:
            via_data = json.loads(rf.read())

        for key, value in via_data.items():
            sub_name = path_dir.replace(data_root + "/", "") + os.sep + value["filename"]
            items = []
            for region in value["regions"]:
                points = [[px, py] for px, py in zip(region["shape_attributes"]["all_points_x"],
                                                     region["shape_attributes"]["all_points_y"])]
                label = region["region_attributes"]["label"]
                # Dataset-specific filtering rules.
                if gen_type == "db_boeing" and label not in ["other"]:
                    items.append({"transcription": label, "points": points})
                elif gen_type == "db_plate":
                    items.append({"transcription": label, "points": points})
                else:
                    print("continue ", label)
                    continue
            total_data.append(sub_name + "\t" + json.dumps(items))

    np.random.shuffle(total_data)
    np.random.shuffle(total_data)
    split = int(len(total_data) * ratio)
    with open(data_root + os.sep + "train_label.txt", "w", encoding="utf-8") as wf:
        for line in total_data[:split]:
            wf.write(line + "\n")
    with open(data_root + os.sep + "test_label.txt", "w", encoding="utf-8") as wf:
        for line in total_data[split:]:
            wf.write(line + "\n")


def via_rect_to_polygon(data_dir, via_name="via_region_data.json", 
                                save_name="via_region_data_polygon.json"):
    """
    Convert a VIA rect-format annotation file into the polygon format.
    Entries with missing images or no rect regions are dropped.
    Returns None when the source file does not exist.
    """
    src_path = data_dir + os.sep + via_name
    if not os.path.exists(src_path):
        return None

    with open(src_path, "r") as rf:
        data_dict = json.loads(rf.read())

    data_new = {}
    for size_name, entry in data_dict.items():
        current_dict = {
            "filename": entry["filename"],
            "size": entry["size"],
            "file_attributes": {},
        }
        # Skip entries whose image no longer exists on disk.
        if not os.path.exists(data_dir + os.sep + current_dict["filename"]):
            continue

        xys = read_via_rect_to_point_imp(entry["regions"], default_label=None)
        # Skip entries with empty "regions".
        if len(xys) == 0:
            continue

        regions = []
        for (x0, y0, x1, y1), label in xys:
            regions.append({
                "shape_attributes": {
                    "name": 'polygon',
                    "all_points_x": [x0, x1, x1, x0],
                    "all_points_y": [y0, y0, y1, y1],
                },
                "region_attributes": {"label": label},
            })
        current_dict["regions"] = regions
        data_new[size_name] = current_dict

    with open(data_dir + os.sep + save_name, "w") as wf:
        wf.write(json.dumps(data_new))

def via_polygon_to_rect(data_dir, via_name="via_region_data_polygon.json", 
                                save_name="via_region_data.json"):
    """
    Convert a VIA polygon-format annotation file into the rect format.
    Entries with missing images or no regions are dropped.
    Returns None when the source file does not exist.
    """
    src_path = data_dir + os.sep + via_name
    if not os.path.exists(src_path):
        return None

    with open(src_path, "r") as rf:
        data_dict = json.loads(rf.read())

    data_new = {}
    for size_name, entry in data_dict.items():
        current_dict = {
            "filename": entry["filename"],
            "size": entry["size"],
            "file_attributes": {},
        }
        # Skip entries whose image no longer exists on disk.
        if not os.path.exists(data_dir + os.sep + current_dict["filename"]):
            continue

        xys = read_via_polygon_to_rect_imp(entry["regions"], default_label=False, return_rect_style=True)
        # Skip entries with empty "regions".
        if len(xys) == 0:
            continue

        regions = []
        for box, label in xys:
            regions.append({
                "shape_attributes": {
                    "name": 'rect',
                    "x": box[0],
                    "y": box[1],
                    "width": box[2],
                    "height": box[3],
                },
                "region_attributes": {"label": label},
            })
        current_dict["regions"] = regions
        data_new[size_name] = current_dict

    with open(data_dir + os.sep + save_name, "w") as wf:
        wf.write(json.dumps(data_new))

def resize_via_image(data_dir, mix_side_len=1280, via_name="via_region_data_ori.json"):
    """
    Resize every annotated image in place so that its longer side equals
    mix_side_len, scale the rect annotations by the same ratio, and rewrite
    the VIA file with refreshed sizes and filename+size keys.
    """
    last_dirs = filesystem.get_last_dir(data_dir)

    for last_dir in last_dirs:
        with open(last_dir + os.sep + via_name, "r") as rf:
            data_dict = json.loads(rf.read())
        data_new = {}

        for idx, (k, v) in tqdm(enumerate(data_dict.items())):
            current_dict = dict()
            filename = v["filename"]
            current_dict["filename"] = filename
            current_dict["file_attributes"] = {}

            img_path = last_dir + os.sep + filename
            # Skip entries whose image is missing on disk.
            if not os.path.exists(img_path): continue
            img = cv2.imread(img_path)
            h, w = img.shape[:2]
            print(h, w)
            ratio = mix_side_len / max(w, h)
            new_w = int(w * ratio)
            new_h = int(h * ratio)
            # Fix: interpolation must be passed by keyword — the third
            # positional argument of cv2.resize is `dst`, not interpolation.
            resized_img = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
            cv2.imwrite(img_path, resized_img)

            size = osp.getsize(img_path)
            current_dict["size"] = size

            xys = read_via_rect_to_point_imp(v["regions"])
            # Skip entries with empty "regions" (the image was still resized above).
            if len(xys) == 0: continue

            regions = []
            for box, l in xys:
                shape_attributes = dict()
                shape_attributes["name"] = 'rect'
                shape_attributes["x"] = int(box[0] * ratio)
                shape_attributes["y"] = int(box[1] * ratio)
                shape_attributes["width"] = int((box[2] - box[0]) * ratio)
                shape_attributes["height"] = int((box[3] - box[1]) * ratio)
                regions.append({"shape_attributes": shape_attributes, "region_attributes":{"label":l}})
            current_dict["regions"] = regions

            data_new[filename + str(size)] = current_dict

        with open(last_dir + os.sep + via_name, "w") as rf:
            rf.write(json.dumps(data_new))


def xml_to_via_json(data_dir, save_dir, voc_type=True, img_nums_per_dir=500):
    '''
    Convert Pascal-VOC style xml annotation files into VIA json, splitting the
    output into numbered sub-directories of at most img_nums_per_dir images.
    '''
    total_dict = dict()
    xml_files = filesystem.get_all_filepath(data_dir, [".xml"])
    print("xml_files: ", len(xml_files))
    for xml_file in xml_files:
        # Derive the image path belonging to this annotation file.
        if voc_type:
            img_path = xml_file.replace('.xml', '.jpg').replace(osp.sep + 'Annotations' + osp.sep, osp.sep + "JPEGImages" + osp.sep)
        else:
            img_path = xml_file.replace('.xml', '.jpg')
        file_name = os.path.basename(img_path)

        regions = []
        root = ET.parse(xml_file).getroot()
        for member in root.findall('object'):
            label = None
            x = y = width = height = None
            for mem in member:
                if mem.tag == "name":
                    label = mem.text
                if mem.tag == "bndbox":
                    xmin = xmax = ymin = ymax = None
                    for m in mem:
                        if m.tag == "xmin":
                            xmin = int(m.text)
                        if m.tag == "xmax":
                            xmax = int(m.text)
                        if m.tag == "ymin":
                            ymin = int(m.text)
                        if m.tag == "ymax":
                            ymax = int(m.text)
                    x = xmin
                    y = ymin
                    width = xmax - xmin
                    height = ymax - ymin
            # An object missing its name or any bndbox corner is skipped.
            if None in [label, x, y, width, height]:
                print("error:", xml_file)
                continue

            regions.append({
                'region_attributes': {'label': label},
                'shape_attributes': {
                    'x': x,
                    'y': y,
                    'width': width,
                    'height': height,
                    'name': 'rect',
                },
            })

        total_dict[img_path] = {
            'regions': regions,
            "filename": file_name,
            "size": osp.getsize(img_path),
            "file_attributes": {},
        }

    print("total_dict.keys(): ", len(total_dict.keys()))

    # Split the collected entries into chunks of at most img_nums_per_dir.
    keys = list(total_dict.keys())
    total_list = [
        {key: total_dict[key] for key in keys[i:i + img_nums_per_dir]}
        for i in range(0, len(keys), img_nums_per_dir)
    ]
    print("len(total_list): ", len(total_list))

    # Copy each chunk's images into its own numbered directory and write its VIA file.
    for idx, chunk in enumerate(total_list):
        save_dir_path = os.path.join(save_dir, str(idx))
        if not os.path.exists(save_dir_path):
            os.mkdir(save_dir_path)
        print("d.keys(): ", len(chunk.keys()))

        save_dict = dict()
        for src_path in chunk.keys():
            name = osp.basename(src_path)
            dst_path = osp.join(save_dir_path, name)
            shutil.copy(src_path, dst_path)
            save_dict[name + str(osp.getsize(dst_path))] = chunk[src_path]

        with open(os.path.join(save_dir_path, 'via_region_data.ori.json'), 'w') as wfjson:
            wfjson.write(json.dumps(save_dict,ensure_ascii=True))


def txt_to_via_json(data_root, gen_via_name='',
                        label_file_path="",
                        per_img_in_dir=1000,
                        thread_count=8):
    """
    Inverse of darknet_tool.deal_many_dir_local:
    merge directories of yolo txt labels back into via_region_data.json files.

    data_root       : root dir containing a "labels" tree of yolo .txt files
                      (images are expected in the sibling "images" tree)
    gen_via_name    : output json filename; empty falls back to
                      "via_region_data.json" (an empty name would otherwise
                      make the workers try to open the directory itself)
    label_file_path : unused, kept for interface compatibility
    per_img_in_dir  : forwarded to the worker (currently unused there)
    thread_count    : upper bound on the number of worker processes
    """
    txt_files = filesystem.get_all_filepath(data_root + os.sep + "labels", [".txt"])
    txt_dirs = set(osp.dirname(f) for f in txt_files)
    if not txt_dirs:
        # Pool(0) would raise ValueError; nothing to do anyway
        print("no txt label dirs found under ", data_root)
        return

    if not gen_via_name:
        gen_via_name = "via_region_data.json"

    pool = Pool(min(thread_count, len(txt_dirs)))
    results = []
    for one_dir in txt_dirs:
        print(one_dir)
        results.append(pool.apply_async(__convert_txt_to_via,
                args=(one_dir, per_img_in_dir, gen_via_name)))

    pool.close()
    pool.join()
    for r in results:
        r.get()  # surface worker exceptions instead of swallowing them silently


def __convert_txt_to_via(txt_dir, per_img_in_dir=1000, gen_via_name="via_region_data.json"):
    """Worker: convert one "labels" dir of yolo txt files into a via json
    written next to the images in the sibling "images" dir.

    txt_dir        : directory containing yolo .txt label files
    per_img_in_dir : unused, kept for interface compatibility
    gen_via_name   : filename of the generated via json
    """
    # labels/<sub> -> images/<sub>; no trailing sep (the original produced "//")
    image_dir = txt_dir.replace("/labels", "/images")

    data_dict = dict()
    for n in os.listdir(txt_dir):
        if not n.endswith(".txt"):
            continue  # ignore stray non-label files
        txt_path = txt_dir + os.sep + n
        image_path = image_dir + os.sep + n.replace(".txt", ".jpg")
        if not osp.exists(image_path):
            print("not exists, ", image_path)
            continue

        cv_img = cv2.imread(image_path)
        if cv_img is None:
            # cv2.imread returns None for unreadable/corrupt files
            print("read failed, ", image_path)
            continue
        h, w = cv_img.shape[:2]

        regions = []
        with open(txt_path, "r") as rf:
            for line in rf:
                outs = line.split()  # tolerant of repeated spaces / trailing newline
                if len(outs) != 5:
                    print("len(outs) != 5... ", txt_path)
                    continue

                region = dict()
                label = str(int(outs[0]))
                # yolo stores normalized center/size; convert to absolute top-left rect
                cx, cy = int(float(outs[1]) * w), int(float(outs[2]) * h)
                bw, bh = int(float(outs[3]) * w), int(float(outs[4]) * h)
                shape_attribute = {"name":"rect",
                                    "x":cx - bw//2,
                                    "y":cy - bh//2,
                                    "width":bw,
                                    "height":bh
                                }
                region["shape_attributes"] = shape_attribute
                region["region_attributes"] = {"label": label}
                regions.append(region)

        file_size = osp.getsize(image_path)
        # via convention: key is "<basename><filesize>"
        data_dict[osp.basename(image_path) + str(file_size)] = {"size": file_size,
                "filename": osp.basename(image_path),
                "regions": regions,
                "file_attributes":{}
            }

    with open(image_dir + os.sep + gen_via_name,"w") as wf:
        wf.write(json.dumps(data_dict))

def txt_to_via_json2(data_dir, per_img_in_dir=1000, gen_via_name="via_region_data.json"):
    """For every leaf directory under data_dir, merge the yolo .txt labels that
    sit next to each .jpg into a single via json in that directory.

    per_img_in_dir : unused, kept for interface compatibility
    gen_via_name   : filename of the generated via json
    """
    last_dirs = filesystem.get_last_dir(data_dir)

    for last_dir in last_dirs:
        data_dict = dict()
        for image_path in filesystem.get_all_filepath(last_dir, [".jpg"]):
            txt_path = image_path.replace(".jpg", ".txt")
            if not osp.exists(txt_path):
                print("not exists, ", txt_path)
                continue

            cv_img = cv2.imread(image_path)
            if cv_img is None:
                # cv2.imread returns None for unreadable/corrupt files
                print("read failed, ", image_path)
                continue
            h, w = cv_img.shape[:2]

            regions = []
            with open(txt_path, "r") as rf:
                for line in rf:
                    outs = line.split(" ")
                    if len(outs) < 5:
                        print("len(outs) < 5... ", txt_path)
                        continue

                    region = dict()
                    label = str(int(outs[0]))
                    # yolo normalized center/size -> absolute top-left rect
                    cx, cy = int(float(outs[1]) * w), int(float(outs[2]) * h)
                    bw, bh = int(float(outs[3]) * w), int(float(outs[4]) * h)
                    shape_attribute = {"name":"rect",
                                        "x":cx - bw//2,
                                        "y":cy - bh//2,
                                        "width":bw,
                                        "height":bh
                                    }
                    region["shape_attributes"] = shape_attribute
                    region["region_attributes"] = {"label": label}
                    regions.append(region)

            file_size = osp.getsize(image_path)
            # via convention: key is "<basename><filesize>"
            data_dict[osp.basename(image_path) + str(file_size)] = {"size": file_size,
                    "filename": osp.basename(image_path),
                    "regions": regions,
                    "file_attributes":{}
                }

        with open(last_dir + os.sep + gen_via_name,"w") as wf:
            wf.write(json.dumps(data_dict))


def merge_via_to_new_dir(via_data_dir, save_dir, via_name="via_region_data.json"):
    """Merge every via json found under via_data_dir into one json in save_dir,
    copying each labelled (non-empty, existing) image into save_dir as well."""
    os.makedirs(save_dir, exist_ok=True)
    json_files = filesystem.get_all_filepath(via_data_dir, [".json"])
    print("json_files: ", len(json_files))

    merged = dict()
    for json_path in json_files:
        src_dir = osp.dirname(json_path)
        with open(json_path, "r") as fp:
            entries = json.load(fp)

        for entry_key, entry in entries.items():
            src_img = src_dir + os.sep + entry["filename"]
            # skip annotations whose image is missing or that have no boxes
            if not osp.exists(src_img):
                continue
            if len(entry["regions"]) == 0:
                continue

            merged[entry_key] = entry
            shutil.copy(src_img, save_dir + os.sep + entry["filename"])

    with open(save_dir + os.sep + via_name, "w") as fp:
        fp.write(json.dumps(merged))


def via_to_txt_for_huawei(data_dir, type_name, 
                            val_ratio =0.1,
                            save_retain = False,
                            via_name='via_region_data.json'):
    """
    Export via annotations to the list format used by Huawei yolov3.

    Writes hw_train.txt / hw_val.txt into data_dir; each line is
    "<index> <image_path> <w> <h> <cls x0 y0 x1 y1 ...>".

    data_dir    : root searched recursively for via_name json files
    type_name   : dataset type forwarded to get_rects / get_label_idx
    val_ratio   : fraction of shuffled samples written to hw_val.txt
    save_retain : if False, an image containing a "retain" box is skipped entirely
    """
    all_files = filesystem.get_all_filepath(data_dir, [via_name])

    all_lines = []
    for f in tqdm(all_files):
        with open(f) as rf:
            train_labels = json.loads(rf.read())

        for p in train_labels:
            label_data = train_labels[p]
            path = osp.dirname(f) + os.sep + label_data["filename"]
            if not os.path.exists(path):
                print("not exists... ", path)
                continue
            img = cv2.imread(path)
            if img is None:
                # cv2.imread returns None for unreadable/corrupt files
                print("read failed... ", path)
                continue
            h,w = img.shape[:2]
            if len(label_data["regions"]) == 0:
                print("regions=0... ", path)
                continue

            boxes = get_rects(type_name, label_data, w, h)

            box_n = ""
            continue_cur = False
            for box in boxes:
                if box[4] is None:
                    print("None... box[4]", path)
                    continue
                if box[4] == "retain":
                    # "retain" marks the whole image as undecided
                    if not save_retain:
                        continue_cur = True
                    break
                # class index followed by absolute x0 y0 x1 y1
                box_n += "{} {} {} {} {} ".format(
                    get_label_idx(box[4], type_name), 
                    box[0],
                    box[1],
                    box[0]+box[2],
                    box[1]+box[3]
                )
            if continue_cur:continue

            all_lines.append("{} {} {} {}".format(path, w, h, box_n))

    length = len(all_lines)
    indices = np.arange(length)
    np.random.shuffle(indices)

    # with-blocks guarantee the output files are flushed and closed
    with open(data_dir + os.sep + "hw_train.txt", "w") as train_wf, \
         open(data_dir + os.sep + "hw_val.txt", "w") as val_wf:
        train_count = 0
        val_count = 0
        for idx,i in enumerate(indices):
            if idx < length * val_ratio:
                val_wf.write(str(val_count) + " " + all_lines[i] + "\n")
                val_count+=1
            else:
                train_wf.write(str(train_count) + " " + all_lines[i] + "\n")
                train_count+=1


def get_rects(type_name, label_data, w, h):
    """Dispatch to the per-dataset box extractor for a via label entry.

    Raises NotImplementedError for an unsupported type_name.
    """
    if type_name == "ct_meter":
        # imported lazily so unsupported type_names raise NotImplementedError
        # instead of failing on the import itself
        from tool.darknet_tool import __get_rects
        return __get_rects(label_data, w, h)
    raise NotImplementedError(type_name)


def get_label_idx(cls_name, type_name):
    """Map a class name to its integer label index for the given dataset type.

    Raises NotImplementedError for an unsupported type_name.
    """
    if type_name == "ct_meter":
        # imported lazily so unsupported type_names raise NotImplementedError
        # instead of failing on the import itself
        from tool.darknet_tool import __get_label_int_ct_meter
        return __get_label_int_ct_meter(cls_name)
    raise NotImplementedError(type_name)


def coco_to_via(data_dir, train_file="instance_train.json", 
                            test_file="instance_test.json", 
                            val_file="instance_val.json", 
                            via_name="via_region_data.json"):
    """Convert COCO instance annotations (train/test/val) back into via-format json.

    Merges the three COCO files, then for each image sub-directory copies the
    images under data_dir/images/<sub_dir> and writes a via json there with one
    rect region per COCO bbox.
    """
    coco = [train_file, test_file, val_file]
    images, categories, annotations = [], [], []
    for idx, coco_file in enumerate(coco):
        with open(osp.join(data_dir, coco_file)) as rf:
            data_dict = json.loads(rf.read())
            images.extend(data_dict["images"])
            categories.extend(data_dict["categories"])
            annotations.extend(data_dict["annotations"])

    img_id_dict = {}              # image id -> file_name recorded in the COCO json
    dir_dict = defaultdict(set)   # dir path -> {(split flag, relative file path)}
    file_dict = defaultdict(list) # relative file path -> [annotation boxes]
    for img in images:
        img_id_dict[img["id"]] = img["file_name"]

    categories_dict = {}
    for cate in categories:
        categories_dict[cate["id"]] = cate["name"]

    
    for box in annotations:
        file_path = img_id_dict[box["image_id"]]
        # NOTE(review): splits on os.sep, but COCO file_name entries usually use
        # "/" -- confirm behaviour on Windows
        subnames = file_path.split(os.sep)
        if subnames[0] == "": del subnames[0]
        # strip the leading train/test/val segment
        flag, file_path = subnames[0], "/".join(subnames[1:])
        dir_path = osp.dirname(file_path)
        dir_dict[dir_path].add((flag, file_path))
        file_dict[file_path].append(box)

    for sub_dir,file_list_tuple in dir_dict.items():
        save_dir = osp.join(data_dir, "images", sub_dir)
        if not osp.exists(save_dir):
            os.makedirs(save_dir)
        data_dict = {}
        for flag, file_path in file_list_tuple:
            if not exists(osp.join(data_dir, flag, file_path)):
                print("not exists...", osp.join(data_dir, flag, file_path))
                continue
            ori_file_path = osp.join(save_dir, osp.basename(file_path))
            # copy the source image into the new images/<sub_dir> layout once
            if not osp.exists(ori_file_path):
                shutil.copy(osp.join(data_dir, flag, file_path), ori_file_path)

            item = {}
            boxs = file_dict[file_path]
            regions = []
            for box in boxs:
                # COCO bbox is [x, y, width, height] -> via rect
                region = {
                    "region_attributes": {"label": categories_dict[box["category_id"]]},
                    "shape_attributes": {"name":"rect",
                                         "x": int(box["bbox"][0]),
                                         "y": int(box["bbox"][1]),
                                         "width": int(box["bbox"][2]),
                                         "height": int(box["bbox"][3])}
                }
                regions.append(region)
            item["regions"] = regions
            item["filename"] = osp.basename(file_path)

            size = osp.getsize(ori_file_path)
            item["size"] = size
            item["file_attributes"] = {}
            # via convention: key is "<basename><filesize>"
            data_dict[osp.basename(file_path)+str(size)] = item
        with open(osp.join(save_dir, via_name), 'w') as f:
            output_json = json.dumps(data_dict)
            f.write(output_json)

# batch-download sse labelling data
def download_sse_label_data_to_via(root_dir, sub_dir=None, via_path="via_region_data.json",
                                   url="http://localhost:3000/api/json/"):
    """Download sse labelling results for every image and save them in via format.

    root_dir : root of the image tree; paths sent to the service are relative to it
    sub_dir  : process only this directory instead of every leaf directory
    via_path : filename of the via json written into each image directory
    url      : base endpoint of the sse json service
    """
    if sub_dir:
        sub_dirs = [sub_dir]
    else:
        sub_dirs = filesystem.get_last_dir(root_dir)

    for img_dir in sub_dirs:
        data_dict = {}

        for f in filesystem.get_all_filepath(img_dir, [".jpg", ".png"]):
            # the service addresses files by their url-encoded path relative to root_dir
            relative_path = f.replace(root_dir, "").replace(os.sep, "%2F")
            resp = requests.get(url + relative_path, timeout=30)  # avoid hanging forever
            json_data = resp.json()

            file_size = osp.getsize(f)
            regions = []
            if json_data.get("objects", None):
                for plg in json_data["objects"]:
                    shape_attributes = dict()
                    shape_attributes["name"] = 'polygon'
                    shape_attributes["all_points_x"] = [int(p["x"]) for p in plg["polygon"]]
                    shape_attributes["all_points_y"] = [int(p["y"]) for p in plg["polygon"]]
                    regions.append({"shape_attributes": shape_attributes, "region_attributes":{"label":plg["label"]}})

            current_dict = dict()
            current_dict["filename"] = osp.basename(f)
            current_dict["size"] = file_size
            current_dict["regions"] = regions
            current_dict["file_attributes"] = {}
            data_dict[osp.basename(f)+str(file_size)] = current_dict

        with open(img_dir + os.sep + via_path, "w") as wf:
            wf.write(json.dumps(data_dict))

            
def gen_empty_label_via(data_dir, via_name="via_region_data.json"):
    """Write a via json into every leaf directory under data_dir, giving each
    jpg the same fixed placeholder "mask" polygon region."""
    placeholder_regions = [
        {"shape_attributes": {
            "name":"polygon",
            "all_points_x":[1,5,239,389,507,614,668,695,706,707,383],
            "all_points_y":[4,494,369,293,235,180,134,89,40,7,6]},
            "region_attributes":{"label":"mask"}
        }
    ]

    for leaf_dir in filesystem.get_last_dir(data_dir):
        via_dict = {}
        for img_path in filesystem.get_all_filepath(leaf_dir, [".jpg"]):
            img_name = osp.basename(img_path)
            img_size = osp.getsize(img_path)
            # via convention: key is "<basename><filesize>"
            via_dict[img_name + str(img_size)] = {
                "regions": placeholder_regions,
                "filename": img_name,
                "size": img_size,
                "file_attributes": {},
            }

        with open(osp.join(leaf_dir, via_name), 'w') as wf:
            wf.write(json.dumps(via_dict))


def mask_img_by_via_label(data_dir, remove_label=None, via_name="via_region_data_ori.json"):
    """
    Black-fill the pixel area of every rect whose label is in remove_label,
    and drop those regions from the via annotation files.

    data_dir     : root searched recursively for via_name files
    remove_label : list of labels to erase (None/empty means nothing to erase)
    """
    if remove_label is None:  # avoid the mutable-default-argument pitfall
        remove_label = []

    via_files = filesystem.get_all_filepath(data_dir, [via_name])
    for f in via_files:
        with open(f,"r") as rf:
            data_dict = json.loads(rf.read())
        cur_dir = osp.dirname(f)

        for k,v in data_dict.items():
            img_path = cur_dir + os.sep + v["filename"]
            # skip entries whose image file is gone
            if not os.path.exists(img_path): continue
            img = cv2.imread(img_path)
            if img is None:
                # cv2.imread returns None for unreadable/corrupt files
                print("read failed, ", img_path)
                continue

            kept_regions = []
            masked = False
            for region in v["regions"]:
                if region["region_attributes"]["label"] not in remove_label:
                    kept_regions.append(region)
                    continue
                sa = region["shape_attributes"]
                img[sa["y"]: sa["y"]+sa["height"], sa["x"]: sa["x"]+sa["width"]] = 0
                masked = True

            if masked:
                # only rewrite when pixels changed -- rewriting unchanged jpgs
                # would re-encode (and degrade) them for nothing
                cv2.imwrite(img_path, img)
            data_dict[k]["regions"] = kept_regions

        with open(f,"w") as wf:
            wf.write(json.dumps(data_dict))



if __name__ == "__main__":
    # Ad-hoc driver: uncomment the task you need. Only the
    # rename_file_and_update_json2 call below is currently active.

    # root_dir = "/home/xc/work/code/paddle/train_data/seg/slope/images"
    # # download a single sub-directory
    # sub_dir = sys.argv[1]
    # download_sse_label_data_to_via(root_dir, sub_dir)

    # labels = ["scar", "bcar"]
    # data_dir = sys.argv[1]
    # remove_box_by_label(data_dir, labels)

    # data_dir = r"F:\work\dataset\det\rgl\video.pic"
    # save_dir = r"F:\work\dataset\det\rgl\video.pic\images.1"
    # crop_img_for_cls_by_via(data_dir, save_dir)

    # restore the cropped & curated per-class images back onto the originals
    img_dir = r"F:\work\dataset\det\rgl\video.pic\images"
    data_dir = r"F:\work\dataset\det\rgl\video.pic"
    # save_via_name = "via_region_data.clear1.json"
    # save_via_name = "via_region_data.boat.json"
    # make_new_via_from_crop_img(img_dir, data_dir, save_via_name)

    # copy the specified via folders into a target directory
    # data_dir = r"F:\work\dataset\det\rgl\video.pic"
    # save_dir = r"F:\work\dataset\det\black_smoke\2023.1101"
    # via_name = "via_region_data.boat.json"
    # copy_via_dir(data_dir, save_dir, via_name)
    # # remove_via_file(sys.argv[1])
    
    # data_dir = ""
    # via_rect_to_polygon(data_dir)

    # data_dir = "/home/swls/work_dir/ocr/code/yolo_train/yolo_ct_meter_2"
    # type_name = "ct_meter"
    # via_to_txt_for_huawei(data_dir, type_name, save_retain=False)

    # data_dir = r"F:\work\dataset\det\water\garbage\2025.0513\06" 
    # save_dir = r"F:\work\dataset\det\water\garbage\2025.0513\06"
    # voc_type = True
    # # voc_type = False
    # img_nums_per_dir = 500
    # xml_to_via_json(data_dir, save_dir, voc_type, img_nums_per_dir)

    # rename files and sync the keys in "via_region_data.ori.json" accordingly
    via_data_dir = r"F:\work\dataset\det\water\garbage\2025.0513"
    rename_file_and_update_json2(via_data_dir, "via_region_data.ori.json")

    # merge_via_to_new_dir(via_data_dir, via_data_dir)

    # data_dir = sys.argv[1]
    # generate_black_label_for_train(data_dir)
    

    # data_dir = sys.argv[1]
    # remove_none_image(data_dir)

    # data_dir = sys.argv[1]
    # remove_not_label_img_dir(data_dir)

    # data_dir = sys.argv[1]
    # gen_empty_label_via(data_dir)

    # data_dir = "/home/xc/work/code/paddle/train_data/det/car"  # sys.argv[1]
    # coco_to_via(data_dir)


    # data_dir = sys.argv[1]
    # point = [483, 214]
    # remove_box_by_point(data_dir, point)

    # data_dir = r"F:\work\dataset\det\rgl\images\2021\11\17"
    # rename_file_and_update_json(data_dir)

    # data_dir = sys.argv[1]
    # remove_duplicate_box(data_dir)


    # data_dir = sys.argv[1]
    # for p in filesystem.get_all_filepath(data_dir, [".json"]):
    #     print(p)
    #     remove_duplicate_box(osp.dirname(p))


    # # # 
    # data_root = "/home/xc/hyt/yolov5-6.0/datasets/VisDrone/VisDrone2019-DET-train"
    # gen_via_name = "via_region_data.json"
    # txt_to_via_json(data_root, gen_via_name)


    # via_update_file_size(r"E:\work\dataset\video\boat\mda-nfppn2rqug2yb7th\1_deal")

    # "file_attributes": {"blur":[[463, 120, 465, 197, 530, 205, 513, 125]]} 
    # erase polygon image regions

    # data_dir = sys.argv[1]
    # resize_via_image(data_dir, mix_side_len=1280, via_name="via_region_data.json")

    # data_dir = sys.argv[1]
    # txt_to_via_json2(data_dir)

    # data_dir="/home/xc/work/code/paddle/train_data/det/car/images/102" 
    # remove_label=["other"]
    # via_name="via_region_data.merge.json"
    # mask_img_by_via_label(data_dir, remove_label, via_name)

    # path = r"E:\work\dataset\det\sucai\4\4\0.ok\via_region_data.car5.json"
    # with open(path, "r") as rf:
    #     d = json.load(rf)
    # newd = {}
    # for idx, [key,value] in enumerate(d.items()):

    #     regions = []
    #     for region in value["regions"]:
    #         # x = min(region["shape_attributes"]["all_points_x"])
    #         x = region["shape_attributes"]["x"]
    #         y = region["shape_attributes"]["y"]
    #         w = region["shape_attributes"]["width"]
    #         h = region["shape_attributes"]["height"]

    #         if idx > 100:
    #             region["shape_attributes"]["x"] = 1
    #             region["shape_attributes"]["y"] = y
    #             region["shape_attributes"]["width"] = x + w
    #             region["shape_attributes"]["height"] = h

    #         # if idx > 474: 
    #         #     continue
    #         # if x < 2100:
    #         #     continue
    #         # label = region["region_attributes"].get("label", None)
    #         # if label == "empty":
    #         #     continue
    #         regions.append(region)
    #         # print(label)
    #     value["regions"] = regions
    #     newd[key] = value
    # with open(path, "w") as wf:
    #     wf.write(json.dumps(d))

