from ctypes import CDLL,byref,create_string_buffer
from ctypes import cdll
from collections import defaultdict
from genericpath import exists
import os, shutil, io, sys
import os.path as osp
import json, time
import numpy as np
import cv2
import copy
import base64, requests
from multiprocessing import Pool
from tqdm import tqdm
from PIL import Image
from tool import filesystem, via_tool, darknet_tool, opencv_tool # export PYTHONPATH=$PYTHONPATH:`pwd`



def merge_label_for_rgl(data_dir, via_name="via_region_data.rec.json", save_save_via_name="via_region_data.car.json"):
    """
    Merge the annotation files found under *data_dir* into one per leaf dir.

    Every *via_name* file below *data_dir* is read and its entries pooled
    into a single lookup keyed by image filename (entries from files read
    later overwrite earlier ones).  Then, for each leaf directory, a new VIA
    file named *save_save_via_name* is written containing the pooled entry
    for every .jpg/.png image found there, re-keyed as filename + filesize
    (the key format the VIA tool uses).
    """
    # Pool every annotation entry, keyed by plain image filename.
    pooled = {}
    for via_path in filesystem.get_all_filepath(data_dir, [via_name]):
        with open(via_path, "r") as fp:
            for entry in json.loads(fp.read()).values():
                pooled[entry["filename"]] = entry

    # Re-emit one merged annotation file per leaf directory.
    for leaf_dir in filesystem.get_last_dir(data_dir):
        merged = {}
        for img_path in filesystem.get_all_filepath(leaf_dir, [".jpg", ".png"]):
            img_name = osp.basename(img_path)
            img_size = osp.getsize(img_path)
            merged[img_name + str(img_size)] = pooled[img_name]

        with open(osp.join(leaf_dir, save_save_via_name), "w") as out:
            out.write(json.dumps(merged))


def rename_via_filename(data_dir, ori_via_name="via_region_data.json", dst_via_name="via_region_data.rgl.json"):
    """Rename every *ori_via_name* annotation file under *data_dir* to *dst_via_name*, in place."""
    for src_path in filesystem.get_all_filepath(data_dir, [ori_via_name]):
        os.rename(src_path, osp.join(osp.dirname(src_path), dst_via_name))



# Move images that have no usable annotation out of their directory
def remove_not_label_img_dir(data_dir, via_name="via_region_data.rgl.json", save_via_name="via_region_data.rgl.label.json"):
    """
    Move images without a (non-empty) annotation out of their directory.

    For every *via_name* file under *data_dir*: images in the same directory
    whose annotation entry is missing, or whose "regions" list is empty, are
    moved into a sibling directory named "<dir>.nolabel".  The remaining
    (labelled) entries are written to *save_via_name* in the original
    directory.
    """
    via_files = filesystem.get_all_filepath(data_dir, [via_name])

    for via_file in via_files:
        cur_dir = osp.dirname(via_file)
        with open(via_file) as f:
            train_labels = json.loads(f.read())

        # Index annotation keys by image filename once; the original rescanned
        # every key per image, which is O(images * labels).  setdefault keeps
        # the FIRST key seen for a filename, matching the old break-on-first
        # behaviour.  (Also drops the local `exists` that shadowed the
        # module-level genericpath.exists import.)
        key_by_name = {}
        for k, v in train_labels.items():
            key_by_name.setdefault(v["filename"], k)

        # Destination directory for unlabeled images.
        save_root = cur_dir + ".nolabel"
        os.makedirs(save_root, exist_ok=True)

        save_dict = {}
        move_list = []
        all_images = filesystem.get_all_filepath(cur_dir, [".jpg"])
        for path in all_images:
            name = os.path.basename(path)
            k = key_by_name.get(name)
            # Keep the image only if it has an entry with at least one region.
            labelled = k is not None and len(train_labels[k]["regions"]) != 0
            if labelled:
                save_dict[k] = train_labels[k]
            else:
                print(path)
                move_list.append(path)
                shutil.move(path, save_root + os.sep + name)

        save_via_path = osp.join(cur_dir, save_via_name)
        with open(save_via_path, "w") as wf:
            wf.write(json.dumps(save_dict))

def remove_not_label_img_dir2(data_dir, via_name="via_region_data.json", save_via_name="via_region_data.clear1.json"):
    """
    Move images that carry no "ringelman" annotation out of their directory.

    Like remove_not_label_img_dir, but an image only counts as labelled when
    at least one of its regions has region_attributes["label"] == "ringelman".
    Unlabeled images are moved to "<dir>.nolabel"; the kept entries are
    written to *save_via_name* in the original directory.
    """
    via_files = filesystem.get_all_filepath(data_dir, [via_name])

    for via_file in via_files:
        cur_dir = osp.dirname(via_file)
        with open(via_file) as f:
            train_labels = json.loads(f.read())

        # Group annotation keys by image filename once instead of rescanning
        # every key per image (was O(images * labels)).  A filename can map to
        # several keys; they are checked in file order, exactly as before.
        keys_by_name = defaultdict(list)
        for k, v in train_labels.items():
            keys_by_name[v["filename"]].append(k)

        # Destination directory for unlabeled images.
        save_root = cur_dir + ".nolabel"
        os.makedirs(save_root, exist_ok=True)

        save_dict = {}
        move_list = []
        all_images = filesystem.get_all_filepath(cur_dir, [".jpg"])
        for path in all_images:
            name = os.path.basename(path)
            exists = False
            for k in keys_by_name.get(name, ()):
                for rgn in train_labels[k]["regions"]:
                    if rgn["region_attributes"]["label"] == "ringelman":
                        exists = True
                if exists:
                    save_dict[k] = train_labels[k]
                    break

            if not exists:
                print(path)
                move_list.append(path)
                shutil.move(path, save_root + os.sep + name)

        save_via_path = osp.join(cur_dir, save_via_name)
        with open(save_via_path, "w") as wf:
            wf.write(json.dumps(save_dict))



def make_car_label_img_dir(data_dir, via_name="via_region_data.rec.json", save_via_name="via_region_data.car.label.json"):
    """
    For every *via_name* file under *data_dir*, write a companion
    *save_via_name* file in the same directory that keeps only the regions
    labelled "car", keyed as filename + filesize.

    NOTE(review): entries accumulate in one lookup shared across directories,
    and each entry's "regions" list is filtered in place — that behaviour is
    preserved as-is; confirm it is intended for duplicate filenames.
    """
    total_dict = {}
    for via_file in filesystem.get_all_filepath(data_dir, [via_name]):
        cur_dir = osp.dirname(via_file)
        with open(via_file) as f:
            for entry in json.loads(f.read()).values():
                total_dict[entry["filename"]] = entry

        save_dict = {}
        for img_path in filesystem.get_all_filepath(cur_dir, [".jpg", ".png"]):
            filename = osp.basename(img_path)
            filesize = osp.getsize(img_path)
            entry = total_dict[filename]
            # Keep only the car boxes (filters the shared entry in place).
            entry["regions"] = [r for r in entry["regions"]
                                if r["region_attributes"]["label"] == "car"]
            save_dict[filename + str(filesize)] = entry

        with open(osp.join(cur_dir, save_via_name), "w") as wf:
            wf.write(json.dumps(save_dict))


def merge_car_rgl_via_file(data_dir, car_via_name="via_region_data.car.json", rgl_via_name="via_region_data.rgl.json", save_via_name="via_region_data.clear1.json"):
    """
    Merge per-directory car and ringelman VIA files into one *save_via_name*.

    In every leaf directory both input files are loaded; every car region is
    relabelled "car" and every ringelman region "ringelman".  For each image
    in the directory the two region lists are concatenated and written out
    keyed as filename + filesize.
    """

    def _load_relabelled(via_path, label):
        # Read one VIA file, force every region's label to *label*, and
        # return its entries keyed by plain image filename.
        by_name = {}
        with open(via_path) as f:
            for entry in json.loads(f.read()).values():
                for region in entry["regions"]:
                    region["region_attributes"]["label"] = label
                by_name[entry["filename"]] = entry
        return by_name

    for leaf_dir in filesystem.get_last_dir(data_dir):
        car_by_name = _load_relabelled(osp.join(leaf_dir, car_via_name), "car")
        rgl_by_name = _load_relabelled(osp.join(leaf_dir, rgl_via_name), "ringelman")

        print(leaf_dir)
        # Merge: car boxes first, then ringelman boxes for the same image.
        merged = {}
        for img_path in filesystem.get_all_filepath(leaf_dir, [".jpg", ".png"]):
            filename = osp.basename(img_path)
            filesize = osp.getsize(img_path)
            entry = car_by_name[filename]
            entry["regions"] = entry["regions"] + rgl_by_name[filename]["regions"]
            merged[filename + str(filesize)] = entry

        with open(osp.join(leaf_dir, save_via_name), "w") as wf:
            wf.write(json.dumps(merged))
            


def find_box_from_via_file(via_dir, data_dir, via_name="via_region_data.json", save_via_name="via_region_data.clear1.json"):
    """
    Collect the annotation entries from *via_dir* for the images in *data_dir*.

    All *via_name* files under *via_dir* are merged (later files win on key
    collisions), then every entry whose filename matches a .jpg in *data_dir*
    is written — under its original key — to *save_via_name* inside *data_dir*.
    """
    total_labels = {}
    for via_file in filesystem.get_all_filepath(via_dir, [via_name]):
        with open(via_file) as f:
            total_labels.update(json.loads(f.read()))

    # Index entries by image filename once, instead of scanning every label
    # per image (was O(images * labels)).  A filename may map to several
    # keys; all of them are kept, exactly as before.
    items_by_name = defaultdict(list)
    for k, v in total_labels.items():
        items_by_name[v["filename"]].append((k, v))

    save_dict = {}
    for path in filesystem.get_all_filepath(data_dir, [".jpg"]):
        name = os.path.basename(path)
        for k, v in items_by_name.get(name, ()):
            save_dict[k] = v

    save_via_path = osp.join(data_dir, save_via_name)
    with open(save_via_path, "w") as wf:
        wf.write(json.dumps(save_dict))


def remove_img_by_labels(data_dir, save_step=3, ori_via="via_region_data.rec.json", save_via_name="via_region_data.json"):
    """
    Thin out frames: keep every *save_step*-th image in frame order, plus the
    frames following a kept image that has more than one "person" box.  Only
    "person" regions are retained in the saved entries, which are written to
    *save_via_name* in each directory.

    BUGFIX: the (prefix, frame-number) sort was computed but the UNSORTED
    file list was iterated, so the `idx % save_step` sampling ran in
    arbitrary filesystem order.  The sorted order is now actually used.
    """
    ori_dirs = filesystem.get_last_dir(data_dir, -1)

    for ori_dir in ori_dirs:
        via_file = osp.join(ori_dir, ori_via)
        with open(via_file) as f:
            via_data = json.loads(f.read())

        # Re-key entries by plain filename for direct lookup.
        new_via_data = {}
        save_via_data = {}
        for k, v in via_data.items():
            new_via_data[v["filename"]] = v

        img_files = filesystem.get_all_filepath(ori_dir, [".jpg"])

        # Sort by (prefix, frame number) parsed from "<prefix>_<frame>.jpg".
        sort_data = [[(osp.basename(p).split("_")[0], int(osp.basename(p).split("_")[1].split(".")[0])), p] for p in img_files]
        sort_data = sorted(sort_data, key=lambda x: x[0])

        save_flag = 0
        # Iterate the SORTED list (was: img_files, ignoring the sort above).
        for idx, (_, img_file) in enumerate(sort_data):
            filename = osp.basename(img_file)

            # Keep only the person boxes.
            regions = [r for r in new_via_data[filename]["regions"]
                       if r["region_attributes"]["label"] == "person"]

            if idx % save_step == 0 or save_flag == 1:
                if idx % save_step == 0:
                    # A sampled frame with >1 person starts a run of kept frames.
                    save_flag = 1 if len(regions) > 1 else 0

                key = filename + str(new_via_data[filename]["size"])
                new_via_data[filename]["regions"] = regions
                save_via_data[key] = new_via_data[filename]

        save_via_path = osp.join(ori_dir, save_via_name)
        with open(save_via_path, "w") as wf:
            wf.write(json.dumps(save_via_data))


# Move images that are not recorded in the annotation file out of the directory
def remove_not_label_img(data_dir, via_name="via_region_data.json"):
    """
    Move images with no entry in their directory's *via_name* file into a
    sibling directory named "<dir>.0".
    """
    for via_file in filesystem.get_all_filepath(data_dir, [via_name]):
        cur_dir = osp.dirname(via_file)
        with open(via_file) as f:
            train_labels = json.loads(f.read())

        # Filenames that do have an annotation entry.
        labelled = {v["filename"] for v in train_labels.values()}

        # Destination directory for unlisted images.
        save_root = cur_dir + ".0"
        os.makedirs(save_root, exist_ok=True)

        for path in filesystem.get_all_filepath(cur_dir, [".jpg"]):
            filename = os.path.basename(path)
            if filename not in labelled:
                shutil.move(path, save_root + os.sep + filename)


def merge_via_and_split_to_new_dir(data_dir, save_idx, per_img_in_dir=500, ori_via_name="via_region_data.json", save_via_name="via_region_data.json"):
    """
    Merge all annotations under *data_dir* and redistribute the images into
    numbered sub-directories of *per_img_in_dir* images each, with directory
    names starting at *save_idx*.  Each new directory gets its own
    *save_via_name* file keyed as filename + size.

    FIX: images with no annotation entry are now skipped before the move
    (same guard as merge_via_and_split_to_new_dir2).  Previously such an
    image was moved first and the lookup then raised KeyError, leaving the
    tree half-reorganised.
    """
    json_files = filesystem.get_all_filepath(data_dir, [ori_via_name])
    print("json_files: ", len(json_files))

    # Merge every via file into one lookup keyed by plain image filename.
    total_via_data = {}
    for json_file in json_files:
        with open(json_file, "r") as rf:
            via_data = json.load(rf)
        for k, v in via_data.items():
            total_via_data[v["filename"]] = v

    # Gather every .jpg that lives next to one of the via files.
    img_files = []
    for json_file in json_files:
        cur_dir = osp.dirname(json_file)
        cur_img_files = filesystem.get_all_filepath(cur_dir, [".jpg"])
        img_files.extend(cur_img_files)

    # Number of target directories (round up).
    dir_cnt = len(img_files) // per_img_in_dir
    dir_cnt = dir_cnt if len(img_files) % per_img_in_dir == 0 else dir_cnt + 1

    # Sort by (prefix, frame number) parsed from "<prefix>_<frame>.jpg".
    sort_data = [[(osp.basename(p).split("_")[0], int(osp.basename(p).split("_")[1].split(".")[0])), p] for p in img_files]
    sort_data = sorted(sort_data, key=lambda x: x[0])

    for dir_idx in range(dir_cnt):
        s = dir_idx * per_img_in_dir
        e = (dir_idx + 1) * per_img_in_dir
        save_dir = osp.join(data_dir, str(dir_idx + save_idx))
        os.makedirs(save_dir, exist_ok=True)
        save_data = {}
        for idx in range(s, e):
            if idx >= len(sort_data):
                continue
            ori_path = sort_data[idx][1]
            filename = osp.basename(ori_path)
            if total_via_data.get(filename) is None:
                # No annotation for this image: leave it in place.
                continue
            shutil.move(ori_path, osp.join(save_dir, filename))
            key = filename + str(total_via_data[filename]["size"])
            save_data[key] = total_via_data[filename]

        with open(save_dir + os.sep + save_via_name, "w") as wf:
            wf.write(json.dumps(save_data))
    

def merge_via_and_split_to_new_dir2(data_dir, save_idx, per_box_in_dir=500, max_img_in_dir=750, ori_via_name="via_region_data.json", save_via_name="via_region_data.json"):
    """
    Merge all annotations under *data_dir*, then redistribute the images into
    numbered sub-directories (names starting at *save_idx*), rolling over to
    a new directory once the current one holds >= *per_box_in_dir* annotation
    boxes or more than *max_img_in_dir* images.  Each directory receives its
    own *save_via_name* file keyed as filename + size.  Images with no
    annotation entry are left where they are.

    NOTE(review): after the final flush one extra directory is created just
    before the loop exits, and it may stay empty — confirm this is intended.
    """
    json_files = filesystem.get_all_filepath(data_dir, [ori_via_name])
    print("json_files: ", len(json_files))

    # Merge every via file into one lookup keyed by plain image filename
    # (later files overwrite earlier ones on filename collisions).
    total_via_data = {}
    for json_file in json_files:
        with open(json_file, "r") as rf:
            via_data = json.load(rf)
        for k,v in via_data.items():
            total_via_data[v["filename"]] = v
        
    # Gather every .jpg that lives next to one of the via files.
    img_files = []
    for json_file in json_files:
        cur_dir = osp.dirname(json_file)
        cur_img_files = filesystem.get_all_filepath(cur_dir, [".jpg"])
        img_files.extend(cur_img_files)
    # dir_cnt = len(img_files) // per_img_in_dir
    # dir_cnt = dir_cnt if len(img_files) % per_img_in_dir == 0 else dir_cnt+1
    

    # sort by name
    # sort_data = [[(osp.basename(p).split("_")[0], int(osp.basename(p).split("_")[1].split(".")[0])), p] for p in img_files]
    # sort_data = sorted(sort_data, key=lambda x: x[0])

    # Unsorted (filename, path) pairs — the frame-number sort above is disabled.
    sort_data = [[osp.basename(p), p] for p in img_files]
    # sort_data = sorted(sort_data, key=lambda x: x[0])

    img_idx = 0       # cursor into sort_data
    cur_box_cnt = 0   # boxes accumulated in the current output directory
    cur_img_cnt = 0   # images moved into the current output directory
    dir_idx = 0
    cur_save_dir = osp.join(data_dir, str(dir_idx + save_idx))
    os.makedirs(cur_save_dir, exist_ok=True)
    cur_save_data = {}
    while True:
        # dir_idx +=1
        if img_idx >= len(sort_data):
            # Out of images: flush the partial directory and stop.
            with open(cur_save_dir + os.sep + save_via_name, "w") as wf:
                wf.write(json.dumps(cur_save_data))
            cur_save_data = {}
            cur_box_cnt = 0
            dir_idx += 1
            cur_save_dir = osp.join(data_dir, str(dir_idx + save_idx))
            os.makedirs(cur_save_dir, exist_ok=True)
            break

        ori_path = sort_data[img_idx][1]
        img_idx += 1
        filename = osp.basename(ori_path)
        if total_via_data.get(filename, None) is None:
            # No annotation for this image: leave it in place.
            continue
        shutil.move(ori_path, osp.join(cur_save_dir, osp.basename(ori_path)))
        key = filename + str(total_via_data[filename]["size"])
        cur_save_data[key] = total_via_data[filename]

        cur_box_cnt += len(total_via_data[filename]["regions"])
        cur_img_cnt += 1
        if cur_box_cnt >= per_box_in_dir or cur_img_cnt > max_img_in_dir:
            # Quota reached: write this directory's via file and start a new one.
            with open(cur_save_dir + os.sep + save_via_name, "w") as wf:
                wf.write(json.dumps(cur_save_data))
            cur_save_data = {}
            cur_box_cnt = 0
            cur_img_cnt = 0
            dir_idx += 1
            cur_save_dir = osp.join(data_dir, str(dir_idx + save_idx))
            os.makedirs(cur_save_dir, exist_ok=True)



def remove_large_box(data_dir, via_name="via_region_data.json"):
    """
    Drop annotation entries for images that contain an oversized box.

    For every *via_name* file under *data_dir*: an image is discarded when
    any of its regions is wider than 30% of the image width AND taller than
    80% of the image height.  The surviving entries are written back to the
    same *via_name* file, keyed as filename + size.
    """
    json_files = filesystem.get_all_filepath(data_dir, [via_name])
    print("json_files: ", len(json_files))

    for json_file in json_files:
        with open(json_file, "r") as rf:
            via_data = json.load(rf)

        # Re-key entries by plain image filename.
        by_name = {v["filename"]: v for v in via_data.values()}

        cur_dir = osp.dirname(json_file)

        kept = {}
        for img_path in filesystem.get_all_filepath(cur_dir, [".jpg"]):
            # The image size is needed to judge the relative box size.
            img_h, img_w = cv2.imread(img_path).shape[:2]
            filename = osp.basename(img_path)
            entry = by_name.get(filename)
            if entry is None:
                continue

            has_large = any(
                rgn["shape_attributes"]["width"] > img_w * 0.3
                and rgn["shape_attributes"]["height"] > img_h * 0.8
                for rgn in entry["regions"]
            )
            if not has_large:
                kept[filename + str(entry["size"])] = entry

        with open(cur_dir + os.sep + via_name, "w") as wf:
            wf.write(json.dumps(kept))
    


if __name__ == "__main__":
    # 1. Each directory has already been run through detection and has a
    #    generated annotation file.
    # 1. Heavily thin out images that contain only a single detection box.
    # data_dir = r"F:\work\dataset\det\water\2025\0319\02"
    # remove_img_by_labels(data_dir)

    # 2. Remove images that have no annotation.
    # data_dir = r"F:\work\dataset\det\water\2025\0319\03"
    # remove_not_label_img(data_dir)

    # 2.5 Remove overly large detection boxes.
    # data_dir = r"F:\work\dataset\det\water\2025\0319\03"
    # remove_large_box(data_dir)

    # # 3. Merge, then redistribute by image count.
    # data_dir = r"F:\work\dataset\det\water\2025\0319\02"
    # merge_via_and_split_to_new_dir(data_dir, 6000)

    # 3. Merge, then redistribute by annotation-box count.
    data_dir = r"F:\work\dataset\det\water\2025.0601\filter"
    ori_via_name="via_region_data.rec.json"
    save_via_name="via_region_data.rec.json"
    max_box = 1000
    max_img = 750
    merge_via_and_split_to_new_dir2(data_dir, 3700, max_box, max_img, ori_via_name=ori_via_name, save_via_name=save_via_name)
