from ctypes import CDLL,byref,create_string_buffer
from ctypes import cdll
from collections import defaultdict
from genericpath import exists
import os, shutil, io, sys
import os.path as osp
import json, time
import numpy as np
import cv2
import copy
import base64, requests
from multiprocessing import Pool
from tqdm import tqdm
from PIL import Image
from tool import filesystem, via_tool, darknet_tool, opencv_tool # export PYTHONPATH=$PYTHONPATH:`pwd`



def merge_label_for_rgl(data_dir, via_name="via_region_data.rec.json", save_save_via_name="via_region_data.car.json"):
    """Merge the annotation files found under *data_dir* into one file per leaf directory.

    Every ``via_name`` JSON found anywhere below *data_dir* is read and its
    entries are pooled by image filename (later files win on collisions).
    Then, for each leaf directory, a new VIA file named *save_save_via_name*
    is written containing the pooled entry for every ``.jpg``/``.png`` image
    it holds, keyed by ``filename + filesize`` (the VIA key convention).

    Raises:
        KeyError: if an image in a leaf directory has no pooled annotation.
    """
    pooled = {}
    for via_path in filesystem.get_all_filepath(data_dir, [via_name]):
        with open(via_path, "r") as rf:
            for entry in json.loads(rf.read()).values():
                pooled[entry["filename"]] = entry

    for leaf_dir in filesystem.get_last_dir(data_dir):
        merged = {}
        for img_path in filesystem.get_all_filepath(leaf_dir, [".jpg", ".png"]):
            name = osp.basename(img_path)
            size = osp.getsize(img_path)
            merged[name + str(size)] = pooled[name]

        out_path = osp.join(leaf_dir, save_save_via_name)
        with open(out_path, "w") as wf:
            wf.write(json.dumps(merged))


def rename_via_filename(data_dir, ori_via_name="via_region_data.json", dst_via_name="via_region_data.rgl.json"):
    """Rename every annotation file named *ori_via_name* under *data_dir*
    to *dst_via_name*, keeping each file in its own directory."""
    for src_path in filesystem.get_all_filepath(data_dir, [ori_via_name]):
        os.rename(src_path, osp.join(osp.dirname(src_path), dst_via_name))



# Move un-annotated images out of their directory.
def remove_not_label_img_dir(data_dir, via_name="via_region_data.rgl.json", save_via_name="via_region_data.rgl.label.json"):
    """For every *via_name* file found under *data_dir*: move each ``.jpg``
    in that directory that has no annotation entry — or an entry with an
    empty region list — into a sibling ``<dir>.nolabel`` directory, and
    write the surviving entries to *save_via_name* in the original directory.
    """
    via_files = filesystem.get_all_filepath(data_dir, [via_name])

    for via_file in via_files:
        cur_dir = osp.dirname(via_file)
        with open(via_file) as f:
            train_labels = json.loads(f.read())

        # Directory that receives the unlabeled images.
        save_root = cur_dir + ".nolabel"
        os.makedirs(save_root, exist_ok=True)

        # Index entries by image filename once (first key wins, matching the
        # original first-match linear scan) instead of re-scanning the whole
        # label dict for every image.
        key_by_filename = {}
        for k, v in train_labels.items():
            key_by_filename.setdefault(v["filename"], k)

        save_dict = {}
        move_list = []
        all_images = filesystem.get_all_filepath(cur_dir, [".jpg"])
        for path in all_images:
            name = os.path.basename(path)
            k = key_by_filename.get(name)
            # Keep the image only when it has an entry with a NON-empty
            # region list.  (Local renamed from `exists`, which shadowed
            # the top-of-file `genericpath.exists` import.)
            labeled = k is not None and len(train_labels[k]["regions"]) != 0
            if labeled:
                save_dict[k] = train_labels[k]
            else:
                print(path)
                move_list.append(path)
                shutil.move(path, save_root + os.sep + name)

        save_via_path = osp.join(cur_dir, save_via_name)
        with open(save_via_path, "w") as wf:
            wf.write(json.dumps(save_dict))

def remove_not_label_img_dir2(data_dir, via_name="via_region_data.json", save_via_name="via_region_data.clear1.json"):
    """Like :func:`remove_not_label_img_dir`, but an image counts as labeled
    only if one of its annotation entries contains at least one region with
    ``label == "ringelman"``.

    Images without such an entry are moved into a sibling ``<dir>.nolabel``
    directory; the kept entries are written to *save_via_name*.
    """
    via_files = filesystem.get_all_filepath(data_dir, [via_name])

    for via_file in via_files:
        cur_dir = osp.dirname(via_file)
        with open(via_file) as f:
            train_labels = json.loads(f.read())

        # Directory that receives the unlabeled images.
        save_root = cur_dir + ".nolabel"
        os.makedirs(save_root, exist_ok=True)

        # Index label keys by image filename once (keys kept in original
        # dict order) instead of scanning the whole label dict per image.
        keys_by_filename = defaultdict(list)
        for k, v in train_labels.items():
            keys_by_filename[v["filename"]].append(k)

        save_dict = {}
        move_list = []
        all_images = filesystem.get_all_filepath(cur_dir, [".jpg"])
        for path in all_images:
            name = os.path.basename(path)
            # First entry (in dict order) holding a "ringelman" region wins,
            # exactly as the original linear scan did.
            found_key = None
            for k in keys_by_filename.get(name, []):
                regions = train_labels[k]["regions"]
                if any(r["region_attributes"]["label"] == "ringelman" for r in regions):
                    found_key = k
                    break

            if found_key is not None:
                save_dict[found_key] = train_labels[found_key]
            else:
                print(path)
                move_list.append(path)
                shutil.move(path, save_root + os.sep + name)

        save_via_path = osp.join(cur_dir, save_via_name)
        with open(save_via_path, "w") as wf:
            wf.write(json.dumps(save_dict))



def make_car_label_img_dir(data_dir, via_name="via_region_data.rec.json", save_via_name="via_region_data.car.label.json"):
    """For each *via_name* file under *data_dir*, write a companion VIA file
    keeping only the regions labelled ``"car"``.

    Entries are pooled by filename across all annotation files seen so far
    (later files win), filtering mutates the pooled entry in place, and the
    output is keyed ``filename + filesize``.

    Raises:
        KeyError: if an image has no pooled annotation entry.
    """
    pooled = {}
    for via_file in filesystem.get_all_filepath(data_dir, [via_name]):
        cur_dir = osp.dirname(via_file)
        with open(via_file) as f:
            for entry in json.loads(f.read()).values():
                pooled[entry["filename"]] = entry

        filtered = {}
        for img_path in filesystem.get_all_filepath(cur_dir, [".jpg", ".png"]):
            name = osp.basename(img_path)
            size = osp.getsize(img_path)
            entry = pooled[name]
            # Keep only the car boxes (in-place on the pooled entry).
            entry["regions"] = [
                r for r in entry["regions"]
                if r["region_attributes"]["label"] == "car"
            ]
            filtered[name + str(size)] = entry

        with open(osp.join(cur_dir, save_via_name), "w") as wf:
            wf.write(json.dumps(filtered))


def merge_car_rgl_via_file(data_dir, car_via_name="via_region_data.car.json", rgl_via_name="via_region_data.rgl.json", save_via_name="via_region_data.clear1.json"):
    """In every leaf directory of *data_dir*, merge the car annotations and
    the Ringelmann (smoke) annotations into a single VIA file.

    The car file's regions are all relabelled ``"car"`` and the rgl file's
    ``"ringelman"``; the merged entry carries both region lists and is keyed
    ``filename + filesize``.

    Raises:
        KeyError: if an image is missing from either annotation file.
    """

    def _load_and_relabel(via_path, label):
        # Read one VIA file, force every region's label, pool by filename.
        pooled = {}
        with open(via_path) as f:
            for entry in json.loads(f.read()).values():
                for region in entry["regions"]:
                    region["region_attributes"]["label"] = label
                pooled[entry["filename"]] = entry
        return pooled

    for leaf_dir in filesystem.get_last_dir(data_dir):
        cars = _load_and_relabel(osp.join(leaf_dir, car_via_name), "car")
        rgls = _load_and_relabel(osp.join(leaf_dir, rgl_via_name), "ringelman")

        print(leaf_dir)
        merged = {}
        for img_path in filesystem.get_all_filepath(leaf_dir, [".jpg", ".png"]):
            name = osp.basename(img_path)
            size = osp.getsize(img_path)
            entry = cars[name]
            # Car boxes first, then the smoke boxes — same order as before.
            entry["regions"] = list(entry["regions"]) + list(rgls[name]["regions"])
            merged[name + str(size)] = entry

        with open(osp.join(leaf_dir, save_via_name), "w") as wf:
            wf.write(json.dumps(merged))
            


def find_box_from_via_file(via_dir, data_dir, via_name="via_region_data.json", save_via_name="via_region_data.clear1.json"):
    """Collect the annotation entries matching the images under *data_dir*.

    All *via_name* files found under *via_dir* are pooled (later files
    override duplicate keys); every pooled entry whose filename matches a
    ``.jpg`` found under *data_dir* is written to *save_via_name* inside
    *data_dir*.  All entries sharing a filename are kept, as before.
    """
    total_labels = {}
    for via_file in filesystem.get_all_filepath(via_dir, [via_name]):
        with open(via_file) as f:
            total_labels.update(json.loads(f.read()))

    # Index entries by image filename once instead of re-scanning the whole
    # label dict for every image (the original was O(images * labels)).
    by_filename = defaultdict(list)
    for k, v in total_labels.items():
        by_filename[v["filename"]].append((k, v))

    save_dict = {}
    for path in filesystem.get_all_filepath(data_dir, [".jpg"]):
        name = os.path.basename(path)
        for k, v in by_filename.get(name, []):
            save_dict[k] = v

    save_via_path = osp.join(data_dir, save_via_name)
    with open(save_via_path, "w") as wf:
        wf.write(json.dumps(save_dict))



if __name__ == "__main__":
    # Script driver: steps below are one-off pipeline stages; all but the
    # live `darknet_tool` calls are kept commented as a processing history.

    # 1.
    # data_dir = r"F:\work\dataset\det\rgl\2024\03\04"
    # merge_label_for_rgl(data_dir)

    # 2.
    # data_dir = r"F:\work\dataset\det\rgl\2024\03"
    # rename_via_filename(data_dir)

    # 3. Move out the images that have no black-smoke annotation
    # data_dir = r"F:\work\dataset\det\rgl\2024\03\02"
    # remove_not_label_img_dir(data_dir)



    # 4. Regenerate car-annotation files from the black-smoke images left in the current directory
    # via_region_data.rec.json -> via_region_data.car.label.json
    # data_dir = r"F:\work\dataset\det\rgl\2024\03\02\2314"
    # make_car_label_img_dir(data_dir)

    # 2.1
    # data_dir = r"F:\work\dataset\det\rgl\2024\1120"
    # via_name="via_region_data.json"
    # remove_not_label_img_dir(data_dir, via_name)
    
    # 2.2
    # data_dir = r"F:\work\dataset\det\rgl\2024\1120"
    # make_car_label_img_dir(data_dir)

    # 5.
    # data_dir = r"F:\work\dataset\det\rgl\images\2024\1211"
    # car_via_name="via_region_data.car.label.json"
    # rgl_via_name="via_region_data.rgl.label.json"
    # merge_car_rgl_via_file(data_dir, car_via_name, rgl_via_name)


    data_root = r"F:\work\dataset\det\rgl"
    save_dir = r"F:\work\dataset\det\rgl\dataset"
    via_name="via_region_data.clear1.json" 
    gen_type="ringelman"
    # darknet_tool.deal_many_dir(data_root, save_dir, via_name, True, gen_type)
    darknet_tool.create_train_val_txt(save_dir)
    darknet_tool.check_darket_train_data(save_dir)



    # Process the 2021 data
    # Move out the images without black smoke
    # data_dir = r"F:\work\dataset\det\rgl\images\2021\12"
    # remove_not_label_img_dir2(data_dir)

    # Look up annotation data from the json files
    # via_dir = r"F:\work\dataset\det\rgl\images\2021\6\29"
    # data_dir = r"F:\work\dataset\det\rgl\images\2021\6\29\bg"
    # find_box_from_via_file(via_dir, data_dir)
