import shutil, os, json
import os.path as osp
import cv2
import numpy as np
from tool import filesystem, via_tool


# Rename in pairs: give every image and its matching .txt label the same
# sequential numeric name (10000, 10001, ...) inside dest_dir.
def rename_by_pair2(dest_dir):
    """Rename image/label pairs under dest_dir to sequential numeric names.

    Images without a sibling .txt label are reported and left untouched.
    """
    extensions = [".jpg", ".JPG", ".bmp", ".BMP", ".PNG", ".png"]
    img_files = filesystem.get_all_filepath(dest_dir, extensions)
    print(len(img_files))

    counter = 10000
    for img_path in img_files:
        stem, ext = osp.splitext(img_path)
        label_path = stem + ".txt"
        if not osp.exists(label_path):
            print("not exists: ", label_path)
            continue
        shutil.move(img_path, osp.join(dest_dir, "{}{}".format(counter, ext)))
        shutil.move(label_path, osp.join(dest_dir, "{}.txt".format(counter)))
        counter += 1


def distribute_yolo_dataset(data_dir, label_file, per_img_in_dir=500, via_name="via_region_data.json"):
    """Split a TT100K-style annotation file into VIA-format chunk directories.

    Reads ``data_dir/label_file`` (JSON with an ``imgs`` mapping whose values
    carry a relative ``path`` plus ``objects`` with ``category`` and a
    ``bbox`` of xmin/ymin/xmax/ymax), groups the images by path prefix
    ("other"/"test"/"train"), then copies them into numbered sub-directories
    of at most ``per_img_in_dir`` images, each directory getting its own VIA
    annotation file named ``via_name``.

    Args:
        data_dir: dataset root containing the images and the label file.
        label_file: annotation JSON filename, relative to ``data_dir``.
        per_img_in_dir: maximum number of images per output directory.
        via_name: filename of the generated VIA JSON in each output directory.
    """
    annotation_path = osp.join(data_dir, label_file)
    with open(annotation_path, "r") as rf:
        json_data = json.loads(rf.read())

    other_imgs = []
    test_imgs = []
    train_imgs = []
    for one_data in json_data["imgs"].values():
        suffix_name = one_data["path"]

        regions = []
        for obj in one_data["objects"]:
            xmin = int(obj["bbox"]["xmin"])
            ymin = int(obj["bbox"]["ymin"])
            xmax = int(obj["bbox"]["xmax"])
            ymax = int(obj["bbox"]["ymax"])
            regions.append({
                "shape_attributes": {
                    "name": "rect",
                    "x": xmin,
                    "y": ymin,
                    "width": xmax - xmin,
                    "height": ymax - ymin,
                },
                "region_attributes": {"label": obj["category"]},
            })

        save_one_data = {
            "filename": suffix_name,
            "regions": regions,
            "file_attributes": {},
        }

        # Images whose path starts with none of the known prefixes are dropped.
        if suffix_name.startswith("other"):
            other_imgs.append(save_one_data)
        elif suffix_name.startswith("test"):
            test_imgs.append(save_one_data)
        elif suffix_name.startswith("train"):
            train_imgs.append(save_one_data)

    prefixs = ["other", "test", "train"]
    for prefix, datas in zip(prefixs, [other_imgs, test_imgs, train_imgs]):
        save_dir = None
        save_dict = {}
        for idx, data in enumerate(datas):
            if idx % per_img_in_dir == 0:
                # Starting a new chunk: flush the previous chunk's annotations.
                if save_dir is not None:
                    with open(osp.join(save_dir, via_name), "w") as wf:
                        wf.write(json.dumps(save_dict))

                save_dir = osp.join(data_dir, "{}_{}".format(prefix, idx // per_img_in_dir))
                save_dict = {}
            os.makedirs(save_dir, exist_ok=True)
            img_path = osp.join(data_dir, data["filename"])
            filename = osp.basename(img_path)
            size = osp.getsize(img_path)
            data["filename"] = filename
            data["size"] = size
            # VIA keys each image entry by "<filename><filesize>".
            save_dict[filename + str(size)] = data
            shutil.copy(img_path, osp.join(save_dir, filename))

        # BUG FIX: the original flushed a chunk only when the NEXT chunk began,
        # so the last (possibly partial) chunk never got its VIA JSON written.
        if save_dir is not None and save_dict:
            with open(osp.join(save_dir, via_name), "w") as wf:
                wf.write(json.dumps(save_dict))




def convert_img_to_jpg(dest_dir):
    """Re-encode every non-.jpg image under dest_dir to .jpg and remove the original.

    Images that OpenCV cannot decode are reported and left untouched.
    """
    img_files = filesystem.get_all_filepath(dest_dir, [".jpg", ".JPG", ".bmp", ".BMP", ".PNG", ".png"])
    for f in img_files:
        if f.endswith(".jpg"): continue

        img = cv2.imread(f)
        # BUG FIX: a failed read previously fell through to cv2.imwrite(None)
        # and then deleted the source image — guaranteed data loss.
        if img is None:
            print("cannot read: ", f)
            continue

        save_path = ".".join(f.split(".")[:-1]) + ".jpg"
        cv2.imwrite(save_path, img)
        # BUG FIX: on case-insensitive filesystems "X.JPG" and "X.jpg" are the
        # same file; removing the source would delete the just-written output.
        if not osp.samefile(save_path, f):
            os.remove(f)


def txt_to_via_json(data_dir, per_img_in_dir=1000, gen_via_name="via_region_data.json"):
    """Convert 8-value polygon .txt labels into one VIA JSON per image directory.

    For each leaf directory under ``data_dir/images``, reads the matching
    ``.txt`` files from the mirrored ``data_dir/labels`` tree, converts every
    8-number line (x1,y1,x2,y2,x3,y3,x4,y4) into an axis-aligned "rect"
    region labelled "tsign", and writes a VIA annotation file into the image
    directory itself.

    Args:
        data_dir: root directory containing ``images/`` and ``labels/`` trees.
        per_img_in_dir: kept for interface compatibility; currently unused.
        gen_via_name: filename of the VIA JSON written into each image dir.
    """
    image_dirs = filesystem.get_last_dir(osp.join(data_dir, "images"))

    for image_dir in image_dirs:
        data_dict = dict()

        txt_dir = image_dir.replace("/images/", "/labels/")
        img_paths = filesystem.get_all_filepath(image_dir)
        for img_path in img_paths:
            filename = osp.basename(img_path)
            txt_path = osp.join(txt_dir, filename.replace(".jpg", ".txt"))
            if not osp.exists(txt_path):
                print("not exists, ", txt_path)
                continue

            # Decode only as a sanity check: unreadable images are reported
            # but their annotations are still emitted (original behavior).
            cv_img = cv2.imread(img_path)
            if cv_img is None:
                print("None: ", img_path)

            regions = []
            # Context manager replaces the manual open/close of the original,
            # which leaked the handle if anything in the loop raised.
            with open(txt_path, "r") as rf:
                for line in rf:
                    outs = line.split(",")
                    if len(outs) != 8:
                        # print("len(outs) != 8... ", txt_path)
                        continue

                    # Even positions are x coordinates, odd positions are y.
                    xs = [int(v) for v in outs[0::2]]
                    ys = [int(v) for v in outs[1::2]]

                    regions.append({
                        "shape_attributes": {
                            "name": "rect",
                            "x": min(xs),
                            "y": min(ys),
                            "width": max(xs) - min(xs),
                            "height": max(ys) - min(ys),
                        },
                        "region_attributes": {"label": "tsign"},
                    })

            file_size = osp.getsize(img_path)
            # VIA keys each image entry by "<filename><filesize>".
            data_dict[osp.basename(img_path) + str(file_size)] = {"size": file_size,
                    "filename": osp.basename(img_path),
                    "regions": regions,
                    "file_attributes": {}
                }

        with open(image_dir + os.sep + gen_via_name, "w") as wf:
            wf.write(json.dumps(data_dict))

if __name__ == "__main__":
    # Entry point: chunk the TT100K-2021 detection dataset into VIA-annotated
    # directories of 300 images each. Swap in "annotations.json" to process
    # the smaller annotation set instead.
    distribute_yolo_dataset(
        "/mnt/data/dataset/det/TT100K/tt100k_2021",
        "annotations_all.json",
        300,
    )



