import json
import os
import random
import shutil
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor

from tqdm import tqdm

from ai.utils.config import dataset_path

# Object detection: convert a bounding box from COCO to YOLO format.
def convert_detect(size, box):
    """Convert a COCO bbox ``[x_min, y_min, width, height]`` (pixels) into the
    normalized YOLO form ``(x_center, y_center, width, height)``.

    ``size`` is ``(image_width, image_height)``; every component of the result
    is rounded to 6 decimal places.
    """
    inv_w = 1. / (size[0])
    inv_h = 1. / (size[1])
    # Center of the box in pixels, then normalized by the image dimensions.
    center_x = (box[0] + box[2] / 2.0) * inv_w
    center_y = (box[1] + box[3] / 2.0) * inv_h
    norm_w = box[2] * inv_w
    norm_h = box[3] * inv_h
    return (round(center_x, 6), round(center_y, 6),
            round(norm_w, 6), round(norm_h, 6))

# Object detection: convert a COCO json file to YOLO format.
def coco_detect(id):
    """Convert the COCO detection annotations of dataset *id* to YOLO layout.

    Reads ``<dataset_path>/<id>/<id>.json``, writes a ``data.yaml`` describing
    the dataset, and emits one YOLO label ``.txt`` per image under
    ``<dataset_path>/<id>/annotation/`` (recreated from scratch).
    """
    json_path = os.path.join(dataset_path, id, id + '.json')
    # Context manager so the JSON file handle is closed deterministically
    # (the previous json.load(open(...)) leaked the handle).
    with open(json_path, 'r') as jf:
        json_data = json.load(jf)

    # Write the YOLO data.yaml (dataset layout + class names).
    yaml_path = os.path.join(dataset_path, id, 'data.yaml')
    id_map = {}  # COCO category ids may be non-contiguous; remap to 0..N-1.
    with open(yaml_path, 'w') as f:
        f.write("path: /usr/src/app/datasets/media/upload/" + id + "\n")
        f.write("train: images/train" + "\n")
        f.write("val: images/val" + "\n")
        f.write("names:" + "\n")
        for i, category in enumerate(json_data['categories']):
            f.write("  " + str(i) + ": " + f"{category['name']}\n")
            id_map[category['id']] = int(i)

    # Recreate the annotation output folder from scratch.
    ana_path = os.path.join(dataset_path, id, 'annotation')
    shutil.rmtree(ana_path, ignore_errors=True)
    os.makedirs(ana_path, exist_ok=True)

    # Group annotations by image id once, instead of rescanning the whole
    # annotation list for every image.
    anns_by_img_id = defaultdict(list)
    for ann in json_data['annotations']:
        anns_by_img_id[ann['image_id']].append(ann)

    for img in tqdm(json_data['images']):
        img_width = img['width']
        img_height = img['height']
        ana_txt_name = os.path.splitext(img['file_name'])[0] + '.txt'
        # An empty .txt is still written for images without annotations.
        with open(os.path.join(ana_path, ana_txt_name), 'w') as f:
            for ann in anns_by_img_id.get(img['id'], []):
                box = convert_detect((img_width, img_height), ann["bbox"])
                f.write("%s %s %s %s %s\n" % (id_map[ann["category_id"]], box[0], box[1], box[2], box[3]))

# Image segmentation: convert one polygon vertex from COCO to YOLO format.
def convert_segment(size, x, y):
    """Normalize an absolute pixel coordinate ``(x, y)`` by the image size
    ``(width, height)``; each component is rounded to 6 decimal places."""
    return (round(float(x / size[0]), 6),
            round(float(y / size[1]), 6))

# Image segmentation: convert a COCO json file to YOLO format.
def coco_segment(id):
    """Convert the COCO segmentation annotations of dataset *id* to YOLO layout.

    Reads ``<dataset_path>/<id>/<id>.json``, writes ``data.yaml``, and emits
    one label ``.txt`` per image (class id followed by the normalized polygon
    vertices) under ``<dataset_path>/<id>/annotation/``.
    """
    json_path = os.path.join(dataset_path, id, id + '.json')
    # Context manager so the JSON file handle is closed deterministically
    # (the previous json.load(open(...)) leaked the handle).
    with open(json_path, 'r') as jf:
        json_data = json.load(jf)

    # Write the YOLO data.yaml (dataset layout + class names).
    yaml_path = os.path.join(dataset_path, id, 'data.yaml')
    id_map = {}  # COCO category ids may be non-contiguous; remap to 0..N-1.
    with open(yaml_path, 'w') as f:
        f.write("path: /usr/src/app/datasets/media/upload/" + id + "\n")
        f.write("train: images/train" + "\n")
        f.write("val: images/val" + "\n")
        f.write("names:" + "\n")
        for i, category in enumerate(json_data['categories']):
            f.write("  " + str(i) + ": " + f"{category['name']}\n")
            id_map[category['id']] = int(i)

    # Recreate the annotation output folder from scratch.
    ana_path = os.path.join(dataset_path, id, 'annotation')
    shutil.rmtree(ana_path, ignore_errors=True)
    os.makedirs(ana_path, exist_ok=True)

    # Group annotations by image id once, instead of rescanning the whole
    # annotation list for every image.
    anns_by_img_id = defaultdict(list)
    for ann in json_data['annotations']:
        anns_by_img_id[ann['image_id']].append(ann)

    for img in tqdm(json_data['images']):
        img_size = (img['width'], img['height'])
        ana_txt_name = os.path.splitext(img['file_name'])[0] + '.txt'
        # An empty .txt is still written for images without annotations.
        with open(os.path.join(ana_path, ana_txt_name), 'w') as f:
            for ann in anns_by_img_id.get(img['id'], []):
                f.write("%s" % (id_map[ann["category_id"]]))
                # NOTE(review): only the first polygon of each annotation is
                # used, as in the original code — multi-polygon instances are
                # truncated; confirm this is intended.
                polygon = ann["segmentation"][0]
                for k in range(0, len(polygon), 2):
                    yolo_x, yolo_y = convert_segment(img_size, polygon[k], polygon[k + 1])
                    f.write(" %s %s" % (yolo_x, yolo_y))
                f.write("\n")

# Split an object-detection dataset.
# Based on the labels in the annotation folder, split into:
# images/train images/val
# labels/train labels/val
def divide_detect(id):
    """Split dataset *id* into an 80/20 train/val YOLO layout.

    Every ``.txt`` label in ``<id>/annotation/`` and its matching ``.jpg``
    are copied (concurrently) into ``images/{train,val}`` and
    ``labels/{train,val}``; any previous split folders are removed first.
    """
    # Original file locations.
    image_original_path = os.path.join(dataset_path, id + '/')
    label_original_path = os.path.join(dataset_path, id, 'annotation/')
    # Train split destinations.
    train_image_path = os.path.join(dataset_path, id, 'images/train/')
    train_label_path = os.path.join(dataset_path, id, 'labels/train/')
    # Val split destinations.
    val_image_path = os.path.join(dataset_path, id, 'images/val/')
    val_label_path = os.path.join(dataset_path, id, 'labels/val/')
    # Remove the previous images/labels folders, then recreate the tree.
    shutil.rmtree(os.path.join(dataset_path, id, 'images'), ignore_errors=True)
    shutil.rmtree(os.path.join(dataset_path, id, 'labels'), ignore_errors=True)
    os.makedirs(train_image_path, exist_ok=True)
    os.makedirs(train_label_path, exist_ok=True)
    os.makedirs(val_image_path, exist_ok=True)
    os.makedirs(val_label_path, exist_ok=True)

    # Split ratio: 80% train, the remainder val.
    train_percent = 0.8

    total_txt = os.listdir(label_original_path)
    num_txt = len(total_txt)
    num_train = int(num_txt * train_percent)

    # A set gives O(1) membership tests in the copy loop (the previous
    # list/range lookups made the loop quadratic); everything not sampled
    # into `train` is val, so no separate val list is needed.
    train = set(random.sample(range(num_txt), num_train))

    # Copy files concurrently with a thread pool.
    with ThreadPoolExecutor(max_workers=10) as executor:
        for i, txt_name in enumerate(total_txt):
            name = txt_name[:-4]  # strip the '.txt' extension

            src_image = image_original_path + name + '.jpg'
            src_label = label_original_path + name + '.txt'

            if i in train:
                dst_image = train_image_path + name + '.jpg'
                dst_label = train_label_path + name + '.txt'
            else:
                dst_image = val_image_path + name + '.jpg'
                dst_label = val_label_path + name + '.txt'
            executor.submit(shutil.copyfile, src_image, dst_image)
            executor.submit(shutil.copyfile, src_label, dst_label)

# Split an image-classification dataset.
def divide_classify(id):
    """Split dataset *id* into ``train/<label>/`` and ``val/<label>/`` folders.

    The label file ``<id>/<id>.json`` is parsed record-by-record; each record
    must provide an ``image`` path and a ``choice`` label. The first 80% of
    each label's images (in file order) go to train, the rest to val, and the
    files are copied concurrently.
    """
    # Remove any previous train/val folders, then recreate them.
    # (rmtree with ignore_errors avoids the exists-then-delete race.)
    train_dir = os.path.join(dataset_path, id, 'train')
    val_dir = os.path.join(dataset_path, id, 'val')
    shutil.rmtree(train_dir, ignore_errors=True)
    shutil.rmtree(val_dir, ignore_errors=True)
    os.makedirs(train_dir)
    os.makedirs(val_dir)

    json_path = os.path.join(dataset_path, id, id + '.json')

    # label -> list of bare file names, in file order.
    class_files = defaultdict(list)
    with open(json_path, 'r') as f:
        buffer = ''
        for line in f:
            buffer += line.strip()
            # Streaming heuristic parse: a record is assumed complete when the
            # accumulated text ends with '}' or '},'.
            # NOTE(review): replace() rewrites EVERY '},' in the buffer, so a
            # record containing a nested object followed by a comma would be
            # corrupted — preserved as-is; confirm the label-file format is flat.
            if buffer.endswith('},') or buffer.endswith('}'):
                buffer = buffer.replace('},', '}')
                try:
                    item = json.loads(buffer)
                    # Keep only the file name portion of the image path.
                    file_name = item["image"].split('/')[-1]
                    class_files[item["choice"]].append(file_name)
                except json.JSONDecodeError:
                    # Malformed fragment: skip this record (best-effort parse).
                    pass
                buffer = ''

    # One shared thread pool for every copy, instead of creating a new pool
    # for each label/split combination.
    with ThreadPoolExecutor(max_workers=10) as executor:
        # Slice each label's list into 80% train / 20% val.
        for label, images in class_files.items():
            num_train = int(len(images) * 0.8)
            splits = ((images[:num_train], 'train'), (images[num_train:], 'val'))

            for image_list, split_name in splits:
                class_dir = os.path.join(dataset_path, id, split_name, label)
                os.makedirs(class_dir, exist_ok=True)

                for file_name in image_list:
                    src_file = os.path.join(dataset_path, id, file_name)
                    dst_file = os.path.join(class_dir, file_name)
                    executor.submit(shutil.copyfile, src_file, dst_file)