import os, json, random
from shutil import copy, rmtree
# FLOPs (floating-point operation) counting
from torchstat import stat
from PIL import Image

def load_dataset_message(dataset_path):
    # ------------------------------------------------- #
    # Example input paths:
    # dataset_path = ROOT/dataset/Flower/train
    # dataset_path = ROOT/dataset/Flower/val
    # ------------------------------------------------- #
    """
    Load image file paths and integer class labels from a
    class-per-subdirectory dataset layout.

    Side effect: writes ``classes.json`` (index -> class name) one
    directory above this script, so the mapping can be reused later
    (e.g. at inference time).

    :param dataset_path: dataset directory containing one sub-directory per class
    :return: (images_messages, labels_messages) — parallel lists of image
             file paths and their integer class labels
    """
    # Sort the class directories: os.listdir order is arbitrary and
    # platform-dependent, so sorting makes class indices deterministic
    # across runs (and consistent between the train and val calls).
    path_message = sorted(os.listdir(dataset_path))

    # Persist the index -> class-name mapping as JSON.
    classes_json = {index: item for index, item in enumerate(path_message)}
    with open(os.path.join(os.path.split(os.path.abspath(__file__))[0], "../classes.json"), "w") as f:
        json.dump(classes_json, f)

    # Collect every file of each class directory, labelling it with the
    # class index. Filenames are sorted for deterministic output order.
    images_messages = []
    labels_messages = []
    for index, item in enumerate(path_message):
        class_dir = os.path.join(dataset_path, item)
        current_file_message = [
            os.path.join(class_dir, file_name)
            for file_name in sorted(os.listdir(class_dir))
        ]
        images_messages.extend(current_file_message)
        labels_messages.extend([index] * len(current_file_message))

    return images_messages, labels_messages

def split_dataset(dataset_path):
    """
    Split a class-per-subdirectory image dataset into train/val sets.

    Roughly 10% of each class's images are randomly assigned to
    ``<dataset_path>/../val/<class>``, the rest to
    ``<dataset_path>/../train/<class>``. Every image is re-encoded as
    RGB JPEG (quality 95). Pre-existing train/val trees are removed.

    :param dataset_path: source dataset directory, one sub-directory per class
    :return: None
    """
    dataset_root = os.path.join(os.getcwd(), dataset_path)
    # Classes are the immediate sub-directories of the dataset root.
    classes = [
        cla for cla in os.listdir(dataset_root)
        if os.path.isdir(os.path.join(dataset_root, cla))
    ]

    # Recreate both output roots from scratch. Note: the original code
    # created the root only when it already existed and relied on
    # makedirs() below creating parents implicitly; now it is explicit.
    train_root = os.path.join(dataset_root, "../train")
    validate_root = os.path.join(dataset_root, "../val")
    for out_root in (train_root, validate_root):
        if os.path.exists(out_root):
            rmtree(out_root)
        for cla in classes:
            os.makedirs(os.path.join(out_root, cla))

    for cla in classes:
        class_path = os.path.join(dataset_root, cla)
        images = os.listdir(class_path)
        num = len(images)
        # Randomly pick ~10% of the images for validation; a set makes
        # the per-image membership test O(1) instead of O(n).
        validation_images = set(random.sample(images, int(num * 0.1)))
        for index, image in enumerate(images):
            out_root = validate_root if image in validation_images else train_root
            # Rename only the ".png" extension to ".jpg"; the previous
            # str.replace("png", "jpg") corrupted any filename that
            # merely contained the substring "png".
            stem, ext = os.path.splitext(image)
            new_name = stem + ".jpg" if ext == ".png" else image
            # Context manager closes the file handle (the original
            # leaked one descriptor per image).
            with Image.open(os.path.join(class_path, image)) as im:
                im.convert('RGB').save(os.path.join(out_root, cla, new_name), quality=95)
            print("\r[{}] 数据集分割中... [{}/{}]".format(cla, index + 1, num), end="")  # processing bar
        print()

    print("分割完成！")


def get_model_flops_args(model, x):
    """
    Print parameter/FLOPs statistics for *model* via torchstat.

    :param model: the model to profile
    :param x: second argument forwarded to torchstat.stat — presumably
              the input size tuple; confirm against the torchstat docs
    """
    # Overall parameter information — delegates entirely to torchstat.stat,
    # which prints its report to stdout (nothing is returned here).
    stat(model, x)

if __name__ == '__main__':
    # Split the Lung dataset that lives two levels up from the CWD.
    lung_dataset_path = os.path.join(os.getcwd(), "../dataset/Lung")
    split_dataset(lung_dataset_path)

