import os
from torch.utils.data import random_split
import torch

# Root directory containing the lunar-crater texture subdirectories to index.
DB_DIR = "/disk527/sdb1/a804_cbf/datasets/lunar_crater/textures"
# Fraction of all samples assigned to the training split; the remainder is val.
train_rate = 0.8


def file_list(db_dir, subdir=None):
    """Yield ``(row, col, path)`` for every entry directly inside *db_dir*.

    Filenames are assumed to look like ``<prefix>_<int>_<int>[_...]`` — the
    two integers are taken from positions 1 and 2 of the ``_`` split
    (TODO confirm this naming convention against the dataset on disk).

    Args:
        db_dir: Directory whose entries are listed (no recursion).
        subdir: Unused. Kept for backward compatibility, now with a default
            so callers (e.g. ``file_datasets``) may omit it — the original
            required it yet never read it, making one-argument calls crash.

    Yields:
        Tuples ``(row, col, absolute_path)`` for each listed entry.
    """
    for name in os.listdir(db_dir):
        parts = name.split("_")  # split once instead of twice per file
        yield int(parts[1]), int(parts[2]), os.path.join(db_dir, name)


def file_datasets(db_dir):
    """Yield ``(row, col, path)`` records from every subdirectory of *db_dir*.

    Bug fix: the original called ``file_list(subdir_files)`` with one
    argument although ``file_list`` declares two required parameters, so
    this function always raised ``TypeError``. The subdirectory name is now
    passed through as the second argument.

    Args:
        db_dir: Root directory; each immediate entry is treated as a
            subdirectory of texture files to index.

    Yields:
        Tuples ``(row, col, absolute_path)`` from all subdirectories.
    """
    for subdir in os.listdir(db_dir):
        subdir_path = os.path.join(db_dir, subdir)
        yield from file_list(subdir_path, subdir)


if __name__ == "__main__":
    # Gather every (row, col, path) record under the dataset root.
    dataset = list(file_datasets(DB_DIR))
    num_len = len(dataset)
    train_len = int(num_len * train_rate)
    val_len = num_len - train_len

    # Fixed seed so the train/val assignment is reproducible across runs.
    train_dataset, test_dataset = random_split(
        dataset=dataset,
        lengths=[train_len, val_len],
        generator=torch.Generator().manual_seed(0),
    )
    print(f"train_dataset: {len(train_dataset)}")
    print(f"test_dataset: {len(test_dataset)}")

    # Order each split by a row-major position key; 192 is presumably the
    # grid width (columns per row) — TODO confirm against the dataset layout.
    def _grid_key(record):
        return record[0] * 192 + record[1]

    train_dataset = sorted(train_dataset, key=_grid_key)
    test_dataset = sorted(test_dataset, key=_grid_key)

    # Write one file path per line for each split (record[-1] is the path).
    with open(os.path.join(DB_DIR, "train.txt"), "w") as f:
        f.writelines(f"{record[-1]}\n" for record in train_dataset)
    with open(os.path.join(DB_DIR, "val.txt"), "w") as f:
        f.writelines(f"{record[-1]}\n" for record in test_dataset)
