
import os
import argparse
import random
from pathlib import Path

ALLOWED_IMG_EXT = {".png", ".jpg", ".jpeg", ".tif", ".tiff", ".bmp"}

def find_pairs(split_dir: Path, img_dir_name="img", label_dir_name="label_bin"):
    """
    Pair every image file with the label file that shares its stem.

    Expects structure:
        <split_dir>/<img_dir_name>/.../<name>.<ext>
        <split_dir>/<label_dir_name>/.../<name>.<ext>
    Both directories are searched recursively; extensions may differ between
    an image and its label (matching is by stem only).

    Args:
        split_dir: path to one split folder (e.g. <dataset_dir>/train).
        img_dir_name: image subdirectory name inside the split.
        label_dir_name: label subdirectory name inside the split.

    Returns:
        List of (img_path, label_path) tuples (paths as found, NOT relative
        to any root — see norm_relpaths). Images with no matching label are
        silently skipped.

    Raises:
        FileNotFoundError: if either subdirectory does not exist.
    """
    img_dir = split_dir / img_dir_name
    label_dir = split_dir / label_dir_name
    if not img_dir.exists():
        raise FileNotFoundError(f"Images dir not found: {img_dir}")
    if not label_dir.exists():
        raise FileNotFoundError(f"Labels dir not found: {label_dir}")

    # Map label stems to paths (any allowed extension).  Sorted traversal
    # makes a stem collision between subfolders resolve deterministically
    # (lexicographically last path wins) instead of by filesystem order.
    label_map = {
        p.stem: p
        for p in sorted(label_dir.rglob("*"))
        if p.is_file() and p.suffix.lower() in ALLOWED_IMG_EXT
    }

    pairs = []
    for ip in sorted(img_dir.rglob("*")):
        if ip.is_file() and ip.suffix.lower() in ALLOWED_IMG_EXT:
            lp = label_map.get(ip.stem)
            if lp is not None:
                pairs.append((ip, lp))
    return pairs

def norm_relpaths(pairs, dataset_dir: Path):
    """Convert (img, label) path pairs to forward-slash paths relative to dataset_dir."""
    return [
        (img.relative_to(dataset_dir).as_posix(),
         lab.relative_to(dataset_dir).as_posix())
        for img, lab in pairs
    ]

def write_list(lines, out_path: Path):
    """Write one "img label" pair per line to out_path, creating parent dirs as needed."""
    out_path.parent.mkdir(parents=True, exist_ok=True)
    text = "".join(f"{img} {lab}\n" for img, lab in lines)
    out_path.write_text(text, encoding="utf-8")

def main():
    """Build labeled/unlabeled list files for semi-supervised training.

    Expects <dataset-dir>/{train,val,test}/<img-dir>/<file> with matching
    label files under <label-dir>.  Writes val.txt, test.txt, and one
    train_<p>%_labeled.txt / train_<p>%_unlabeled.txt pair per requested
    percentage to <dataset-dir>/lists/.

    Raises:
        FileNotFoundError: if the dataset root or a split folder is missing.
        ValueError: if any requested percentage is outside (0, 100].
    """
    ap = argparse.ArgumentParser(description="Prepare semi-supervised list files from a fully supervised dataset.")
    # NOTE(review): machine-specific default path; users on other machines
    # must pass --dataset-dir explicitly.
    ap.add_argument("--dataset-dir", default="D:/document/post/datasets/GF7/Processed/Chongqing", help="Path to dataset root (e.g., .../Massachusetts-building/Massachusetts)")
    ap.add_argument("--img-dir-name", default="image", help="Name of image subdir inside each split (default: image)")
    ap.add_argument("--label-dir-name", default="label", help="Name of label subdir inside each split (default: label)")
    ap.add_argument("--percents", type=int, nargs="+", default=[1, 2, 5, 10], help="Labeled percentages to generate, e.g., --percents 1 5 10 20")
    ap.add_argument("--seed", type=int, default=42, help="Random seed for labeled sampling")
    args = ap.parse_args()

    dataset_dir = Path(args.dataset_dir)
    if not dataset_dir.exists():
        raise FileNotFoundError(f"Dataset dir not found: {dataset_dir}")

    # Validate all percentages up front so we fail before writing any file
    # (the original asserted inside the write loop, and assert is stripped
    # under `python -O`).
    for p in args.percents:
        if not 0 < p <= 100:
            raise ValueError(f"percent must be in (0, 100], got {p}")

    # Collect sorted relative (img, label) pairs per split; sorting makes
    # the output independent of filesystem enumeration order.
    splits = {}
    for split in ["train", "val", "test"]:
        split_dir = dataset_dir / split
        if not split_dir.exists():
            raise FileNotFoundError(f"Split folder not found: {split_dir}")
        pairs = find_pairs(split_dir, args.img_dir_name, args.label_dir_name)
        pairs_rel = norm_relpaths(pairs, dataset_dir)
        pairs_rel.sort()
        splits[split] = pairs_rel

    train_pairs = splits["train"]
    val_pairs   = splits["val"]
    test_pairs  = splits["test"]

    # Always (re)write val.txt and test.txt
    lists_dir = dataset_dir / "lists"
    write_list(val_pairs,  lists_dir / "val.txt")
    write_list(test_pairs, lists_dir / "test.txt")

    # One seeded shuffle shared by every percentage, so smaller labeled
    # subsets are nested prefixes of larger ones (1% subset of 5%, etc.).
    rng = random.Random(args.seed)
    indices = list(range(len(train_pairs)))
    rng.shuffle(indices)

    for p in args.percents:
        # Guarantee at least one labeled sample for tiny datasets/percents.
        k = max(1, round(len(train_pairs) * p / 100.0))
        labeled_idx = set(indices[:k])
        # Filter in index order so both lists keep the sorted pair order.
        labeled_pairs   = [pair for i, pair in enumerate(train_pairs) if i in labeled_idx]
        unlabeled_pairs = [pair for i, pair in enumerate(train_pairs) if i not in labeled_idx]

        # Filenames follow the project's default convention
        l_path = lists_dir / f"train_{p}%_labeled.txt"
        u_path = lists_dir / f"train_{p}%_unlabeled.txt"
        write_list(labeled_pairs, l_path)
        write_list(unlabeled_pairs, u_path)

        print(f"[{p}%] train: {len(train_pairs)} -> labeled={len(labeled_pairs)}, unlabeled={len(unlabeled_pairs)}")
        print(f"  wrote: {l_path}")
        print(f"  wrote: {u_path}")

    print(f"Also wrote: {lists_dir/'val.txt'} and {lists_dir/'test.txt'}")
    print("Done.")

if __name__ == "__main__":
    main()
