# scripts/download_all.py
import os
import argparse
from datasets import load_dataset

def save_to_txt(dataset, field_name, output_path):
    """Write the *field_name* value of every record in *dataset* to *output_path*.

    Non-empty values are stripped and separated by a blank line. Records
    where the field is missing, ``None``, or whitespace-only are skipped.

    Args:
        dataset: Iterable of dict-like records (e.g. a HF ``Dataset``).
        field_name: Key of the text field to extract from each record.
        output_path: Destination file; parent directories are created as needed.
    """
    parent = os.path.dirname(output_path)
    # Guard: os.makedirs("") raises FileNotFoundError when output_path
    # is a bare filename with no directory component.
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(output_path, "w", encoding="utf-8") as f:
        for item in dataset:
            # `or ""` also covers a key that is present but set to None,
            # which .get(field_name, "") alone would pass through to .strip().
            text = (item.get(field_name) or "").strip()
            if text:
                f.write(text + "\n\n")

def download_bookcorpus(output_path):
    """Fetch the BookCorpus train split and dump it as plain text."""
    print("📚 Downloading BookCorpus...")
    corpus = load_dataset("bookcorpus", split="train", trust_remote_code=True)
    save_to_txt(corpus, "text", output_path)

def download_wikipedia(output_path, lang="en"):
    """Fetch the 2022-03-01 Wikipedia dump for *lang* and dump it as plain text."""
    print(f"📘 Downloading Wikipedia ({lang})...")
    config_name = f"20220301.{lang}"
    wiki = load_dataset("wikipedia", config_name, split="train", trust_remote_code=True)
    save_to_txt(wiki, "text", output_path)

def download_openwebtext(output_path):
    """Fetch the OpenWebText train split and dump it as plain text."""
    print("🌐 Downloading OpenWebText...")
    owt = load_dataset("openwebtext", split="train")
    save_to_txt(owt, "text", output_path)

def main():
    """CLI entry point: parse arguments and fetch each requested dataset."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--datasets",
        nargs="+",
        default=["bookcorpus"],
        help="List of datasets to download. Options: bookcorpus, wikipedia, openwebtext",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="data/raw",
        help="Where to save the raw text files.",
    )
    args = parser.parse_args()

    # Dispatch table replaces an if/elif chain; unknown names fall through
    # to the error branch below.
    downloaders = {
        "bookcorpus": download_bookcorpus,
        "wikipedia": download_wikipedia,
        "openwebtext": download_openwebtext,
    }
    for name in args.datasets:
        target = os.path.join(args.output_dir, f"{name}.txt")
        downloader = downloaders.get(name)
        if downloader is None:
            print(f"❌ Unsupported dataset: {name}")
        else:
            downloader(target)

if __name__ == "__main__":
    main()
