# download_model.py
import os
import argparse
from huggingface_hub import snapshot_download
from huggingface_hub.utils import HfHubHTTPError, RepositoryNotFoundError

def download_hf_model(model_id: str, target_dir: str, token: str = None, revision: str = None) -> bool:
    """
    Downloads a model from Hugging Face Hub to a specified target directory.

    Args:
        model_id (str): The ID of the model on Hugging Face Hub (e.g., "bert-base-uncased").
        target_dir (str): The base directory where the model folder will be created.
                          A subfolder named after the last part of the model_id is created here.
        token (str, optional): Hugging Face API token for private models. Defaults to None,
                               in which case huggingface_hub falls back to any cached token.
        revision (str, optional): Specific model version (branch, tag, or commit hash). Defaults to 'main'.

    Returns:
        bool: True if the download completed, False if any error occurred
              (errors are reported on stdout rather than raised).
    """
    # Create the target base directory if it doesn't exist
    os.makedirs(target_dir, exist_ok=True)

    # Use the last path component of the repo id as the folder name, e.g.
    # "Llama-2-7b-chat-hf" for "meta-llama/Llama-2-7b-chat-hf".
    model_folder_name = model_id.split("/")[-1]
    model_download_path = os.path.join(target_dir, model_folder_name)

    print(f"Attempting to download model '{model_id}' to '{model_download_path}'...")

    try:
        snapshot_download(
            repo_id=model_id,
            local_dir=model_download_path,
            # NOTE(review): both kwargs below are deprecated no-ops in recent
            # huggingface_hub releases (real files are copied into local_dir by
            # default and downloads always resume); kept for compatibility with
            # older library versions.
            local_dir_use_symlinks=False,  # Set to False to copy files instead of symlinking
            resume_download=True,
            token=token,
            revision=revision,
            # ignore_patterns=["*.safetensors"], # Example: if you only want pytorch_model.bin
        )
    except RepositoryNotFoundError:
        # Must be caught before HfHubHTTPError (it is a subclass of it).
        print(f"Error: Repository '{model_id}' not found on Hugging Face Hub.")
        return False
    except HfHubHTTPError as e:
        print(f"Error downloading '{model_id}': {e}")
        print("This could be due to a typo, a private model requiring a token, or network issues.")
        return False
    except Exception as e:
        print(f"An unexpected error occurred: {e}")
        return False

    print(f"Model '{model_id}' downloaded successfully to '{model_download_path}'.")
    print("\nDownloaded files:")
    for root, _, files in os.walk(model_download_path):
        for f_name in files:
            # os.path.relpath avoids the double-separator artifact (".//file")
            # that the previous str.replace of the absolute prefix produced.
            rel = os.path.relpath(os.path.join(root, f_name), model_download_path)
            print(f".{os.sep}{rel}")
    return True

if __name__ == "__main__":
    # Command-line entry point: positional model id + target dir, optional
    # --token and --revision flags, forwarded straight to download_hf_model.
    cli = argparse.ArgumentParser(description="Download a model from Hugging Face Hub.")
    cli.add_argument(
        "model_id",
        type=str,
        help="The Hugging Face model ID (e.g., 'bert-base-uncased', 'meta-llama/Llama-2-7b-chat-hf')."
    )
    cli.add_argument(
        "target_directory",
        type=str,
        help="The base directory where the model subfolder will be created (e.g., './bert', '/data/models')."
    )
    cli.add_argument(
        "--token",
        type=str,
        default=None,
        help="Optional Hugging Face API token for accessing private models. You can also login via `huggingface-cli login`."
    )
    cli.add_argument(
        "--revision",
        type=str,
        default=None,  # huggingface_hub resolves None to the 'main' branch
        help="Optional specific model version (branch, tag, or commit hash)."
    )

    opts = cli.parse_args()

    # Without an explicit --token, huggingface_hub falls back to the cached
    # credentials from `huggingface-cli login`.
    download_hf_model(opts.model_id, opts.target_directory, opts.token, opts.revision)