import hashlib
import json
import os
import time
import warnings
from typing import Dict, List, Optional

import requests
from huggingface_hub import snapshot_download

# Silence the InsecureRequestWarning spam caused by the verify=False
# requests made below.
warnings.filterwarnings("ignore")

# Hugging Face mirror used for every API call and download URL in this module.
HF_ENDPOINT = "https://hf-mirror.com"
os.environ["HF_ENDPOINT"] = HF_ENDPOINT
# SECURITY: a hard-coded access token is a leaked credential -- revoke this
# token and provide HF_TOKEN via the environment instead. setdefault at least
# avoids clobbering a token the user already exported.
os.environ.setdefault("HF_TOKEN", "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")

def process_directory(directory_url: str, prefix: str = "") -> List[Dict]:
    """Recursively list every file below a Hugging Face "tree" API URL.

    Args:
        directory_url: tree API endpoint for one directory of the repo.
        prefix: path accumulated from parent directories; each returned
            file's "path" is rewritten to be relative to the repo root.

    Returns:
        Flat list of file item dicts with root-relative "path" values.

    Raises:
        requests.HTTPError: if the mirror returns an error status.
    """
    # NOTE(security): verify=False disables TLS verification for the mirror;
    # keep only if the mirror's certificate chain is genuinely broken.
    # timeout prevents the recursion from hanging forever on a stalled call.
    response = requests.get(directory_url, verify=False, timeout=30)
    response.raise_for_status()
    files: List[Dict] = []
    for item in response.json():
        full_path = f"{prefix}/{item['path']}".lstrip("/")
        if item["type"] == "file":
            item["path"] = full_path
            files.append(item)
        elif item["type"] == "directory":
            files.extend(
                process_directory(f"{directory_url}/{item['path']}", full_path)
            )
    return files


def get_file_list(
    repo_id: str, model_dir: str, repo_type: Optional[str] = "model"
) -> List[Dict]:
    """Fetch the repo's full file tree from the mirror and write two files.

    Writes into model_dir:
      * check_lfs.json -- every file with type/path/oid and, for LFS files,
        the sha256 oid (consumed later by check_download_file()).
      * large_files_download_links.txt -- direct download URLs for LFS files,
        meant for an external download manager.

    Args:
        repo_id: e.g. "Qwen/Qwen2.5-7B-Instruct".
        model_dir: existing directory the two artifacts are written into.
        repo_type: "model" or "dataset".

    Returns:
        The list of file dicts that was written to check_lfs.json.

    Raises:
        ValueError: for an unsupported repo_type.
        requests.HTTPError: if the mirror returns an error status.
    """
    if repo_type not in ("model", "dataset"):
        raise ValueError("repo_type must be 'model' or 'dataset'")
    # API paths pluralize the repo type: /api/models/... or /api/datasets/...
    main_url = f"{HF_ENDPOINT}/api/{repo_type}s/{repo_id}/tree/main"
    # NOTE(security): verify=False skips TLS verification for the mirror.
    response = requests.get(main_url, verify=False, timeout=30)
    response.raise_for_status()
    initial_files = response.json()

    all_files: List[Dict] = []
    for item in initial_files:
        if item["type"] == "file":
            all_files.append(item)
        elif item["type"] == "directory":
            all_files.extend(process_directory(f"{main_url}/{item['path']}"))

    final_files: List[Dict] = []
    large_files_links: List[str] = []
    for file in all_files:
        # For LFS files the API returns {"lfs": {"oid": "<sha256>", ...}};
        # .get() keeps a malformed/empty lfs entry from raising KeyError.
        file_lfs = (file.get("lfs") or {}).get("oid", "")
        final_files.append(
            {
                "type": file["type"],
                "path": file["path"],
                "oid": file["oid"],
                "lfs": file_lfs,
            }
        )
        if file_lfs:
            if repo_type == "model":
                # Model download links do NOT carry a "models" path segment.
                download_url = f"{HF_ENDPOINT}/{repo_id}/resolve/main/{file['path']}"
            else:
                # Dataset download links DO carry the "datasets" segment.
                download_url = (
                    f"{HF_ENDPOINT}/{repo_type}s/{repo_id}/resolve/main/{file['path']}"
                )
            large_files_links.append(download_url)

    check_json = os.path.join(model_dir, "check_lfs.json")
    with open(check_json, "w", encoding="utf-8") as outfile:
        json.dump(final_files, outfile, ensure_ascii=False, indent=4)
    links_txt = os.path.join(model_dir, "large_files_download_links.txt")
    with open(links_txt, "w", encoding="utf-8") as linkfile:
        linkfile.write("\n".join(large_files_links))
    print(f"===>大文件下载链接见文件: {links_txt}\n请使用迅雷进行下载，速度更快。")
    return final_files


def sha256_checksum(file_path: str, expected_sha256: str) -> bool:
    """Return True iff the SHA-256 of the file at file_path equals
    expected_sha256 (lowercase hex digest, as stored in check_lfs.json)."""
    sha256_hash = hashlib.sha256()
    with open(file_path, "rb") as f:
        # 64 KiB chunks: far fewer Python-level loop iterations than the
        # original 4 KiB reads on multi-GB weight files, identical result.
        for byte_block in iter(lambda: f.read(65536), b""):
            sha256_hash.update(byte_block)
    return sha256_hash.hexdigest() == expected_sha256


def check_download_file(
    model_dir: str,
    check_large_files_only: bool = False,
    repo_id: Optional[str] = None,
):
    """Verify a downloaded snapshot against the check_lfs.json manifest.

    LFS files are verified by SHA-256; small files only by existence (and are
    skipped entirely when check_large_files_only is True).

    Args:
        model_dir: directory holding the snapshot and check_lfs.json.
        check_large_files_only: skip existence checks for non-LFS files.
        repo_id: only needed when check_lfs.json is absent so the manifest
            can be fetched first. (The original code read an undefined
            global here, which raised NameError unless run as a script.)

    Returns:
        List of repo-relative paths that failed verification.
    """
    check_json = os.path.join(model_dir, "check_lfs.json")
    if os.path.exists(check_json):
        with open(check_json, "r", encoding="utf-8") as infile:
            file_list = json.load(infile)
        print(f"在本地找到{len(file_list)}个文件")
    else:
        if repo_id is None:
            raise ValueError(
                "check_lfs.json not found; pass repo_id so the manifest can be fetched"
            )
        file_list = get_file_list(repo_id, model_dir)
        print(f"未在本地找到check_lfs.json, 尝试请求获取{len(file_list)}个文件")
    if check_large_files_only:
        print("仅检查大文件，需要点时间，请耐心等待哟~")
    error_file = []
    for file in file_list:
        # normpath so manifests written on Linux also resolve on Windows.
        file_path = os.path.normpath(os.path.join(model_dir, file["path"]))
        try:
            if file.get("lfs"):
                if sha256_checksum(file_path, file["lfs"]):
                    print(f"通过! 大文件{file_path}完整性校验通过!")
                else:
                    error_file.append(file["path"])
                    print(
                        f"错误! 大文件{file_path}完整性校验不通过，文件已被篡改或损坏"
                    )
            elif not check_large_files_only:
                if os.path.exists(file_path):
                    print(f"通过! 小文件{file_path}存在，")
                else:
                    # BUGFIX: missing small files previously were printed but
                    # never added to the returned error list.
                    error_file.append(file["path"])
                    print(f"错误! 小文件{file_path}不存在，请检查")
        except Exception as e:
            print(f"处理文件{file_path}时遇到错误:{str(e)}")
            error_file.append(file["path"])
    if error_file:
        # BUGFIX: closing parenthesis was misplaced inside the f-string.
        print(f"\n\n错误文件列表(共{len(error_file)}个文件存在存储错误):\n{error_file}")
    return error_file

def download_small_model(
    repo_id, model_dir=None, repo_type="model", max_retries=3, retry_delay=5
):
    """Download a snapshot of the repo with retries, skipping large weights.

    Args:
        repo_id: repo to download, e.g. "Qwen/Qwen2.5-7B-Instruct".
        model_dir: target directory, created if needed. When None,
            snapshot_download uses the HF cache. (The original code crashed
            with TypeError on os.path.exists(None) in that case.)
        repo_type: "model" or "dataset", passed through to snapshot_download.
        max_retries: attempts before giving up.
        retry_delay: seconds to sleep between attempts.

    Returns:
        The local path the snapshot was downloaded to.

    Raises:
        RuntimeError: after max_retries failed attempts (an Exception
            subclass, so existing broad handlers still match).
    """
    if model_dir is not None:
        # exist_ok avoids the check-then-create race of the original code.
        os.makedirs(model_dir, exist_ok=True)

    last_error: Optional[Exception] = None
    for attempt in range(1, max_retries + 1):
        try:
            print(f"正在尝试下载（尝试次数：{attempt}/{max_retries}）...")
            # ignore_patterns skips the big weight formats; those are fetched
            # separately via the link list produced by get_file_list().
            download_path = snapshot_download(
                repo_id=repo_id,
                repo_type=repo_type,
                local_dir=model_dir,
                local_dir_use_symlinks=False,
                ignore_patterns=[
                    "*.pt",
                    "*.safetensors",
                    "*.bin",
                    "*.gguf",
                    "*.tar.gz",
                ],
                max_workers=16,
            )
            print(f"模型已成功下载到 {download_path}")
            return download_path
        except Exception as e:
            last_error = e
            print(f"下载失败：{str(e)}. 等待 {retry_delay} 秒后重试...")
            time.sleep(retry_delay)

    print("超出最大重试次数，下载失败")
    raise RuntimeError("下载失败，请检查网络或模型仓库是否可用") from last_error


if __name__ == "__main__":
    base_dir = "/d/HF_HOME"
    # To download a model, set repo_id/repo_type like this:
    # repo_id = "BAAI/bge-large-zh-v1.5"  # copy the repo_id from the hub page
    repo_id = "Qwen/Qwen2.5-7B-Instruct"
    repo_type = "model"  # use "model" for models

    # To download a dataset instead:
    # repo_id = "UCSC-VLAA/MedTrinity-25M"
    # repo_type = "dataset"  # use "dataset" for datasets

    # normpath keeps the path valid when switching between Windows and Linux.
    # (The original had a dead duplicate assignment to model_dir here.)
    model_dir = os.path.normpath(os.path.join(base_dir, repo_id.split("/")[-1]))
    os.makedirs(model_dir, exist_ok=True)

    # 1) write the manifest and the large-file download-link list
    get_file_list(repo_id, model_dir, repo_type)
    # 2) fetch everything except the large weight files
    download_small_model(repo_id, model_dir, repo_type)
    # 3) verify what was downloaded (large files only)
    check_download_file(model_dir, check_large_files_only=True)
