import os
import requests
from tqdm import tqdm
import json
from pathlib import Path
from concurrent.futures import ThreadPoolExecutor, as_completed, TimeoutError
import threading
import time

# Hugging Face mirror endpoints (hf-mirror.com proxies huggingface.co).
HF_API = "https://hf-mirror.com/api/models"
# Web view of a repo's file tree. NOTE(review): appears unused in this file.
REPO_URL = "https://hf-mirror.com/{repo}/tree/main"
# Direct-download template for a single file inside a repository.
RAW_URL = "https://hf-mirror.com/{repo}/resolve/main/{path}"

def get_repo_files(repo_name):
    """Fetch repository metadata (including its file list) from the mirror API.

    Args:
        repo_name: Repository identifier such as "org/name".

    Returns:
        The parsed JSON dict on success, or None on any error
        (the error is printed, never raised).
    """
    url = f"{HF_API}/{repo_name}"
    try:
        # Explicit timeout so an unreachable mirror cannot hang the whole run;
        # the original call had no timeout and could block forever.
        response = requests.get(url, timeout=30)
        response.raise_for_status()
        return response.json()
    except Exception as e:
        print(f"获取仓库 {repo_name} 文件列表失败: {str(e)}")
        return None

def download_file(url, save_path, timeout=None):
    """Download a single file to save_path with a tqdm progress bar.

    Args:
        url: File download URL.
        save_path: Local destination path (parent directories are created).
        timeout: Overall time budget in seconds; None derives it from the
            file size (5 s per MB, minimum 300 s).

    Returns:
        True on success, False on any failure (the error is printed and any
        partially written file is removed so a later run will retry it).
    """
    try:
        # HEAD first to learn the file size so a sensible timeout can be chosen.
        head_resp = requests.head(url, allow_redirects=True, timeout=10)
        file_size = int(head_resp.headers.get('content-length', 0))

        # Dynamic timeout: 5 s per MB, never below 300 s.
        if timeout is None:
            timeout = max(300, (file_size // (1024 * 1024)) * 5)

        # (connect timeout, per-read timeout).
        response = requests.get(url, stream=True, timeout=(10, timeout))
        response.raise_for_status()

        total_size = int(response.headers.get('content-length', 0))
        block_size = 1024 * 1024  # 1 MB chunks

        os.makedirs(os.path.dirname(save_path), exist_ok=True)

        start_time = time.time()
        # Context managers guarantee the bar and file are closed even if an
        # exception fires mid-transfer (the original leaked the bar on error).
        # The original also wrapped these calls in `with threading.Lock():`,
        # which created a brand-new lock each time and synchronized nothing,
        # so those no-op locks are removed; tqdm handles its own locking.
        with tqdm(
            desc=os.path.basename(save_path),
            total=total_size,
            unit='B',
            unit_scale=True,
            unit_divisor=1024,
            leave=False,
        ) as bar, open(save_path, 'wb') as f:
            for data in response.iter_content(block_size):
                # Enforce the overall budget: the requests timeout above only
                # bounds individual reads, not the total transfer time.
                if time.time() - start_time > timeout:
                    raise TimeoutError(f"下载超时({timeout}秒)")
                f.write(data)
                bar.update(len(data))
        return True
    except Exception as e:
        print(f"下载 {url} 失败: {str(e)}")
        # Remove the partial file; otherwise get_all_download_tasks's
        # os.path.exists resume check would skip this broken file next run.
        try:
            if os.path.exists(save_path):
                os.remove(save_path)
        except OSError:
            pass
        return False

def get_all_download_tasks(repos, output_dir=None):
    """Collect (url, save_path) pairs for every file that still needs downloading.

    Args:
        repos: Iterable of "org/name" repository identifiers.
        output_dir: Destination root; defaults to a "models" directory
            next to this script.

    Returns:
        A list of (download_url, local_save_path) tuples, skipping files
        that already exist locally and git bookkeeping files.
    """
    if output_dir is None:
        output_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models")

    tasks = []
    for repo in repos:
        target_dir = os.path.join(output_dir, repo.split('/')[-1])

        meta = get_repo_files(repo)
        if not meta:
            continue

        for sibling in meta.get('siblings', []):
            rel_path = sibling['rfilename']
            # Git bookkeeping files are not model content.
            if rel_path.startswith('.git/'):
                continue

            local_path = os.path.join(target_dir, rel_path)
            # Resume support: files already on disk are not re-fetched.
            if os.path.exists(local_path):
                continue

            tasks.append((RAW_URL.format(repo=repo, path=rel_path), local_path))

    return tasks

def main():
    """Download every file of the configured repositories with a thread pool.

    Fixes a serialization bug in the original: calling future.result()
    immediately after executor.submit() inside the submit loop blocked until
    each download finished, so downloads ran one at a time despite the pool.
    Retries now live inside the worker, and each file is submitted exactly
    once, so the pool genuinely runs up to max_workers downloads in parallel.
    (The original also kept failed attempts' futures around, marking the run
    failed even when a retry later succeeded.)
    """
    repos = [
        "mit-han-lab/svdq-fp4-flux.1-dev",
        # "mit-han-lab/svdq-int4-flux.1-schnell",
        # "mit-han-lab/svdq-int4-flux.1-dev",
        # "mit-han-lab/svdq-int4-flux.1-canny-dev",
        # "mit-han-lab/svdq-int4-flux.1-fill-dev",
        # "mit-han-lab/svdq-int4-flux.1-depth-dev",
        # "mit-han-lab/svdq-flux.1-schnell-pix2pix-turbo",
        # "mit-han-lab/svdq-int4-shuttle-jaguar",
        # "mit-han-lab/svdq-int4-sana-1600m"
    ]

    print("开始下载MIT-HAN-LAB模型仓库...")
    all_tasks = get_all_download_tasks(repos)
    print(f"总下载文件数: {len(all_tasks)}")

    retry_count = 3  # maximum attempts per file

    def _download_with_retry(url, save_path):
        # Run the retry loop inside the worker thread so the pool stays busy.
        for attempt in range(retry_count):
            if download_file(url, save_path):
                return True
            print(f"第{attempt+1}次重试下载: {os.path.basename(save_path)}")
        return False

    success = True
    # Modest worker count to avoid the downloads starving each other of bandwidth.
    with ThreadPoolExecutor(max_workers=4) as executor:
        futures = [
            executor.submit(_download_with_retry, url, save_path)
            for url, save_path in all_tasks
        ]
        for future in as_completed(futures):
            if not future.result():
                success = False

    if success:
        print("\n所有文件下载完成!")
    else:
        print("\n部分文件下载失败!")


if __name__ == "__main__":
    main()