#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/01 19:01
# @Author  : JasonLiu
# @File    : huggingface_model_download.py

import os
import requests
from tqdm import tqdm

# Route all HTTP(S) traffic through the corporate proxy.
_PROXY = "proxy.nioint.com:8080"
os.environ["http_proxy"] = _PROXY
os.environ["https_proxy"] = _PROXY

# Same settings in requests' explicit `proxies=` form.
# NOTE(review): requests already honors the env vars above, so this dict is
# only needed if a call wants to pass proxies explicitly.
proxies = {"http": _PROXY,
           "https": _PROXY, }

# Model to download from the Hugging Face Hub.
# model_name = "meta-llama/Llama-2-13b-hf"
# model_name = "meta-llama/Llama-2-7b-chat-hf"
model_dir = "/home/shilin.zhuang.o/asr"
model_name = "openai/whisper-large-v3"

# Fetch the model's file listing from the Hub API.
api_url = f"https://huggingface.co/api/models/{model_name}"
response = requests.get(api_url, timeout=30)
# Fail fast with a clear HTTP error instead of a confusing .json()/KeyError crash
# when the model name is wrong or the API is unreachable.
response.raise_for_status()
model_info = response.json()
file_list = [file["rfilename"] for file in model_info["siblings"]]
print("file_list=", file_list)
# Create the directory the files are actually saved into.
# (Bug fix: os.makedirs(model_name) created "openai/whisper-large-v3" under the
# current working directory, not under model_dir where the downloads go.)
os.makedirs(os.path.join(model_dir, model_name), exist_ok=True)

# Download each file.
# A transfer may be interrupted by network failures; re-running the script skips
# files that are already complete, or you can pin a subset via need_list below.
skip_list = ["model.fp32-00001-of-00002.safetensors", "model.fp32-00002-of-00002.safetensors", "model.safetensors.index.fp32.json"]
need_list = ["model-00001-of-00007.safetensors", "model-00002-of-00007.safetensors"]
# Target directory is loop-invariant — compute it once.
model_file_name = os.path.join(model_dir, model_name)
for file_name in file_list:
    if file_name in skip_list:
        continue
    # if file_name not in need_list:
    #     continue
    print("start download file_name=", file_name)
    url = f"https://huggingface.co/{model_name}/resolve/main/{file_name}"
    with requests.get(url, stream=True, timeout=30) as response:
        # Bug fix: without this, a 404/5xx error page was silently written
        # into the model file as if it were the weights.
        response.raise_for_status()
        total_size = int(response.headers.get("content-length", 0))
        block_size = 1024
        temp_file_name = os.path.join(model_file_name, file_name)
        # Cheap resume support: skip files that already match the server size.
        if total_size and os.path.isfile(temp_file_name) and os.path.getsize(temp_file_name) == total_size:
            print("already downloaded, skip file_name=", file_name)
            continue
        os.makedirs(os.path.dirname(temp_file_name), exist_ok=True)
        progress_bar = tqdm(total=total_size, unit="iB", unit_scale=True)
        try:
            with open(temp_file_name, "wb") as f:
                for data in response.iter_content(block_size):
                    progress_bar.update(len(data))
                    f.write(data)
        finally:
            # Bug fix: close the bar even when the transfer fails midway,
            # so a retry doesn't leave a dangling progress bar.
            progress_bar.close()


