from modelscope.hub.api import HubApi
import os
import json

# Install
"""
apt install git-lfs
pip install modelscope pyyaml fire huggingface_hub
"""
# Usage
"""
export HF_ENDPOINT=https://hf-mirror.com
python3 upload.py \
        --hf_name=neuralmagic/DeepSeek-Coder-V2-Lite-Instruct-FP8 \
        --modelscope_token=<MODELSCOPE_TOKEN>
python3 upload.py \
        --hf_name=neuralmagic/DeepSeek-Coder-V2-Lite-Base-FP8 \
        --modelscope_token=<MODELSCOPE_TOKEN>
python3 upload.py \
        --hf_name=neuralmagic/Meta-Llama-3.1-8B-FP8 \
        --modelscope_token=<MODELSCOPE_TOKEN>
python3 upload.py \
        --hf_name=neuralmagic/Meta-Llama-3.1-70B-FP8 \
        --modelscope_token=<MODELSCOPE_TOKEN>
python3 upload.py \
        --hf_name=neuralmagic/Meta-Llama-3.1-8B-Instruct-quantized.w8a8 \
        --modelscope_token=<MODELSCOPE_TOKEN>
python3 upload.py \
        --hf_name=neuralmagic/pixtral-12b-FP8-dynamic \
        --modelscope_token=<MODELSCOPE_TOKEN>

python3 upload.py \
        --hf_name=Qwen2.5-Coder-32B-GPTQ-Int4 \
        --modelscope_token=<MODELSCOPE_TOKEN>
"""


def run(hf_name="neuralmagic/DeepSeek-Coder-V2-Lite-Instruct-FP8",
        modelscope_token=None,
        modelscope_user="jackle"):
    """Push a locally downloaded Hugging Face model repo to ModelScope.

    Writes the ``configuration.json`` file that ModelScope expects into the
    local model directory, then logs in and pushes the whole directory.

    Args:
        hf_name: Hugging Face repo id (``org/model``). Only the part after
            the last ``/`` is used as the local directory name and the
            ModelScope model name.
        modelscope_token: ModelScope access token. Falls back to the
            ``MODELSCOPE_TOKEN`` environment variable. NOTE(security): the
            previous default was a hard-coded credential committed to the
            file; it has been removed — never embed real tokens in source.
        modelscope_user: ModelScope namespace (user/org) to push into.

    Raises:
        ValueError: if no token is supplied and ``MODELSCOPE_TOKEN`` is unset.
    """
    if modelscope_token is None:
        modelscope_token = os.environ.get("MODELSCOPE_TOKEN")
    if not modelscope_token:
        raise ValueError(
            "A ModelScope token is required: pass --modelscope_token or "
            "set the MODELSCOPE_TOKEN environment variable.")

    # Extract the model name from the repo path ("org/model" -> "model").
    model_name = hf_name.split("/")[-1]
    # os.system(
    #     f"huggingface-cli download --resume-download {hf_name} --local-dir {model_name}")

    MODEL_ID = f"{modelscope_user}/{model_name}"
    LOCAL_MODEL_DIR = model_name

    # The download step above is commented out, so the local directory may
    # not exist yet; create it so the config write below cannot fail with
    # FileNotFoundError.
    os.makedirs(LOCAL_MODEL_DIR, exist_ok=True)

    # ModelScope requires a configuration.json describing framework/task.
    configuration = os.path.join(LOCAL_MODEL_DIR, "configuration.json")
    with open(configuration, "w") as f:
        json.dump({"framework": "Pytorch", "task": "other"}, f)

    api = HubApi()
    api.login(modelscope_token)
    api.push_model(
        model_id=MODEL_ID,
        model_dir=LOCAL_MODEL_DIR
    )


if __name__ == "__main__":
    # Expose `run` as a command-line interface via python-fire.
    import fire

    fire.Fire(run)