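# Gradio Space that downloads a Hugging Face model, converts its weights to MLC
# format with mlc_llm, and uploads the converted model (plus a model card) to
# the logged-in user's account.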
import os
import shutil
import subprocess
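# Opt out of Gradio usage analytics (set before gradio is imported).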
os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
import gradio as gr
from huggingface_hub import HfApi
from huggingface_hub import ModelCard
from textwrap import dedent
HF_PATH = "https://huggingface.co/"
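# Conversation templates supported by MLC-LLM's gen_config step; the template
# determines how prompts are formatted for the converted model.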
CONV_TEMPLATES = [
"llama-3",
"llama-3_1",
"chatml",
"chatml_nosystem",
"qwen2",
"open_hermes_mistral",
"neural_hermes_mistral",
"llama_default",
"llama-2",
"mistral_default",
"gpt2",
"codellama_completion",
"codellama_instruct",
"vicuna_v1.1",
"conv_one_shot",
"redpajama_chat",
"rwkv_world",
"rwkv",
"gorilla",
"gorilla-openfunctions-v2",
"guanaco",
"dolly",
"oasst",
"stablelm",
"stablecode_completion",
"stablecode_instruct",
"minigpt",
"moss",
"LM",
"stablelm-3b",
"gpt_bigcode",
"wizardlm_7b",
"wizard_coder_or_math",
"glm",
"custom", # for web-llm only
"phi-2",
"phi-3",
"phi-3-vision",
"stablelm-2",
"gemma_instruction",
"orion",
"llava",
"hermes2_pro_llama3",
"hermes3_llama-3_1",
"tinyllama_v1_0",
"aya-23",
]
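# MLC-LLM quantization modes: qAfB means A-bit weights with B-bit float
# activations (q0f16/q0f32 are unquantized); trailing suffixes select the
# quantization algorithm variant (e.g. _awq for AWQ).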
QUANTIZATIONS = ["q0f16",
"q0f32",
"q3f16_1",
"q4f16_1",
"q4f32_1",
"q4f16_awq"]
def button_click(hf_model_id, conv_template, quantization, oauth_token: gr.OAuthToken | None):
    """Convert a Hugging Face model to MLC format and upload it to the logged-in user's account."""
    if oauth_token is None or not oauth_token.token:
        raise ValueError("Log in to Hugging Face to use this")
    if "/" not in hf_model_id:
        raise ValueError("Model ID must be of the form 'organization/model-name'")
    api = HfApi(token=oauth_token.token)
    model_dir_name = hf_model_id.split("/")[1]
    mlc_model_name = f"{model_dir_name}-{quantization}-MLC"

    # Download the source weights locally before conversion.
    os.makedirs("dist/models", exist_ok=True)
    subprocess.run(["git", "lfs", "install"], check=True)
    api.snapshot_download(repo_id=hf_model_id, local_dir=f"./dist/models/{model_dir_name}")

    # Convert the weights, then generate the MLC chat config for the chosen
    # conversation template; check=True raises if either command fails instead
    # of silently continuing.
    subprocess.run(["mlc_llm", "convert_weight", f"./dist/models/{model_dir_name}",
                    "--quantization", quantization,
                    "-o", f"dist/{mlc_model_name}"], check=True)
    subprocess.run(["mlc_llm", "gen_config", f"./dist/models/{model_dir_name}",
                    "--quantization", quantization,
                    "--conv-template", conv_template,
                    "-o", f"dist/{mlc_model_name}"], check=True)
# push to HF
user_name = api.whoami()["name"]
created_repo_url = api.create_repo(repo_id=f"{user_name}/{mlc_model_name}", private=True)
created_repo_id = created_repo_url.repo_id
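    # upload_large_folder splits the work into resumable chunked uploads,
    # which suits multi-gigabyte weight files.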
    api.upload_large_folder(folder_path=f"./dist/{mlc_model_name}",
                            repo_id=created_repo_id,
                            repo_type="model")
# push model card to HF
card = ModelCard.load(hf_model_id, token=oauth_token.token)
if not card.data.tags:
card.data.tags = []
card.data.tags.append("mlc-ai")
card.data.tags.append("MLC-Weight-Conversion")
card.data.base_model = hf_model_id
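    # Build a README that links back to the source model and shows how to run it.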
    card.text = dedent(
        f"""
        # {created_repo_id}

        This model was converted to MLC format with {quantization} quantization from [{hf_model_id}]({HF_PATH}{hf_model_id}).
        The conversion was done using the [MLC-Weight-Conversion](https://huggingface.co/spaces/mlc-ai/MLC-Weight-Conversion) space.

        To run this model, first install [MLC-LLM](https://llm.mlc.ai/docs/install/mlc_llm.html#install-mlc-packages), then chat with it from your terminal:

        ```bash
        mlc_llm chat HF://{created_repo_id}
        ```

        For more information on how to use MLC-LLM, see the MLC-LLM [documentation](https://llm.mlc.ai/docs/index.html).
        """
    )
card.save("./dist/README.md")
api.upload_file(path_or_fileobj="./dist/README.md",
path_in_repo="README.md",
repo_id=created_repo_id,
repo_type="model")
os.system("rm -rf dist/")
return "Successful"
with gr.Blocks() as demo:
gr.LoginButton()
model_id = gr.Textbox(label="HF Model ID")
conv = gr.Dropdown(CONV_TEMPLATES, label="Conversation Template")
quant = gr.Dropdown(QUANTIZATIONS, label="Quantization Method")
btn = gr.Button("Convert to MLC")
out = gr.Textbox(label="Conversion Result")
    btn.click(fn=button_click, inputs=[model_id, conv, quant], outputs=out)
demo.launch()