import gradio as gr
import spaces
import torch
from torch.cuda.amp import autocast
import subprocess
from huggingface_hub import InferenceClient
import os

"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""

from accelerate import init_empty_weights, infer_auto_device_map, load_checkpoint_and_dispatch
from accelerate import Accelerator

# Install psutil at runtime before importing it, in case it is missing
# from the Space's base image.
subprocess.run(
    "pip install psutil",
    shell=True,
)
import psutil

import bitsandbytes as bnb

from datetime import datetime

# Install flash-attn without building its CUDA kernels locally; merge in the
# current environment so pip keeps PATH and the other variables it needs.
subprocess.run(
    "pip install flash-attn --no-build-isolation",
    env={**os.environ, "FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"},
    shell=True,
)

client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
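# Minimal sketch of how `client` could be queried (hypothetical usage; this
# app generates locally with transformers below instead of calling the API):
# reply = client.chat_completion(
#     messages=[{"role": "user", "content": "Hello!"}],
#     max_tokens=64,
# )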

token = os.getenv('token')
# Avoid echoing the secret itself; only report whether it was found.
print('token found:', token is not None)

from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_id = "microsoft/Phi-3-medium-4k-instruct"

tokenizer = AutoTokenizer.from_pretrained(
    model_id,
    token=token,
    trust_remote_code=True,
)

accelerator = Accelerator()

model = AutoModelForCausalLM.from_pretrained(
    model_id,
    token=token,
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",
    low_cpu_mem_usage=True,
    trust_remote_code=True,
    device_map='cuda',
)

model = accelerator.prepare(model)

pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
)
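
# Hypothetical smoke test (not part of the app flow): the text-generation
# pipeline accepts a list of chat messages and applies the model's chat
# template automatically.
# print(pipe(
#     [{"role": "user", "content": "Say hi."}],
#     max_new_tokens=8,
#     return_full_text=False,
# )[0]["generated_text"])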

import json


def str_to_json(str_obj):
    """Parse a JSON-encoded string into the corresponding Python object."""
    json_obj = json.loads(str_obj)
    return json_obj
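
# Hypothetical example of the JSON string this app expects in the chat box:
# a list of chat messages in the usual role/content schema, e.g.
# '[{"role": "user", "content": "What is the capital of France?"}]'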


@spaces.GPU(duration=90)
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    # The incoming message is expected to be a JSON-encoded list of chat
    # messages, which is passed to the pipeline as-is; `history`,
    # `system_message`, and `top_p` are currently unused.
    messages = []
    json_obj = str_to_json(message)
    print(json_obj)
    messages = json_obj

    generation_args = {
        "max_new_tokens": max_tokens,
        "return_full_text": False,
        "temperature": temperature,
        # With do_sample=False decoding is greedy, so `temperature` (and the
        # top-p slider) do not affect the output.
        "do_sample": False,
    }

    output = pipe(messages, **generation_args)
    print(output[0]['generated_text'])
    gen_text = output[0]['generated_text']

    yield gen_text


"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)


if __name__ == "__main__":
    demo.launch()