import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_model_name = "hfl/llama-3-chinese-8b-instruct-v3"
adapter_model_name = "tiankuo1111/LLAMA3-TEST"
# Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained(base_model_name)

# Load the base model on CPU in full precision
base_model = AutoModelForCausalLM.from_pretrained(base_model_name, torch_dtype=torch.float32, device_map=None)
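# If the Space runs on GPU hardware, loading in half precision with automatic
# device placement is a common alternative (an assumption about the runtime,
# not part of the original script; device_map="auto" requires the accelerate package):
# base_model = AutoModelForCausalLM.from_pretrained(
#     base_model_name, torch_dtype=torch.float16, device_map="auto"
# )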

# Load the LoRA adapter on top of the base model
model = PeftModel.from_pretrained(base_model, adapter_model_name)
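# Optionally, the LoRA weights can be folded into the base model for faster
# inference via peft's merge_and_unload(); shown here as a commented-out sketch,
# not part of the original script:
# model = model.merge_and_unload()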

# Chat function used by the Gradio interface
def chat_with_model(prompt):
    # Keep the inputs on the same device as the model (CPU here); the original
    # .to("cuda") call fails at runtime because the model was loaded without a GPU device map.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        output = model.generate(**inputs, max_new_tokens=100)
    return tokenizer.decode(output[0], skip_special_tokens=True)

iface = gr.Interface(fn=chat_with_model, inputs="text", outputs="text", title="LoRA Model Chatbot")
iface.launch()