from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import gradio as gr
# Set the device
device = "cpu" # replace with your device: "cpu", "cuda", "mps"
# Initialize model and tokenizer
peft_model_id = "CMLM/ZhongJing-2-1_8b"
base_model_id = "Qwen/Qwen1.5-1.8B-Chat"
model = AutoModelForCausalLM.from_pretrained(base_model_id, device_map=device)
model.load_adapter(peft_model_id)
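# Note: load_adapter (above) requires the `peft` package to be installed; the
# adapter weights are attached on top of the frozen Qwen1.5 base, not merged into it.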
tokenizer = AutoTokenizer.from_pretrained(
    peft_model_id,
    padding_side="right",
    trust_remote_code=True,
    pad_token="<|endoftext|>"  # Qwen1.5's end-of-text token doubles as the pad token
)
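# Left padding is usually preferred for batched decoder-only generation, but this
# app tokenizes a single prompt per request, so no padding is applied in practice.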
def get_model_response(question):
    # Build the prompt without any additional context
    prompt = f"Question: {question}"
    messages = [
        {"role": "system", "content": "You are a helpful TCM medical assistant named 仲景中医大语言模型, created by 医哲未来 of Fudan University."},
        {"role": "user", "content": prompt}
    ]

    # Apply the chat template and tokenize the input
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
    model_inputs = tokenizer([text], return_tensors="pt").to(device)

    # Generate the response; pass the attention mask explicitly so the model
    # does not have to infer it from the pad token
    generated_ids = model.generate(
        model_inputs.input_ids,
        attention_mask=model_inputs.attention_mask,
        max_new_tokens=512
    )

    # Keep only the newly generated tokens (strip the prompt)
    generated_ids = [
        output_ids[len(input_ids):]
        for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]

    # Decode the generated tokens into text
    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return response
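# Quick sanity check outside the Gradio UI (hypothetical example question);
# uncomment to try once the model weights have downloaded:
# print(get_model_response("What does TCM mean by qi and blood deficiency?"))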
# Gradio callback: wraps get_model_response (no extra context parameter)
def chat_interface(question):
    response = get_model_response(question)
    return response
iface = gr.Interface(
    fn=chat_interface,
    inputs=["text"],
    outputs="text",
    title="仲景GPT-V2-1.8B",
    description="博极医源,精勤不倦。(Explore the sources of medicine exhaustively; practice with tireless diligence.) Unlocking the Wisdom of Traditional Chinese Medicine with AI."
)
# Launch the interface (share=True is not supported inside a Hugging Face Space,
# where the app is already publicly served, and will be ignored with a warning)
iface.launch(share=True)