import gradio as gr
from transformers import AutoTokenizer, AutoModel

# Load the tokenizer and model once at startup, not inside the handler,
# so the 6B-parameter model is not reloaded on every request.
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
# .float() runs the model in full precision on CPU, as documented in the THUDM model card.
model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).float()
model = model.eval()

def chatglm2(q):
    # Answer a single question with an empty conversation history.
    response, history = model.chat(tokenizer, q, history=[])
    return response

# Labels: "请提问" = "Please ask a question", "回答" = "Answer".
# gr.Text is used for the output; gr.Label is meant for classification results.
page = gr.Interface(fn=chatglm2, inputs=gr.Text(label="请提问"), outputs=gr.Text(label="回答"))
page.launch()
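
# A minimal sketch of GPU loading as an alternative to CPU inference, assuming a CUDA
# device with roughly 13 GB of free memory is available; following the usage shown in
# the THUDM/chatglm2-6b model card, the weights are loaded in half precision and moved
# to the GPU instead of calling .float():
#
#   model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).half().cuda()
#   model = model.eval()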