"""Gradio demo for Chinese dialogue response generation.

Wraps the HIT-TMG ``dialogue-bart-large-chinese`` seq2seq model in a
simple text-in / text-out web interface: the user pastes a dialogue
history and the model generates the next reply.
"""
import gradio as gr
from transformers import BertTokenizer, BartForConditionalGeneration

title = "标题"
description = "描述"

# Loaded once at import time; the first run downloads the checkpoint
# from the Hugging Face Hub.
tokenizer = BertTokenizer.from_pretrained("HIT-TMG/dialogue-bart-large-chinese")
model = BartForConditionalGeneration.from_pretrained("HIT-TMG/dialogue-bart-large-chinese")


def chat(history: str) -> str:
    """Generate a reply for the given dialogue history.

    Args:
        history: The conversation so far, as a single string.

    Returns:
        The model's generated response, with special tokens stripped.
    """
    # Prefix expected by the checkpoint to mark the dialogue-history segment.
    history = "对话历史:" + history
    input_ids = tokenizer(history, return_tensors='pt').input_ids
    output_ids = model.generate(input_ids)[0]
    return tokenizer.decode(output_ids, skip_special_tokens=True)


# NOTE(review): the original also built `gr.Chatbot().style(color_map=...)`
# here, but the widget was never passed to the interface, and `.style()` was
# removed in Gradio 4 — the dead line would crash at import on modern Gradio.
demo = gr.Interface(
    chat,
    inputs=gr.Textbox(lines=2, placeholder="输入你的对话历史"),
    outputs=["text"],
    title=title,
    description=description,
)

if __name__ == "__main__":
    demo.launch()