"""Minimal Gradio chat demo for the ``heegyu/bluechat-v0`` text-generation model.

Builds the conversation history into a newline-separated prompt, samples a
continuation from the model, and shows it in a ``gr.Chatbot`` widget.
"""

import gradio as gr
import torch
import random  # NOTE(review): unused here; kept in case other code relies on it
import time    # NOTE(review): unused here; kept in case other code relies on it
from transformers import pipeline

# Module-level generation pipeline; prefers the first CUDA device when available.
generator = pipeline(
    'text-generation',
    model="heegyu/bluechat-v0",
    device="cuda:0" if torch.cuda.is_available() else 'cpu',
)


def query(message, chat_history, max_turn=4):
    """Generate a model reply to *message* given recent chat history.

    Args:
        message: The latest user utterance.
        chat_history: List of ``(user, bot)`` string pairs, oldest first.
        max_turn: Maximum number of past turns to include in the prompt;
            older turns are dropped to bound prompt length.

    Returns:
        The model's continuation (text after the prompt), stripped of
        surrounding whitespace.
    """
    prompt = []
    # Keep only the most recent `max_turn` exchanges.
    if len(chat_history) > max_turn:
        chat_history = chat_history[-max_turn:]
    # NOTE(review): each line is prefixed with a single space only — if the
    # model expects speaker tokens (e.g. "<usr>"/"<bot>"), they may have been
    # lost from this source; verify against the model card.
    for user, bot in chat_history:
        prompt.append(f" {user}")
        prompt.append(f" {bot}")
    prompt.append(f" {message}")
    prompt = "\n".join(prompt) + "\n"

    output = generator(
        prompt,
        eos_token_id=2,  # stop at token id 2 (original comment: "\n")
        max_new_tokens=128,
        do_sample=True,
        top_p=0.9,
    )[0]['generated_text']
    print(output)
    # The pipeline echoes the prompt; keep only the newly generated suffix.
    response = output[len(prompt):]
    return response.strip()


with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    clear = gr.Button("Clear")

    def respond(message, chat_history):
        """Submit handler: append (user, bot) turn and clear the textbox."""
        bot_message = query(message, chat_history)
        chat_history.append((message, bot_message))
        # Return "" to empty the input box; return history to refresh the chat.
        return "", chat_history

    msg.submit(respond, [msg, chatbot], [msg, chatbot])
    # Clearing returns None, which resets the Chatbot component's state.
    clear.click(lambda: None, None, chatbot, queue=False)

demo.launch()