import gradio as gr
from huggingface_hub import InferenceClient


def format_chat_prompt(message, chat_history, instruction):
    """Flatten the system instruction and chat history into one plain-text
    prompt ending with "Assistant:", which cues the model to generate the
    next assistant turn."""
    prompt = f"System: {instruction}"
    for user_message, bot_message in chat_history:
        prompt = f"{prompt}\nUser: {user_message}\nAssistant: {bot_message}"
    prompt = f"{prompt}\nUser: {message}\nAssistant:"
    return prompt


def respond(message, chat_history, instruction, model_choice, temperature):
    # Map the radio-button label to a Hugging Face model repo id.
    if model_choice == "Llama2-Chat":
        model = "meta-llama/Llama-2-7b-chat-hf"
    else:
        model = "CultriX/MistralTrix-v1"
    client = InferenceClient(model=model, timeout=30)
    formatted_prompt = format_chat_prompt(message, chat_history, instruction)
    # Stop at "\nUser:" so the model does not invent the user's next turn.
    bot_message = client.text_generation(
        formatted_prompt,
        max_new_tokens=1024,
        stop_sequences=["\nUser:", "<|endoftext|>"],
        temperature=temperature,
    )
    chat_history.append((message, bot_message))
    # Clear the textbox and hand the updated history back to the Chatbot.
    return "", chat_history


with gr.Blocks() as demo:
    chatbot = gr.Chatbot(height=240)  # visual param
    msg = gr.Textbox(label="Prompt")
    with gr.Accordion(label="Advanced options", open=False):
        # These labels must match the branches in respond(); any choice other
        # than "Llama2-Chat" falls through to MistralTrix.
        model = gr.Radio(
            ["Llama2-Chat", "MistralTrix"], label="Model", value="MistralTrix"
        )
        system = gr.Textbox(
            label="System message",
            lines=2,
            value="A conversation between a user and an LLM-based AI assistant. "
            "The assistant gives helpful and honest answers.",
        )
        temperature = gr.Slider(
            label="Temperature", minimum=0.1, maximum=1, value=0.7, step=0.1
        )
    btn = gr.Button("Submit")
    clear = gr.ClearButton(components=[msg, chatbot], value="Clear console")

    btn.click(respond, inputs=[msg, chatbot, system, model, temperature],
              outputs=[msg, chatbot])
    # Also respond when the user presses Enter in the textbox.
    msg.submit(respond, inputs=[msg, chatbot, system, model, temperature],
               outputs=[msg, chatbot])

demo.queue(max_size=20).launch()
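# Note (not part of the original app): meta-llama/Llama-2-7b-chat-hf is a
# gated repo, so serverless Inference API calls to it generally require a
# Hugging Face access token. A minimal sketch, assuming the token is stored
# in the HF_TOKEN environment variable:
#
#     import os
#     client = InferenceClient(model=model, token=os.environ["HF_TOKEN"], timeout=30)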
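# Optional extension (a sketch, not the author's code): text_generation()
# also accepts stream=True, which yields tokens as they are produced, and
# Gradio renders generator callbacks incrementally. A hypothetical
# respond_stream() could be wired to btn.click/msg.submit in place of respond:
#
#     def respond_stream(message, chat_history, instruction, model_choice, temperature):
#         model = ("meta-llama/Llama-2-7b-chat-hf" if model_choice == "Llama2-Chat"
#                  else "CultriX/MistralTrix-v1")
#         client = InferenceClient(model=model, timeout=30)
#         formatted_prompt = format_chat_prompt(message, chat_history, instruction)
#         chat_history.append((message, ""))  # placeholder turn to fill in
#         for token in client.text_generation(formatted_prompt, max_new_tokens=1024,
#                                             stop_sequences=["\nUser:", "<|endoftext|>"],
#                                             temperature=temperature, stream=True):
#             chat_history[-1] = (message, chat_history[-1][1] + token)
#             yield "", chat_history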