import gradio as gr
from huggingface_hub import InferenceClient
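
# Note: InferenceClient picks up your saved Hugging Face token by default (set
# via `huggingface-cli login` or the HF_TOKEN environment variable); access to
# meta-llama/Llama-2-7b-chat-hf is gated and must be requested on the Hub first.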

def format_chat_prompt(message, chat_history, instruction):
    """Flatten the system instruction and chat history into a single prompt string."""
    prompt = f"System: {instruction}"
    for user_message, bot_message in chat_history:
        prompt = f"{prompt}\nUser: {user_message}\nAssistant: {bot_message}"
    prompt = f"{prompt}\nUser: {message}\nAssistant:"
    return prompt
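
# For example, format_chat_prompt("How are you?", [("Hi", "Hello!")], "Be concise")
# returns:
#   System: Be concise
#   User: Hi
#   Assistant: Hello!
#   User: How are you?
#   Assistant: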

def respond(message, chat_history, instruction, model, temperature):
    # Map the radio choice to a Hub model id; anything else falls back to MistralTrix.
    if model == "Llama2-Chat":
        model = "meta-llama/Llama-2-7b-chat-hf"
    else:
        model = "CultriX/MistralTrix-v1"

    client = InferenceClient(model=model, timeout=30)
    formatted_prompt = format_chat_prompt(message, chat_history, instruction)
    bot_message = client.text_generation(formatted_prompt, max_new_tokens=1024,
                                         stop_sequences=["\nUser:", "<|endoftext|>"],
                                         temperature=temperature)
    chat_history.append((message, bot_message))
    return "", chat_history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(height=240)  # fixed display height for the chat window
    msg = gr.Textbox(label="Prompt")
    with gr.Accordion(label="Advanced options", open=False):
        model = gr.Radio(["Llama2-Chat", "MistralTrix"], value="MistralTrix",
                         label="Model")  # choices match the mapping in respond()
        system = gr.Textbox(label="System message", lines=2,
                            value="A conversation between a user and an LLM-based AI assistant. "
                                  "The assistant gives helpful and honest answers.")
        temperature = gr.Slider(label="Temperature", minimum=0.1, maximum=1, value=0.7, step=0.1)
    btn = gr.Button("Submit")
    clear = gr.ClearButton(components=[msg, chatbot], value="Clear console")

    # Both the Submit button and pressing Enter in the textbox trigger respond.
    btn.click(respond, inputs=[msg, chatbot, system, model, temperature], outputs=[msg, chatbot])
    msg.submit(respond, inputs=[msg, chatbot, system, model, temperature], outputs=[msg, chatbot])

demo.queue(max_size=20).launch()  # queue at most 20 pending requests at a time