from huggingface_hub import InferenceClient
import gradio as gr
import datetime

# Initialize the InferenceClient
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

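# Convert Gradio's (user, assistant) history pairs into the Mixtral instruct
# format: [INST] user [/INST] reply</s>, ending with the new user message.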
def format_prompt(message, history):
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt

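# Request a streamed completion from the Inference API and yield partial
# output as tokens arrive so the chat window updates incrementally.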
def generate(prompt, history, system_prompt, temperature=0.9, max_new_tokens=9048, top_p=0.95, repetition_penalty=1.0):
    temperature = max(float(temperature), 1e-2)
    top_p = float(top_p)
    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )
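    # A fixed seed makes sampling reproducible for identical prompts.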

    # Prefix the timestamp note to the user-supplied system prompt instead
    # of overwriting it, so the System Prompt textbox still has an effect.
    now = datetime.datetime.now()
    formatted_time = now.strftime("%H.%M.%S, %B, %Y")
    system_prompt = f"server log: ~This message was sent at {formatted_time}. The actual year is 2024.~ {system_prompt}"

    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""
    for response in stream:
        output += response.token.text
        # Yield the running text after every token so the UI streams.
        yield output
    return output

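# Extra controls rendered in the UI; their values are passed to `generate`
# after the message and history, in the order listed here.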
additional_inputs = [
    gr.Textbox(label="System Prompt", max_lines=1, interactive=True),
    gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs"),
    gr.Slider(label="Max new tokens", value=9048, minimum=256, maximum=9048, step=64, interactive=True, info="The maximum number of new tokens"),
    gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values sample more low-probability tokens"),
    gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens")
]

# Assemble the streaming chat UI. gr.ChatInterface calls `generate` with the
# message, the chat history, and each additional input, in that order.
gr.ChatInterface(
    fn=generate,
    chatbot=gr.Chatbot(show_label=True, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
    additional_inputs=additional_inputs,
    title="ConvoLite",
    concurrency_limit=20,
    theme=gr.themes.Soft(),
).launch(show_api=False)