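# Gradio UI for a local llama3 model served through Ollama via llama_index:
# one tab for single-shot completion, one for multi-turn chat.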
from llama_index.llms.ollama import Ollama
from llama_index.core.llms import ChatMessage
import gradio as gr

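# Talk to a locally running Ollama server (llama_index's Ollama client
# defaults to http://localhost:11434).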
llm = Ollama(model="llama3", request_timeout=120.0)

def get_completion(prompt):
    # llm.complete() returns a CompletionResponse; .text holds the generated string.
    response = llm.complete(prompt)
    return response.text

def chat_with_llm(messages):
    # Convert plain dicts into llama_index ChatMessage objects before calling chat().
    chat_messages = [ChatMessage(role=msg["role"], content=msg["content"]) for msg in messages]
    response = llm.chat(chat_messages)
    return response

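# Gradio callback for the single-completion tab.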
def generate_response(prompt):
    return get_completion(prompt)

def generate_chat_response(history):
    # Rebuild the full message list from the Gradio chat history, which arrives
    # as [user_message, assistant_message] pairs, prefixed with a system prompt.
    messages = [{"role": "system", "content": "You are a pirate with a colorful personality"}]
    for user_msg, assistant_msg in history:
        messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    response = chat_with_llm(messages)
    # llm.chat() returns a ChatResponse; the reply text lives on response.message.content.
    return response.message.content

# The gr.inputs / gr.outputs namespaces are deprecated; components are
# instantiated directly from the top-level gradio module.
single_input = gr.Textbox(lines=2, placeholder="Enter your prompt here...")
single_output = gr.Textbox()
single_interface = gr.Interface(fn=generate_response, inputs=single_input, outputs=single_output, title="LLM Single Completion")

chat_input = gr.Chatbot()
chat_output = gr.Textbox()
chat_interface = gr.Interface(fn=generate_chat_response, inputs=chat_input, outputs=chat_output, title="LLM Chat")

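# Combine both demos into one app, with a tab per interface.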
app = gr.TabbedInterface([single_interface, chat_interface], ["Single Completion", "Chat"])

app.launch()