import gradio as gr
import openai
# Function to handle predictions
def predict(inputs, top_p, temperature, openai_api_key, system_prompt, chat_counter, chatbot=[], history=[]):
    # Initialize the OpenAI client with the user-supplied key and the proxy base URL
    client = openai.OpenAI(
        api_key=openai_api_key,
        base_url="https://aigptx.top/"
    )
    # Build the system prompt if provided
    messages = []
    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})
    # Add previous conversation history
    if chat_counter != 0:
        for data in chatbot:
            messages.append({"role": "user", "content": data[0]})
            messages.append({"role": "assistant", "content": data[1]})
    # Add the current user input to the messages
    messages.append({"role": "user", "content": inputs})
    payload = {
        "model": "gpt-3.5-turbo",
        "messages": messages,
        "temperature": temperature,
        "top_p": top_p,
        "n": 1,
        "stream": True,
        "presence_penalty": 0,
        "frequency_penalty": 0,
    }
    # Increment the chat counter and record the user turn
    chat_counter += 1
    history.append(inputs)
    # Stream the response from the (proxied) Chat Completions endpoint
    response = client.chat.completions.create(
        model=payload["model"],
        messages=payload["messages"],
        temperature=payload["temperature"],
        top_p=payload["top_p"],
        n=payload["n"],
        stream=payload["stream"],
        presence_penalty=payload["presence_penalty"],
        frequency_penalty=payload["frequency_penalty"]
    )
    token_counter = 0
    partial_words = ""
    for chunk in response:
        # Each streamed chunk carries an incremental delta of the assistant's reply
        if chunk.choices and chunk.choices[0].delta.content is not None:
            partial_words += chunk.choices[0].delta.content
            if token_counter == 0:
                history.append(" " + partial_words)
            else:
                history[-1] = partial_words
            chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2)]
            token_counter += 1
            yield chat, history, chat_counter
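
# Note on the data shapes used above: `history` is a flat list that alternates
# user and assistant turns, [user_1, bot_1, user_2, bot_2, ...]; predict()
# pairs consecutive entries into [(user_1, bot_1), (user_2, bot_2), ...],
# which is the tuple format gr.Chatbot displays.
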
# Function to reset the textbox
def reset_textbox():
    return gr.update(value='')
# UI Components
title = """<h1 align="center">Customizable Chatbot with OpenAI API</h1>"""
description = """
Explore the outputs of a GPT-3.5 model: enter your OpenAI API key, customize the system prompt, and chat while the full conversation history stays in view.
"""
with gr.Blocks(css="""#col_container {width: 1000px; margin-left: auto; margin-right: auto;}
                      #chatbot {height: 520px; overflow: auto;}""") as demo:
    gr.HTML(title)
    gr.Markdown(description)
    with gr.Column(elem_id="col_container"):
        openai_api_key = gr.Textbox(type='password', label="Enter your OpenAI API key here")
        system_prompt = gr.Textbox(placeholder="Enter system prompt (optional)", label="System Prompt", lines=2)
        chatbot = gr.Chatbot(elem_id='chatbot')
        inputs = gr.Textbox(placeholder="Type your message here!", label="Input", lines=1)
        send_btn = gr.Button("Send")
        state = gr.State([])
        chat_counter = gr.Number(value=0, visible=False, precision=0)
        reset_btn = gr.Button("Reset Chat")
        # Input parameters for the OpenAI API
        with gr.Accordion("Model Parameters", open=False):
            top_p = gr.Slider(minimum=0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (Nucleus Sampling)")
            temperature = gr.Slider(minimum=0, maximum=2.0, value=1.0, step=0.1, interactive=True, label="Temperature")
    # Submit input for model prediction with the send button or the Enter key
    send_btn.click(predict, [inputs, top_p, temperature, openai_api_key, system_prompt, chat_counter, chatbot, state],
                   [chatbot, state, chat_counter])
    inputs.submit(predict, [inputs, top_p, temperature, openai_api_key, system_prompt, chat_counter, chatbot, state],
                  [chatbot, state, chat_counter])
    inputs.submit(reset_textbox, [], [inputs])
    # Reset the conversation, counter, and input box
    reset_btn.click(lambda: ([], [], 0, ""), [], [chatbot, state, chat_counter, inputs])

demo.queue().launch(debug=True)
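
# A minimal standalone sketch (kept commented out so it does not run alongside
# the app) of the streaming Chat Completions call that predict() relies on.
# It assumes the openai>=1.0 Python SDK and an OPENAI_API_KEY environment
# variable; the model and prompt below are illustrative only.
#
# from openai import OpenAI
#
# client = OpenAI()  # reads OPENAI_API_KEY from the environment
# stream = client.chat.completions.create(
#     model="gpt-3.5-turbo",
#     messages=[{"role": "user", "content": "Say hello"}],
#     stream=True,
# )
# for chunk in stream:
#     delta = chunk.choices[0].delta.content
#     if delta is not None:
#         print(delta, end="", flush=True)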