# claude_front_end_for_api / gradio_openai.py
# Author: Omar Solano
# Commit: 8c851ec — "fix UI"
import os
import logging
import gradio as gr
from openai import OpenAI
from dotenv import load_dotenv
# Load OPENAI_API_KEY (and any other secrets) from the local .env file.
load_dotenv(".env")
# Verbose app/Gradio logs, but silence httpx's per-request chatter.
logging.basicConfig(level=logging.INFO)
logging.getLogger("gradio").setLevel(logging.INFO)
logging.getLogger("httpx").setLevel(logging.WARNING)
# OpenAI() reads OPENAI_API_KEY from the environment populated above.
client = OpenAI()
def generate_completion(input, history):
    """Stream an OpenAI chat completion for the latest user message.

    NOTE(review): the parameter name ``input`` shadows the builtin; it is
    kept unchanged so existing callers (Gradio passes positionally, but
    keyword callers would break) keep working.

    Parameters
    ----------
    input : str
        The user's latest message.
    history : list | None
        Prior (user, assistant) turn pairs as provided by
        ``gr.ChatInterface``; entries that are not 2-element pairs are
        skipped, matching the original behavior.

    Yields
    ------
    str
        The assistant's answer accumulated so far — Gradio re-renders the
        progressively longer string to produce the streaming effect.
    """
    messages = [
        {
            "role": "system",
            "content": "You are a world-class assistant.",
        }
    ]
    # Replay the conversation so the model has full context.
    for entry in history or []:
        if len(entry) == 2:
            user_msg, assistant_msg = entry
            messages.append({"role": "user", "content": user_msg})
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": input})

    response = client.chat.completions.create(
        model="gpt-3.5-turbo-0125",
        messages=messages,  # type: ignore
        stream=True,
        temperature=0,  # deterministic output
        max_tokens=4000,
    )
    answer_str: str = ""
    for chunk in response:
        delta = chunk.choices[0].delta.content
        # The final streamed chunk carries no content (delta is None);
        # the original dead `else: answer_str += ""` branch is removed.
        if delta is not None:
            answer_str += delta
        yield answer_str
# Wire the streaming generator into Gradio's stock chat UI.
demo = gr.ChatInterface(fn=generate_completion)

if __name__ == "__main__":
    # Queuing is required for generator (streaming) responses in Gradio.
    demo.queue()
    demo.launch()