# Streaming OpenAI chatbot served through a Gradio ChatInterface.
from openai import OpenAI
import gradio as gr
import os
import dotenv
# Load variables from a local .env file into the process environment.
dotenv.load_dotenv()

# Fail fast with a KeyError if the key is absent rather than passing None along.
OPENAI_API_KEY = os.environ['OPENAI_API_KEY']

# SECURITY: never print the raw secret — it ends up in logs and consoles.
# Confirm that loading succeeded without revealing the value.
print("OPENAI_API_KEY loaded from environment")

client = OpenAI(api_key=OPENAI_API_KEY)
def predict(message, history):
    """Stream an assistant reply for *message*, given prior chat *history*.

    history is a sequence of (user_text, assistant_text) pairs, which is
    converted to the OpenAI messages format before the request. Yields the
    progressively accumulated reply so the UI can render it token by token.
    """
    messages = []
    for user_turn, assistant_turn in history:
        messages.append({"role": "user", "content": user_turn})
        messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})

    stream = client.chat.completions.create(
        model="gpt-4o",
        messages=messages,
        temperature=1.0,
        stream=True,
    )

    reply_so_far = ""
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta is not None:
            reply_so_far += delta
            yield reply_so_far
# Build the chat UI around the streaming predict() generator and start the
# local web server. (Removed a stray trailing "|" that was a syntax error.)
gr.ChatInterface(predict).launch()