| import os | |
| import gradio as gr | |
| from openai import OpenAI | |
# Read the API key once from the environment and hand it to the client
# constructor — the v1 OpenAI SDK configures the key per-client. (The old
# pre-1.0 pattern of assigning a module/class-level `api_key` attribute is
# a no-op on the `OpenAI` class and has been removed here.)
api_key = os.getenv("OPENAI_API_KEY")
client = OpenAI(api_key=api_key)
def predict(message, history):
    """Stream an assistant reply for *message* given the chat so far.

    Args:
        message: The user's newest message (str).
        history: gradio pair-style history — a list of
            ``(user_message, assistant_message)`` tuples.

    Yields:
        str: The assistant reply accumulated so far, re-yielded after each
        streamed token so gr.ChatInterface can render it incrementally.
    """
    # Flatten gradio's [(user, assistant), ...] pairs into the
    # role/content message list the OpenAI chat API expects.
    history_openai_format = []
    for human, assistant in history:
        history_openai_format.append({"role": "user", "content": human})
        history_openai_format.append({"role": "assistant", "content": assistant})
    history_openai_format.append({"role": "user", "content": message})

    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=history_openai_format,
        temperature=1.0,
        stream=True,
    )

    partial_message = ""
    for chunk in response:
        # Guard chunk.choices: some stream chunks arrive with an empty
        # choices list (e.g. usage-only chunks), and the final delta has
        # content=None — skip both instead of raising IndexError.
        if chunk.choices and chunk.choices[0].delta.content is not None:
            partial_message = partial_message + chunk.choices[0].delta.content
            yield partial_message
# Build the chat UI around the streaming predict() generator.
chat = gr.ChatInterface(predict, fill_height=True)

# Without an explicit launch, running this file as a script constructs the
# interface and exits without ever serving it. Guarded so importing the
# module (or gradio's own reload runner) does not double-launch.
if __name__ == "__main__":
    chat.launch()