# gpt-35-turbo / app.py
import os

import gradio as gr
import openai

# Read the OpenAI API key from the environment (set OPENAI_API_KEY as a Space secret).
openai.api_key = os.getenv("OPENAI_API_KEY")
def predict(inputs, chatbot):
    # Rebuild the OpenAI message list from the chat history (a list of (user, assistant)
    # pairs), then append the new user message.
    messages = []
    for user_message, assistant_message in chatbot:
        messages.append({"role": "user", "content": user_message})
        messages.append({"role": "assistant", "content": assistant_message})
    messages.append({"role": "user", "content": inputs})

    # Streaming ChatCompletion request (openai<1.0 API).
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages,  # e.g. [{'role': 'user', 'content': "What is life? Answer in three words."}]
        temperature=1.0,
        stream=True,  # stream the output to the chatbot token by token
    )

    # Accumulate streamed tokens and yield the growing partial reply so the UI updates live.
    partial_message = ""
    for chunk in response:
        delta = chunk["choices"][0]["delta"]
        if "content" in delta:  # the first chunk may only carry the role
            partial_message += delta["content"]
            yield partial_message
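# A minimal usage sketch (illustrative only): gr.ChatInterface calls predict with the new
# message and the prior history as (user, assistant) pairs, and updates the chatbot with
# each partial reply the generator yields, e.g.:
#
#   for partial in predict("What is life? Answer in three words.",
#                          [("Hi", "Hello! How can I help you?")]):
#       print(partial)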
interface = gr.ChatInterface(predict)
with gr.Blocks() as demo:
    gr.Markdown("""
# GPT 3.5 Discord Bot powered by gradio!
To use this Space as a discord bot, first install the gradio_client:

```bash
pip install gradio_client
```

Then run the following Python code:

```python
import gradio_client as grc

client = grc.Client.duplicate("gradio-discord-bots/gpt-35-turbo", private=False, secrets={"OPENAI_API_KEY": "<your-key-here>"}, sleep_timeout=2880)
client.deploy_discord(api_names=["chat"])
```
""")
    # Render the chat UI in a hidden row so the Space still exposes the chat API endpoint
    # while visitors only see the instructions above.
    with gr.Row(visible=False):
        interface.render()

demo.queue(concurrency_count=100).launch()
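# A minimal sketch of calling this Space programmatically with gradio_client (assuming the
# ChatInterface is exposed under the "chat" api_name referenced in the deploy_discord
# instructions above):
#
#   from gradio_client import Client
#   client = Client("gradio-discord-bots/gpt-35-turbo")
#   print(client.predict("Hello!", api_name="/chat"))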