# You can find this code for Chainlit python streaming here (https://docs.chainlit.io/concepts/streaming/python)
# OpenAI Chat completion
import os
from openai import AsyncOpenAI  # importing openai for API usage
import chainlit as cl  # importing chainlit for our app
from chainlit.prompt import Prompt, PromptMessage  # importing prompt tools
from chainlit.playground.providers import ChatOpenAI  # importing ChatOpenAI tools
from dotenv import load_dotenv

load_dotenv()

# ChatOpenAI Templates
# system_template = """You are a helpful assistant who always speaks in a pleasant tone!
# """

system_template = """\
I will need you to answer questions as if you are a basketball commentator on the show NBA on TNT.
When you answer a question, you need to respond in 3 separate sentences.
In the first sentence, rephrase the question as if it is an outlandish question.
In the second sentence, use a non sequitur about a popular basketball player, not LeBron James, that was not mentioned in the question.
In the third sentence, start by saying AND THAT'S WHY... and then say something funny.
Never use more than 100 words.
"""

# user_template = """{input}
# Think through your response step by step.
# """

user_template = """{input}
If the user's question contains the words Boston or Celtics, then just respond with BINGO!!!
"""


@cl.on_chat_start  # marks a function that will be executed at the start of a user session
async def start_chat():
    settings = {
        "model": "gpt-3.5-turbo",
        "temperature": 0,
        "max_tokens": 500,
        "top_p": 1,
        "frequency_penalty": 0,
        "presence_penalty": 0,
    }

    cl.user_session.set("settings", settings)


@cl.on_message  # marks a function that should be run each time the chatbot receives a message from a user
async def main(message: cl.Message):
    settings = cl.user_session.get("settings")

    client = AsyncOpenAI()

    print(message.content)

    prompt = Prompt(
        provider=ChatOpenAI.id,
        messages=[
            PromptMessage(
                role="system",
                template=system_template,
                formatted=system_template,
            ),
            PromptMessage(
                role="user",
                template=user_template,
                formatted=user_template.format(input=message.content),
            ),
        ],
        inputs={"input": message.content},
        settings=settings,
    )

    print([m.to_openai() for m in prompt.messages])

    msg = cl.Message(content="")

    # Call OpenAI and stream each token into the Chainlit message as it arrives
    async for stream_resp in await client.chat.completions.create(
        messages=[m.to_openai() for m in prompt.messages], stream=True, **settings
    ):
        token = stream_resp.choices[0].delta.content
        if not token:
            token = ""
        await msg.stream_token(token)

    # Update the prompt object with the completion
    prompt.completion = msg.content
    msg.prompt = prompt

    # Send and close the message stream
    await msg.send()
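
# A minimal sketch of how to launch this app locally, assuming the file is saved
# as app.py (the filename is an assumption, not stated above) and that
# OPENAI_API_KEY is defined in the .env file loaded by load_dotenv():
#
#   chainlit run app.py -w
#
# The -w flag enables auto-reload while editing, which is convenient during development.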