# NOTE(review): the lines "Spaces: / Sleeping / Sleeping" were Hugging Face
# Spaces page-status text accidentally pasted into the source. They are not
# Python and would raise a SyntaxError; kept only as this comment.
# You can find this code for Chainlit python streaming here (https://docs.chainlit.io/concepts/streaming/python) | |
# OpenAI Chat completion | |
import os | |
import openai | |
from openai import AsyncOpenAI # importing openai for API usage | |
import chainlit as cl # importing chainlit for our app | |
#from chainlit.prompt import Prompt, PromptMessage # importing prompt tools | |
from chainlit import message | |
from chainlit.playground.providers import ChatOpenAI # importing ChatOpenAI tools | |
from dotenv import load_dotenv | |
load_dotenv() | |
# import openai | |
# from openai import AsyncOpenAI | |
from chainlit.playground.providers import ChatOpenAI | |
import chainlit as cl | |
# Read the API key from the environment (populated from .env by load_dotenv above).
# SECURITY: a real-looking key was previously hard-coded here and committed to
# source — it must be treated as leaked: revoke and rotate it immediately.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
# ChatOpenAI Templates | |
# system_template = """You are a helpful assistant who always speaks in a pleasant tone! | |
# """ | |
# user_template = """{input} | |
# Think through your response step by step. | |
# """ | |
# @cl.on_chat_start # marks a function that will be executed at the start of a user session | |
# async def start_chat(): | |
# settings = { | |
# "model": "gpt-3.5-turbo", | |
# "temperature": 0, | |
# "max_tokens": 500, | |
# "top_p": 1, | |
# "frequency_penalty": 0, | |
# "presence_penalty": 0, | |
# } | |
# cl.user_session.set("settings", settings) | |
# @cl.on_message # marks a function that should be run each time the chatbot receives a message from a user | |
# async def main(message: cl.Message): | |
# settings = cl.user_session.get("settings") | |
# client = AsyncOpenAI() | |
# print(message.content) | |
# prompt = Prompt( | |
# provider=ChatOpenAI.id, | |
# messages=[ | |
# PromptMessage( | |
# role="system", | |
# template=system_template, | |
# formatted=system_template, | |
# ), | |
# PromptMessage( | |
# role="user", | |
# template=user_template, | |
# formatted=user_template.format(input=message.content), | |
# ), | |
# ], | |
# inputs={"input": message.content}, | |
# settings=settings, | |
# ) | |
# print([m.to_openai() for m in prompt.messages]) | |
# msg = cl.Message(content="") | |
# # Call OpenAI | |
# async for stream_resp in await client.chat.completions.create( | |
# messages=[m.to_openai() for m in prompt.messages], stream=True, **settings | |
# ): | |
# token = stream_resp.choices[0].delta.content | |
# if not token: | |
# token = "" | |
# await msg.stream_token(token) | |
# # Update the prompt object with the completion | |
# prompt.completion = msg.content | |
# msg.prompt = prompt | |
# # Send and close the message stream | |
# await msg.send() | |
# Shared async OpenAI client. With no explicit api_key argument the SDK reads
# the OPENAI_API_KEY environment variable (loaded from .env via load_dotenv).
# SECURITY: the key previously embedded here is leaked — revoke and rotate it.
client = AsyncOpenAI()
# template = "Hello, {name}!" | |
# variables = {"name": "John"} | |
# settings = { | |
# "model": "gpt-3.5-turbo", | |
# "temperature": 0, | |
# # ... more settings | |
# } | |
#------------------------------------------------------------- | |
# @cl.step(type="llm") | |
# async def call_llm(): | |
# generation = cl.ChatGeneration( | |
# provider=ChatOpenAI.id, | |
# variables=variables, | |
# settings=settings, | |
# messages=[ | |
# { | |
# "content": template.format(**variables), | |
# "role":"user" | |
# }, | |
# ], | |
# ) | |
# # Make the call to OpenAI | |
# response = await client.chat.completions.create( | |
# messages=generation.messages, **settings | |
# ) | |
# generation.message_completion = { | |
# "content": response.choices[0].message.content, | |
# "role": "assistant" | |
# } | |
# # Add the generation to the current step | |
# cl.context.current_step.generation = generation | |
# return generation.message_completion["content"] | |
# @cl.on_chat_start | |
# async def start(): | |
# await call_llm() | |
#------------------------------------------------------------- | |
#****** | |
# @cl.on_message | |
# async def on_message(message: cl.Message): | |
# msg = cl.Message(content="") | |
# await msg.send() | |
# # do some work | |
# await cl.sleep(2) | |
# msg.content = f"Processed message {message.content}" | |
# await msg.update() | |
#------------------------------------ | |
#************************** | |
# NOTE(review): these re-imports duplicate the ones at the top of the file;
# kept (never silently drop imports) so this section stands alone once the
# commented-out experiments above are deleted.
from openai import AsyncOpenAI
import chainlit as cl

# Async client for the active app below. The SDK picks the key up from the
# OPENAI_API_KEY environment variable (loaded from .env by load_dotenv) —
# the hard-coded key previously committed here is leaked and must be revoked.
client = AsyncOpenAI()
# Chat-completion parameters shared by every request the app makes.
settings = dict(
    model="gpt-3.5-turbo",  # chat model to call
    temperature=0,          # greedy sampling: deterministic replies
    max_tokens=500,         # cap on completion length
    top_p=1,                # nucleus sampling disabled (full distribution)
    frequency_penalty=0,    # no repetition penalty
    presence_penalty=0,     # no new-topic incentive
)
@cl.on_chat_start  # FIX: decorator was missing, so Chainlit never called this and the session was never seeded
def start_chat():
    """Initialize per-session state at the start of a user session.

    Stores in ``cl.user_session``:
      - ``message_history``: OpenAI-format message list, seeded with the system prompt
      - ``user_template``: format template applied to user input (unused by ``main`` as written)
      - ``settings``: the module-level completion parameters
    """
    cl.user_session.set(
        "message_history",
        [{"role": "system", "content": "You are a helpful assistant who always speaks in a pleasant tone!."}],
    )
    cl.user_session.set(
        "user_template",
        "{input}\nThink through your response step by step.\n",
    )
    cl.user_session.set("settings", settings)
@cl.on_message  # FIX: decorator was missing, so Chainlit never routed user messages here
async def main(message: cl.Message):
    """Handle one user message: stream a chat completion back and record both turns.

    Appends the user turn to the session's ``message_history``, streams the
    model's reply token-by-token into a Chainlit message, then appends the
    assistant turn so later requests carry the full conversation.
    """
    message_history = cl.user_session.get("message_history")
    message_history.append({"role": "user", "content": message.content})

    # Send an empty message first so streamed tokens have a target to fill.
    msg = cl.Message(content="")
    await msg.send()

    stream = await client.chat.completions.create(
        messages=message_history, stream=True, **settings
    )
    async for part in stream:
        # delta.content is None on role/finish chunks; skip empty tokens.
        if token := part.choices[0].delta.content or "":
            await msg.stream_token(token)

    # Persist the assistant turn and finalize the streamed message in the UI.
    message_history.append({"role": "assistant", "content": msg.content})
    await msg.update()
# import chainlit as cl | |
#----------------------------------------------- | |
# @cl.on_message | |
# async def on_message(msg: cl.Message): | |
# if not msg.elements: | |
# await cl.Message(content="No file attached").send() | |
# return | |
# # Processing images exclusively | |
# images = [file for file in msg.elements if "image" in file.mime] | |
# # Read the first image | |
# with open(images[0].path, "r") as f: | |
# pass | |
# await cl.Message(content=f"Received {len(images)} image(s)").send() | |