# --- Scrape residue from the hosting page's file viewer, kept as comments so the module parses ---
# Spaces:
# Sleeping
# Sleeping
# File size: 2,166 Bytes
# 94f3920 e5c2dc2 9790882 94f3920 4b7abc4 94f3920 4b7abc4 0a26a50 4b7abc4 94f3920 e5c2dc2 94f3920 e5c2dc2 94f3920 4b7abc4 94f3920 9790882 94f3920 e5c2dc2 9790882 e5c2dc2 94f3920
# 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66
from langchain_community.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema import StrOutputParser
from langchain.schema.runnable import Runnable
from langchain.schema.runnable.config import RunnableConfig
import chainlit as cl
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_core.runnables.passthrough import RunnablePassthrough
from langchain_openai import ChatOpenAI
def read_text_file(file_path):
    """Read a text file and return its contents as a string.

    Errors are reported in-band rather than raised (callers display the
    returned string directly):

    Args:
        file_path: Path of the file to read.

    Returns:
        The file's text on success, the literal string "File not found."
        when the path does not exist, or "An error occurred: <details>"
        for any other failure.
    """
    try:
        # Explicit encoding: the default is platform-dependent, which can
        # mangle the prompt file on non-UTF-8 locales (e.g. Windows cp1252).
        with open(file_path, 'r', encoding='utf-8') as file:
            return file.read()
    except FileNotFoundError:
        return "File not found."
    except Exception as e:
        return f"An error occurred: {e}"
@cl.on_chat_start
async def on_chat_start():
    """Build the note-generation chain once per chat session.

    Loads the system prompt from the local "prompt" file, wires a
    prompt-template -> streaming GPT-4 -> string-parser LCEL chain, and
    stores it in the Chainlit user session under the key "runnable" for
    `on_message` to retrieve.
    """
    # read_text_file returns an error string on failure; that string would
    # silently become the system prompt — assumes the "prompt" file exists.
    system_prompt = read_text_file("prompt")

    # NOTE(review): a FAISS retriever (load_local("faiss_index") with
    # OpenAIEmbeddings, k=1) was prototyped here but is currently disabled;
    # the chain below runs without retrieval context.

    chat_model = ChatOpenAI(streaming=True, model="gpt-4")

    prompt_template = ChatPromptTemplate.from_messages(
        [
            ("system", system_prompt),
            ("human", "You are looking to create notes for the chapter, {topic} to teach effectively in the class"),
        ]
    )

    # The user's raw message is passed through as the {topic} variable.
    runnable_chain = (
        {"topic": RunnablePassthrough()}
        | prompt_template
        | chat_model
        | StrOutputParser()
    )
    cl.user_session.set("runnable", runnable_chain)
@cl.on_message
async def on_message(message: cl.Message):
    """Stream the chain's answer for an incoming user message.

    Retrieves the chain built in `on_chat_start` from the user session,
    streams its output token by token into a Chainlit message, then sends
    the completed message. (Also removes a stray trailing "|" scrape
    artifact that made the original final line a syntax error.)
    """
    runnable = cl.user_session.get("runnable")  # type: Runnable

    msg = cl.Message(content="")
    async for chunk in runnable.astream(
        message.content,
        config=RunnableConfig(callbacks=[cl.LangchainCallbackHandler()]),
    ):
        await msg.stream_token(chunk)
    await msg.send()