# NOTE: the original paste began with "Spaces:" / "Sleeping" lines — Hugging
# Face Spaces page chrome captured by the scrape, not part of the program.
import os
from typing import *

import chainlit as cl
import pandas as pd  # the Colab runtime will already have this library installed - no need to `pip install`
from chainlit.sync import run_sync
from datasets import load_dataset
from langchain.document_loaders import CSVLoader
from langchain.embeddings import CacheBackedEmbeddings
from langchain.storage import LocalFileStore
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.tools import BaseTool
from langchain_community.chat_models import ChatOpenAI
from langchain_community.vectorstores import FAISS
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
@cl.on_chat_start
def start():
    """Initialize the per-session pipeline when a chat session starts.

    Loads the FAISS vector store from disk, builds a
    ``prompt | chat_model | parser`` chain, and stores it in the Chainlit
    user session so that ``on_message`` can reuse it for every message.

    Side effects:
        - creates the ``embedding_cache`` directory if missing
        - reads the ``OPENAI_API_KEY`` environment variable
        - deserializes ``vector_store`` from disk (trusted local data only:
          ``allow_dangerous_deserialization=True`` executes pickle)
    """
    # Disk-backed store so repeated texts are not re-embedded across runs.
    os.makedirs("embedding_cache", exist_ok=True)
    store = LocalFileStore("embedding_cache")
    openai_api_key = os.getenv('OPENAI_API_KEY')
    primary_embedder = OpenAIEmbeddings(api_key=openai_api_key)
    # NOTE(review): `embedder` is never used below — the vector store is loaded
    # with the uncached `primary_embedder`, so the cache is dead weight.
    # Consider passing `embedder` to FAISS.load_local; verify cache keys first.
    embedder = CacheBackedEmbeddings(primary_embedder, store)
    # allow_dangerous_deserialization is required for FAISS pickle payloads;
    # only safe because "vector_store" is a local, self-produced artifact.
    vector_store = FAISS.load_local("vector_store", primary_embedder, allow_dangerous_deserialization=True)
    prompt_template = ChatPromptTemplate.from_messages(
        [
            ("system", "You are a helpful AI bot."),
            ("human", "{user_input}"),
        ]
    )
    # TODO(review): `retriever` is built but never wired into the chain — the
    # prompt has no context slot, so no retrieval-augmentation actually happens.
    retriever = vector_store.as_retriever()
    chat_model = ChatOpenAI(api_key=openai_api_key)
    parser = StrOutputParser()
    chain = prompt_template | chat_model | parser
    cl.user_session.set("chain", chain)
@cl.on_message
async def on_message(message: cl.Message):
    """Handle an incoming chat message.

    Fetches the chain prepared by ``start`` from the user session, runs the
    message through it, and sends the model's reply back to the UI.

    Args:
        message: the incoming Chainlit message; only ``.content`` is used.
    """
    user_message = message.content
    print(f"User message: {user_message}")
    chain = cl.user_session.get("chain")
    # Use the async entry point: a synchronous `chain.invoke(...)` inside this
    # coroutine would block Chainlit's event loop for the whole LLM call.
    res = await chain.ainvoke({"user_input": user_message})
    await cl.Message(content=res).send()