import os

import pinecone
import chainlit as cl
from dotenv import load_dotenv
from langchain.vectorstores import Pinecone as PineconeVectorStore
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.embeddings import CacheBackedEmbeddings
from langchain.storage import LocalFileStore
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
from langchain.callbacks import StdOutCallbackHandler

import injection
load_dotenv()
index_name = 'moviereviews'
# Connect to the Pinecone index. The API key is read from the environment
# rather than hardcoded; PINECONE_API_KEY (and optionally
# PINECONE_ENVIRONMENT) are assumed to be set in the .env file.
pinecone.init(
    api_key=os.getenv("PINECONE_API_KEY"),
    environment=os.getenv("PINECONE_ENVIRONMENT", "gcp-starter"),
)
index = pinecone.Index(index_name)
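# Note: this connects to an existing 'moviereviews' index; nothing here
# creates it. The commented-out injection step below only populates it.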
# Create a cache-backed embedder: each text is embedded once with the OpenAI
# model, then served from the local file cache on subsequent requests.
store = LocalFileStore("./cache/")
core_embeddings_model = OpenAIEmbeddings()
embedder = CacheBackedEmbeddings.from_bytes_store(
    core_embeddings_model, store, namespace=core_embeddings_model.model
)
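# Cache entries are namespaced by the embedding model's name, so switching
# embedding models will not serve stale vectors from ./cache/.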
# Inject the data into the index if it has not been loaded yet.
# injection.inject(index, embedder, "./data/barbie.csv")
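# injection is a local helper module (not shown here); inject() presumably
# reads the CSV, embeds its text column with the cache-backed embedder, and
# upserts the resulting vectors plus metadata into the Pinecone index.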
# Wrap the index and embedder in a LangChain vectorstore; text_field is the
# metadata key under which the raw document text was stored at injection time.
text_field = "text"
vectorstore = PineconeVectorStore(index, embedder.embed_query, text_field)
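# Illustrative check (hypothetical query): the store can also be queried
# directly, e.g.
#   vectorstore.similarity_search("What did critics think of the film?", k=4)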
model_name = "gpt-4"
# OpenAIChat is deprecated in LangChain; ChatOpenAI is its replacement.
llm = ChatOpenAI(model_name=model_name, temperature=0)
retriever = vectorstore.as_retriever()
# Log chain execution to stdout for debugging.
handler = StdOutCallbackHandler()
qa_with_sources_chain = RetrievalQA.from_chain_type(
    llm=llm,
    retriever=retriever,
    callbacks=[handler],
    return_source_documents=True,
)
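# With the default "stuff" chain type, each query is embedded, the top-matching
# review chunks are retrieved from Pinecone and stuffed into the prompt, and
# return_source_documents=True returns those chunks alongside the answer.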

@cl.on_chat_start  # runs once at the start of each user session
def start_chat():
    cl.user_session.set(
        "message_history",
        [{"role": "system", "content": "You are a helpful assistant."}],
    )
    settings = {
        "temperature": 0.7,  # higher values increase output diversity/randomness
        "max_tokens": 500,  # maximum length of the generated response
        "top_p": 1,  # nucleus sampling: restrict choices to the smallest token set whose cumulative probability exceeds top_p
        "frequency_penalty": 0,  # higher values discourage tokens in proportion to how often they have already appeared
        "presence_penalty": 0,  # higher values encourage tokens that have not yet appeared in the output
    }
    cl.user_session.set("settings", settings)

@cl.on_message  # called every time the user sends a message in the UI
async def main(message: str):
    message_history = cl.user_session.get("message_history")  # currently unused; kept for future conversation memory
    # The chain is synchronous; run it in a worker thread so the event loop is not blocked.
    result = await cl.make_async(qa_with_sources_chain)({"query": message})
    await cl.Message(content=result["result"]).send()
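
# Start the app locally with: chainlit run app.py -w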