Update app.py
app.py CHANGED

@@ -14,9 +14,16 @@ from langchain.vectorstores import Chroma
 openai_api_key = os.getenv("openai_token")
 embedding = OpenAIEmbeddings(openai_api_key=openai_api_key)
 
-
-
-
+@st.cache_resource
+def get_vectordb():
+    embedding = OpenAIEmbeddings(openai_api_key=os.getenv("openai_token"))
+    return Chroma(persist_directory="./chroma_db", embedding_function=embedding)
+
+vectordb = get_vectordb()
+
+# # Setup vector database
+# persist_directory = './chroma_db'
+# vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding)
 
 llm_name = "gpt-3.5-turbo"
 
@@ -25,7 +32,7 @@ llm = ChatOpenAI(model_name=llm_name, temperature=0.7,
 
 qa_chain = RetrievalQA.from_chain_type(
     llm,
-    retriever=vectordb.as_retriever()
+    retriever=vectordb.as_retriever(search_kwargs={"k": 5})
 )
 
 
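For context: the change wraps construction of the Chroma vector store in a function decorated with Streamlit's @st.cache_resource, so the embeddings client and the Chroma connection are created once per process and reused across script reruns instead of being rebuilt on every user interaction, and the retriever is now configured to return the top 5 matching chunks (search_kwargs={"k": 5}). Below is a minimal sketch of how the changed pieces fit together in a standalone app.py; it assumes the legacy LangChain import paths implied by the hunk headers, guesses the truncated ChatOpenAI arguments, and invents a small question/answer UI for illustration, since none of that appears in the diff itself.

import os

import streamlit as st
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma

openai_api_key = os.getenv("openai_token")

@st.cache_resource
def get_vectordb():
    # Built once and cached by Streamlit; later reruns reuse the same Chroma client.
    embedding = OpenAIEmbeddings(openai_api_key=os.getenv("openai_token"))
    return Chroma(persist_directory="./chroma_db", embedding_function=embedding)

vectordb = get_vectordb()

llm_name = "gpt-3.5-turbo"
# The diff header truncates the remaining ChatOpenAI arguments; openai_api_key is assumed here.
llm = ChatOpenAI(model_name=llm_name, temperature=0.7, openai_api_key=openai_api_key)

qa_chain = RetrievalQA.from_chain_type(
    llm,
    retriever=vectordb.as_retriever(search_kwargs={"k": 5}),  # top-5 chunks per query
)

# Hypothetical minimal UI; the Space's actual interface is outside this diff.
question = st.text_input("Ask a question about the indexed documents")
if question:
    result = qa_chain({"query": question})
    st.write(result["result"])

st.cache_resource (rather than st.cache_data) is the appropriate cache here because the Chroma client is a live, unpickleable resource; cache_data would attempt to copy the return value on every hit.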