Update app.py
app.py CHANGED
@@ -17,6 +17,7 @@ from langchain_community.retrievers import PineconeHybridSearchRetriever
 from langchain_groq import ChatGroq
 from langchain.retrievers import ContextualCompressionRetriever
 from langchain.retrievers.document_compressors import FlashrankRerank
+from langchain_community.chat_models import ChatPerplexity
 
 # Load environment variables
 load_dotenv(".env")
@@ -74,7 +75,8 @@ retriever = PineconeHybridSearchRetriever(
 )
 
 # Initialize LLM
-llm = ChatGroq(model="llama-3.1-70b-versatile", temperature=0, max_tokens=1024, max_retries=2)
+# llm = ChatGroq(model="llama-3.1-70b-versatile", temperature=0, max_tokens=1024, max_retries=2)
+llm = ChatPerplexity(temperature=0, pplx_api_key=GROQ_API_KEY, model="llama-3.1-sonar-large-128k-online", max_tokens=1024, max_retries=2)
 
 
 # Initialize Reranker