|
from langchain_community.vectorstores import FAISS
|
|
from langchain_core.documents import Document
|
|
from langchain_nvidia_ai_endpoints import NVIDIAEmbeddings
|
|
import os
|
|
from google import genai
|
|
from google.genai import types
|
|
|
|
|
|
import os
|
|
|
|
def index_text():
    """Load the persisted FAISS index built with NVIDIA embeddings.

    Returns:
        FAISS: vector store loaded from the local ``nvidia_faiss_index``
        directory, ready for similarity search.

    Raises:
        RuntimeError: if ``NVIDIA_API_KEY`` is not set in the environment.
    """
    # SECURITY: never hard-code API keys in source. The previous literal
    # key was committed to this file and must be revoked; the key now has
    # to be provided via the environment before this function is called.
    if not os.environ.get("NVIDIA_API_KEY"):
        raise RuntimeError(
            "NVIDIA_API_KEY environment variable is not set; "
            "export it before calling index_text()."
        )

    nvidia_embeddings = NVIDIAEmbeddings(
        model="nvidia/llama-3.2-nv-embedqa-1b-v2",
        truncate="NONE",
    )

    # allow_dangerous_deserialization is required because FAISS persists
    # docstore metadata via pickle; only load an index you created yourself.
    vectorstore = FAISS.load_local(
        "nvidia_faiss_index",
        embeddings=nvidia_embeddings,
        allow_dangerous_deserialization=True,
    )
    return vectorstore
|
|
|
|
|
|
def answer_query(query, vectorstore):
    """Answer a user query with RAG: retrieve context from `vectorstore`,
    then ask Gemini to answer strictly from that context.

    Args:
        query: The user's question (plain string).
        vectorstore: A FAISS vector store (e.g. from ``index_text()``).

    Returns:
        str: The model's answer text, or "I don't know" when the answer
        is not contained in the retrieved context (per the prompt).

    Raises:
        RuntimeError: if ``GEMINI_API_KEY`` is not set in the environment.
    """
    RAG_TEMPLATE = """
#CONTEXT:
{context}

QUERY:
{query}

Use the provided context to answer the user query. Only use the provided context to answer the query.
If you do not know the answer, or it's not contained in the provided context, respond with "I don't know".
"""

    # SECURITY: never hard-code API keys in source. The previous literal
    # key was committed to this file and must be revoked; the key now has
    # to be provided via the environment before this function is called.
    if not os.environ.get("GEMINI_API_KEY"):
        raise RuntimeError(
            "GEMINI_API_KEY environment variable is not set; "
            "export it before calling answer_query()."
        )
    client = genai.Client()

    # BUG FIX: passing k=2 to retriever.invoke() is silently ignored by
    # LangChain's VectorStoreRetriever; the top-k must be configured via
    # search_kwargs when the retriever is created.
    retriever = vectorstore.as_retriever(search_kwargs={"k": 2})
    search_results = retriever.invoke(query)

    # Concatenate the retrieved chunks into a single context string.
    context = " ".join([doc.page_content for doc in search_results])

    prompt = RAG_TEMPLATE.format(context=context, query=query)

    response = client.models.generate_content(
        model="gemini-2.5-pro",
        contents=prompt,
        config=types.GenerateContentConfig(),
    )

    return response.text
|
|
|