# Pinecone / app.py - Streamlit vector search app backed by a Pinecone index
import streamlit as st
from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings
from pinecone import Pinecone
import os # For environment variable access
# Read the Pinecone API key from the Space's environment variables
# (set PINECONE_API_KEY as a secret in your Space settings)
API_KEY = os.environ.get("PINECONE_API_KEY")

# Connect to Pinecone using the API key retrieved from the environment
pc = Pinecone(api_key=API_KEY)

# Name of the Pinecone index to search (replace with your own index name)
INDEX_NAME = "your-index-name"
index = pc.Index(INDEX_NAME)
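# Optional one-time index creation (a hedged sketch, not part of the original
# app): run this once, e.g. in a separate setup script, before connecting the
# handle above. The cloud/region in the ServerlessSpec are assumptions; adjust
# them to your Pinecone project. dimension=384 matches all-MiniLM-L6-v2.
#
# from pinecone import ServerlessSpec
# if INDEX_NAME not in pc.list_indexes().names():
#     pc.create_index(
#         name=INDEX_NAME,
#         dimension=384,          # embedding size of all-MiniLM-L6-v2
#         metric="cosine",
#         spec=ServerlessSpec(cloud="aws", region="us-east-1"),
#     )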
# Load pre-trained model (replace with your chosen model)
model = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
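# Optional query preprocessing. The original code only hints at this step, so
# the helper below is a minimal sketch (whitespace and case normalization),
# not the author's implementation; replace it with whatever your data needs.
def preprocess_query(query: str) -> str:
    # Collapse repeated whitespace and lowercase the text before embedding
    return " ".join(query.split()).lower()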
def process_and_search(query):
    # Optionally preprocess the user input first (tokenization, normalization);
    # the preprocess_query sketch above is one possible implementation
    # query = preprocess_query(query)

    # Encode the query into a dense vector with the embedding model
    encoded_query = model.embed_query(query)

    # Perform a vector similarity search against the Pinecone index
    results = index.query(vector=encoded_query, top_k=5, include_metadata=True)

    # Process search results (example: extract answers from metadata for display)
    processed_results = []
    for match in results["matches"]:
        answer = match["metadata"]["answer"]  # Adapt based on your data structure
        processed_results.append(answer)
    return processed_results
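# For the search above to return anything, the index must already hold vectors
# whose metadata includes an "answer" field. A hedged sketch of how such
# records could be upserted from a separate ingestion script (the qa_pairs
# below are made-up placeholders; IDs and batching are up to you):
#
# qa_pairs = [
#     {"id": "q1", "question": "What is Pinecone?", "answer": "A managed vector database."},
#     {"id": "q2", "question": "Which model embeds the text?", "answer": "all-MiniLM-L6-v2."},
# ]
# vectors = model.embed_documents([p["question"] for p in qa_pairs])
# index.upsert(vectors=[
#     {"id": p["id"], "values": v, "metadata": {"answer": p["answer"]}}
#     for p, v in zip(qa_pairs, vectors)
# ])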
st.title("Pinecone Search App")

user_query = st.text_area("Enter your question:", height=100)

if st.button("Search"):
    if user_query:
        # Process, search, and display results (call the process_and_search function)
        answers = process_and_search(user_query)
        st.write("Search Results:")
        for answer in answers:
            st.write(f"- {answer}")
    else:
        st.error("Please enter a question.")
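# To try the app locally (assumed setup, not part of the original file):
#   pip install streamlit langchain sentence-transformers pinecone-client
#   export PINECONE_API_KEY="your-api-key"
#   streamlit run app.py
# On a Hugging Face Space, add PINECONE_API_KEY as a repository secret instead.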