"""Initialize a Pinecone serverless index and expose it as a LlamaIndex vector store.

Module-level side effects: loads environment variables from .env, connects to
Pinecone, creates the index if absent, and builds `pinecone_vector_store`.
"""

import os

from dotenv import load_dotenv
from llama_index.vector_stores.pinecone import PineconeVectorStore
from pinecone import Pinecone, ServerlessSpec

load_dotenv()

# Fail fast with a clear message instead of passing api_key=None to the SDK.
_api_key = os.environ.get("PINECONE_API_KEY")
if not _api_key:
    raise RuntimeError(
        "PINECONE_API_KEY is not set; add it to your environment or .env file."
    )

# Pinecone Vector Database client
pc = Pinecone(api_key=_api_key)

pc_index_name = "llama-integration-pinecone"
# Alternate index (toggle manually if needed):
# pc_index_name = "openai-embeddings"

# Snapshot of existing indexes, taken once at startup.
pc_indexes = pc.list_indexes()


def index_exists(index_name, indexes=None):
    """Return True if an index named *index_name* already exists.

    Args:
        index_name: Name of the Pinecone index to look for.
        indexes: Optional iterable of index descriptions (each supporting
            ``ix["name"]``). Defaults to the module-level ``pc_indexes``
            snapshot for backward compatibility.
    """
    if indexes is None:
        indexes = pc_indexes
    return any(ix["name"] == index_name for ix in indexes)


# Create the index if it doesn't exist.
if not index_exists(pc_index_name):
    pc.create_index(
        name=pc_index_name,
        # 1536 dims — presumably matches an OpenAI embedding model; confirm
        # against the embedding model actually used upstream.
        dimension=1536,
        spec=ServerlessSpec(cloud="aws", region="us-east-1"),
    )

# Handle to the (now guaranteed to exist) index.
pinecone_index = pc.Index(pc_index_name)

# LlamaIndex vector-store wrapper around the Pinecone index.
pinecone_vector_store = PineconeVectorStore(pinecone_index=pinecone_index)