import os
from pathlib import Path

import uvicorn
from fastapi import FastAPI
from llama_index import (
    ServiceContext,
    SimpleDirectoryReader,
    StorageContext,
    VectorStoreIndex,
    load_index_from_storage,
)
from llama_index.embeddings import HuggingFaceEmbedding, VoyageEmbedding
from llama_index.llms import Anyscale
from llama_index.query_engine import RetrieverQueryEngine
from llama_index.retrievers import RecursiveRetriever

app = FastAPI()

# Define the inference model (requires the ANYSCALE_API_KEY environment variable).
llm = Anyscale(model="HuggingFaceH4/zephyr-7b-beta", api_key=os.getenv("ANYSCALE_API_KEY"))

# Define the embedding model used to embed the query
# (requires the VOYAGE_API_KEY environment variable).
# A local alternative: HuggingFaceEmbedding(model_name="BAAI/bge-base-en-v1.5")
embed_model = VoyageEmbedding(model_name="voyage-01", voyage_api_key=os.getenv("VOYAGE_API_KEY"))

service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)

index_dir = Path("./index")
if not index_dir.exists():
    # First run: build the index from the local documents and persist it to disk.
    documents = SimpleDirectoryReader(Path("./docs")).load_data()
    index = VectorStoreIndex.from_documents(documents, service_context=service_context)
    index.storage_context.persist(index_dir)

# Load the vector store that was persisted earlier.
storage_context = StorageContext.from_defaults(persist_dir=index_dir)
index = load_index_from_storage(storage_context=storage_context, service_context=service_context)

# Define the query engine: a recursive retriever wrapped around the vector index.
vector_retriever = index.as_retriever(similarity_top_k=4)
recursive_retriever = RecursiveRetriever("vector", retriever_dict={"vector": vector_retriever})
query_engine = RetrieverQueryEngine.from_args(recursive_retriever, service_context=service_context)

# Expose the query engine through a FastAPI endpoint, served by uvicorn.
@app.get("/generate")
def generate(query: str):
    return str(query_engine.query(query))

if __name__ == "__main__":
    uvicorn.run("main:app", reload=True)
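
# Example client call (a minimal sketch, not part of the service itself):
# it assumes the server is running locally on uvicorn's default port 8000
# and that the `requests` package is installed; the query text is a
# hypothetical placeholder.
#
#   import requests
#
#   resp = requests.get(
#       "http://127.0.0.1:8000/generate",
#       params={"query": "What do these docs cover?"},
#   )
#   print(resp.text)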