# main.py
# This is the main file that runs the Sanic web server.

from sanic import Sanic, response

from retriever import get_ensemble_retriever
from llm_chain import create_rag_chain

app = Sanic("VibbaBackend")


@app.before_server_start
async def setup_model(app_instance, loop):
    """
    Initializes the retriever and RAG chain and attaches them to the
    application context before the server starts.
    """
    print("Server starting up... Initializing model pipeline.")
    retriever = get_ensemble_retriever()
    rag_chain = create_rag_chain(retriever)
    app_instance.ctx.rag_chain = rag_chain
    print("Model pipeline is ready.")


@app.get("/")
async def home(request):
    """
    Root endpoint showing the app name and description.
    """
    html_content = """
    <h1>Welcome to the VibbaBackend service! 🚀</h1>
    <p>This backend powers a Retrieval-Augmented Generation (RAG) pipeline
    using an ensemble retriever and a large language model.</p>
    <p>Available endpoints:</p>
    <ul>
        <li><code>/getResponse?question=Your+query</code> &ndash; Get an answer to your question.</li>
    </ul>