# utils.py

import logging
from typing import Optional, Dict
import torch
from llama_index.core.schema import TextNode
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core import VectorStoreIndex

# Module-level logger, keyed to this module's name per standard convention.
logger = logging.getLogger(__name__)


def build_retriever(
        corpus: Dict[str, str],
        embed_model: BaseEmbedding,
        corpus_embeddings: Optional[torch.Tensor] = None,
        k: int = 10,
) -> BaseRetriever:
    """
    Build a VectorStoreIndex retriever over a corpus.

    Args:
        corpus: A dictionary mapping document ID to text.
        embed_model: The embedding model. Used to embed documents when no
            pre-computed embeddings are supplied, and always used to embed
            queries at retrieval time.
        corpus_embeddings: Optional pre-computed embeddings, one row per
            document, row-aligned with the iteration (insertion) order of
            ``corpus``. Assumed to have been produced by ``embed_model`` (or
            a model sharing its embedding space) — TODO confirm at call sites.
        k: The number of top similar items to retrieve.

    Returns:
        A configured retriever instance with ``similarity_top_k=k``.
    """
    # Lazy %-style args so formatting is skipped when INFO is disabled.
    logger.info("Building retriever with top_k=%d...", k)

    if corpus_embeddings is not None:
        # Pre-computed embeddings are row-aligned with the corpus dict's
        # order; attach each row to its node so the index does not
        # re-embed the documents.
        nodes = [
            TextNode(id_=id_, text=text, embedding=corpus_embeddings[i].tolist())
            for i, (id_, text) in enumerate(corpus.items())
        ]
    else:
        nodes = [TextNode(id_=id_, text=text) for id_, text in corpus.items()]

    # Always pass the caller's embed_model: even with pre-computed node
    # embeddings the index still needs a model to embed *queries*, and it
    # must match the model that produced corpus_embeddings. (Previously a
    # hard-coded "local:BAAI/bge-small-en-v1.5" was substituted in that
    # branch, silently mismatching query and corpus embedding spaces.)
    index = VectorStoreIndex(
        nodes=nodes,
        embed_model=embed_model,
        show_progress=True,
    )

    logger.info("Retriever built successfully.")
    return index.as_retriever(similarity_top_k=k)