import os

from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.openai_like import OpenAILike

class RAGSystem:
    """Retrieval-augmented generation over documents in a local directory.

    Embeddings are computed with a local HuggingFace model (no network
    round-trip); answers are generated by Alibaba Cloud Qwen through
    DashScope's OpenAI-compatible endpoint.
    """

    def __init__(self, data_dir="data"):
        """Build the vector index from every document under *data_dir*.

        Args:
            data_dir: Directory scanned by ``SimpleDirectoryReader``.
                Defaults to ``"data"``.

        Raises:
            RuntimeError: If the ``DASHSCOPE_API_KEY`` environment variable
                is unset/empty — failing early here beats an opaque HTTP 401
                on the first query.
        """
        # Local embedding model served from disk.
        self.embedding_model = HuggingFaceEmbedding(
            model_name="./local_embeddings/bge-small-en-v1.5"
        )

        # Validate the credential up front instead of passing None through
        # to the client and failing later with an unhelpful auth error.
        api_key = os.getenv("DASHSCOPE_API_KEY")
        if not api_key:
            raise RuntimeError(
                "DASHSCOPE_API_KEY environment variable is not set"
            )

        # Alibaba Cloud Qwen via the OpenAI-compatible interface.
        # NOTE(review): OpenAILike documents `max_tokens`, not
        # `max_new_tokens` — confirm this kwarg is actually honored by the
        # installed llama-index version and not silently dropped.
        self.llm = OpenAILike(
            model="qwen2.5-math-72b-instruct",
            api_base="https://dashscope.aliyuncs.com/compatible-mode/v1",
            api_key=api_key,
            is_chat_model=True,
            is_function_calling_model=True,
            context_window=32768,
            max_new_tokens=1024,
            temperature=0.7,
        )

        self.index = self._build_index(data_dir)

    def _build_index(self, data_dir):
        """Load all documents from *data_dir* and build a vector index.

        Args:
            data_dir: Directory whose files are loaded as documents.

        Returns:
            A ``VectorStoreIndex`` built with the local embedding model.
        """
        documents = SimpleDirectoryReader(data_dir).load_data()
        # NOTE(review): `llm=` is not part of from_documents' documented
        # signature (the LLM is supplied per-engine in query()); verify it
        # has any effect here, or drop it.
        return VectorStoreIndex.from_documents(
            documents,
            embed_model=self.embedding_model,
            llm=self.llm,
        )

    def query(self, question):
        """Answer *question* against the indexed documents.

        Args:
            question: Natural-language question string.

        Returns:
            Tuple of ``(answer_text, source_metadata)`` where
            ``source_metadata`` is a list of stringified metadata dicts,
            one per retrieved source node.
        """
        engine = self.index.as_query_engine(llm=self.llm)
        response = engine.query(question)
        sources = [str(node.node.metadata) for node in response.source_nodes]
        return str(response), sources