from framework.LLM_controller.interface import LLMcontroller
from framework.LLM_controller.providers import deepseek, ollama, openai,ryzenai
from tool.config import config as tool_config
from framework.prompt.DocLoader import docloaders,regist_docloader
from langchain.schema import Document

# Load the application configuration from the project's TOML file.
config = tool_config.get_config("config.toml")

# Single module-level LLM controller; provider backends are registered on it
# below so callers can select a model by name.
llm =  LLMcontroller()

# Register each provider's handler factory under its lookup key.
# NOTE(review): registration happens at import time as a side effect of
# loading this module.
llm.register_model("deepseek", deepseek.handler_factory)
llm.register_model("openai", openai.handler_factory)
llm.register_model("ollama", ollama.handler_factory)
llm.register_model("ryzenai", ryzenai.handler_factory)

def get_context_with_doctype(user: str, question: str, doctype: str = "wrongQuestion", file_id=None):
    """
    Build a context string for question generation from the user's document store.

    Args:
        user: User name; their doc loader is (lazily) registered and looked up.
        question: Question text, used for similarity retrieval when
            ``doctype == "retrieve"``; ignored otherwise.
        doctype: ``"retrieve"`` runs a vector-store similarity search; any
            other value filters stored documents by their ``doctype`` metadata.
        file_id: Optional positional selector — the groups are indexed by
            ``file_id % number_of_groups``. NOTE(review): groups are keyed by
            the ``file_id`` metadata value, so a direct key lookup was likely
            intended here; positional-modulo behavior is preserved for
            backward compatibility — confirm with callers.

    Returns:
        Newline-joined page contents of the selected documents; ``""`` when
        nothing matches.
    """
    regist_docloader(user)
    docloader = docloaders[user]

    if doctype == "retrieve":
        # Similarity search against the user's vector store.
        retrieved_docs = docloader.vectostore.as_retriever().invoke(question)
        return "\n".join(d.page_content for d in retrieved_docs)

    all_data = docloader.vectostore.get()

    # Group matching documents by their file_id metadata: file_id -> [Document].
    docs_by_file = {}
    for content, metadata in zip(all_data["documents"], all_data["metadatas"]):
        if metadata.get("doctype") != doctype:
            continue
        fid = metadata.get("file_id", "unknown")
        docs_by_file.setdefault(fid, []).append(
            Document(page_content=content, metadata=metadata)
        )

    # Bug fix: the original indexed with file_id % len(docs_by_file) even when
    # no documents matched, raising ZeroDivisionError. Return "" instead,
    # matching the no-file_id empty result.
    if not docs_by_file:
        return ""

    if file_id is not None:
        # Select one file's documents positionally (original behavior).
        file_docs = list(docs_by_file.values())[file_id % len(docs_by_file)]
        return "\n".join(d.page_content for d in file_docs)

    # No file_id: concatenate every file's documents, files separated by "\n".
    joined = "\n".join(
        "\n".join(d.page_content for d in file_docs)
        for file_docs in docs_by_file.values()
    )
    return joined.strip()
        