import json
import os
# from framework.frontend.recoder import VoiceRecorder
from framework.LLM_controller.interface import LLMcontroller
from framework.LLM_controller.providers import deepseek, ollama, openai, ryzenai
# from webui.views.llm_serve.llm_serve import llm
from framework.prompt.DocLoader import DocLoader, docloaders, regist_docloader
from tool.config import config as tool_config

# recoder = VoiceRecorder()
# Module-level configuration, loaded once at import time from config.toml.
# qa() reads the LLM settings from it (config["LLM"]["API_PORT"] and
# config["LLM"]["MODEL_NAME"]).
config = tool_config.get_config("config.toml")

def get_text_from_file(path: str, doc_loader: "DocLoader"):
    """
    Extract the text content from an arbitrary document (images, PDF, Word,
    plain text, Excel, HTML, JSON, ... — whatever ``doc_loader`` supports).

    :param path: filesystem path of the document to read
    :param doc_loader: a DocLoader instance whose ``loader_map`` maps file
        extensions (e.g. ``".pdf"``) to loader callables returning documents
        with a ``page_content`` attribute
    :return: the extracted text (pages joined by blank lines), a dict of the
        form ``{"error": ...}`` for recognizable failures, or ``""`` when a
        loader raises unexpectedly (kept falsy so callers can treat it as
        "no text found")
    """
    if not path or not os.path.exists(path):
        return {"error": "文件路径无效或文件不存在"}

    ext = os.path.splitext(path)[1].lower()
    if ext not in doc_loader.loader_map:
        return {"error": f"不支持的文件类型: {ext}"}

    try:
        loader_func = doc_loader.loader_map[ext]
        # html/htm/url loaders expect a file:// URL rather than a bare path
        file_arg = f"file://{path}" if ext in ['.html', '.htm', '.url'] else path
        documents = loader_func(file_arg)

        if not documents:
            return {"error": "未能识别到任何文本"}

        # Merge the per-document contents into a single text blob
        return "\n\n".join(doc.page_content for doc in documents)

    except Exception as e:
        # Don't swallow the error silently: report it so loader failures are
        # diagnosable, but keep the falsy "" return that callers depend on.
        print(f"get_text_from_file failed for {path}: {e}")
        return ""


def qa(path: str, question: str, user: str):
    """
    Answer a student's question about a document with RAG, as a generator.

    First yields the extracted document context (or a single error message
    when extraction fails), then streams the LLM answer chunk by chunk.

    :param path: path of the document to use as context
    :param question: the student's question
    :param user: user id used to look up / register the user's DocLoader
    :yield: the context string, followed by the streamed answer fragments
    """
    llm = LLMcontroller()
    llm.register_model("deepseek", deepseek.handler_factory)
    llm.register_model("openai", openai.handler_factory)
    llm.register_model("ollama", ollama.handler_factory)
    llm.register_model("ryzenai", ryzenai.handler_factory)
    user_config = config
    RAG_TEMPLATE = """
    根据下面的题目，回答学生的问题
    {context}
    学生问题：
    {question}
    """
    regist_docloader(user)
    context_str = get_text_from_file(path, docloaders[user])
    print(f"Context for question: {context_str}")
    # get_text_from_file returns "" on unexpected loader errors and a truthy
    # {"error": ...} dict on recognizable failures — both mean "no usable
    # context". This is a generator, so the message must be *yielded*: a
    # plain `return value` would only set StopIteration.value, which the
    # consumer iterating this generator never sees.
    if not context_str or isinstance(context_str, dict):
        yield "未能识别到任何文本内容，请检查文件格式或内容。"
        return
    llm.acceptQuery(
        RAG_TEMPLATE.format(context=context_str, question=question),
        user_config["LLM"]["API_PORT"],
        model=user_config["LLM"]["MODEL_NAME"],
    )
    yield context_str
    # Stream the model's answer; each chunk is a JSON message envelope.
    for chunk in llm.stream():
        yield json.loads(chunk)["message"]["content"]
