import json
import re
from webui.views.llm_serve.llm_serve import get_context_with_doctype, llm
from tool.config.config import get_config

# Module-wide settings loaded once at import time; the LLM API port and
# model name are read from this below (keys: config["LLM"]["API_PORT"],
# config["LLM"]["MODEL_NAME"]).
config = get_config("config.toml")

def extract_outside_think_regex(s):
    """Return *s* with every ``<think>...</think>`` section removed.

    ``re.S`` lets a think block span multiple lines (``.`` matches
    newlines); leading/trailing whitespace of the result is stripped.
    """
    return re.sub(r"<think>.*?</think>", "", s, flags=re.S).strip()

def _stream_response(prompt: str) -> str:
    """Send *prompt* to the shared LLM and collect the streamed reply.

    Each streamed chunk is a JSON string shaped like
    ``{"message": {"content": ...}}``; the content pieces are echoed to
    stdout as they arrive and returned joined into one string.
    """
    llm.acceptQuery(prompt, config["LLM"]["API_PORT"], model=config["LLM"]["MODEL_NAME"])
    pieces = []
    for chunk in llm.stream():
        content = json.loads(chunk)["message"]["content"]
        print(content, end="", flush=True)
        pieces.append(content)
    # join once instead of quadratic += accumulation
    return "".join(pieces)


def question_gen(user: str, question_num: int = 0) -> str:
    """Generate one formatted English exam question for *user*.

    Two LLM passes: first draft a question from the user's
    "wrongQuestion" context documents, then reformat that draft as JSON.

    Args:
        user: user identifier passed to the context lookup.
        question_num: file id forwarded to the context lookup (default 0).

    Returns:
        The formatted question text with any <think>...</think>
        reasoning sections stripped out.
    """
    RAG_TEMPLATE = """上下文信息如下：{context}
上下文中提供了一些题目，请根据这些题目，生成一道英语试题，格式为:('title':title,'options':('A':xxx,'B':xxx,...),'answer':answer)"""
    context_str = get_context_with_doctype(user, "", doctype="wrongQuestion", file_id=question_num)
    print(f"Context for question generation: {context_str}")

    # Pass 1: draft a question from the retrieved context.
    simple_answer = _stream_response(RAG_TEMPLATE.format(context=context_str))

    FORMAT_TEMPLATE = """题目：{simple_answer};
请将题目格式化为以下格式,以json的格式进行输出。
格式：{{'title':title,'options':{{'A':xxx,'B':xxx,...}},'answer':answer}}"""
    # Pass 2: reformat the draft (with think-sections stripped) as JSON.
    question = _stream_response(
        FORMAT_TEMPLATE.format(simple_answer=extract_outside_think_regex(simple_answer))
    )
    return extract_outside_think_regex(question)

def listening_gen(user: str):
    """Stream an English listening question built from *user*'s word context.

    Looks up the user's "words" context documents and streams the LLM's
    response chunk by chunk (each chunk also echoed to stdout). If no
    word context is found, yields a single error message and stops.
    """
    RAG_TEMPLATE = """
    请根据上下文中单词信息，生成一道英语听力题
    上下文信息如下：
    <context>
    {context}
    </context>
    """
    word_context = get_context_with_doctype(user, "", doctype="words")
    if not word_context:
        yield "未能识别到任何单词内容，请检查文件格式或内容。"
        return
    llm.acceptQuery(
        RAG_TEMPLATE.format(context=word_context),
        config["LLM"]["API_PORT"],
        model=config["LLM"]["MODEL_NAME"],
    )
    for raw in llm.stream():
        piece = json.loads(raw)["message"]["content"]
        print(piece, end="", flush=True)
        yield piece







