from llms.zhipu import chat
from mapper.conversation_history import get_conversation_history_by_thread_id
from mapper.thread import get_thread_by_id
from prompts.interview_eval_prompt import STRUCT_PROMPT, SUMMARY_MD_PROMPT
from utils.parse_utils import parse_markdown_json_content

from concurrent.futures import ThreadPoolExecutor
import json  # NOTE(review): unused here — JSON parsing is done by parse_markdown_json_content; consider removing


def interview_eval_result(thread_id: int) -> dict:
    """Produce an interview evaluation for a thread.

    Loads the thread's job description, resume summary, and conversation
    history, then runs two LLM prompts concurrently: one yielding a
    structured (JSON) evaluation and one yielding a Markdown summary.

    Args:
        thread_id: ID of the interview thread to evaluate.

    Returns:
        dict with keys:
            'struct_result': parsed JSON evaluation (output of
                parse_markdown_json_content).
            'md_result': raw Markdown evaluation text from the LLM.
    """
    thread = get_thread_by_id(thread_id)
    job_description = thread.job_description
    resume_summary = thread.resume_summary

    # Build the interview transcript. Join once instead of repeated
    # string += (which is quadratic in the number of messages).
    messages = get_conversation_history_by_thread_id(
        thread_id, include_roles=['user', 'assistant']
    )
    history_list = "".join(
        f"用户: {msg.message}\n" if msg.role == 'user' else f"面试官: {msg.message}\n"
        for msg in messages
    )

    # Task 1: structured (JSON) evaluation result.
    def generate_struct_result():
        struct_prompt = STRUCT_PROMPT.format(
            jd_detail=job_description,
            resume_summary=resume_summary,
            interview_record=history_list,
        )
        message_list = [{'role': 'user', 'content': struct_prompt}]
        struct_result = chat(message_list)
        return parse_markdown_json_content(struct_result)

    # Task 2: Markdown-formatted evaluation result.
    def generate_md_result():
        md_prompt = SUMMARY_MD_PROMPT.format(
            jd_detail=job_description,
            resume_summary=resume_summary,
            interview_record=history_list,
        )
        message_list = [{'role': 'user', 'content': md_prompt}]
        return chat(message_list)

    # Run both LLM calls concurrently — they are I/O-bound, so threads
    # overlap the network waits. Exactly two tasks, so cap the pool at 2.
    with ThreadPoolExecutor(max_workers=2) as executor:
        future_struct = executor.submit(generate_struct_result)
        future_md = executor.submit(generate_md_result)

        # .result() re-raises any exception from the worker thread.
        struct_result_json = future_struct.result()
        md_result = future_md.result()

    return {'struct_result': struct_result_json, 'md_result': md_result}
