import json
import uuid

from llms.zhipu import stream_chat, chat
from logger.log_config import logger
from mapper.conversation_history import save_conversation_history, get_conversation_history_by_thread_id
from mapper.thread import get_thread_by_id
from prompts.role_play_prompt import ROLE_PLAY_USER_PROMPT, ROLE_PLAY_SYSTEM_PROMPT
from prompts.state_machine_prompt_v2 import SM_SYSTEM_PROMPT, SM_INIT_USER_PROMPT

MEETING_END = "[结束面试]"
CANDIDATE_INIT_RESPONSE = '候选人暂未开始回答'


def generate_stream(thread_id, incoming_message):
    """Drive one round of the interview state machine and stream the
    role-play reply as SSE-style chunks.

    Parameters:
        thread_id: identifier of the interview thread; used to load the
            thread record (JD, questions, resume summary) and its history.
        incoming_message: the candidate's latest answer; effectively unused
            on the very first round (no history exists yet).

    Yields:
        str: either ``"data: MEETING_END\\n\\n"`` when the interviewer
        signals the interview is over, or JSON-serialized streaming chunks
        from the role-play model (via process_interview_round).
    """
    thread = get_thread_by_id(thread_id)
    resume_summary = thread.resume_summary
    system_prompt = SM_SYSTEM_PROMPT.format(
        jd_detail=thread.job_description,
        reference_questions=thread.questions,
    )
    request_id = str(uuid.uuid4())
    # Load prior turns; NOTE only these roles are replayed to the model,
    # so anything saved under another role is invisible to later rounds.
    messages = get_conversation_history_by_thread_id(
        thread_id, include_roles=['system', 'user', 'assistant'])

    if not messages:
        # First round: seed the state machine with system + init prompts.
        sm_first_user_prompt = SM_INIT_USER_PROMPT.format(resume_summary=resume_summary)
        message_list = [
            {'role': 'system', 'content': system_prompt},
            {'role': 'user', 'content': sm_first_user_prompt},
        ]

        interviewer_output = chat(message_list).strip()
        save_conversation_history(thread_id, 'system', system_prompt, request_id)
        save_conversation_history(thread_id, 'user', sm_first_user_prompt, request_id)
        # FIX: this was previously saved under role 'interviewer', which is
        # excluded by include_roles above, so the interviewer's first turn
        # never reached subsequent rounds. Save as 'assistant' to match the
        # non-first-round branch below.
        save_conversation_history(thread_id, 'assistant', interviewer_output, request_id)

        candidate_response = CANDIDATE_INIT_RESPONSE
    else:
        # Subsequent round: replay history and append the candidate's answer.
        system_history = [{'role': msg.role, 'content': msg.message} for msg in messages]
        system_history.append({'role': 'user', 'content': incoming_message})
        save_conversation_history(thread_id, 'user', incoming_message, request_id)

        interviewer_output = chat(system_history).strip()
        save_conversation_history(thread_id, 'assistant', interviewer_output, request_id)

        candidate_response = incoming_message

    # Shared tail for both branches: stop on the end-of-interview marker,
    # otherwise hand off to the role-play streaming round.
    if MEETING_END in interviewer_output:
        yield "data: MEETING_END\n\n"
        return

    role_play_prompt = ROLE_PLAY_USER_PROMPT.format(
        resume_summary=resume_summary,
        interviewer_output=interviewer_output,
        candidate_response=candidate_response,
    )
    yield from process_interview_round(thread_id, ROLE_PLAY_SYSTEM_PROMPT, role_play_prompt, request_id)


def process_interview_round(thread_id, system_prompt, role_play_prompt, request_id):
    """Stream the role-play model's reply chunk by chunk, then persist the
    accumulated full reply to the conversation history.

    Parameters:
        thread_id: thread the saved transcript entry is attached to.
        system_prompt: system message for the role-play model.
        role_play_prompt: user message (the formatted role-play prompt).
        request_id: correlation id stored alongside the history row.

    Yields:
        str: each streaming chunk serialized as JSON (model_dump_json).
    """
    pieces = []
    for chat_chunk in stream_chat(
            [{'role': 'system', 'content': system_prompt},
             {'role': 'user', 'content': role_play_prompt}]):
        content = chat_chunk.choices[0].delta.content
        # FIX: guard against None deltas (e.g. role-only or final chunks in
        # OpenAI-style streams) — concatenating None would raise TypeError.
        if content:
            pieces.append(content)
        # Forward the complete chunk JSON to the caller as-is.
        yield chat_chunk.model_dump_json(exclude_unset=False)
    # Save the complete accumulated response once streaming finishes.
    save_conversation_history(thread_id, 'role_play', "".join(pieces), request_id)
