#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author  : lei.wang
import asyncio
import json
from http import HTTPStatus
from typing import Optional

import dashscope
import pytz
from app.core.pkg.log import logger
from app.schemes.score import UserScoreUpdate
from app.services.chat_messages import ChatMessagesService
from app.services.user import UserScoreService
from app.util.function_tools import *
from app.util.guiqi import *
from app.util.model_provider.huoshan.llm import VolcenginesChatCompletion
from app.util.promqts import question_prompt, ask_birthdate_prompt
from app.util.qiangruo import *
from app.util.mingge import *
from app import config
from app.util.model_provider.model_provider_factory import llm_obj

dashscope.api_key = config.get('llmModel').get('DASHBOGO_API_KEY')


def build_question_prompt(birthdate: str, sex: str, question: str) -> Optional[str]:
    """
    Build the LLM prompt used to answer a user's question, combining the
    user's BaZi (Four Pillars) chart, five-element strengths, deities,
    luck cycles and other destiny-reading data with the current time.

    Args:
        birthdate: the user's birth date/time string, parsed by get_bazi.
        sex: the user's sex, forwarded to the chart helpers.
        question: the user's question, embedded into the prompt template.

    Returns:
        The formatted prompt string, or None when no BaZi chart can be
        derived from the given birthdate.
    """
    logger.debug(f"生成 question_prompt，birthdate={birthdate}, sex={sex}, question={question}")
    bazi, input_time = get_bazi(birthdate)
    if bazi is None:
        # Without a chart none of the downstream helpers can run.
        logger.error(f"获取八字失败，birthdate={birthdate}")
        return None
    # First element of the wuxingliliang() result was never used; discard it.
    _, wuxing_scale, wuxing_score = wuxingliliang(bazi)
    qiangruo = get_qiangruo(bazi)
    shensha = get_shensha(bazi, sex)
    dayun_data = get_dayun(birthdate, sex)
    changsheng = get_changsheng(bazi, dayun_data)
    guiqi_ = guiqi_level(bazi)
    mingge = get_mingge(bazi)
    canggan = get_canggan(bazi)
    shishen = get_shishen(bazi)
    age = get_age(birthdate)
    # Current Beijing time, plus the chart for "now" so the model can place
    # the answer in time. NOTE(review): mark=False presumably skips some
    # marking/conversion step inside get_bazi — confirm against its definition.
    beijing_tz = pytz.timezone("Asia/Shanghai")
    beijing_time = datetime.now(beijing_tz)
    current_time = get_current_time()
    now_bazi = get_bazi(beijing_time.strftime("%Y-%m-%d %H:%M:%S"), mark=False)
    prompt = question_prompt.format(
        birthdate=birthdate,
        yangli_birthdate=input_time,
        bazi=bazi,
        wuxing_scale=wuxing_scale,
        wuxing_score=wuxing_score,
        qiangruo=qiangruo,
        shensha=shensha,
        dayun_data=dayun_data,
        changsheng=changsheng,
        guiqi_=guiqi_,
        mingge=mingge,
        canggan=canggan,
        shishen=shishen,
        age=age,
        sex=sex,
        question=question,
        current_time=current_time,
        current_hour_pillar=now_bazi
    )
    logger.info(f"生成的 question_prompt 完成。")
    return prompt


def ask_birthdate(question):
    """
    Build and send a prompt asking the user to supply their birth
    date/time and sex when that information could not be found.

    The prompt template instructs the user to reply in the fixed format
    "%Y-%m-%d %H:%M:%S#sex".

    Args:
        question: the user's original question, embedded into the prompt.

    Returns:
        The model's reply text on success; otherwise a string describing
        either the API error response or the raised exception.
    """
    logger.debug(f"进入 ask_birthdate，问题：{question}")
    prompt = ask_birthdate_prompt.format(question=question)
    logger.info(f"生成 ask_birthdate 的 Prompt: {prompt}")
    try:
        response = dashscope.Generation.call(
            "deepseek-r1",
            messages=[{'role': 'user', 'content': prompt}],
            result_format='message',
            stream=False,
            incremental_output=False
        )
        # Non-OK status: report the API error details instead of an answer.
        if response.status_code != HTTPStatus.OK:
            error_msg = (f"请求ID: {response.request_id}, 状态码: {response.status_code}, "
                         f"错误码: {response.code}, 错误信息: {response.message}")
            logger.error(f"ask_birthdate 大模型接口返回错误: {error_msg}")
            return error_msg
        answer = response.output.choices[0]['message']['content']
        logger.info("ask_birthdate 调用大模型接口成功。")
        return answer
    except Exception as e:
        # Best-effort: never raise to the caller, return the error as text.
        logger.error(f"ask_birthdate 出错：{str(e)}")
        return f"ask_birthdate 出错：{str(e)}"


async def process_question(birthdate: str, sex: str, question: str, db, user_payload_id: str, stream=False) -> str:
    """
    Build the destiny-reading prompt for the user and ask the LLM for an
    answer, charging the user's score and persisting the Q/A exchange.

    Args:
        birthdate: user's birth date string, forwarded to build_question_prompt.
        sex: user's sex, forwarded to build_question_prompt.
        question: the user's question.
        db: database session passed to the chat/score services.
        user_payload_id: id of the user whose score is charged and whose
            chat history is read and written.
        stream: when False (default) return the complete answer string;
            when True return an async generator yielding answer chunks.

    Returns:
        The full answer string (stream=False) or an async generator of
        answer chunks (stream=True) — note the `-> str` annotation only
        covers the non-streaming case.

    Raises:
        Re-raises any exception from the score update; the model is not
        called if the score deduction fails.
    """
    logger.debug("进入 process_question。")
    # NOTE(review): build_question_prompt returns None when the BaZi chart
    # cannot be derived; that None is passed on here unchecked.
    prompt = build_question_prompt(birthdate, sex, question)
    logger.debug("准备调用大模型获取回答。")

    # Wrap the prompt in the provider's system-prompt message structure.
    messages = llm_obj.build_system_promqt(prompt)

    if config.get('chat_round').get('enable'):
        # Multi-turn mode: load this user's prior messages, already shaped
        # for the prompt structure.
        history_messages = await ChatMessagesService.get_chat_messages_with_promqt_structure(
            db, user_payload_id
        )
        # NOTE(review): history is appended AFTER the freshly built
        # system/current messages — confirm the provider expects that order.
        if history_messages:
            messages.extend(history_messages)

    # Deduct the per-question score BEFORE calling the model; a failed
    # deduction aborts the whole request (exception re-raised).
    ask_score = config.get('score').get('ask_question')
    try:
        await UserScoreService(db).update_user_score(UserScoreUpdate(payload_id=user_payload_id, used_score=ask_score))
        logger.info(f"用户({user_payload_id})积分更新成功，扣除了{ask_score}积分。")
    except Exception as e:
        logger.error(f"用户({user_payload_id})积分更新失败: {str(e)}")
        raise

    if stream:
        # Streaming path: start the provider's streaming call off the event
        # loop. NOTE(review): `async for ... in await ai_response_stream`
        # only works if completion_stream produces an async iterator; if it
        # returns a plain (sync) generator this raises TypeError — confirm
        # against llm_obj's implementation.
        ai_response_stream = asyncio.create_task(asyncio.to_thread(llm_obj.completion_stream, messages))
        # Accumulates the full answer so the exchange can be persisted.
        full_response = ""

        # Async generator handed to the caller: yields chunks as they
        # arrive, then saves the complete Q/A pair.
        async def response_generator():
            nonlocal full_response
            try:
                async for chunk in await ai_response_stream:
                    full_response += chunk
                    yield chunk

                # Persist the Q/A pair once the stream completes normally.
                await ChatMessagesService.create_chat(db, user_payload_id, question, full_response)
                logger.debug("process_question 流式响应完成并保存对话。")
            except Exception as e:
                # Best-effort: surface the error as a final chunk rather than
                # raising into the consumer; the chat is NOT saved on error.
                logger.error(f"流式响应处理出错: {str(e)}")
                yield f"处理出错: {str(e)}"

        return response_generator()
    else:
        # Non-streaming path: block a worker thread for the full completion.
        ai_response = await asyncio.to_thread(llm_obj.completion, messages)
        logger.debug("process_question 返回结果。")

        # Persist the user question and the model's answer.
        await ChatMessagesService.create_chat(db, user_payload_id, question, ai_response)

        return ai_response


async def async_get_ai_response_stream(prompt: str):
    """
    Run the blocking get_ai_response_stream in a worker thread so the
    asyncio event loop stays responsive while the LLM streams its answer.

    Args:
        prompt: prompt text forwarded unchanged to get_ai_response_stream.

    Returns:
        Whatever get_ai_response_stream returns (answer text or an
        error-description string).
    """
    logger.debug("进入 async_get_ai_response_stream。")
    result = await asyncio.to_thread(get_ai_response_stream, prompt)
    return result


def get_ai_response_stream(prompt):
    """
    Call the LLM (dashscope deepseek-r1) with a streaming request and
    accumulate the incremental chunks into one answer string.

    Args:
        prompt: the fully built prompt text, sent as a single user message.

    Returns:
        The concatenated response text on success, an error-description
        string if the call raises, or a fixed fallback message when the
        stream produced no content.
    """
    logger.debug(f"进入 get_ai_response_stream，Prompt: {prompt[:50]}...")
    chunks = []
    try:
        response_generator = dashscope.Generation.call(
            "deepseek-r1",
            messages=[{'role': 'user', 'content': prompt}],
            result_format='message',
            stream=True,
            incremental_output=True
        )
        for message in response_generator:
            # Skip keep-alive/malformed events that carry no output choices.
            if hasattr(message, "output") and message.output.choices:
                chunks.append(message.output.choices[0]['message']['content'])
        logger.info("get_ai_response_stream 大模型接口流式调用成功。")
    except Exception as e:
        logger.error(f"流式响应处理出错：{str(e)}")
        return f"流式响应处理出错：{str(e)}"
    # Join once instead of += per chunk (avoids quadratic concatenation).
    response_text = "".join(chunks)
    if response_text:
        logger.debug(f"get_ai_response_stream 返回结果: {response_text[:50]}...")
        return response_text
    else:
        logger.warning("get_ai_response_stream 未能获取有效响应。")
        return "未能获取有效响应"
