#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author  : lei.wang
from ast import main
import asyncio
import dashscope
from http import HTTPStatus

from sqlalchemy.util import await_fallback

from app import config
from app.core.pkg.log import logger


# Configure the DashScope SDK with the API key from application config.
# NOTE(review): the config key is spelled 'DASHBOGO_API_KEY' — presumably a
# typo for 'DASHSCOPE_API_KEY'; confirm against the llmModel config section
# before renaming (the key string is load-bearing, not cosmetic).
dashscope.api_key = config.get('llmModel').get('DASHBOGO_API_KEY')


class TongYiChatCompletion:
    """Thin wrapper around the DashScope (TongYi) text-generation API.

    Offers a blocking completion call, a streaming variant that yields
    incremental fragments through an async generator, and a helper that
    splits a combined prompt into a system/user message pair.
    """

    def __init__(self):
        # Model identifier forwarded to dashscope.Generation.call.
        self.model = 'deepseek-r1'

    def completion(self, messages):
        """Call the model synchronously and return the reply text.

        Args:
            messages: chat messages in DashScope format
                ([{'role': ..., 'content': ...}, ...]).

        Returns:
            The reply content on success. On an API error or exception a
            human-readable error string is returned instead — callers
            cannot distinguish the two by type (NOTE(review): consider
            raising here instead of returning the error text).
        """
        try:
            response = dashscope.Generation.call(
                model=self.model,
                messages=messages,
                stream=False,
                incremental_output=False
            )
            if response.status_code == HTTPStatus.OK:
                return response.output.choices[0]['message']['content']
            # Non-OK status: log and surface the provider's diagnostics.
            error_msg = (f"请求ID: {response.request_id}, 状态码: {response.status_code}, "
                         f"错误码: {response.code}, 错误信息: {response.message}")
            logger.error(f"ask_birthdate 大模型接口返回错误: {error_msg}")
            return error_msg
        except Exception as e:
            logger.error(e)
            return str(e)

    def completion_stream(self, messages):
        """Stream the model reply as an async generator of text fragments.

        Args:
            messages: chat messages in DashScope format.

        Returns:
            An async generator that yields incremental reply fragments;
            on error it yields a single error string and stops.
        """
        try:
            # DashScope streaming API returns a *blocking* sync iterator.
            stream_response = dashscope.Generation.call(
                model=self.model,
                messages=messages,
                stream=True,
                incremental_output=True
            )

            async def async_generator():
                # Sentinel marking iterator exhaustion (next() default).
                _done = object()
                iterator = iter(stream_response)
                loop = asyncio.get_running_loop()
                while True:
                    # next() blocks on network I/O — run it in a worker
                    # thread so the event loop is not stalled per chunk.
                    # (The original iterated the sync stream directly
                    # inside the coroutine, blocking the whole loop.)
                    chunk = await loop.run_in_executor(None, next, iterator, _done)
                    if chunk is _done:
                        break
                    if chunk.status_code == HTTPStatus.OK:
                        if hasattr(chunk, 'output') and chunk.output.choices:
                            delta = chunk.output.choices[0].get('message', {}).get('content', '')
                            if delta:
                                yield delta
                    else:
                        # Error chunk: log, surface it to the consumer, stop.
                        error_msg = (f"请求ID: {chunk.request_id}, 状态码: {chunk.status_code}, "
                                    f"错误码: {chunk.code}, 错误信息: {chunk.message}")
                        logger.error(f"流式响应错误: {error_msg}")
                        yield f"获取回答出错: {error_msg}"
                        break

                logger.info("流式响应完成")

            return async_generator()
        except Exception as e:
            logger.error(f"流式响应出错: {str(e)}")

            # Surface the failure through the same async-generator interface.
            async def error_generator():
                yield f"获取回答时出错: {str(e)}"

            return error_generator()

    @staticmethod
    def build_system_promqt(prompt: str) -> list:
        """Split a combined prompt into a system/user chat-message pair.

        Text before the first "【用户提问】：" marker becomes the system
        message; everything after it becomes the user message. Split once
        (maxsplit=1) so a marker appearing again inside the user question
        no longer truncates it — the original unbounded split dropped
        everything past a second occurrence, and also split the string
        twice.

        Raises:
            IndexError: if the marker is absent (behaviour unchanged).
        """
        parts = prompt.split("【用户提问】：", 1)
        return [
            {'role': "system", 'content': parts[0]},
            {"role": "user", "content": parts[1]},
        ]
