#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author  : lei.wang
import asyncio
from volcenginesdkarkruntime import Ark
from app import config
from app.core.pkg.log import logger


class VolcenginesChatCompletion:
    """Thin wrapper around the Volcengine Ark chat-completions client.

    Provides a blocking `completion` call, an async-generator streaming
    variant `completion_stream`, and a prompt-splitting helper.
    """

    def __init__(self):
        # Endpoint id; swap to switch models:
        # Doubao-Seed-1.6 (ep-20250623140024-wvt7k), DeepSeek-R1 (ep-20250206165256-8fqqx)
        self.model = "ep-20250623140024-wvt7k"
        self.client = Ark(
            api_key=config.get('llmModel').get('HuoShan_API_KEY'),
            timeout=120,
            max_retries=2
        )

    def completion(self, messages):
        """Run a single blocking chat completion.

        Args:
            messages: list of {'role': ..., 'content': ...} dicts.

        Returns:
            The assistant message content (str) of the first choice.
        """
        response = self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            thinking={
                "type": "enabled"
            }
        )
        return response.choices[0].message.content

    def completion_stream(self, messages):
        """Stream the model reply as an async generator.

        Args:
            messages: list of chat messages.

        Returns:
            An async generator yielding reply fragments (str). On failure it
            yields a single human-readable error message instead of raising.

        NOTE(review): the underlying SDK stream is synchronous, so iterating
        it inside the async generator blocks the event loop while waiting for
        each chunk — consider asyncio.to_thread if that matters to callers.
        """
        try:
            # Kick off the streaming request eagerly so connection/auth errors
            # are caught here rather than surfacing mid-iteration.
            stream_response = self.client.chat.completions.create(
                model=self.model,
                messages=messages,
                thinking={
                    "type": "enabled"
                },
                stream=True,
            )
        except Exception as e:
            # BUGFIX: capture the message NOW. The `except ... as e` name is
            # deleted when this clause exits, so a closure referencing `e`
            # would raise NameError when the generator is iterated later.
            err_msg = str(e)
            logger.error(f"流式响应出错: {err_msg}")

            async def error_generator():
                # Yield a single user-facing error fragment.
                yield f"获取回答时出错: {err_msg}"

            return error_generator()

        async def async_generator():
            # BUGFIX: errors raised while consuming the stream happen inside
            # this lazily-executed generator, so they must be handled here —
            # the outer try/except has already returned by the time we run.
            try:
                for chunk in stream_response:
                    if hasattr(chunk, 'choices') and chunk.choices:
                        delta = chunk.choices[0].delta
                        # Skip keep-alive / role-only chunks with no content.
                        if hasattr(delta, 'content') and delta.content:
                            yield delta.content
                logger.info("流式响应完成")
            except Exception as stream_err:
                logger.error(f"流式响应出错: {stream_err}")
                yield f"获取回答时出错: {stream_err}"

        return async_generator()

    @staticmethod
    def build_system_promqt(prompt: str) -> list:
        """Split a combined prompt on the '【用户提问】：' marker.

        Args:
            prompt: text of the form '<system part>【用户提问】：<user part>'.

        Returns:
            A two-message list: the system message (text before the marker)
            and the user message (text between the first and second marker
            occurrence, matching the original split semantics).

        Raises:
            IndexError: if the marker is absent from `prompt`.
        """
        parts = prompt.split("【用户提问】：")
        return [
            {'role': "system", 'content': parts[0]},
            {"role": "user", "content": parts[1]},
        ]

    # Correctly-spelled alias; `build_system_promqt` is kept for existing callers.
    build_system_prompt = build_system_promqt
