"""LLM服务模块"""

from __future__ import annotations

import os
from typing import AsyncGenerator, Dict, List

from config.settings import config
from config.prompts.system import prompt_manager


async def stream_chat_completion(messages: List[Dict], deep_research: bool) -> AsyncGenerator[str, None]:
    """Stream chat-completion text chunks from the configured LLM provider.

    Args:
        messages: Conversation history; each item is a dict read via
            ``role`` (default ``"user"``) and ``content`` (default ``""``).
        deep_research: Selects the system prompt variant via ``prompt_manager``.

    Yields:
        Incremental text fragments as the provider produces them. For the
        qwen provider, API errors are yielded inline as ``[错误: ...]``
        strings instead of raising.
    """
    provider = config.llm.provider
    system_prompt = prompt_manager.get_system_prompt(deep_research)

    # Normalize messages: optional system prompt first, then the history
    # with missing role/content fields defaulted.
    normalized: List[Dict[str, str]] = []
    if system_prompt:
        normalized.append({"role": "system", "content": system_prompt})
    for m in messages:
        normalized.append({
            "role": m.get("role", "user"),
            "content": m.get("content", ""),
        })

    if provider == "qwen":
        import dashscope
        from dashscope import Generation

        dashscope.api_key = config.llm.dashscope_api_key

        # Keep only the roles the qwen API accepts; anything else is dropped
        # (same whitelist the original if/elif chain implemented).
        qwen_messages = [
            {"role": msg["role"], "content": msg["content"]}
            for msg in normalized
            if msg["role"] in ("system", "user", "assistant")
        ]

        # NOTE(review): Generation.call returns a synchronous blocking
        # iterator, so this coroutine stalls the event loop between chunks.
        # dashscope ships no stable async client; if this loop is shared
        # with other tasks, consider draining the iterator in a thread.
        responses = Generation.call(
            model=config.llm.qwen_model,
            messages=qwen_messages,
            result_format='message',
            stream=True,
            temperature=0.2,
            max_tokens=1024,
        )

        # Non-incremental streaming: each response carries the full
        # accumulated text, so the new delta is the suffix past last_content.
        last_content = ""
        for response in responses:
            if response.status_code != 200:
                # Surface the provider error inline rather than raising.
                yield f"[错误: {response.message}]"
                continue
            choices = getattr(response.output, 'choices', None)
            if not choices:
                continue
            message = getattr(choices[0], 'message', None)
            current_content = getattr(message, 'content', None)
            if not current_content or current_content == last_content:
                continue
            if current_content.startswith(last_content):
                delta = current_content[len(last_content):]
                if delta:
                    yield delta
            else:
                # Bug fix: the accumulated text no longer extends what we
                # already emitted (e.g. the service rewrote the content).
                # The original silently dropped such chunks while still
                # advancing last_content; emit the fresh content instead.
                yield current_content
            last_content = current_content

    elif provider == "anthropic":
        import anthropic

        # Bug fix: the original used the synchronous Anthropic client and a
        # blocking for-loop, freezing the event loop for the whole stream.
        # AsyncAnthropic exposes the same messages.stream API as an async
        # context manager with async iteration.
        client = anthropic.AsyncAnthropic(api_key=config.llm.anthropic_api_key)
        async with client.messages.stream(
            model=config.llm.anthropic_model,
            max_tokens=1024,
            # Anthropic takes the system prompt via the dedicated `system`
            # argument, so system-role entries are filtered out here.
            messages=[
                {"role": m["role"], "content": m["content"]}
                for m in normalized
                if m["role"] != "system"
            ],
            system=system_prompt or None,
        ) as stream:
            async for event in stream:
                if event.type == "content_block_delta":
                    text = getattr(event.delta, "text", "")
                    if text:
                        yield text
    else:
        # Default provider: OpenAI, using the native async client.
        from openai import AsyncOpenAI

        client = AsyncOpenAI(api_key=config.llm.openai_api_key)
        stream = await client.chat.completions.create(
            model=config.llm.openai_model,
            messages=normalized,
            temperature=0.2,
            stream=True,
        )
        async for chunk in stream:
            delta = chunk.choices[0].delta
            if delta and delta.content:
                yield delta.content
