from typing import List, Dict, Any, AsyncGenerator, AsyncIterator
from scorpio.core.providers import LLMClient

from scorpio.core.common import  ModelParameter, get_logger
from scorpio.core.schemas import Message, MessageChunk
import json
logger = get_logger(__name__)

class GenerationService:
    """Generate answers to user questions from retrieved context via an LLM.

    Wraps an ``LLMClient`` built from ``model_param`` and exposes both a
    blocking (``generate_answer``) and a streaming (``generate_answer_stream``)
    generation path.
    """

    def __init__(self, model_param: ModelParameter):
        """Initialize the underlying LLM client.

        Args:
            model_param: Provider/model configuration. ``model_param.provider``
                selects the backend; the whole object configures the client.
        """
        self.provider = model_param.provider
        self.model_param = model_param
        llm_client = LLMClient(
            provider=model_param.provider, model_param=model_param)
        self.client = llm_client.client

    def _build_user_prompt(self, question: str, context: Any) -> List[Message]:
        """Build the system + user message pair sent to the model.

        TODO: move the prompt text into a centrally managed template.

        Args:
            question: The user's question; sent as the ``user`` message.
            context: Retrieved context, serialized to JSON and embedded in the
                system prompt. Annotated ``Any`` because the visible call sites
                pass a ``str`` (``retrieved_context``) while the body accepts
                any JSON-serializable value — the previous
                ``List[Dict[str, Any]]`` annotation matched neither.

        Returns:
            A two-element list: a ``system`` message carrying the context and
            a ``user`` message carrying the question.
        """
        context_text = json.dumps(context)
        prompt = f"""你是一个智能助手，请根据提供的上下文信息回答用户的问题。
        如果上下文信息不足以回答问题，请如实告知用户，不要编造信息。
        请用中文回答，保持回答的专业性和准确性。

        上下文信息：
        ```json
        {context_text}
        ```
        """
        # Lazy %-formatting: the string is only built if INFO is enabled.
        logger.info("### prompt:\n %s\n", prompt)
        return [
            Message(
                provider=self.provider,
                model=self.model_param.model,
                role="system",
                content=prompt,
            ),
            Message(
                provider=self.provider,
                model=self.model_param.model,
                role="user",
                content=question,
            ),
        ]

    async def generate_answer(
        self,
        question: str,
        retrieved_context: str,
    ) -> Dict[str, Any]:
        """Generate a complete (non-streaming) answer.

        Args:
            question: The user's question.
            retrieved_context: Retrieved context used to ground the answer.

        Returns:
            The response object returned by ``self.client.response``.
        """
        logger.info("start to generate answer for question: %s", question)
        messages = self._build_user_prompt(question, retrieved_context)
        answer = await self.client.response(
            messages=messages
        )
        return answer

    async def generate_answer_stream(
        self,
        question: str,
        retrieved_context: str,
    ) -> AsyncGenerator[str, None]:
        """Stream the answer as it is produced by the model.

        Args:
            question: The user's question.
            retrieved_context: Retrieved context used to ground the answer.

        Yields:
            Text deltas for ordinary content chunks. Chunks carrying tool
            calls, or chunks with an unexpected shape, are yielded as their
            full JSON serialization so no information is dropped. Note that a
            chunk whose delta has neither content nor tool calls yields
            nothing — presumably intentional keep-alive filtering; confirm
            against the provider's chunk schema.
        """
        logger.info("start to generate stream answer for question: %s", question)
        messages = self._build_user_prompt(question, retrieved_context)
        response: AsyncIterator[MessageChunk] = await self.client.response(
            messages=messages,
            stream=True
        )
        async for chunk in response:
            # Same truthiness chain as hasattr(...) and chunk.choice and ...,
            # collapsed via getattr defaults.
            delta = getattr(getattr(chunk, "choice", None), "delta", None)
            if delta:
                if getattr(delta, "content", None):
                    # Forward only the actual text content for streaming.
                    yield delta.content
                elif getattr(delta, "tool_calls", None):
                    # Tool-call deltas are structured; forward the full JSON.
                    yield chunk.model_dump_json()
            else:
                # Unexpected chunk shape: serialize it rather than drop it.
                yield chunk.model_dump_json()
