from pocketflow import AsyncNode 
from typing import List, AsyncIterator
from scorpio.core.schemas import Message, ModelResponse, MessageChunk
from scorpio.core.common import OutputAdaptor, get_logger
from scorpio.core.providers import LLMClient

logger = get_logger(__name__)


class AnswerAction(AsyncNode):
    """Flow node that produces the final answer with a single (non-streaming) LLM call."""

    async def prep_async(self, shared):
        """Pull the question and execution context out of the shared store."""
        return shared.get("question"), shared.get("context")

    async def exec_async(self, inputs):
        """to-do: call the LLM to generate a final answer.

        Sends the accumulated message history to the LLM and formats the
        model's thinking + content into the final answer string.
        """
        question, context = inputs
        history: List[Message] = context.get("messages")
        logger.info(f"shared node params: {self.params}")
        print(f"✍️ Crafting final answer...")

        llm: LLMClient = context.get("llm_client")
        model_response: ModelResponse = await llm.client.response(
            messages=history,
            tools=None,
        )
        reply: Message = model_response.message
        # Assemble the formatted answer from the model's reply.
        return f"""
# CONTEXT
Based on the following information, answer the question.
Question: {question}
Research: List MCPs...

## YOUR ANSWER:
Provide a comprehensive answer using the research results.
- **Thinking**

{reply.thinking}

- **Content**

{reply.text}
"""

    async def post_async(self, shared, prep_res, exec_res):
        """Persist the generated answer and terminate the flow."""
        shared["answer"] = exec_res
        logger.info(f"✅ Answer generated successfully")
        # "done" ends the flow — no follow-up action is dispatched.
        return "done"

class StreamAnswerAction(AsyncNode):
    """Flow node that produces the final answer with a streaming LLM call.

    Chunks are drained through the context's OutputAdaptor (which both emits
    them and accumulates the full response); the accumulated response is then
    formatted into the final answer string.
    """

    async def prep_async(self, shared):
        """Get the question and context for answering."""
        return shared.get("question"), shared.get("context")

    async def exec_async(self, inputs):
        """to-do: call the LLM to generate a final answer."""
        question, context = inputs
        messages: List[Message] = context.get("messages")
        logger.info(f"shared node params: {self.params}")
        print(f"✍️ Crafting final answer...")
        adaptor: OutputAdaptor = context.get("adaptor")
        cli: LLMClient = context.get("llm_client")
        _response: AsyncIterator[MessageChunk] = await cli.client.response(
            messages=messages,
            tools=None,
            stream=True
        )
        # BUG FIX: `n` was used without initialization, raising
        # UnboundLocalError on the first non-empty chunk.
        n = 0
        async for chunk in adaptor.output(_response):
            # Process each content chunk; draining the iterator lets the
            # adaptor accumulate the full response.
            if chunk:
                n += 1
        logger.info(f"received {n} chunks; OutputAdaptor status is {adaptor.status}")
        response: ModelResponse = adaptor.response
        message = response.message
        # Create a prompt for the LLM to answer the question
        answer = f"""
# CONTEXT
Based on the following information, answer the question.
Question: {question}
Research: List MCPs...

## YOUR ANSWER:
Provide a comprehensive answer using the research results.
- **Thinking**

{message.thinking}

- **Content**

{message.text}
"""
        return answer

    async def post_async(self, shared, prep_res, exec_res):
        """Save the final answer and complete the flow."""
        # Save the answer in the shared store
        shared["answer"] = exec_res
        logger.info(f"✅ Answer generated successfully")
        # We're done - no need to continue the flow
        return "done"