from pocketflow import AsyncNode
from typing import List, AsyncIterator
from .mcp import MCPClient
from scorpio.core.providers import LLMClient, LLMProvider
from scorpio.core.schemas import Message, ModelResponse, MessageChunk
from scorpio.core.common import (
    OutputAdaptor,
    get_logger
)
from pprint import pformat

# Module-level logger, obtained via the project-wide get_logger helper.
logger = get_logger(__name__)

class PlanAction(AsyncNode):
    """
    Plan step: call the LLM with the MCP tool list and classify the reply
    into a routing tag for the Pocketflow graph:
        - "zero"    : LLM answered directly (no tool calls) -> Done
        - "single"  : LLM requested a tool -> Plan -> MCP iterations until
                      no tool is returned or the iteration budget is spent
        - "limit"   : iteration budget exhausted -> Answer
        - "multiple": reserved for multi-tool responses (not yet emitted)
    """
    async def prep_async(self, shared):
        """Collect the MCP tool list and the shared context for exec_async."""
        context = shared.get("context")
        cli: MCPClient = context.get("mcp_client")
        tools = cli.tools
        # to-do: improve prompt? Maybe in MCPAction
        return tools, context

    async def exec_async(self, inputs):
        """Run one LLM planning call and append its message to the history.

        When the iteration budget is already down to 1, skip the LLM call and
        return the last message so post_async routes to "limit".
        """
        tools, context = inputs
        messages: List[Message] = context.get("messages")
        if context["max_iteration"] == 1:
            return messages[-1]
        context["max_iteration"] -= 1
        # Single quotes inside the f-string keep this valid on Python < 3.12
        # (reusing the outer quote needs PEP 701, i.e. 3.12+).
        logger.info(f"max iteration: {context['max_iteration']}")
        cli: LLMClient = context.get("llm_client")
        response: ModelResponse = await cli.client.response(
            messages=messages,
            tools=tools
        )
        # Stash the raw response so downstream nodes can inspect it.
        context["_model_response"] = response
        logger.info(f"{response.provider}/{response.message.model}: execute LLM query successfully with created_time {response.created_at} ")
        messages.append(response.message)
        return response.message

    async def post_async(self, shared, prep_res, message: Message):
        """Map the LLM message to a routing status; clean up MCP on exit paths."""
        context = shared.get("context")
        mcp_client: MCPClient = context["mcp_client"]
        status = "limit"
        if context["max_iteration"] == 1:
            # Budget exhausted: release MCP resources before leaving the loop.
            await mcp_client.cleanup()
            logger.info(f"📚 Meet the max iteration and turn to Pocketflow node: Answer with status: {status}")
            return status
        status = "zero"
        # Truthiness covers both None and an empty list; previously an empty
        # tool_calls list exited with "zero" WITHOUT awaiting cleanup (leak).
        if message.tool_calls:
            status = "single"
            logger.info(f"📚 LLM response message with tool call and register status: {status}")
        else:
            logger.info(f"📚 Answer directly and turn to Pocketflow node: Done with status: {status}")
            await mcp_client.cleanup()
        return status

class StreamPlanAction(AsyncNode):
    """Streaming variant of PlanAction: the LLM reply is consumed chunk by
    chunk through the OutputAdaptor, which accumulates the final message.
    Routing statuses mirror PlanAction: "zero" / "single" / "limit".
    """
    async def prep_async(self, shared):
        """Collect the MCP tool list and the shared context for exec_async."""
        context = shared.get("context")
        cli: MCPClient = context.get("mcp_client")
        tools = cli.tools
        # to-do: improve prompt? Maybe in MCPAction
        return tools, context

    async def exec_async(self, inputs):
        """Stream one LLM planning call and append the assembled message."""
        tools, context = inputs
        messages: List[Message] = context.get("messages")
        adaptor: OutputAdaptor = context.get("adaptor")
        # Mirror PlanAction: skip the call when only one iteration remains
        # and decrement the budget otherwise. The original never decremented
        # max_iteration, so post_async's "limit" exit could never trigger
        # for budgets > 1.
        if context["max_iteration"] == 1:
            return messages[-1]
        context["max_iteration"] -= 1
        # Single quotes inside the f-string keep this valid on Python < 3.12.
        logger.info(f"max iteration: {context['max_iteration']}")
        cli: LLMClient = context.get("llm_client")
        n = 0
        if cli.provider in (LLMProvider.OLLAMA, LLMProvider.OPENAI):
            _response: AsyncIterator[MessageChunk] = await cli.client.response(
                messages=messages,
                tools=tools,
                stream=True
            )
            async for chunk in adaptor.output(_response):
                # Count non-empty chunks; the adaptor accumulates the content.
                if chunk:
                    n += 1
            logger.info(f"OutputAdaptor status is {adaptor.status}")
            response: ModelResponse = adaptor.response
            logger.info(f"{response.provider}/{response.message.model} response created at {response.created_at} with message:\n{pformat(response.message)} ")
            messages.append(response.message)
        return messages[-1]

    async def post_async(self, shared, prep_res, message: Message):
        """Map the streamed LLM message to a routing status; clean up on exit."""
        context = shared.get("context")
        mcp_client: MCPClient = context["mcp_client"]
        status = "limit"
        if context["max_iteration"] == 1:
            await mcp_client.cleanup()
            logger.info(f"Reach max iteration and turn to Answer with status: {status}")
            return status
        status = "zero"
        # Truthiness covers both None and an empty list; previously an empty
        # tool_calls list exited with "zero" WITHOUT awaiting cleanup (leak).
        if message.tool_calls:
            status = "single"
            logger.info(f"LLM streamable  response message with tool calls and register status: {status}")
        else:
            logger.info(f"Answer directly and turn to Done with status: {status}")
            await mcp_client.cleanup()
        return status
