import os

from collections.abc import AsyncIterable
from typing import Any, Literal

import httpx

from langchain_core.messages import AIMessage, ToolMessage,AIMessageChunk
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import create_react_agent
from pydantic import BaseModel


# Process-wide in-memory checkpointer shared by the agent graph so that
# conversation state persists across `stream` calls (keyed by thread_id).
memory = MemorySaver()


@tool
def get_exchange_rate(
    currency_from: str = 'USD',
    currency_to: str = 'EUR',
    currency_date: str = 'latest',
):
    """Use this to get current exchange rate.

    Args:
        currency_from: The currency to convert from (e.g., "USD").
        currency_to: The currency to convert to (e.g., "EUR").
        currency_date: The date for the exchange rate or "latest". Defaults to
            "latest".

    Returns:
        A dictionary containing the exchange rate data, or an error message if
        the request fails.
    """
    try:
        response = httpx.get(
            f'https://api.frankfurter.app/{currency_date}',
            params={'from': currency_from, 'to': currency_to},
            # Explicit timeout: a hung upstream call would otherwise stall the
            # agent loop for the full (implicit) transport default.
            timeout=10.0,
        )
        response.raise_for_status()

        data = response.json()
        # Frankfurter responses always carry a 'rates' mapping; anything else
        # is treated as a malformed payload rather than propagated upstream.
        if 'rates' not in data:
            return {'error': 'Invalid API response format.'}
        return data
    except httpx.HTTPError as e:
        # Covers transport errors, timeouts, and non-2xx statuses.
        return {'error': f'API request failed: {e}'}
    except ValueError:
        # response.json() raises ValueError on non-JSON bodies.
        return {'error': 'Invalid JSON response from API.'}


class ResponseFormat(BaseModel):
    """Respond to the user in this format."""

    # Tri-state progress flag the caller inspects to decide whether to ask
    # the user for more input, report success, or surface an error.
    status: Literal['input_required', 'completed', 'error'] = 'input_required'
    # Natural-language message to relay to the user.
    message: str


class ProcessAgent:
    """ProcessAgent - a specialized assistant for process-governance queries.

    Wraps a LangGraph ReAct agent around an OpenAI-compatible chat model and
    streams incremental answer chunks through :meth:`stream`. The system
    prompt (in Chinese) restricts the agent to questions about the process
    governance platform.
    """

    SYSTEM_INSTRUCTION = (
        '你是一位专门处理流程治理平台相关知识的助手。'
        '如果用户询问的内容不是流程治理平台相关问题，'
        '请礼貌地说明你无法提供该方面的帮助，仅能协助处理与流程治理平台相关的查询。'
        '如果用户询问的内容是流程治理平台相关问题，'
        '请提供相关信息。'
        '请勿尝试回答无关问题，也不要将工具用于其他目的。'
    )

    # Instruction for structured output (currently unused: response_format is
    # disabled below). Sentences are separated explicitly so the concatenated
    # prompt does not run words together.
    FORMAT_INSTRUCTION = (
        'Set response status to input_required if the user needs to provide more information to complete the request. '
        'Set response status to error if there is an error while processing the request. '
        'Set response status to completed if the request is complete.'
    )

    # Media types this agent can accept/produce.
    SUPPORTED_CONTENT_TYPES = ['text', 'text/plain']

    def __init__(self):
        """Build the chat model and the ReAct agent graph.

        Credentials and endpoints are read from the environment:
        - TOOL_LLM_NAME: model name (defaults to 'glm-4-flash-250414')
        - API_KEY: API key for the OpenAI-compatible endpoint
        - TOOL_LLM_URL: base URL (defaults to the ZhipuAI open platform)
        """
        # SECURITY: the API key was previously hard-coded here. It is now
        # taken from the environment; the old leaked key must be rotated.
        self.model = ChatOpenAI(
            model=os.getenv('TOOL_LLM_NAME', 'glm-4-flash-250414'),
            openai_api_key=os.getenv('API_KEY', 'EMPTY'),
            openai_api_base=os.getenv(
                'TOOL_LLM_URL', 'https://open.bigmodel.cn/api/paas/v4'
            ),
            temperature=0,
        )

        # No tools wired up yet; the agent answers from the model alone.
        self.tools = []

        self.graph = create_react_agent(
            self.model,
            tools=self.tools,
            checkpointer=memory,
            prompt=self.SYSTEM_INSTRUCTION,
            # response_format=(self.FORMAT_INSTRUCTION, ResponseFormat),
        )

    async def stream(self, query, context_id) -> AsyncIterable[dict[str, Any]]:
        """Stream the agent's answer for *query* as status dictionaries.

        Args:
            query: The user's message text.
            context_id: Conversation id used as the checkpointer thread_id so
                multi-turn state is preserved.

        Yields:
            Dicts with keys 'is_task_complete', 'require_user_input' and
            'content'. Intermediate model chunks and tool notifications are
            yielded with is_task_complete=False; a final
            'Task completed.' marker is yielded once the graph finishes.
        """
        inputs = {'messages': [('user', query)]}
        config = {'configurable': {'thread_id': context_id}}

        async for message, _meta in self.graph.astream(
            inputs, config, stream_mode='messages'
        ):
            # AIMessageChunk: incremental model output; forward the text.
            if isinstance(message, AIMessageChunk):
                yield {
                    'is_task_complete': False,
                    'require_user_input': False,
                    'content': message.content,
                }
            # ToolMessage: a tool ran; emit a progress notice instead of the
            # raw tool payload.
            elif isinstance(message, ToolMessage):
                yield {
                    'is_task_complete': False,
                    'require_user_input': False,
                    'content': 'Looking up the exchange rates...',
                }

        # Graph finished streaming: signal completion to the caller.
        yield {
            'is_task_complete': True,
            'require_user_input': False,
            'content': 'Task completed.',
        }
