from functools import lru_cache
from typing import List, Literal

from jinja2 import Template
from langchain_chroma import Chroma
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.runnables import RunnableConfig
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langgraph.types import Command

from HomeBuddyAgent.utils.prompts import node_generate_prompt_device_call, prompt_for_feedback
from basic_executor.utils.state import State
from common.common_utils import get_model
from common.configuration import Configuration
from common.structs import DeviceModelFactory, DeviceCall, DeviceResult, DeviceCalls


@lru_cache(maxsize=4)
def _rag_loder():
    """Build and memoize the Chroma vector store used for retrieval.

    Opens the persisted collection ``vector_collection_for_agent`` with an
    OpenAI embedding function.  ``lru_cache`` ensures the store and the
    embeddings client are constructed only once per process.

    Returns:
        Chroma: the vector store backed by the on-disk collection.

    NOTE(review): the persist directory is a hard-coded absolute Windows
    path from a dev machine — move it into configuration.
    """
    persist_directory = r"D:\DevelopFiles\pycharms\Command_parser_langgraph\my_agent\ChromaDB\test"

    embeddings = OpenAIEmbeddings()

    vector_store = Chroma(
        collection_name="vector_collection_for_agent",
        embedding_function=embeddings,
        persist_directory=persist_directory,
    )
    return vector_store


def should_continue(state):
    """Route on the last message: "continue" when it carries tool calls,
    otherwise "end"."""
    last = state["messages"][-1]
    return "continue" if last.tool_calls else "end"


def retrieve(state: State):
    """Fetch the four documents most similar to the user's question.

    Returns a partial state update: retrieved documents under ``context``
    and ``tool_using`` reset to False.
    """
    store = _rag_loder()
    docs = store.similarity_search(state["question"], k=4)
    return {"context": docs, "tool_using": False}


def generate(state: State, config: RunnableConfig) -> Command[Literal["action", "__end__"]]:
    """Either plan structured device calls or write the final user answer.

    Two-phase node:

    * First pass (``feed_back`` falsy): builds a dynamic structured-output
      schema from the device configs, asks the model for device calls, and
      routes to the "action" node.
    * Second pass (``feed_back`` truthy, i.e. after device execution):
      summarizes the device results for the user and routes to ``__end__``,
      resetting ``feed_back``.

    Args:
        state: Current graph state (question, messages, device configs,
            feedback flag, device call results).
        config: Runnable config carrying the ``Configuration`` values.

    Returns:
        Command: state update plus routing to "action" or "__end__".
    """
    configurable = Configuration.from_runnable_config(config)
    docs_content = state["device_configs"]
    messages = state["messages"]
    print(state["feed_back"])
    if not state["feed_back"]:
        print("---调用工具---")
        # NOTE(review): model is hard-coded here instead of coming from
        # `configurable` (see the commented-out get_model call below).
        model = ChatOpenAI(
            model='gpt-4.1-nano',
            disable_streaming=True
        )
        # model = get_model(
        #     model_provider=configurable.structured_output_provider,
        #     model_name=configurable.structured_output_model
        # )
        # Build a pydantic union type covering all configured devices so the
        # model's structured output is constrained to valid device calls.
        factory = DeviceModelFactory()
        factory.generate_all(state['device_configs'])
        ConfigUnion = factory.get_union_type()
        DeviceCallsDynamic = DeviceCalls[ConfigUnion]
        # Rename the generic alias so the schema sent to the model has a
        # stable, readable title.
        DeviceCallsDynamic.__name__ = "DeviceCallsDynamic"
        print(f"{DeviceCallsDynamic.__name__}+++++++++++++++++++++++++++++++++++++++++++++++++")
        model = model.with_structured_output(DeviceCallsDynamic,
                                             )
        # autoescape=False: the prompt is plain text, not HTML.
        template = Template(node_generate_prompt_device_call, autoescape=False)
        query = template.render(
            {"question": state["question"], "device_configs": [docs_content],
             "additional_info": " "}
        )
        print(state["question"])
        print(query)
        response = model.invoke(messages + [HumanMessage(content=query)])
        # print("Raw response:", response["raw"])
        # print("Parsing error:", response["parsing_error"])
        return Command(
            update={
                "device_calls": response,
                "messages": [AIMessage(content=f"正在调用设备：{response}....")]
            },
            goto="action"
        )

    else:
        # Feedback phase: use the configured writer model to phrase the
        # device execution results as an answer.
        model = get_model(
            model_provider=configurable.writer_provider,
            model_name=configurable.writer_model
        )
        print("===========反馈=============")
        template = Template(prompt_for_feedback, autoescape=False)
        query = template.render(
            {"question": [state["question"]],
             "device_configs": [docs_content],
             # "additional_info": state["additional_info"],
             "device_call_result": state["device_call_results"]
             }
        )
        # response = model.invoke(messages + [SystemMessage(content=query)])
        # The rendered prompt is sent as a system message ahead of the history.
        response = model.invoke([SystemMessage(content=query)] + messages)
        return Command(
            update={
                "answer": response.content,
                "messages": messages + [SystemMessage(content=query)] + [response],
                "feed_back": False
            },
            goto="__end__"
        )


# def generate(state: State, config: RunnableConfig):
#     print("generate")
#     configurable = Configuration.from_runnable_config(config)
#     model = get_model(
#         model_provider=configurable.tool_call_provider,
#         model_name=configurable.tool_call_model
#     ).bind_tools(tools)
#     docs_content = "\n\n".join(doc.page_content for doc in state["device_configs"])
#     messages = state["messages"]
#     if not state["tool_using"]:
#         messages = messages + prompt_for_tool.invoke(
#             {"question": [state["question"]], "context": [docs_content]}).to_messages()
#         # print("messages",messages)
#     response = model.invoke(messages)
#     # llm.with_structured_output(
#     #
#     # )
#     # print(response)
#     return {
#         "answer": response.content,
#         "device_configs": [],
#         "messages": messages + [response]
#     }


def _process_device_call(device_commands: List[DeviceCall]) -> DeviceResult:
    """Simulate execution of the given device commands.

    Args:
        device_commands: Device operation commands, each carrying device
            information and parameters.

    Returns:
        DeviceResult: an always-successful result covering every command.
    """
    print("==================设备模拟执行===================")
    print(device_commands)
    return DeviceResult(
        success=True,
        message="设备都已成功执行",
        data={},
    )


import uuid
import asyncio
import httpx
import json
from redis.asyncio import Redis
from langchain_core.messages import AIMessage

# NOTE: reuse a single Redis client instead of creating one per call.
REDIS_URL = "redis://localhost"

# Module-level shared async client used by wait_for_device_result.
# SECURITY(review): hard-coded password — move into env/config.
redis = Redis(host="localhost", port=6379, decode_responses=True,password="2168")

async def wait_for_device_result(call_id: str, timeout: int = 60):
    """Poll Redis for a device execution result.

    Checks ``device_result:{call_id}`` roughly once a second; when a value
    appears it is deleted from Redis and returned JSON-decoded.

    Args:
        call_id: Unique call ID of the device request.
        timeout: Maximum time to wait, in seconds.

    Returns:
        The JSON-decoded result payload pushed by the frontend.

    Raises:
        asyncio.TimeoutError: if no result arrives within ``timeout`` seconds.
    """
    key = f"device_result:{call_id}"
    # Use a monotonic deadline so slow Redis round-trips cannot stretch the
    # effective wait beyond `timeout` seconds (the old per-iteration counter
    # ignored the time spent inside redis.get()).
    loop = asyncio.get_running_loop()
    deadline = loop.time() + timeout
    while loop.time() < deadline:
        try:
            result = await redis.get(key)
            if result:
                await redis.delete(key)
                return json.loads(result)
        except Exception as e:
            # A single transient Redis error should not abort the whole wait
            # (previously it fell straight through to the timeout error);
            # log it and keep polling until the deadline.
            print(f"[Redis] 等待设备结果异常: {e}")
        await asyncio.sleep(1)
    raise asyncio.TimeoutError(f"等待设备响应超时：call_id={call_id}")


async def device_call(state: dict):
    """Dispatch the parsed device calls to the frontend and collect results.

    Pushes the device-call payload over HTTP to the push endpoint, then
    waits on Redis for the frontend's execution result.  Returns a partial
    state update: ``feed_back`` is True with the results on success, False
    with an error message on failure or timeout.
    """
    print("====================device call===============")
    calls = state['device_calls'].device_calls

    # Guard: nothing to dispatch.
    if not calls:
        result = {"success": False, "message": "未检测到有效的设备调用信息", "data": {}}
        return {
            "feed_back": False,
            "messages": [AIMessage(content=f"设备调用失败：{result}")]
        }

    call_id = str(uuid.uuid4())

    # Pydantic models are serialized; plain dicts pass through unchanged.
    serialized = []
    for call in calls:
        serialized.append(call.model_dump() if hasattr(call, 'model_dump') else call)

    payload = {
        "type": "device_call",
        "call_id": call_id,
        "device_calls": serialized,
    }

    try:
        print(f"[device_call] 推送设备调用: call_id={call_id}")
        async with httpx.AsyncClient() as client:
            resp = await client.post("http://127.0.0.1:8080/api/push-device-call", json=payload)
            resp.raise_for_status()

        print(f"[device_call] 已推送，开始等待返回结果 call_id={call_id}")
        result = await wait_for_device_result(call_id)
        print(f"[device_call] 返回结果: {result}")
        return {
            "feed_back": True,
            "device_call_results": [
                AIMessage(content=f"设备调用成功，执行结果：{json.dumps(result, ensure_ascii=False)}")
            ]
        }

    except httpx.RequestError as e:
        return {
            "feed_back": False,
            "messages": [AIMessage(content=f"设备调用发送失败：{e}")],
        }

    except asyncio.TimeoutError:
        return {
            "feed_back": False,
            "messages": [AIMessage(content="设备操作超时，未收到前端响应")]
        }



# async def device_call(state: State):
#     """
#     工具描述：根据用户的自然语言命令控制智能家居设备，并返回操作结果供生成反馈。
#     LangGraph 流程执行到这里时，发送设备指令到前端，并等待其返回执行结果。
#     """
#     print("====================device call===============")
#     device_calls = state['device_calls'].device_calls
#     if not device_calls:
#         result = DeviceResult(success=False, message="未检测到有效的设备调用信息", data={})
#         return {
#             "feed_back": False,
#             "messages": [AIMessage(content=f"设备调用失败：{result}")]
#         }
#
#     # 生成唯一标识符
#     call_id = str(uuid.uuid4())
#
#     # # 创建 Future 用于阻塞等待响应
#     # future = asyncio.get_event_loop().create_future()
#     # device_response_futures[call_id] = future
#
#     device_calls_dicts = [dc.model_dump() if hasattr(dc, 'model_dump') else dc for dc in device_calls]
#
#     payload = {
#         "type": "device_call",
#         "call_id": call_id,
#         "device_calls": device_calls_dicts
#     }
#
#     try:
#         print(f"[device_call] 正在向前端推送设备指令，call_id={call_id}")
#         print("当前事件循环Langgraph:", asyncio.get_running_loop())
#         #发送 HTTP 请求到 FastAPI 的推送接口
#         async with httpx.AsyncClient() as client:
#             response = await client.post("http://127.0.0.1:8080/api/push-device-call", json=payload)
#             print("发完了")
#             response.raise_for_status()  # 抛出非 2xx 错误
#
#         print(f"[device_call] 已通过 HTTP 下发设备指令到前端，call_id={call_id}")
#
#         return {
#             "feed_back": True,
#             "device_call_results": [AIMessage(content=f"设备调用成功：{device_calls_dicts}")]
#         }
#         # # 等待前端响应（最多 60 秒）
#         # result = await asyncio.wait_for(future, timeout=60.0)
#         # return {
#         #     "feed_back": True,
#         #     "device_call_results": result
#         # }
#
#     except httpx.RequestError as e:
#         return {
#             "feed_back": False,
#             "messages": [AIMessage(content=f"设备调用发送失败：{e}")],
#         }
#
#     except asyncio.TimeoutError:
#         return {
#             "feed_back": False,
#             "messages": [AIMessage(content=f"设备操作超时，未收到前端响应")]
#         }
#
#     # finally:
#     #     await device_response_futures.pop(call_id, None)

# tool_node = ToolNode(tools)




