# Standard library
import asyncio
import json
import time
import uuid
from typing import Dict, List, Optional

# Third-party (aiohttp imported correctly, per the original note)
import aiohttp
import redis
from aiobreaker import CircuitBreaker, CircuitBreakerError
from loguru import logger
from nacos import NacosClient
from prometheus_client import CollectorRegistry, Counter, Gauge, Histogram, start_http_server
from pydantic import BaseModel, Field
from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_exponential


# -------------------------- 1. Monitoring metric definitions (custom registry) --------------------------
# A dedicated registry so these metrics can be exposed on their own endpoint
# (start_http_server(..., registry=CUSTOM_REGISTRY) in __main__) without
# polluting prometheus_client's default global registry.
CUSTOM_REGISTRY = CollectorRegistry()

# Flows started, partitioned by flow type.
# NOTE(review): FLOW_TOTAL/FLOW_SUCCESS/FLOW_FAILED/FLOW_DURATION are not
# incremented anywhere in this chunk — presumably used by orchestration code
# elsewhere in the project; verify before removing.
FLOW_TOTAL = Counter(
    "distributed_flow_total",
    "Total number of distributed agent flows",
    ["flow_type"],
    registry=CUSTOM_REGISTRY
)

# Flows that completed successfully.
FLOW_SUCCESS = Counter(
    "distributed_flow_success",
    "Number of successful distributed agent flows",
    ["flow_type"],
    registry=CUSTOM_REGISTRY
)
# Flows that failed, partitioned by error type for alerting granularity.
FLOW_FAILED = Counter(
    "distributed_flow_failed",
    "Number of failed distributed agent flows",
    ["flow_type", "error_type"],
    registry=CUSTOM_REGISTRY
)

# End-to-end flow latency histogram (default buckets).
FLOW_DURATION = Histogram(
    "distributed_flow_duration_seconds",
    "Duration of distributed agent flows",
    ["flow_type"],
    registry=CUSTOM_REGISTRY
)

# Incremented by RedisDistributedState.get_lock when SET NX loses the race.
LOCK_COMPETITION = Counter(
    "distributed_lock_competition",
    "Number of distributed lock competition events",
    ["resource"],
    registry=CUSTOM_REGISTRY
)

# Set by NacosServiceDiscovery: 1 when at least one healthy instance exists.
AGENT_HEALTH = Gauge(
    "agent_service_health",
    "Health status of agent services (1=healthy, 0=unhealthy)",
    ["agent_name"],
    registry=CUSTOM_REGISTRY
)


# -------------------------- 2. Structured state model --------------------------
class FlowState(BaseModel):
    """Serializable state of one distributed agent flow.

    Persisted to Redis as JSON by RedisDistributedState; optional fields have
    defaults so older persisted payloads still deserialize via from_dict().
    """

    flow_id: str
    trace_id: str
    user_query: str
    selected_agents: List[str] = Field(default_factory=list)
    agent_results: Dict[str, str] = Field(default_factory=dict)
    # Bug fix: the original `= time.time()` was evaluated ONCE at
    # class-definition time, so every instance defaulted to the module-import
    # timestamp. default_factory evaluates per instance instead.
    created_at: float = Field(default_factory=time.time)
    updated_at: float = Field(default_factory=time.time)
    final_result: str = ""
    multimodal_task: Optional[Dict] = None
    multimodal_result: str = ""
    popularity_score: float = 0.0
    safety_check_pass: bool = False
    safety_feedback: str = ""

    def dict(self, *args, **kwargs):
        """Serialize to a plain dict, coercing timestamps to float for JSON."""
        data = super().dict(*args, **kwargs)
        data["created_at"] = float(self.created_at)
        data["updated_at"] = float(self.updated_at)
        return data

    @classmethod
    def from_dict(cls, data: Dict):
        """Rebuild a FlowState from a persisted dict; missing keys get defaults."""
        return cls(
            flow_id=data["flow_id"],
            trace_id=data["trace_id"],
            user_query=data["user_query"],
            selected_agents=data.get("selected_agents", []),
            agent_results=data.get("agent_results", {}),
            created_at=data.get("created_at", time.time()),
            updated_at=data.get("updated_at", time.time()),
            final_result=data.get("final_result", ""),
            multimodal_task=data.get("multimodal_task"),
            multimodal_result=data.get("multimodal_result", ""),
            popularity_score=data.get("popularity_score", 0.0),
            safety_check_pass=data.get("safety_check_pass", False),
            safety_feedback=data.get("safety_feedback", "")
        )


# -------------------------- 3. Redis distributed state + lock --------------------------
class RedisDistributedState:
    """Redis-backed distributed flow state store with a simple distributed lock.

    States are stored as JSON under ``distributed_flow:<flow_id>`` with a TTL;
    locks use the SET NX PX pattern with a unique owner token per holder.
    """

    def __init__(self, redis_host: str = "127.0.0.1", redis_port: int = 6379):
        # decode_responses=True so get() returns str instead of bytes.
        self.redis = redis.Redis(host=redis_host, port=redis_port, decode_responses=True)
        self.prefix = "distributed_flow:"
        self.lock_prefix = "lock:"
        self.state_ttl = 3600  # seconds a saved state survives without updates
        self.lock_renew_interval = 3  # seconds between background lock renewals

    def generate_flow_id(self) -> str:
        """Return a new unique flow id: ``flow_`` + 16 hex chars."""
        return f"flow_{uuid.uuid4().hex[:16]}"

    def generate_trace_id(self) -> str:
        """Return a new unique trace id: ``trace_`` + 24 hex chars."""
        return f"trace_{uuid.uuid4().hex[:24]}"

    def save_state(self, flow_id: str, state: "FlowState") -> bool:
        """Persist *state* under *flow_id* with the configured TTL.

        Returns True on success, False on any Redis/serialization error.
        """
        try:
            state_key = f"{self.prefix}{flow_id}"
            state.updated_at = time.time()
            # One call with ex= sets value and TTL atomically; the original
            # separate set()+expire() left a window where the key had no expiry.
            self.redis.set(state_key, json.dumps(state.dict()), ex=self.state_ttl)
            logger.debug(f"[Redis] 保存状态成功，flow_id={flow_id}")
            return True
        except Exception as e:
            logger.error(f"[Redis] 保存状态失败，flow_id={flow_id}，err={str(e)}")
            return False

    def get_state(self, flow_id: str) -> Optional["FlowState"]:
        """Load the state for *flow_id*; None when missing or on error."""
        try:
            state_key = f"{self.prefix}{flow_id}"
            state_str = self.redis.get(state_key)
            if not state_str:
                logger.warning(f"[Redis] 状态不存在，flow_id={flow_id}")
                return None
            return FlowState.from_dict(json.loads(state_str))
        except Exception as e:
            logger.error(f"[Redis] 获取状态失败，flow_id={flow_id}，err={str(e)}")
            return None

    def get_lock(self, flow_id: str, timeout: int = 10) -> Optional["RedisLock"]:
        """Try to acquire the distributed lock for *flow_id*.

        Returns a RedisLock on success (caller must release()), or None when
        another holder owns the lock (a competition metric is recorded).
        """
        lock_key = f"{self.lock_prefix}{self.prefix}{flow_id}"
        lock_value = str(uuid.uuid4())  # unique token so only the owner can release
        if self.redis.set(lock_key, lock_value, nx=True, px=timeout * 1000):
            logger.debug(f"[Redis锁] 获取成功，flow_id={flow_id}，timeout={timeout}s")
            # Bug fix: RedisLock.__init__ requires `timeout`; the original
            # omitted it (commented out), so every successful acquisition
            # raised TypeError.
            return RedisLock(
                redis_client=self.redis,
                lock_key=lock_key,
                lock_value=lock_value,
                renew_interval=self.lock_renew_interval,
                timeout=timeout
            )
        LOCK_COMPETITION.labels(resource=f"flow_{flow_id}").inc()
        logger.warning(f"[Redis锁] 获取失败（竞争），flow_id={flow_id}")
        return None


class RedisLock:
    """Owner-token Redis lock with asynchronous auto-renewal.

    Both Lua scripts compare the stored value against this holder's token, so
    a lock that expired and was re-acquired by another holder is never renewed
    or deleted by the old holder.
    """

    # Extend the TTL only while we still own the lock.
    # (Hoisted to class constants: the original rebuilt the script string on
    # every renewal iteration.)
    _RENEW_SCRIPT = """
    if redis.call('get', KEYS[1]) == ARGV[1] then
        return redis.call('pexpire', KEYS[1], ARGV[2])
    else
        return 0
    end
    """

    # Delete the key only while we still own the lock.
    _UNLOCK_SCRIPT = """
    if redis.call('get', KEYS[1]) == ARGV[1] then
        return redis.call('del', KEYS[1])
    else
        return 0
    end
    """

    def __init__(self, redis_client, lock_key: str, lock_value: str, renew_interval: int, timeout: int):
        self.redis = redis_client
        self.lock_key = lock_key
        self.lock_value = lock_value  # unique owner token
        self.renew_interval = renew_interval  # seconds between renewals
        self.timeout = timeout  # lock TTL (seconds), re-applied on each renewal
        self.renew_task = None  # asyncio.Task created by acquire()
        self.is_released = False

    async def _renew_lock(self):
        """Periodically re-extend the lock TTL until released or lost."""
        while not self.is_released:
            try:
                result = self.redis.eval(
                    self._RENEW_SCRIPT, 1, self.lock_key, self.lock_value, self.timeout * 1000
                )
                if result == 0:
                    logger.warning(f"[Redis锁续约] 锁已丢失，key={self.lock_key}")
                    break
                logger.debug(f"[Redis锁续约] 成功，key={self.lock_key}")
                await asyncio.sleep(self.renew_interval)
            except Exception as e:
                logger.error(f"[Redis锁续约] 失败，key={self.lock_key}，err={str(e)}")
                await asyncio.sleep(1)  # brief back-off before retrying

    def acquire(self):
        """Start the background renewal task (requires a running event loop)."""
        self.renew_task = asyncio.create_task(self._renew_lock())

    def release(self):
        """Stop renewal and delete the lock if this holder still owns it. Idempotent."""
        if self.is_released:
            return
        try:
            self.is_released = True
            if self.renew_task:
                self.renew_task.cancel()
            result = self.redis.eval(self._UNLOCK_SCRIPT, 1, self.lock_key, self.lock_value)
            if result == 1:
                logger.debug(f"[Redis锁] 释放成功，key={self.lock_key}")
            else:
                logger.warning(f"[Redis锁] 释放失败（锁已丢失），key={self.lock_key}")
        except Exception as e:
            logger.error(f"[Redis锁] 释放异常，key={self.lock_key}，err={str(e)}")

    def __del__(self):
        # Best-effort safety net; guarded so a half-constructed instance or
        # interpreter shutdown cannot raise from the finalizer (the original
        # could raise AttributeError if __init__ had failed).
        try:
            self.release()
        except Exception:
            pass


# -------------------------- 4. Nacos service discovery --------------------------
class NacosServiceDiscovery:
    """Nacos-based service discovery with a short-lived local instance cache."""

    def __init__(self, server_addresses: str = "192.168.150.101:8848", namespace: str = "public"):
        self.nacos = NacosClient(server_addresses=server_addresses, namespace=namespace)
        self.instance_cache = {}  # service_name -> (fetched_at, raw instance list)
        self.cache_ttl = 30  # seconds a cached instance list stays valid

    def _healthy_of(self, service_name: str, instances: List[Dict]) -> List[Dict]:
        """Filter healthy instances and publish the health gauge (1=some healthy).

        Extracted: the original duplicated this filter+gauge logic in both the
        cache-hit and fresh-fetch paths.
        """
        healthy = [ins for ins in instances if ins["healthy"]]
        AGENT_HEALTH.labels(agent_name=service_name).set(1 if healthy else 0)
        return healthy

    def get_healthy_instances(self, service_name: str) -> List[Dict]:
        """Return healthy instances for *service_name*, using the cache when fresh.

        On any Nacos error the service is marked unhealthy and [] is returned.
        """
        try:
            now = time.time()
            cached = self.instance_cache.get(service_name)
            if cached:
                cache_time, instances = cached
                if now - cache_time < self.cache_ttl:
                    return self._healthy_of(service_name, instances)

            instances = self.nacos.list_naming_instance(service_name)["hosts"]
            self.instance_cache[service_name] = (now, instances)
            healthy_instances = self._healthy_of(service_name, instances)
            logger.debug(f"[Nacos] 拉取实例成功，service={service_name}，健康实例数={len(healthy_instances)}")
            return healthy_instances
        except Exception as e:
            AGENT_HEALTH.labels(agent_name=service_name).set(0)
            logger.error(f"[Nacos] 拉取实例失败，service={service_name}，err={str(e)}")
            return []

    def choose_instance(self, service_name: str) -> Optional[Dict]:
        """Pick one healthy instance, sticky within a 10-second time window.

        NOTE(review): hash() of a str is randomized per process
        (PYTHONHASHSEED), so different processes pick different instances in
        the same window — acceptable for spreading load, but not a stable
        consistent hash; confirm this is intended.
        """
        instances = self.get_healthy_instances(service_name)
        if not instances:
            return None
        hash_key = f"{service_name}_{time.time() // 10}"
        return instances[hash(hash_key) % len(instances)]


# -------------------------- 5. Agent invoker (adapted to the legacy aiobreaker API) --------------------------
class AgentInvoker:
    """Calls remote agent services over HTTP with retry + circuit-breaker protection.

    Instance discovery goes through Nacos; each supported agent service has its
    own CircuitBreaker so one failing service cannot trip the others. Failures
    are returned as human-readable strings rather than raised to the caller.
    """

    def __init__(self, http_client: aiohttp.ClientSession, nacos_discovery: NacosServiceDiscovery):
        self.http_client = http_client
        self.nacos = nacos_discovery
        # Legacy aiobreaker: only fail_max (and timeout) are supported and
        # there is no async context-manager API — hence breaker.call() below.
        self.breaker_map = {
            "rag_agent_service": CircuitBreaker(fail_max=5),
            "mcp_agent_service": CircuitBreaker(fail_max=5)
        }

    # Retries only transport-level failures (timeout / client errors); an HTTP
    # error status raises plain Exception, which tenacity does NOT retry.
    @retry(
        stop=stop_after_attempt(2),
        wait=wait_exponential(multiplier=1, min=1, max=3),
        retry=retry_if_exception_type((asyncio.TimeoutError, aiohttp.ClientError)),
        reraise=True
    )
    async def _call_agent_http(self, agent_url: str, flow_id: str, trace_id: str, query: str) -> str:
        """POST the query to *agent_url* and return the agent's result string.

        Raises on non-200 status or transport errors (logged before re-raise).
        NOTE(review): passing a bare number as `timeout=` is deprecated in
        newer aiohttp in favor of aiohttp.ClientTimeout — confirm the pinned
        aiohttp version accepts it.
        """
        try:
            async with self.http_client.post(
                    agent_url,
                    json={"flow_id": flow_id, "query": query, "trace_id": trace_id},
                    timeout=10
            ) as resp:
                # Read the raw response text first (tolerates non-JSON bodies).
                resp_text = await resp.text()

                # Non-200 statuses become errors (not retried, see decorator).
                if resp.status != 200:
                    raise Exception(f"HTTP状态异常：{resp.status}，响应内容：{resp_text}")

                # Try JSON first (the normal, structured response shape).
                try:
                    resp_data = json.loads(resp_text)
                    return resp_data.get("data", "无返回结果")
                except json.JSONDecodeError:
                    # 200 with a non-JSON body: treat the text as the result.
                    return resp_text

        except Exception as e:
            logger.error(f"[Agent HTTP调用] 失败，url={agent_url}，err={str(e)}")
            raise

    # The breaker-protected logic lives in its own coroutine so it can be
    # handed to breaker.call() as a unit.
    async def _invoke_with_circuit(self, agent_name: str, flow_id: str, trace_id: str, query: str) -> str:
        """Resolve an instance via Nacos and perform the HTTP call (circuit-wrapped)."""
        instance = self.nacos.choose_instance(agent_name)
        if not instance:
            raise Exception("无健康实例可用")

        # e.g. "rag_agent_service" -> path segment "rag".
        agent_type = agent_name.split("_")[0]
        agent_url = f"http://{instance['ip']}:{instance['port']}/agent/{agent_type}/invoke"
        result = await self._call_agent_http(agent_url, flow_id, trace_id, query)
        logger.info(f"[Agent调用] 成功，agent={agent_name}，flow_id={flow_id}")
        return result

    async def invoke_agent(self, agent_name: str, flow_id: str, trace_id: str, query: str) -> str:
        """Invoke *agent_name* under its circuit breaker; never raises.

        Returns the agent result on success, or a descriptive error string on
        unknown agent, open circuit, or any invocation failure.
        """
        breaker = self.breaker_map.get(agent_name, None)
        if not breaker:
            return f"不支持的Agent：{agent_name}"

        try:
            # Legacy-aiobreaker-compatible call style: wrap the coroutine
            # function and its args with breaker.call().
            return await breaker.call(
                self._invoke_with_circuit,
                agent_name, flow_id, trace_id, query
            )
        except CircuitBreakerError:
            logger.warning(f"[熔断器] Agent[{agent_name}]已熔断")
            return f"Agent[{agent_name}]已熔断，暂时无法调用"
        except Exception as e:
            logger.error(f"[Agent调用] 失败，agent={agent_name}，err={str(e)}")
            return f"Agent[{agent_name}]调用失败：{str(e)}"


# -------------------------- Main entry point --------------------------
if __name__ == "__main__":
    # Expose the custom metrics registry on :8000 for Prometheus scraping.
    start_http_server(8000, registry=CUSTOM_REGISTRY)
    logger.info("监控服务已启动：http://localhost:8000/metrics")

    async def main():
        # Wire up dependencies: Nacos discovery + Redis-backed flow state.
        nacos_discovery = NacosServiceDiscovery(server_addresses="192.168.150.101:8848", namespace="public")
        redis_state = RedisDistributedState()
        flow_id = redis_state.generate_flow_id()
        trace_id = redis_state.generate_trace_id()
        user_query = "什么是Redis Cluster？"

        logger.info(f"开始执行流程：flow_id={flow_id}，trace_id={trace_id}，query={user_query}")

        # One shared ClientSession for all agent calls, closed on exit.
        async with aiohttp.ClientSession() as http_client:
            agent_invoker = AgentInvoker(http_client=http_client, nacos_discovery=nacos_discovery)
            rag_result = await agent_invoker.invoke_agent(
                agent_name="rag_agent_service",
                flow_id=flow_id,
                trace_id=trace_id,
                query=user_query
            )
            print(f"\n=== 流程执行完成 ===")
            print(f"flow_id: {flow_id}")
            print(f"RAG Agent结果: {rag_result}")

    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        logger.info("流程被用户中断")
    except Exception as e:
        logger.error(f"主流程执行失败：{str(e)}", exc_info=True)
        print(f"\n错误信息：{str(e)}")