import math

import aiohttp
import logging
import os
from datetime import datetime, timedelta
from typing import Dict, Any, Optional

# Configuration pulled from environment variables.
# NOTE(review): the default below points at a hard-coded public IP — confirm this should ship as a default.
PROMETHEUS_URL = os.getenv("PROMETHEUS_URL", "http://123.249.68.26:9090/api/v1")
METRIC_QUERY_WINDOW = int(os.getenv("METRIC_QUERY_WINDOW", "3600"))  # query window in seconds (1 hour)
MAX_DATA_POINTS = int(os.getenv("MAX_DATA_POINTS", "100"))  # max data points per range query
MIN_STEP = int(os.getenv("MIN_STEP", "15"))  # minimum step (seconds)
MAX_STEP = int(os.getenv("MAX_STEP", "3600"))  # maximum step (1 hour)


def _calculate_optimal_step(start_ts: int, end_ts: int) -> str:
    """计算最优步长以控制数据点数量"""
    # 直接使用时间戳计算时间差
    duration = end_ts - start_ts  # 单位：秒

    # 处理负时间差或零时间差
    if duration <= 0:
        return f"{MIN_STEP}s"

    # 计算基础步长
    base_step = max(MIN_STEP, min(MAX_STEP, duration / MAX_DATA_POINTS))

    # 对齐到常见步长（15s, 30s, 1m, 5m, 15m, 1h）
    aligned_steps = [15, 30, 60, 300, 900, 1800, 3600]

    # 找到第一个大于等于基础步长的对齐步长
    for step in aligned_steps:
        if step >= base_step:
            return f"{step}s"

    # 如果没有合适的对齐步长，使用向上取整
    return f"{math.ceil(base_step)}s"


class PrometheusConnector:
    """Async connector for the Prometheus monitoring HTTP API.

    Supports instant and range PromQL queries and provides convenience
    helpers for MySQL/Redis service metrics and node (host) metrics.
    Call :meth:`connect` before querying and :meth:`close` when done.
    """

    def __init__(self, base_url: str = PROMETHEUS_URL):
        """
        Args:
            base_url: Base URL of the Prometheus HTTP API,
                e.g. ``http://host:9090/api/v1``.
        """
        # Session is created lazily in connect(); None until then.
        self.session: Optional[aiohttp.ClientSession] = None
        self.connected = False
        self.base_url = base_url
        self.logger = logging.getLogger("prometheus-connector")

    async def connect(self):
        """Open an HTTP session and verify the Prometheus server is reachable.

        Raises:
            ConnectionError: if the health-check endpoint returns non-200.
            Exception: any network/parse error raised while probing the server.
        """
        self.session = aiohttp.ClientSession()
        try:
            # Probe the build-info endpoint as a lightweight health check.
            async with self.session.get(f"{self.base_url}/status/buildinfo") as resp:
                if resp.status != 200:
                    raise ConnectionError(f"连接失败，状态码：{resp.status}")
                data = await resp.json()
                self.logger.info(f"已连接到Prometheus (版本: {data['data']['version']})")
            self.connected = True
        except Exception as e:
            self.logger.error(f"连接Prometheus失败: {e}")
            # Fix: close the just-created session so a failed connect()
            # does not leak the aiohttp connector.
            await self.session.close()
            self.session = None
            raise

    async def close(self):
        """Close the underlying HTTP session, if one is open."""
        if self.session:
            await self.session.close()
            self.session = None
            self.connected = False
            self.logger.info("Prometheus连接已关闭")

    def is_connected(self) -> bool:
        """Return True if connect() succeeded and close() has not been called."""
        return self.connected

    async def _execute_query(self, query: str, range_query: bool = False,
                             start: Optional[int] = None,
                             end: Optional[int] = None) -> Dict[str, Any]:
        """Execute a PromQL query against /query or /query_range.

        Args:
            query: PromQL expression.
            range_query: when True, issue a range query (start/end required).
            start: range start, unix timestamp in seconds.
            end: range end, unix timestamp in seconds.

        Returns:
            The decoded JSON response on success, or
            ``{"status": "error", "error": <text>}`` on a non-200 response.

        Raises:
            RuntimeError: if connect() has not been called.
            ValueError: if a range query is missing start or end.
        """
        if not self.connected:
            raise RuntimeError("未连接到Prometheus")

        endpoint = "/query_range" if range_query else "/query"
        params = {"query": query}
        self.logger.info(f"endpoint: {endpoint}: params: {params} start: {start}, end: {end}")

        if range_query:
            # Fix: compare against None so a legitimate timestamp of 0 is
            # not mistaken for a missing argument.
            if start is None or end is None:
                raise ValueError("范围查询需要start和end参数")
            # Pick a step that keeps the number of returned points bounded.
            step = _calculate_optimal_step(start, end)
            # Fix: aiohttp/yarl expects string query-parameter values.
            params.update({
                "start": str(start),
                "end": str(end),
                "step": step,
            })

        try:
            async with self.session.get(f"{self.base_url}{endpoint}", params=params) as resp:
                if resp.status != 200:
                    error = await resp.text()
                    self.logger.error(f"查询失败: {error}, 查询语句: {query}")
                    return {"status": "error", "error": error}
                return await resp.json()
        except Exception as e:
            self.logger.error(f"查询执行异常: {str(e)}, 查询语句: {query}")
            raise

    async def get_service_metrics(self, instance_id: str, service_type: str,
                                  start: Optional[int] = None,
                                  end: Optional[int] = None) -> Dict[str, Any]:
        """
        Fetch service metrics (supports multiple instance-identifier forms).

        Args:
            instance_id: instance identifier (IP:port or domain:port)
            service_type: service type ("mysql" or "redis")
            start: optional range start (unix seconds); with ``end``, a
                range query is issued instead of an instant query.
            end: optional range end (unix seconds).

        Returns:
            Mapping of metric name to the raw Prometheus query response.
        """
        metrics = {}
        # Fix: explicit None checks so a 0 timestamp still selects a range query.
        is_range = start is not None and end is not None

        # Base query templates — filtered via the instance label.
        queries = {
            "mysql": {
                "connections": 'mysql_global_status_connections{instance="$instance"}',
                "queries_rate": 'rate(mysql_global_status_questions{instance="$instance"}[1m])',
                "active_threads": 'mysql_global_status_threads_connected{instance="$instance"}',
                "slow_queries": 'mysql_global_status_slow_queries{instance="$instance"}',
                "innodb_buffer_pool_hit_ratio": '(1 - (mysql_global_status_innodb_buffer_pool_reads{instance="$instance"} / '
                                                'mysql_global_status_innodb_buffer_pool_read_requests{instance="$instance"})) * 100',
                "innodb_row_lock_time": 'mysql_global_status_innodb_row_lock_time{instance="$instance"}',
                "innodb_row_lock_waits": 'mysql_global_status_innodb_row_lock_waits{instance="$instance"}',
                "table_locks_waited": 'mysql_global_status_table_locks_waited{instance="$instance"}',
                "bytes_sent": 'rate(mysql_global_status_bytes_sent{instance="$instance"}[1m])',
                "bytes_received": 'rate(mysql_global_status_bytes_received{instance="$instance"}[1m])',
                "aborted_connects": 'mysql_global_status_aborted_connects{instance="$instance"}',
                "created_tmp_disk_tables": 'mysql_global_status_created_tmp_disk_tables{instance="$instance"}',
                "created_tmp_tables": 'mysql_global_status_created_tmp_tables{instance="$instance"}',
                "select_full_join": 'mysql_global_status_select_full_join{instance="$instance"}',
                "select_full_range_join": 'mysql_global_status_select_full_range_join{instance="$instance"}',
                "sort_merge_passes": 'mysql_global_status_sort_merge_passes{instance="$instance"}',
                "sort_range": 'mysql_global_status_sort_range{instance="$instance"}',
                "sort_rows": 'mysql_global_status_sort_rows{instance="$instance"}',
                "sort_scan": 'mysql_global_status_sort_scan{instance="$instance"}',
                # NOTE(review): this query is device-scoped, not instance-scoped —
                # confirm /dev/vdb1 is the intended disk for every instance.
                "Disk_Utilization": '(1 - node_filesystem_avail_bytes{device="/dev/vdb1"} / node_filesystem_size_bytes{device="/dev/vdb1"}) * 100'
            },
            "redis": {
                "memory_used": 'redis_memory_used_bytes{instance="$instance"} / 1024 / 1024',
                "memory_fragmentation_ratio": 'redis_memory_fragmentation_ratio{instance="$instance"}',
                "connected_clients": 'redis_connected_clients{instance="$instance"}',
                "blocked_clients": 'redis_blocked_clients{instance="$instance"}',
                "rejected_connections": 'redis_rejected_connections_total{instance="$instance"}',
                "evicted_keys": 'redis_evicted_keys_total{instance="$instance"}',
                "expired_keys": 'redis_expired_keys_total{instance="$instance"}',
                "keyspace_hits": 'redis_keyspace_hits_total{instance="$instance"}',
                "keyspace_misses": 'redis_keyspace_misses_total{instance="$instance"}',
                "instantaneous_ops_per_sec": 'redis_instantaneous_ops_per_sec{instance="$instance"}',
                "instantaneous_input_kbps": 'redis_instantaneous_input_kbps{instance="$instance"}',
                "instantaneous_output_kbps": 'redis_instantaneous_output_kbps{instance="$instance"}',
                "used_cpu_sys": 'redis_used_cpu_sys{instance="$instance"}',
                "used_cpu_user": 'redis_used_cpu_user{instance="$instance"}',
                "used_cpu_sys_children": 'redis_used_cpu_sys_children{instance="$instance"}',
                "used_cpu_user_children": 'redis_used_cpu_user_children{instance="$instance"}',
                "connected_slaves": 'redis_connected_slaves{instance="$instance"}',
                "repl_backlog_size": 'redis_repl_backlog_size{instance="$instance"}',
            }
        }

        service_queries = queries.get(service_type, {})
        for metric, query_template in service_queries.items():
            # Substitute the instance identifier into the template.
            query = query_template.replace("$instance", instance_id)
            metrics[metric] = await self._execute_query(
                query, is_range, start, end
            )

        # Derive the cache hit rate (Redis only), best-effort.
        if service_type == "redis":
            metrics["hit_rate"] = {
                "status": "success",
                "data": {
                    "resultType": "vector",
                    "result": [
                        {
                            "metric": {"instance": instance_id},
                            "value": [end, "0.0"] if end else [0, "0.0"]
                        }
                    ]
                }
            }
            try:
                hits = float(metrics["keyspace_hits"]["data"]["result"][0]["value"][1])
                misses = float(metrics["keyspace_misses"]["data"]["result"][0]["value"][1])
                hit_rate = hits / (hits + misses) * 100 if (hits + misses) > 0 else 0
                metrics["hit_rate"]["data"]["result"][0]["value"][1] = str(hit_rate)
            except (KeyError, IndexError, TypeError, ValueError):
                # Best-effort: leave the "0.0" placeholder when the responses
                # don't have the instant-vector shape (e.g. range queries or
                # error payloads).
                pass

        return metrics

    async def get_node_metrics(self, host_ip: str,
                               start: Optional[int] = None,
                               end: Optional[int] = None) -> Dict[str, Any]:
        """
        Fetch node (host) metrics (supports multiple instance-identifier forms).

        Args:
            host_ip: server IP address (matched via the host_ip label)
            start: optional range start (unix seconds); with ``end``, a
                range query is issued instead of an instant query.
            end: optional range end (unix seconds).

        Returns:
            Mapping of metric name to the raw Prometheus query response.
        """
        metrics = {}
        # Fix: explicit None checks so a 0 timestamp still selects a range query.
        is_range = start is not None and end is not None

        # host_ip is interpolated directly into the label matcher (already quoted).
        # CPU usage: 100 minus the per-instance idle percentage.
        metrics["cpu_usage"] = await self._execute_query(
            f'100 - (avg by(instance)(rate(node_cpu_seconds_total{{host_ip="{host_ip}", mode="idle"}}[1m])) * 100)',
            is_range, start, end
        )

        # Memory usage based on MemAvailable (more accurate than MemFree).
        metrics["memory_usage"] = await self._execute_query(
            f'(1 - (node_memory_MemAvailable_bytes{{host_ip="{host_ip}"}} / node_memory_MemTotal_bytes{{host_ip="{host_ip}"}})) * 100',
            is_range, start, end
        )

        # Root filesystem usage percentage.
        metrics["disk_usage_root"] = await self._execute_query(
            f'(node_filesystem_size_bytes{{host_ip="{host_ip}", mountpoint="/"}} - '
            f'node_filesystem_free_bytes{{host_ip="{host_ip}", mountpoint="/"}}) / '
            f'node_filesystem_size_bytes{{host_ip="{host_ip}", mountpoint="/"}} * 100',
            is_range, start, end
        )

        return metrics